Diffstat (limited to 'drivers/net')
718 files changed, 222501 insertions, 31215 deletions
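Most of the per-driver churn below follows a few recurring API migrations: dropping private `struct net_device_stats` copies in favour of the counters embedded in `struct net_device` (`dev->stats`), replacing byte-by-byte MAC-address printks with `DECLARE_MAC_BUF()`/`print_mac()`, removing the no-op `SET_MODULE_OWNER()`, annotating on-wire/descriptor fields with `__le16`/`__le32`, and converting polling drivers to the `napi_struct` interface. As a minimal sketch of the stats migration only, using a hypothetical driver (names such as `foo_private` and `foo_rx()` are illustrative, not taken from the patch):

```c
/* Before: each driver kept its own counters in its private struct
 * and exported them through a trivial get_stats() method. */
struct foo_private {
	struct net_device_stats stats;	/* dropped by this series */
	/* ... other driver state ... */
};

/* After: struct net_device already embeds the counters, so the
 * driver bumps dev->stats directly and can delete both the private
 * copy and its get_stats() implementation; the networking core
 * falls back to returning &dev->stats. */
static void foo_rx(struct net_device *dev, unsigned int pkt_len)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += pkt_len;
}
```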
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 4bee99ba7dbb..be71868d1513 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c @@ -174,8 +174,6 @@ struct net_device * __init el1_probe(int unit) mem_start = dev->mem_start & 7; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = el1_probe1(dev, io); } else if (io != 0) { @@ -317,7 +315,6 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr) dev->tx_timeout = &el_timeout; dev->watchdog_timeo = HZ; dev->stop = &el1_close; - dev->get_stats = &el1_get_stats; dev->set_multicast_list = &set_multicast_list; dev->ethtool_ops = &netdev_ethtool_ops; return 0; @@ -376,7 +373,7 @@ static void el_timeout(struct net_device *dev) if (el_debug) printk (KERN_DEBUG "%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n", dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; outb(TX_NORM, TX_CMD); outb(RX_NORM, RX_CMD); outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */ @@ -443,7 +440,7 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->tx_pkt_start = gp_start; lp->collisions = 0; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* * Command mode with status cleared should [in theory] @@ -590,7 +587,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) printk (KERN_DEBUG "%s: Transmit failed 16 times, Ethernet jammed?\n",dev->name); outb(AX_SYS, AX_CMD); lp->txing = 0; - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; netif_wake_queue(dev); } else if (txsr & TX_COLLISION) @@ -608,7 +605,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) outb(AX_SYS, AX_CMD); outw(lp->tx_pkt_start, GP_LOW); outb(AX_XMIT, AX_CMD); - lp->stats.collisions++; + dev->stats.collisions++; spin_unlock(&lp->lock); goto out; } @@ -617,7 +614,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) /* * It worked.. we will now fall through and receive */ - lp->stats.tx_packets++; + dev->stats.tx_packets++; if (el_debug > 6) printk(KERN_DEBUG " Tx succeeded %s\n", (txsr & TX_RDY) ? "." : "but tx is busy!"); @@ -642,10 +639,10 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) * Just reading rx_status fixes most errors. */ if (rxsr & RX_MISSED) - lp->stats.rx_missed_errors++; + dev->stats.rx_missed_errors++; else if (rxsr & RX_RUNT) { /* Handled to avoid board lock-up. 
*/ - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (el_debug > 5) printk(KERN_DEBUG " runt.\n"); } @@ -696,7 +693,6 @@ out: static void el_receive(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int pkt_len; struct sk_buff *skb; @@ -710,7 +706,7 @@ static void el_receive(struct net_device *dev) { if (el_debug) printk(KERN_DEBUG "%s: bogus packet, length=%d\n", dev->name, pkt_len); - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; return; } @@ -729,7 +725,7 @@ static void el_receive(struct net_device *dev) if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } else @@ -744,8 +740,8 @@ static void el_receive(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes+=pkt_len; } return; } @@ -813,23 +809,6 @@ static int el1_close(struct net_device *dev) } /** - * el1_get_stats: - * @dev: The card to get the statistics for - * - * In smarter devices this function is needed to pull statistics off the - * board itself. The 3c501 has no hardware statistics. We maintain them all - * so they are by definition always up to date. - * - * Returns the statistics for the card from the card private data - */ - -static struct net_device_stats *el1_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - -/** * set_multicast_list: * @dev: The device to adjust * diff --git a/drivers/net/3c501.h b/drivers/net/3c501.h index c56a2c62f7de..cfec64efff78 100644 --- a/drivers/net/3c501.h +++ b/drivers/net/3c501.h @@ -11,7 +11,6 @@ static irqreturn_t el_interrupt(int irq, void *dev_id); static void el_receive(struct net_device *dev); static void el_reset(struct net_device *dev); static int el1_close(struct net_device *dev); -static struct net_device_stats *el1_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; @@ -29,7 +28,6 @@ static int el_debug = EL_DEBUG; struct net_local { - struct net_device_stats stats; int tx_pkt_start; /* The length of the current Tx packet. */ int collisions; /* Tx collisions this packet */ int loading; /* Spot buffer load collisions */ diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c index bc7e906571d3..9c23336750e2 100644 --- a/drivers/net/3c503.c +++ b/drivers/net/3c503.c @@ -95,8 +95,6 @@ static int __init do_el2_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return el2_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -179,6 +177,7 @@ el2_probe1(struct net_device *dev, int ioaddr) int i, iobase_reg, membase_reg, saved_406, wordlength, retval; static unsigned version_printed; unsigned long vendor_id; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -228,7 +227,8 @@ el2_probe1(struct net_device *dev, int ioaddr) /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + printk("%s", print_mac(mac, dev->dev_addr)); /* Map the 8390 back into the window. 
*/ outb(ECNTRL_THIN, ioaddr + 0x406); diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index e985a85a5623..9c6573419f5a 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c @@ -169,21 +169,6 @@ static int elp_debug; /***************************************************************** * - * useful macros - * - *****************************************************************/ - -#ifndef TRUE -#define TRUE 1 -#endif - -#ifndef FALSE -#define FALSE 0 -#endif - - -/***************************************************************** - * * List of I/O-addresses we try to auto-sense * Last element MUST BE 0! *****************************************************************/ @@ -270,7 +255,7 @@ static inline void set_hsf(struct net_device *dev, int hsf) spin_unlock_irqrestore(&adapter->lock, flags); } -static int start_receive(struct net_device *, pcb_struct *); +static bool start_receive(struct net_device *, pcb_struct *); static inline void adapter_reset(struct net_device *dev) { @@ -328,28 +313,28 @@ static inline void check_3c505_dma(struct net_device *dev) } /* Primitive functions used by send_pcb() */ -static inline unsigned int send_pcb_slow(unsigned int base_addr, unsigned char byte) +static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte) { unsigned long timeout; outb_command(byte, base_addr); for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { if (inb_status(base_addr) & HCRE) - return FALSE; + return false; } printk(KERN_WARNING "3c505: send_pcb_slow timed out\n"); - return TRUE; + return true; } -static inline unsigned int send_pcb_fast(unsigned int base_addr, unsigned char byte) +static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte) { unsigned int timeout; outb_command(byte, base_addr); for (timeout = 0; timeout < 40000; timeout++) { if (inb_status(base_addr) & HCRE) - return FALSE; + return false; } printk(KERN_WARNING "3c505: send_pcb_fast timed out\n"); - return TRUE; + return true; } /* Check to see if the receiver needs restarting, and kick it if so */ @@ -386,7 +371,7 @@ static inline void prime_rx(struct net_device *dev) * timeout is reduced to 500us). 
*/ -static int send_pcb(struct net_device *dev, pcb_struct * pcb) +static bool send_pcb(struct net_device *dev, pcb_struct * pcb) { int i; unsigned long timeout; @@ -396,14 +381,14 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) check_3c505_dma(dev); if (adapter->dmaing && adapter->current_dma.direction == 0) - return FALSE; + return false; /* Avoid contention */ if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) { if (elp_debug >= 3) { printk(KERN_DEBUG "%s: send_pcb entered while threaded\n", dev->name); } - return FALSE; + return false; } /* * load each byte into the command register and @@ -435,7 +420,7 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) switch (GET_ASF(dev->base_addr)) { case ASF_PCB_ACK: adapter->send_pcb_semaphore = 0; - return TRUE; + return true; case ASF_PCB_NAK: #ifdef ELP_DEBUG @@ -453,7 +438,7 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) spin_unlock_irqrestore(&adapter->lock, flags); abort: adapter->send_pcb_semaphore = 0; - return FALSE; + return false; } @@ -470,7 +455,7 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) * *****************************************************************/ -static int receive_pcb(struct net_device *dev, pcb_struct * pcb) +static bool receive_pcb(struct net_device *dev, pcb_struct * pcb) { int i, j; int total_length; @@ -487,7 +472,7 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); - return FALSE; + return false; } pcb->command = inb_command(dev->base_addr); @@ -497,14 +482,14 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); printk(KERN_INFO "%s: status %02x\n", dev->name, stat); - return FALSE; + return false; } pcb->length = inb_command(dev->base_addr); if (pcb->length > MAX_PCB_DATA) { INVALID_PCB_MSG(pcb->length); adapter_reset(dev); - return FALSE; + return false; } /* read the data */ spin_lock_irqsave(&adapter->lock, flags); @@ -519,7 +504,7 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) spin_unlock_irqrestore(&adapter->lock, flags); if (j >= 20000) { TIMEOUT_MSG(__LINE__); - return FALSE; + return false; } /* woops, the last "data" byte was really the length! 
*/ total_length = pcb->data.raw[--i]; @@ -529,7 +514,7 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) if (elp_debug >= 2) printk(KERN_WARNING "%s: mangled PCB received\n", dev->name); set_hsf(dev, HSF_PCB_NAK); - return FALSE; + return false; } if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) { @@ -538,14 +523,14 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) set_hsf(dev, HSF_PCB_NAK); printk(KERN_WARNING "%s: PCB rejected, transfer in progress and backlog full\n", dev->name); pcb->command = 0; - return TRUE; + return true; } else { pcb->command = 0xff; } } } set_hsf(dev, HSF_PCB_ACK); - return TRUE; + return true; } /****************************************************** @@ -555,9 +540,9 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) * ******************************************************/ -static int start_receive(struct net_device *dev, pcb_struct * tx_pcb) +static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) { - int status; + bool status; elp_device *adapter = dev->priv; if (elp_debug >= 3) @@ -984,7 +969,7 @@ static int elp_open(struct net_device *dev) * ******************************************************/ -static int send_packet(struct net_device *dev, struct sk_buff *skb) +static bool send_packet(struct net_device *dev, struct sk_buff *skb) { elp_device *adapter = dev->priv; unsigned long target; @@ -998,7 +983,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) if (test_and_set_bit(0, (void *) &adapter->busy)) { if (elp_debug >= 2) printk(KERN_DEBUG "%s: transmit blocked\n", dev->name); - return FALSE; + return false; } adapter->stats.tx_bytes += nlen; @@ -1015,7 +1000,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) if (!send_pcb(dev, &adapter->tx_pcb)) { adapter->busy = 0; - return FALSE; + return false; } /* if this happens, we die */ if (test_and_set_bit(0, (void *) &adapter->dmaing)) @@ -1047,7 +1032,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) if (elp_debug >= 3) printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name); - return TRUE; + return true; } /* @@ -1401,8 +1386,7 @@ static int __init elplus_setup(struct net_device *dev) unsigned long timeout; unsigned long cookie = 0; int err = -ENODEV; - - SET_MODULE_OWNER(dev); + DECLARE_MAC_BUF(mac); /* * setup adapter structure @@ -1538,11 +1522,10 @@ static int __init elplus_setup(struct net_device *dev) /* * print remainder of startup message */ - printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, ", - dev->name, dev->base_addr, dev->irq, dev->dma); - printk("addr %02x:%02x:%02x:%02x:%02x:%02x, ", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, " + "addr %s, ", + dev->name, dev->base_addr, dev->irq, dev->dma, + print_mac(mac, dev->dev_addr)); /* * read more information from the adapter diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index eed4299dc426..964d31ac9449 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c @@ -118,7 +118,6 @@ enum commands { /* Information that need to be kept for each board. 
*/ struct net_local { - struct net_device_stats stats; int last_restart; ushort rx_head; ushort rx_tail; @@ -289,7 +288,6 @@ static int el16_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t el16_interrupt(int irq, void *dev_id); static void el16_rx(struct net_device *dev); static int el16_close(struct net_device *dev); -static struct net_device_stats *el16_get_stats(struct net_device *dev); static void el16_tx_timeout (struct net_device *dev); static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad); @@ -327,8 +325,6 @@ struct net_device * __init el16_probe(int unit) mem_start = dev->mem_start & 15; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) /* Check a single specified location. */ err = el16_probe1(dev, io); else if (io != 0) @@ -361,6 +357,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) static unsigned char init_ID_done, version_printed; int i, irq, irqval, retval; struct net_local *lp; + DECLARE_MAC_BUF(mac); if (init_ID_done == 0) { ushort lrs_state = 0xff; @@ -406,10 +403,9 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; outb(0x01, ioaddr + MISC_CTRL); - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(ioaddr + i); - printk(" %02x", dev->dev_addr[i]); - } + printk(" %s", print_mac(mac, dev->dev_addr)); if (mem_start) net_debug = mem_start & 7; @@ -457,7 +453,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) dev->open = el16_open; dev->stop = el16_close; dev->hard_start_xmit = el16_send_packet; - dev->get_stats = el16_get_stats; dev->tx_timeout = el16_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &netdev_ethtool_ops; @@ -491,7 +486,7 @@ static void el16_tx_timeout (struct net_device *dev) readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" : "network cable problem"); /* Try to restart the adaptor. */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { if (net_debug > 1) printk ("Resetting board.\n"); /* Completely reset the adaptor. */ @@ -503,7 +498,7 @@ static void el16_tx_timeout (struct net_device *dev) printk ("Kicking board.\n"); writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD); outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } dev->trans_start = jiffies; netif_wake_queue (dev); @@ -522,7 +517,7 @@ static int el16_send_packet (struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave (&lp->lock, flags); - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; /* Disable the 82586's input to the interrupt line. */ outb (0x80, ioaddr + MISC_CTRL); @@ -581,14 +576,14 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id) } /* Tx unsuccessful or some interesting status bit set. 
*/ if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) { - lp->stats.tx_errors++; - if (tx_status & 0x0600) lp->stats.tx_carrier_errors++; - if (tx_status & 0x0100) lp->stats.tx_fifo_errors++; - if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++; - if (tx_status & 0x0020) lp->stats.tx_aborted_errors++; - lp->stats.collisions += tx_status & 0xf; + dev->stats.tx_errors++; + if (tx_status & 0x0600) dev->stats.tx_carrier_errors++; + if (tx_status & 0x0100) dev->stats.tx_fifo_errors++; + if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++; + if (tx_status & 0x0020) dev->stats.tx_aborted_errors++; + dev->stats.collisions += tx_status & 0xf; } - lp->stats.tx_packets++; + dev->stats.tx_packets++; if (net_debug > 5) printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status); lp->tx_reap += TX_BUF_SIZE; @@ -667,17 +662,6 @@ static int el16_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats *el16_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - /* ToDo: decide if there are any useful statistics from the SCB. */ - - return &lp->stats; -} - /* Initialize the Rx-block list. */ static void init_rx_bufs(struct net_device *dev) { @@ -854,12 +838,12 @@ static void el16_rx(struct net_device *dev) pkt_len); } else if ((frame_status & 0x2000) == 0) { /* Frame Rxed, but with error. */ - lp->stats.rx_errors++; - if (frame_status & 0x0800) lp->stats.rx_crc_errors++; - if (frame_status & 0x0400) lp->stats.rx_frame_errors++; - if (frame_status & 0x0200) lp->stats.rx_fifo_errors++; - if (frame_status & 0x0100) lp->stats.rx_over_errors++; - if (frame_status & 0x0080) lp->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (frame_status & 0x0800) dev->stats.rx_crc_errors++; + if (frame_status & 0x0400) dev->stats.rx_frame_errors++; + if (frame_status & 0x0200) dev->stats.rx_fifo_errors++; + if (frame_status & 0x0100) dev->stats.rx_over_errors++; + if (frame_status & 0x0080) dev->stats.rx_length_errors++; } else { /* Malloc up new buffer. */ struct sk_buff *skb; @@ -868,7 +852,7 @@ static void el16_rx(struct net_device *dev) skb = dev_alloc_skb(pkt_len+2); if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -880,8 +864,8 @@ static void el16_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } /* Clear the status word and set End-of-List on the rx frame. 
*/ diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 127f60841b10..edda6e10ebe5 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -299,7 +299,7 @@ static struct isapnp_device_id el3_isapnp_adapters[] __initdata = { { } /* terminate list */ }; -static u16 el3_isapnp_phys_addr[8][3]; +static __be16 el3_isapnp_phys_addr[8][3]; static int nopnp; #endif /* __ISAPNP__ */ @@ -313,8 +313,9 @@ static int nopnp; static int __init el3_common_init(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); - short i; int err; + DECLARE_MAC_BUF(mac); + const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; spin_lock_init(&lp->lock); @@ -346,17 +347,10 @@ static int __init el3_common_init(struct net_device *dev) return err; } - { - const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; - printk("%s: 3c5x9 found at %#3.3lx, %s port, address ", - dev->name, dev->base_addr, - if_names[(dev->if_port & 0x03)]); - } - - /* Read in the station address. */ - for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i]); - printk(", IRQ %d.\n", dev->irq); + printk(KERN_INFO "%s: 3c5x9 found at %#3.3lx, %s port, " + "address %s, IRQ %d.\n", + dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], + print_mac(mac, dev->dev_addr), dev->irq); if (el3_debug > 0) printk(KERN_INFO "%s", version); @@ -385,7 +379,7 @@ static int __init el3_probe(int card_idx) struct el3_private *lp; short lrs_state = 0xff, i; int ioaddr, irq, if_port; - u16 phys_addr[3]; + __be16 phys_addr[3]; static int current_tag; int err = -ENODEV; #if defined(__ISAPNP__) @@ -432,7 +426,6 @@ __again: return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &idev->dev); pnp_cards++; @@ -524,8 +517,6 @@ no_pnp: if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - netdev_boot_setup_check(dev); /* Set passed-in IRQ or I/O Addr. */ @@ -644,7 +635,6 @@ static int __init el3_mca_probe(struct device *device) return -ENOMEM; } - SET_MODULE_OWNER(dev); netdev_boot_setup_check(dev); memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); @@ -704,8 +694,6 @@ static int __init el3_eisa_probe (struct device *device) return -ENOMEM; } - SET_MODULE_OWNER(dev); - netdev_boot_setup_check(dev); memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 290166d5e7d1..275e7510ebaf 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -501,8 +501,6 @@ static struct net_device *corkscrew_scan(int unit) netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); - #ifdef __ISAPNP__ if(nopnp == 1) goto no_pnp; @@ -571,6 +569,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ int i; int irq; + DECLARE_MAC_BUF(mac); if (idev) { irq = pnp_irq(idev, 0); @@ -632,8 +631,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, checksum = (checksum ^ (checksum >> 8)) & 0xff; if (checksum != 0x00) printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); - for (i = 0; i < 6; i++) - printk("%c%2.2x", i ? 
':' : ' ', dev->dev_addr[i]); + printk(" %s", print_mac(mac, dev->dev_addr)); if (eeprom[16] == 0x11c7) { /* Corkscrew */ if (request_dma(dev->dma, "3c515")) { printk(", DMA %d allocation failed", dev->dma); diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index ab18343e58ef..239fc42fb8df 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -383,8 +383,8 @@ void alloc586(struct net_device *dev) static int elmc_getinfo(char *buf, int slot, void *d) { int len = 0; - struct net_device *dev = (struct net_device *) d; - int i; + struct net_device *dev = d; + DECLARE_MAC_BUF(mac); if (dev == NULL) return len; @@ -399,12 +399,8 @@ static int elmc_getinfo(char *buf, int slot, void *d) len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ? "External" : "Internal"); len += sprintf(buf + len, "Device: %s\n", dev->name); - len += sprintf(buf + len, "Hardware Address:"); - for (i = 0; i < 6; i++) { - len += sprintf(buf + len, " %02x", dev->dev_addr[i]); - } - buf[len++] = '\n'; - buf[len] = 0; + len += sprintf(buf + len, "Hardware Address: %s\n", + print_mac(mac, dev->dev_addr)); return len; } /* elmc_getinfo() */ @@ -422,8 +418,8 @@ static int __init do_elmc_probe(struct net_device *dev) unsigned int size = 0; int retval; struct priv *pr = dev->priv; + DECLARE_MAC_BUF(mac); - SET_MODULE_OWNER(dev); if (MCA_bus == 0) { return -ENODEV; } @@ -545,12 +541,11 @@ static int __init do_elmc_probe(struct net_device *dev) /* The hardware address for the 3c523 is stored in the first six bytes of the IO address. */ - printk(KERN_INFO "%s: hardware address ", dev->name); - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(dev->base_addr + i); - printk(" %02x", dev->dev_addr[i]); - } - printk("\n"); + + printk(KERN_INFO "%s: hardware address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); dev->open = &elmc_open; dev->stop = &elmc_close; diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index c7b571be20e0..b72b89d53ec8 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -257,8 +257,6 @@ struct net_device *__init mc32_probe(int unit) if (unit >= 0) sprintf(dev->name, "eth%d", unit); - SET_MODULE_OWNER(dev); - /* Do not check any supplied i/o locations. POS registers usually don't fail :) */ @@ -338,6 +336,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) "82586 initialisation failure", "Adapter list configuration error" }; + DECLARE_MAC_BUF(mac); /* Time to play MCA games */ @@ -398,17 +397,17 @@ static int __init mc32_probe1(struct net_device *dev, int slot) * Go PROM browsing */ - printk("%s: Address ", dev->name); - /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) { mca_write_pos(slot, 6, i+12); mca_write_pos(slot, 7, 0); - printk(" %2.2x", dev->dev_addr[i] = mca_read_pos(slot,3)); + dev->dev_addr[i] = mca_read_pos(slot,3); } + printk("%s: Address %s", dev->name, print_mac(mac, dev->dev_addr)); + mca_write_pos(slot, 6, 0); mca_write_pos(slot, 7, 0); diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 6deb20fc7a08..8d3893da06f5 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -538,10 +538,10 @@ enum MasterCtrl { #define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ #define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ struct boom_rx_desc { - u32 next; /* Last entry points to 0. */ - s32 status; - u32 addr; /* Up to 63 addr/len pairs possible. */ - s32 length; /* Set LAST_FRAG to indicate last pair. */ + __le32 next; /* Last entry points to 0. 
*/ + __le32 status; + __le32 addr; /* Up to 63 addr/len pairs possible. */ + __le32 length; /* Set LAST_FRAG to indicate last pair. */ }; /* Values for the Rx status entry. */ enum rx_desc_status { @@ -558,16 +558,16 @@ enum rx_desc_status { #endif struct boom_tx_desc { - u32 next; /* Last entry points to 0. */ - s32 status; /* bits 0:12 length, others see below. */ + __le32 next; /* Last entry points to 0. */ + __le32 status; /* bits 0:12 length, others see below. */ #if DO_ZEROCOPY struct { - u32 addr; - s32 length; + __le32 addr; + __le32 length; } frag[1+MAX_SKB_FRAGS]; #else - u32 addr; - s32 length; + __le32 addr; + __le32 length; #endif }; @@ -705,7 +705,7 @@ static struct { static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, int chip_idx, int card_idx); -static void vortex_up(struct net_device *dev); +static int vortex_up(struct net_device *dev); static void vortex_down(struct net_device *dev, int final); static int vortex_open(struct net_device *dev); static void mdio_sync(void __iomem *ioaddr, int bits); @@ -841,8 +841,11 @@ static int vortex_resume(struct pci_dev *pdev) return -EBUSY; } if (netif_running(dev)) { - vortex_up(dev); - netif_device_attach(dev); + err = vortex_up(dev); + if (err) + return err; + else + netif_device_attach(dev); } } return 0; @@ -1011,6 +1014,7 @@ static int __devinit vortex_probe1(struct device *gendev, char *print_name = "3c59x"; struct pci_dev *pdev = NULL; struct eisa_device *edev = NULL; + DECLARE_MAC_BUF(mac); if (!printed_version) { printk (version); @@ -1033,7 +1037,6 @@ static int __devinit vortex_probe1(struct device *gendev, printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n"); goto out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, gendev); vp = netdev_priv(dev); @@ -1128,7 +1131,7 @@ static int __devinit vortex_probe1(struct device *gendev, + sizeof(struct boom_tx_desc) * TX_RING_SIZE, &vp->rx_ring_dma); retval = -ENOMEM; - if (vp->rx_ring == 0) + if (!vp->rx_ring) goto free_region; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); @@ -1201,12 +1204,10 @@ static int __devinit vortex_probe1(struct device *gendev, if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) - ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); + ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - if (print_info) { - for (i = 0; i < 6; i++) - printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]); - } + if (print_info) + printk(" %s", print_mac(mac, dev->dev_addr)); /* Unfortunately an all zero eeprom passes the checksum and this gets found in the wild in failure cases. Crypto is hard 8) */ if (!is_valid_ether_addr(dev->dev_addr)) { @@ -1484,19 +1485,24 @@ static void vortex_check_media(struct net_device *dev, unsigned int init) } } -static void +static int vortex_up(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned int config; - int i, mii_reg1, mii_reg5; + int i, mii_reg1, mii_reg5, err; if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); - pci_enable_device(VORTEX_PCI(vp)); + err = pci_enable_device(VORTEX_PCI(vp)); + if (err) { + printk(KERN_WARNING "%s: Could not enable device \n", + dev->name); + goto err_out; + } } /* Before initializing select the active media port. 
*/ @@ -1555,6 +1561,7 @@ vortex_up(struct net_device *dev) mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); + vp->mii.full_duplex = vp->full_duplex; vortex_check_media(dev, 1); } @@ -1660,6 +1667,8 @@ vortex_up(struct net_device *dev) if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ iowrite32(0x8000, vp->cb_fn_base + 4); netif_start_queue (dev); +err_out: + return err; } static int @@ -1673,7 +1682,7 @@ vortex_open(struct net_device *dev) if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) { printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq); - goto out; + goto err; } if (vp->full_bus_master_rx) { /* Boomerang bus master. */ @@ -1702,20 +1711,22 @@ vortex_open(struct net_device *dev) } } retval = -ENOMEM; - goto out_free_irq; + goto err_free_irq; } /* Wrap the ring. */ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); } - vortex_up(dev); - return 0; + retval = vortex_up(dev); + if (!retval) + goto out; -out_free_irq: +err_free_irq: free_irq(dev->irq, dev); -out: +err: if (vortex_debug > 1) printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval); +out: return retval; } @@ -2489,7 +2500,7 @@ boomerang_rx(struct net_device *dev) /* Check if the packet is long enough to just accept without copying to a properly sized skbuff. */ - if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { + if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); /* 'skb_put()' points to the start of sk_buff data area. */ @@ -2822,9 +2833,14 @@ static void vortex_set_msglevel(struct net_device *dev, u32 dbg) vortex_debug = dbg; } -static int vortex_get_stats_count(struct net_device *dev) +static int vortex_get_sset_count(struct net_device *dev, int sset) { - return VORTEX_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return VORTEX_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void vortex_get_ethtool_stats(struct net_device *dev, @@ -2881,12 +2897,11 @@ static const struct ethtool_ops vortex_ethtool_ops = { .get_msglevel = vortex_get_msglevel, .set_msglevel = vortex_set_msglevel, .get_ethtool_stats = vortex_get_ethtool_stats, - .get_stats_count = vortex_get_stats_count, + .get_sset_count = vortex_get_sset_count, .get_settings = vortex_get_settings, .set_settings = vortex_set_settings, .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, - .get_perm_addr = ethtool_op_get_perm_addr, }; #ifdef CONFIG_PCI @@ -2899,7 +2914,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned long flags; - int state = 0; + pci_power_t state = 0; if(VORTEX_PCI(vp)) state = VORTEX_PCI(vp)->current_state; diff --git a/drivers/net/7990.c b/drivers/net/7990.c index e89ace109a5d..224e0bff1ae0 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c @@ -305,18 +305,18 @@ static int lance_rx (struct net_device *dev) /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb (len+2); @@ -324,7 +324,7 @@ static int lance_rx (struct net_device *dev) if (skb == 0) { printk ("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; @@ -339,8 +339,8 @@ static int lance_rx (struct net_device *dev) skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } /* Return the packet to the pool */ @@ -377,12 +377,12 @@ static int lance_tx (struct net_device *dev) if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk("%s: Carrier Lost, trying %s\n", @@ -400,7 +400,7 @@ static int lance_tx (struct net_device *dev) /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -420,13 +420,13 @@ static int lance_tx (struct net_device *dev) /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; @@ -471,9 +471,9 @@ lance_interrupt (int irq, void *dev_id) /* Log misc errors. */ if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; /* Tx babble. */ + dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; /* Missed a Rx frame. */ + dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ if (csr0 & LE_C0_MERR) { printk("%s: Bus master arbitration failure, status %4.4x.\n", dev->name, csr0); @@ -589,13 +589,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) return 0; } -struct net_device_stats *lance_get_stats (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* taken from the depca driver via a2065.c */ static void lance_load_multicast (struct net_device *dev) { diff --git a/drivers/net/7990.h b/drivers/net/7990.h index b1212b5ed92f..0a5837b96421 100644 --- a/drivers/net/7990.h +++ b/drivers/net/7990.h @@ -111,7 +111,6 @@ struct lance_private int lance_log_rx_bufs, lance_log_tx_bufs; int rx_ring_mod_mask, tx_ring_mod_mask; - struct net_device_stats stats; int tpe; /* TPE is selected */ int auto_select; /* cable-selection is by carrier */ unsigned short busmaster_regval; @@ -246,7 +245,6 @@ struct lance_private extern int lance_open(struct net_device *dev); extern int lance_close (struct net_device *dev); extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev); -extern struct net_device_stats *lance_get_stats (struct net_device *dev); extern void lance_set_multicast (struct net_device *dev); extern void lance_tx_timeout(struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index e970e64bf966..a453eda834d5 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -78,7 +78,7 @@ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define CP_VLAN_TAG_USED 1 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \ - do { (tx_desc)->opts2 = (vlan_tag_value); } while (0) + do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0) #else #define CP_VLAN_TAG_USED 0 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \ @@ -303,25 +303,25 @@ static const unsigned int cp_rx_config = (RX_DMA_BURST << RxCfgDMAShift); struct cp_desc { - u32 opts1; - u32 opts2; - u64 addr; + __le32 opts1; + __le32 opts2; + __le64 addr; }; struct cp_dma_stats { - u64 tx_ok; - u64 rx_ok; - u64 tx_err; - u32 rx_err; - u16 rx_fifo; - u16 frame_align; - u32 tx_ok_1col; - u32 tx_ok_mcol; - u64 rx_ok_phys; - u64 rx_ok_bcast; - u32 rx_ok_mcast; - u16 tx_abort; - u16 tx_underrun; + __le64 tx_ok; + __le64 rx_ok; + __le64 tx_err; + __le32 rx_err; + __le16 rx_fifo; + __le16 frame_align; + __le32 tx_ok_1col; + __le32 tx_ok_mcol; + __le64 rx_ok_phys; + __le64 rx_ok_bcast; + __le32 rx_ok_mcast; + __le16 tx_abort; + __le16 tx_underrun; } __attribute__((packed)); struct cp_extra_stats { @@ -334,6 +334,8 @@ struct cp_private { spinlock_t lock; u32 msg_enable; + struct napi_struct napi; + struct pci_dev *pdev; u32 rx_config; u16 cpcmd; @@ -460,9 +462,9 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, cp->dev->last_rx = jiffies; #if CP_VLAN_TAG_USED - if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) { + if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) { vlan_hwaccel_receive_skb(skb, cp->vlgrp, - be16_to_cpu(desc->opts2 & 0xffff)); + swab16(le32_to_cpu(desc->opts2) & 0xffff)); } else #endif netif_receive_skb(skb); @@ -501,12 +503,12 @@ static inline unsigned int cp_rx_csum_ok (u32 status) return 0; } -static int cp_rx_poll (struct net_device *dev, int *budget) +static int cp_rx_poll(struct napi_struct *napi, int budget) { - struct cp_private *cp = netdev_priv(dev); - unsigned rx_tail = cp->rx_tail; - unsigned rx_work = dev->quota; - unsigned rx; + struct cp_private *cp = container_of(napi, struct cp_private, napi); + 
struct net_device *dev = cp->dev; + unsigned int rx_tail = cp->rx_tail; + int rx; rx_status_loop: rx = 0; @@ -560,7 +562,7 @@ rx_status_loop: skb_reserve(new_skb, RX_OFFSET); - pci_unmap_single(cp->pdev, mapping, + dma_unmap_single(&cp->pdev->dev, mapping, buflen, PCI_DMA_FROMDEVICE); /* Handle checksum offloading for incoming packets. */ @@ -571,7 +573,7 @@ rx_status_loop: skb_put(skb, len); - mapping = pci_map_single(cp->pdev, new_skb->data, buflen, + mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, PCI_DMA_FROMDEVICE); cp->rx_skb[rx_tail] = new_skb; @@ -588,33 +590,28 @@ rx_next: desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); rx_tail = NEXT_RX(rx_tail); - if (!rx_work--) + if (rx >= budget) break; } cp->rx_tail = rx_tail; - dev->quota -= rx; - *budget -= rx; - /* if we did not reach work limit, then we're done with * this round of polling */ - if (rx_work) { + if (rx < budget) { unsigned long flags; if (cpr16(IntrStatus) & cp_rx_intr_mask) goto rx_status_loop; - local_irq_save(flags); + spin_lock_irqsave(&cp->lock, flags); cpw16_f(IntrMask, cp_intr_mask); - __netif_rx_complete(dev); - local_irq_restore(flags); - - return 0; /* done */ + __netif_rx_complete(dev, napi); + spin_unlock_irqrestore(&cp->lock, flags); } - return 1; /* not done */ + return rx; } static irqreturn_t cp_interrupt (int irq, void *dev_instance) @@ -647,9 +644,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) } if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &cp->napi)) { cpw16_f(IntrMask, cp_norx_intr_mask); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &cp->napi); } if (status & (TxOK | TxErr | TxEmpty | SWInt)) @@ -704,7 +701,7 @@ static void cp_tx (struct cp_private *cp) skb = cp->tx_skb[tx_tail]; BUG_ON(!skb); - pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr), + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), le32_to_cpu(txd->opts1) & 0xffff, PCI_DMA_TODEVICE); @@ -768,7 +765,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) #if CP_VLAN_TAG_USED if (cp->vlgrp && vlan_tx_tag_present(skb)) - vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)); + vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb)); #endif entry = cp->tx_head; @@ -782,7 +779,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) dma_addr_t mapping; len = skb->len; - mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); CP_VLAN_TX_TAG(txd, vlan_tag); txd->addr = cpu_to_le64(mapping); wmb(); @@ -818,7 +815,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) */ first_eor = eor; first_len = skb_headlen(skb); - first_mapping = pci_map_single(cp->pdev, skb->data, + first_mapping = dma_map_single(&cp->pdev->dev, skb->data, first_len, PCI_DMA_TODEVICE); cp->tx_skb[entry] = skb; entry = NEXT_TX(entry); @@ -830,7 +827,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) dma_addr_t mapping; len = this_frag->size; - mapping = pci_map_single(cp->pdev, + mapping = dma_map_single(&cp->pdev->dev, ((void *) page_address(this_frag->page) + this_frag->page_offset), len, PCI_DMA_TODEVICE); @@ -1021,8 +1018,8 @@ static void cp_init_hw (struct cp_private *cp) cpw8_f (Cfg9346, Cfg9346_Unlock); /* Restore our idea of the MAC address. 
*/ - cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); - cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); + cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -1069,8 +1066,8 @@ static int cp_refill_rx (struct cp_private *cp) skb_reserve(skb, RX_OFFSET); - mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + mapping = dma_map_single(&cp->pdev->dev, skb->data, + cp->rx_buf_sz, PCI_DMA_FROMDEVICE); cp->rx_skb[i] = skb; cp->rx_ring[i].opts2 = 0; @@ -1110,7 +1107,8 @@ static int cp_alloc_rings (struct cp_private *cp) { void *mem; - mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma); + mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES, + &cp->ring_dma, GFP_KERNEL); if (!mem) return -ENOMEM; @@ -1128,7 +1126,7 @@ static void cp_clean_rings (struct cp_private *cp) for (i = 0; i < CP_RX_RING_SIZE; i++) { if (cp->rx_skb[i]) { desc = cp->rx_ring + i; - pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr), + dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), cp->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(cp->rx_skb[i]); } @@ -1139,7 +1137,7 @@ static void cp_clean_rings (struct cp_private *cp) struct sk_buff *skb = cp->tx_skb[i]; desc = cp->tx_ring + i; - pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr), + dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), le32_to_cpu(desc->opts1) & 0xffff, PCI_DMA_TODEVICE); if (le32_to_cpu(desc->opts1) & LastFrag) @@ -1158,7 +1156,8 @@ static void cp_clean_rings (struct cp_private *cp) static void cp_free_rings (struct cp_private *cp) { cp_clean_rings(cp); - pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); + dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, + cp->ring_dma); cp->rx_ring = NULL; cp->tx_ring = NULL; } @@ -1175,6 +1174,8 @@ static int cp_open (struct net_device *dev) if (rc) return rc; + napi_enable(&cp->napi); + cp_init_hw(cp); rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); @@ -1188,6 +1189,7 @@ static int cp_open (struct net_device *dev) return 0; err_out_hw: + napi_disable(&cp->napi); cp_stop_hw(cp); cp_free_rings(cp); return rc; @@ -1198,6 +1200,8 @@ static int cp_close (struct net_device *dev) struct cp_private *cp = netdev_priv(dev); unsigned long flags; + napi_disable(&cp->napi); + if (netif_msg_ifdown(cp)) printk(KERN_DEBUG "%s: disabling interface\n", dev->name); @@ -1379,9 +1383,14 @@ static int cp_get_regs_len(struct net_device *dev) return CP_REGS_SIZE; } -static int cp_get_stats_count (struct net_device *dev) +static int cp_get_sset_count (struct net_device *dev, int sset) { - return CP_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return CP_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -1517,7 +1526,8 @@ static void cp_get_ethtool_stats (struct net_device *dev, dma_addr_t dma; int i; - nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma); + nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), + &dma, GFP_KERNEL); if (!nic_stats) return; @@ -1552,13 +1562,13 @@ static void cp_get_ethtool_stats (struct net_device *dev, tmp_stats[i++] = cp->cp_stats.rx_frags; BUG_ON(i != CP_NUM_STATS); - pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); + dma_free_coherent(&cp->pdev->dev, 
sizeof(*nic_stats), nic_stats, dma); } static const struct ethtool_ops cp_ethtool_ops = { .get_drvinfo = cp_get_drvinfo, .get_regs_len = cp_get_regs_len, - .get_stats_count = cp_get_stats_count, + .get_sset_count = cp_get_sset_count, .get_settings = cp_get_settings, .set_settings = cp_set_settings, .nway_reset = cp_nway_reset, @@ -1567,18 +1577,14 @@ static const struct ethtool_ops cp_ethtool_ops = { .set_msglevel = cp_set_msglevel, .get_rx_csum = cp_get_rx_csum, .set_rx_csum = cp_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_regs = cp_get_regs, .get_wol = cp_get_wol, .set_wol = cp_set_wol, .get_strings = cp_get_strings, .get_ethtool_stats = cp_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, .get_eeprom_len = cp_get_eeprom_len, .get_eeprom = cp_get_eeprom, .set_eeprom = cp_set_eeprom, @@ -1822,6 +1828,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *regs; resource_size_t pciaddr; unsigned int addr_len, i, pci_using_dac; + DECLARE_MAC_BUF(mac); #ifndef MODULE static int version_printed; @@ -1841,7 +1848,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev(sizeof(struct cp_private)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); cp = netdev_priv(dev); @@ -1924,8 +1930,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* read MAC address from EEPROM */ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) - ((u16 *) (dev->dev_addr))[i] = - le16_to_cpu (read_eeprom (regs, i + 7, addr_len)); + ((__le16 *) (dev->dev_addr))[i] = + cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->open = cp_open; @@ -1934,11 +1940,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev->hard_start_xmit = cp_start_xmit; dev->get_stats = cp_get_stats; dev->do_ioctl = cp_ioctl; - dev->poll = cp_rx_poll; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = cp_poll_controller; #endif - dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */ + netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); #ifdef BROKEN dev->change_mtu = cp_change_mtu; #endif @@ -1965,13 +1970,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iomap; printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, " - "%02x:%02x:%02x:%02x:%02x:%02x, " - "IRQ %d\n", + "%s, IRQ %d\n", dev->name, dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 327eaa7b4999..973b684c11e3 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -291,198 +291,197 @@ static struct { /* Symbolic offsets to registers. */ enum RTL8139_registers { - MAC0 = 0, /* Ethernet hardware address. */ - MAR0 = 8, /* Multicast filter. */ - TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ - TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ - RxBuf = 0x30, - ChipCmd = 0x37, - RxBufPtr = 0x38, - RxBufAddr = 0x3A, - IntrMask = 0x3C, - IntrStatus = 0x3E, - TxConfig = 0x40, - RxConfig = 0x44, - Timer = 0x48, /* A general-purpose counter. 
*/ - RxMissed = 0x4C, /* 24 bits valid, write clears. */ - Cfg9346 = 0x50, - Config0 = 0x51, - Config1 = 0x52, - FlashReg = 0x54, - MediaStatus = 0x58, - Config3 = 0x59, - Config4 = 0x5A, /* absent on RTL-8139A */ - HltClk = 0x5B, - MultiIntr = 0x5C, - TxSummary = 0x60, - BasicModeCtrl = 0x62, - BasicModeStatus = 0x64, - NWayAdvert = 0x66, - NWayLPAR = 0x68, - NWayExpansion = 0x6A, + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. */ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + FlashReg = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + TxSummary = 0x60, + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, /* Undocumented registers, but required for proper operation. */ - FIFOTMS = 0x70, /* FIFO Control and test. */ - CSCR = 0x74, /* Chip Status and Configuration Register. */ - PARA78 = 0x78, - PARA7c = 0x7c, /* Magic transceiver parameter register. */ - Config5 = 0xD8, /* absent on RTL-8139A */ + FIFOTMS = 0x70, /* FIFO Control and test. */ + CSCR = 0x74, /* Chip Status and Configuration Register. */ + PARA78 = 0x78, + PARA7c = 0x7c, /* Magic transceiver parameter register. */ + Config5 = 0xD8, /* absent on RTL-8139A */ }; enum ClearBitMasks { - MultiIntrClear = 0xF000, - ChipCmdClear = 0xE2, - Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), }; enum ChipCmdBits { - CmdReset = 0x10, - CmdRxEnb = 0x08, - CmdTxEnb = 0x04, - RxBufEmpty = 0x01, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, }; /* Interrupt register bits, using my own meaningful names. */ enum IntrStatusBits { - PCIErr = 0x8000, - PCSTimeout = 0x4000, - RxFIFOOver = 0x40, - RxUnderrun = 0x20, - RxOverflow = 0x10, - TxErr = 0x08, - TxOK = 0x04, - RxErr = 0x02, - RxOK = 0x01, - - RxAckBits = RxFIFOOver | RxOverflow | RxOK, + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, }; enum TxStatusBits { - TxHostOwns = 0x2000, - TxUnderrun = 0x4000, - TxStatOK = 0x8000, - TxOutOfWindow = 0x20000000, - TxAborted = 0x40000000, - TxCarrierLost = 0x80000000, + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, }; enum RxStatusBits { - RxMulticast = 0x8000, - RxPhysical = 0x4000, - RxBroadcast = 0x2000, - RxBadSymbol = 0x0020, - RxRunt = 0x0010, - RxTooLong = 0x0008, - RxCRCErr = 0x0004, - RxBadAlign = 0x0002, - RxStatusOK = 0x0001, + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, }; /* Bits in RxConfig. 
*/ enum rx_mode_bits { - AcceptErr = 0x20, - AcceptRunt = 0x10, - AcceptBroadcast = 0x08, - AcceptMulticast = 0x04, - AcceptMyPhys = 0x02, - AcceptAllPhys = 0x01, + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, }; /* Bits in TxConfig. */ enum tx_config_bits { - /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */ - TxIFGShift = 24, - TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ - TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ - TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ - TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ - - TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ - TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */ - TxClearAbt = (1 << 0), /* Clear abort (WO) */ - TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */ - TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */ - - TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ }; /* Bits in Config1 */ enum Config1Bits { - Cfg1_PM_Enable = 0x01, - Cfg1_VPD_Enable = 0x02, - Cfg1_PIO = 0x04, - Cfg1_MMIO = 0x08, - LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ Cfg1_Driver_Load = 0x20, - Cfg1_LED0 = 0x40, - Cfg1_LED1 = 0x80, - SLEEP = (1 << 1), /* only on 8139, 8139A */ - PWRDN = (1 << 0), /* only on 8139, 8139A */ + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ + PWRDN = (1 << 0), /* only on 8139, 8139A */ }; /* Bits in Config3 */ enum Config3Bits { - Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ - Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ - Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ - Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ - Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ - Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ - Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ - Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ }; /* Bits in Config4 */ enum Config4Bits { - LWPTN = (1 << 2), /* not on 8139, 8139A */ + LWPTN = (1 << 2), /* not on 8139, 8139A */ }; 
/* Bits in Config5 */ enum Config5Bits { - Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ - Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ - Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ - Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */ - Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ - Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ - Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ }; enum RxConfigBits { /* rx fifo threshold */ - RxCfgFIFOShift = 13, - RxCfgFIFONone = (7 << RxCfgFIFOShift), + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), /* Max DMA burst */ - RxCfgDMAShift = 8, + RxCfgDMAShift = 8, RxCfgDMAUnlimited = (7 << RxCfgDMAShift), /* rx ring buffer length */ - RxCfgRcv8K = 0, - RxCfgRcv16K = (1 << 11), - RxCfgRcv32K = (1 << 12), - RxCfgRcv64K = (1 << 11) | (1 << 12), + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */ - RxNoWrap = (1 << 7), + RxNoWrap = (1 << 7), }; /* Twister tuning parameters from RealTek. Completely undocumented, but required to tune bad links on some boards. */ enum CSCRBits { - CSCR_LinkOKBit = 0x0400, - CSCR_LinkChangeBit = 0x0800, - CSCR_LinkStatusBits = 0x0f000, - CSCR_LinkDownOffCmd = 0x003c0, - CSCR_LinkDownCmd = 0x0f3c0, + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, }; enum Cfg9346Bits { - Cfg9346_Lock = 0x00, - Cfg9346_Unlock = 0xC0, + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, }; typedef enum { - CH_8139 = 0, + CH_8139 = 0, CH_8139_K, CH_8139A, CH_8139A_G, @@ -495,8 +494,8 @@ typedef enum { } chip_t; enum chip_flags { - HasHltClk = (1 << 0), - HasLWake = (1 << 1), + HasHltClk = (1 << 0), + HasLWake = (1 << 1), }; #define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ @@ -569,36 +568,46 @@ struct rtl_extra_stats { }; struct rtl8139_private { - void __iomem *mmio_addr; - int drv_flags; - struct pci_dev *pci_dev; - u32 msg_enable; - struct net_device_stats stats; - unsigned char *rx_ring; - unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ - unsigned int tx_flag; - unsigned long cur_tx; - unsigned long dirty_tx; - unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ - unsigned char *tx_bufs; /* Tx bounce buffer region. */ - dma_addr_t rx_ring_dma; - dma_addr_t tx_bufs_dma; - signed char phys[4]; /* MII device addresses. */ - char twistie, twist_row, twist_col; /* Twister tune state. */ - unsigned int watchdog_fired : 1; - unsigned int default_port : 4; /* Last dev->if_port value. 
*/ - unsigned int have_thread : 1; - spinlock_t lock; - spinlock_t rx_lock; - chip_t chipset; - u32 rx_config; - struct rtl_extra_stats xstats; - - struct delayed_work thread; - - struct mii_if_info mii; - unsigned int regs_len; - unsigned long fifo_copy_timeout; + void __iomem *mmio_addr; + int drv_flags; + struct pci_dev *pci_dev; + u32 msg_enable; + struct napi_struct napi; + struct net_device *dev; + struct net_device_stats stats; + + unsigned char *rx_ring; + unsigned int cur_rx; /* RX buf index of next pkt */ + dma_addr_t rx_ring_dma; + + unsigned int tx_flag; + unsigned long cur_tx; + unsigned long dirty_tx; + unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ + unsigned char *tx_bufs; /* Tx bounce buffer region. */ + dma_addr_t tx_bufs_dma; + + signed char phys[4]; /* MII device addresses. */ + + /* Twister tune state. */ + char twistie, twist_row, twist_col; + + unsigned int watchdog_fired : 1; + unsigned int default_port : 4; /* Last dev->if_port value. */ + unsigned int have_thread : 1; + + spinlock_t lock; + spinlock_t rx_lock; + + chip_t chipset; + u32 rx_config; + struct rtl_extra_stats xstats; + + struct delayed_work thread; + + struct mii_if_info mii; + unsigned int regs_len; + unsigned long fifo_copy_timeout; }; MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); @@ -625,10 +634,10 @@ static void rtl8139_tx_timeout (struct net_device *dev); static void rtl8139_init_ring (struct net_device *dev); static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev); -static int rtl8139_poll(struct net_device *dev, int *budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void rtl8139_poll_controller(struct net_device *dev); #endif +static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); @@ -646,24 +655,11 @@ static const struct ethtool_ops rtl8139_ethtool_ops; #define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0) #define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0) - -#define MMIO_FLUSH_AUDIT_COMPLETE 1 -#if MMIO_FLUSH_AUDIT_COMPLETE - /* write MMIO register */ #define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg)) #define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg)) #define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg)) -#else - -/* write MMIO register, then flush */ -#define RTL_W8 RTL_W8_F -#define RTL_W16 RTL_W16_F -#define RTL_W32 RTL_W32_F - -#endif /* MMIO_FLUSH_AUDIT_COMPLETE */ - /* read MMIO register */ #define RTL_R8(reg) ioread8 (ioaddr + (reg)) #define RTL_R16(reg) ioread16 (ioaddr + (reg)) @@ -770,7 +766,6 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev, dev_err(&pdev->dev, "Unable to alloc new net device\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); @@ -931,6 +926,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, int i, addr_len, option; void __iomem *ioaddr; static int board_idx = -1; + DECLARE_MAC_BUF(mac); assert (pdev != NULL); assert (ent != NULL); @@ -963,6 +959,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, assert (dev != NULL); tp = netdev_priv(dev); + tp->dev = dev; ioaddr = tp->mmio_addr; assert (ioaddr != NULL); @@ -976,8 +973,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, 
/* The Rtl8139-specific entries in the device structure. */ dev->open = rtl8139_open; dev->hard_start_xmit = rtl8139_start_xmit; - dev->poll = rtl8139_poll; - dev->weight = 64; + netif_napi_add(dev, &tp->napi, rtl8139_poll, 64); dev->stop = rtl8139_close; dev->get_stats = rtl8139_get_stats; dev->set_multicast_list = rtl8139_set_rx_mode; @@ -1022,14 +1018,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, pci_set_drvdata (pdev, dev); printk (KERN_INFO "%s: %s at 0x%lx, " - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " - "IRQ %d\n", + "%s, IRQ %d\n", dev->name, board_info[ent->driver_data].name, dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", @@ -1314,24 +1307,26 @@ static int rtl8139_open (struct net_device *dev) if (retval) return retval; - tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN, - &tp->tx_bufs_dma); - tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN, - &tp->rx_ring_dma); + tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + &tp->tx_bufs_dma, GFP_KERNEL); + tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + &tp->rx_ring_dma, GFP_KERNEL); if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { free_irq(dev->irq, dev); if (tp->tx_bufs) - pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, tp->tx_bufs, tp->tx_bufs_dma); if (tp->rx_ring) - pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, tp->rx_ring, tp->rx_ring_dma); return -ENOMEM; } + napi_enable(&tp->napi); + tp->mii.full_duplex = tp->mii.force_media; tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; @@ -2103,39 +2098,32 @@ static void rtl8139_weird_interrupt (struct net_device *dev, } } -static int rtl8139_poll(struct net_device *dev, int *budget) +static int rtl8139_poll(struct napi_struct *napi, int budget) { - struct rtl8139_private *tp = netdev_priv(dev); + struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->mmio_addr; - int orig_budget = min(*budget, dev->quota); - int done = 1; + int work_done; spin_lock(&tp->rx_lock); - if (likely(RTL_R16(IntrStatus) & RxAckBits)) { - int work_done; - - work_done = rtl8139_rx(dev, tp, orig_budget); - if (likely(work_done > 0)) { - *budget -= work_done; - dev->quota -= work_done; - done = (work_done < orig_budget); - } - } + work_done = 0; + if (likely(RTL_R16(IntrStatus) & RxAckBits)) + work_done += rtl8139_rx(dev, tp, budget); - if (done) { + if (work_done < budget) { unsigned long flags; /* * Order is important since data can get interrupted * again when we think we are done. */ - local_irq_save(flags); + spin_lock_irqsave(&tp->lock, flags); RTL_W16_F(IntrMask, rtl8139_intr_mask); - __netif_rx_complete(dev); - local_irq_restore(flags); + __netif_rx_complete(dev, napi); + spin_unlock_irqrestore(&tp->lock, flags); } spin_unlock(&tp->rx_lock); - return !done; + return work_done; } /* The interrupt handler does all of the Rx thread work and cleans up @@ -2180,9 +2168,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) /* Receive packets are processed by poll routine. If not running start it now. 
*/ if (status & RxAckBits){ - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &tp->napi)) { RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); - __netif_rx_schedule (dev); + __netif_rx_schedule(dev, &tp->napi); } } @@ -2223,7 +2211,8 @@ static int rtl8139_close (struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; - netif_stop_queue (dev); + netif_stop_queue(dev); + napi_disable(&tp->napi); if (netif_msg_ifdown(tp)) printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n", @@ -2248,10 +2237,10 @@ static int rtl8139_close (struct net_device *dev) rtl8139_tx_clear (tp); - pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, - tp->rx_ring, tp->rx_ring_dma); - pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, - tp->tx_bufs, tp->tx_bufs_dma); + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); tp->rx_ring = NULL; tp->tx_bufs = NULL; @@ -2417,9 +2406,14 @@ static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, } #endif /* CONFIG_8139TOO_MMIO */ -static int rtl8139_get_stats_count(struct net_device *dev) +static int rtl8139_get_sset_count(struct net_device *dev, int sset) { - return RTL_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return RTL_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) @@ -2450,9 +2444,8 @@ static const struct ethtool_ops rtl8139_ethtool_ops = { .get_wol = rtl8139_get_wol, .set_wol = rtl8139_set_wol, .get_strings = rtl8139_get_strings, - .get_stats_count = rtl8139_get_stats_count, + .get_sset_count = rtl8139_get_sset_count, .get_ethtool_stats = rtl8139_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/82596.c b/drivers/net/82596.c index 3ff1155459a3..bb30d5be7824 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -57,6 +57,7 @@ #include <asm/io.h> #include <asm/dma.h> #include <asm/pgtable.h> +#include <asm/cacheflush.h> static char version[] __initdata = "82596.c $Revision: 1.5 $\n"; @@ -325,7 +326,6 @@ struct i596_private { struct i596_cmd *cmd_head; int cmd_backlog; unsigned long last_cmd; - struct net_device_stats stats; struct i596_rfd rfds[RX_RING_SIZE]; struct i596_rbd rbds[RX_RING_SIZE]; struct tx_cmd tx_cmds[TX_RING_SIZE]; @@ -359,7 +359,6 @@ static int i596_open(struct net_device *dev); static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); -static struct net_device_stats *i596_get_stats(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void i596_tx_timeout (struct net_device *dev); static void print_eth(unsigned char *buf, char *str); @@ -827,7 +826,7 @@ memory_squeeze: if (skb == NULL) { /* XXX tulip.c can defer packets here!! 
*/ printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { if (!rx_in_place) { @@ -843,28 +842,28 @@ memory_squeeze: #endif netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes+=pkt_len; } } else { DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", dev->name, rfd->stat)); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if ((rfd->stat) & 0x0001) - lp->stats.collisions++; + dev->stats.collisions++; if ((rfd->stat) & 0x0080) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if ((rfd->stat) & 0x0100) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if ((rfd->stat) & 0x0200) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if ((rfd->stat) & 0x0400) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if ((rfd->stat) & 0x0800) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if ((rfd->stat) & 0x1000) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* Clear the buffer descriptor count and EOF + F flags */ @@ -915,8 +914,8 @@ static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) dev_kfree_skb(skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; ptr->v_next = ptr->b_next = I596_NULL; tx_cmd->cmd.command = 0; /* Mark as free */ @@ -1037,10 +1036,10 @@ static void i596_tx_timeout (struct net_device *dev) DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n", dev->name)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adaptor */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n")); /* Shutdown and restart */ i596_reset (dev, lp, ioaddr); @@ -1049,7 +1048,7 @@ static void i596_tx_timeout (struct net_device *dev) DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n")); lp->scb.command = CUC_START | RX_START; CA (dev); - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } dev->trans_start = jiffies; @@ -1081,7 +1080,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) if (tx_cmd->cmd.command) { printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n", dev->name); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } else { @@ -1106,8 +1105,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); i596_add_cmd(dev, &tx_cmd->cmd); - lp->stats.tx_packets++; - lp->stats.tx_bytes += length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; } netif_start_queue(dev); @@ -1117,15 +1116,12 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) static void print_eth(unsigned char *add, char *str) { - int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); - printk(KERN_DEBUG "i596 0x%p, ", add); - for (i = 0; i < 6; i++) - printk(" %02X", add[i + 6]); - printk(" -->"); - for (i = 0; i < 6; i++) - printk(" %02X", add[i]); - printk(" %02X%02X, %s\n", add[12], add[13], str); + printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n", + add, print_mac(mac, add + 6), print_mac(mac2, add), + add[12], add[13], str); } static int io = 0x300; @@ -1233,11 +1229,9 @@ struct net_device * __init i82596_probe(int unit) 
DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); /* The 82596-specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->open = i596_open; dev->stop = i596_close; dev->hard_start_xmit = i596_start_xmit; - dev->get_stats = i596_get_stats; dev->set_multicast_list = set_multicast_list; dev->tx_timeout = i596_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1343,17 +1337,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) if ((ptr->status) & STAT_OK) { DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if ((ptr->status) & 0x0020) - lp->stats.collisions++; + dev->stats.collisions++; if (!((ptr->status) & 0x0040)) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if ((ptr->status) & 0x0400) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if ((ptr->status) & 0x0800) - lp->stats.collisions++; + dev->stats.collisions++; if ((ptr->status) & 0x1000) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } dev_kfree_skb_irq(skb); @@ -1408,8 +1402,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) if (netif_running(dev)) { DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); ack_cmd |= RX_START; - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; rebuild_rx_bufs(dev); } } @@ -1492,14 +1486,6 @@ static int i596_close(struct net_device *dev) return 0; } -static struct net_device_stats * - i596_get_stats(struct net_device *dev) -{ - struct i596_private *lp = dev->priv; - - return &lp->stats; -} - /* * Set or clear the multicast filter for this adaptor. */ @@ -1550,6 +1536,7 @@ static void set_multicast_list(struct net_device *dev) struct dev_mc_list *dmi; unsigned char *cp; struct mc_cmd *cmd; + DECLARE_MAC_BUF(mac); if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) return; @@ -1560,8 +1547,8 @@ static void set_multicast_list(struct net_device *dev) for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) { memcpy(cp, dmi->dmi_addr, 6); if (i596_debug > 1) - DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5])); + DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %s\n", + dev->name, print_mac(mac, cp))); } i596_add_cmd(dev, &cmd->cmd); } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 43d03178064d..9c635a237a9d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -5,6 +5,7 @@ menuconfig NETDEVICES default y if UML + depends on NET bool "Network device support" ---help--- You can say N here if you don't intend to connect your Linux box to @@ -134,6 +135,12 @@ config TUN If you don't know what to use this for, you don't need it. +config VETH + tristate "Virtual ethernet device" + ---help--- + The device is an ethernet tunnel. Devices are created in pairs. When + one end receives the packet it appears on its pair and vice versa. + config NET_SB1000 tristate "General Instruments Surfboard 1000" depends on PNP @@ -158,6 +165,15 @@ config NET_SB1000 If you don't have this card, of course say N. +config IP1000 + tristate "IP1000 Gigabit Ethernet support" + depends on PCI && EXPERIMENTAL + ---help--- + This driver supports IP1000 gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called ipg. This is recommended. 
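The 82596 hunks above (and the a2065 hunks further down) drop the driver-private net_device_stats copy together with its get_stats method, and bump the counters that already live in struct net_device instead. A minimal sketch of the resulting pattern; example_count_rx is a hypothetical helper, not code from this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_count_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* dev->stats is provided by the core, so no private copy and
	 * no get_stats hook returning it are needed any more. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}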
+ source "drivers/net/arcnet/Kconfig" source "drivers/net/phy/Kconfig" @@ -224,6 +240,13 @@ config AX88796 AX88796 driver, using platform bus to provide chip detection and resources +config AX88796_93CX6 + bool "ASIX AX88796 external 93CX6 eeprom support" + depends on AX88796 + select EEPROM_93CX6 + help + Select this if your platform comes with an external 93CX6 eeprom. + config MACE tristate "MACE (Power Mac ethernet) support" depends on PPC_PMAC && PPC32 @@ -465,10 +488,6 @@ config MIPS_AU1X00_ENET If you have an Alchemy Semi AU1X00 based system say Y. Otherwise, say N. -config NET_SB1250_MAC - tristate "SB1250 Ethernet support" - depends on SIBYTE_SB1xxx_SOC - config SGI_IOC3_ETH bool "SGI IOC3 Ethernet" depends on PCI && SGI_IP27 @@ -479,26 +498,6 @@ config SGI_IOC3_ETH the Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>. -config SGI_IOC3_ETH_HW_RX_CSUM - bool "Receive hardware checksums" - depends on SGI_IOC3_ETH && INET - default y - help - The SGI IOC3 network adapter supports TCP and UDP checksums in - hardware to offload processing of these checksums from the CPU. At - the moment only acceleration of IPv4 is supported. This option - enables offloading for checksums on receive. If unsure, say Y. - -config SGI_IOC3_ETH_HW_TX_CSUM - bool "Transmit hardware checksums" - depends on SGI_IOC3_ETH && INET - default y - help - The SGI IOC3 network adapter supports TCP and UDP checksums in - hardware to offload processing of these checksums from the CPU. At - the moment only acceleration of IPv4 is supported. This option - enables offloading for checksums on transmit. If unsure, say Y. - config MIPS_SIM_NET tristate "MIPS simulator Network device" depends on MIPS_SIM @@ -838,6 +837,52 @@ config ULTRA32 <file:Documentation/networking/net-modules.txt>. The module will be called smc-ultra32. +config BFIN_MAC + tristate "Blackfin 536/537 on-chip mac support" + depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H) + select CRC32 + select MII + select PHYLIB + select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE + help + This is the driver for blackfin on-chip mac device. Say Y if you want it + compiled into the kernel. This driver is also available as a module + ( = code which can be inserted in and removed from the running kernel + whenever you want). The module will be called bfin_mac. + +config BFIN_MAC_USE_L1 + bool "Use L1 memory for rx/tx packets" + depends on BFIN_MAC && BF537 + default y + help + To get maximum network performace, you should use L1 memory as rx/tx buffers. + Say N here if you want to reserve L1 memory for other uses. + +config BFIN_TX_DESC_NUM + int "Number of transmit buffer packets" + depends on BFIN_MAC + range 6 10 if BFIN_MAC_USE_L1 + range 10 100 + default "10" + help + Set the number of buffer packets used in driver. + +config BFIN_RX_DESC_NUM + int "Number of receive buffer packets" + depends on BFIN_MAC + range 20 100 if BFIN_MAC_USE_L1 + range 20 800 + default "20" + help + Set the number of buffer packets used in driver. 
+ +config BFIN_MAC_RMII + bool "RMII PHY Interface (EXPERIMENTAL)" + depends on BFIN_MAC && EXPERIMENTAL + default n + help + Use Reduced PHY MII Interface + config SMC9194 tristate "SMC 9194 support" depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN) @@ -899,7 +944,7 @@ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 select MII - depends on ARCH_PXA + depends on ARCH_PXA || SUPERH help This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. @@ -1205,75 +1250,8 @@ config IBMVETH <file:Documentation/networking/net-modules.txt>. The module will be called ibmveth. -config IBM_EMAC - tristate "PowerPC 4xx on-chip Ethernet support" - depends on 4xx && !PPC_MERGE - help - This driver supports the PowerPC 4xx EMAC family of on-chip - Ethernet controllers. - -config IBM_EMAC_RXB - int "Number of receive buffers" - depends on IBM_EMAC - default "128" - -config IBM_EMAC_TXB - int "Number of transmit buffers" - depends on IBM_EMAC - default "64" - -config IBM_EMAC_POLL_WEIGHT - int "MAL NAPI polling weight" - depends on IBM_EMAC - default "32" - -config IBM_EMAC_RX_COPY_THRESHOLD - int "RX skb copy threshold (bytes)" - depends on IBM_EMAC - default "256" - -config IBM_EMAC_RX_SKB_HEADROOM - int "Additional RX skb headroom (bytes)" - depends on IBM_EMAC - default "0" - help - Additional receive skb headroom. Note, that driver - will always reserve at least 2 bytes to make IP header - aligned, so usually there is no need to add any additional - headroom. - - If unsure, set to 0. - -config IBM_EMAC_PHY_RX_CLK_FIX - bool "PHY Rx clock workaround" - depends on IBM_EMAC && (405EP || 440GX || 440EP || 440GR) - help - Enable this if EMAC attached to a PHY which doesn't generate - RX clock if there is no link, if this is the case, you will - see "TX disable timeout" or "RX disable timeout" in the system - log. - - If unsure, say N. - -config IBM_EMAC_DEBUG - bool "Debugging" - depends on IBM_EMAC - default n - -config IBM_EMAC_ZMII - bool - depends on IBM_EMAC && (NP405H || NP405L || 44x) - default y - -config IBM_EMAC_RGMII - bool - depends on IBM_EMAC && 440GX - default y - -config IBM_EMAC_TAH - bool - depends on IBM_EMAC && 440GX - default y +source "drivers/net/ibm_emac/Kconfig" +source "drivers/net/ibm_newemac/Kconfig" config NET_PCI bool "EISA, VLB, PCI and on board controllers" @@ -1408,18 +1386,38 @@ config APRICOT called apricot. config B44 - tristate "Broadcom 4400 ethernet support" - depends on NET_PCI && PCI + tristate "Broadcom 440x/47xx ethernet support" + depends on SSB_POSSIBLE + select SSB select MII help - If you have a network (Ethernet) controller of this type, say Y and - read the Ethernet-HOWTO, available from + If you have a network (Ethernet) controller of this type, say Y + or M and read the Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>. To compile this driver as a module, choose M here and read <file:Documentation/networking/net-modules.txt>. The module will be called b44. 
+# Auto-select SSB PCI-HOST support, if possible +config B44_PCI_AUTOSELECT + bool + depends on B44 && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B44_PCICORE_AUTOSELECT + bool + depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B44_PCI + bool + depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT + default y + config FORCEDETH tristate "nForce Ethernet support" depends on NET_PCI && PCI @@ -1468,21 +1466,6 @@ config TC35815 depends on NET_PCI && PCI && MIPS select MII -config DGRS - tristate "Digi Intl. RightSwitch SE-X support" - depends on NET_PCI && (PCI || EISA) - ---help--- - This is support for the Digi International RightSwitch series of - PCI/EISA Ethernet switch cards. These include the SE-4 and the SE-6 - models. If you have a network card of this type, say Y and read the - Ethernet-HOWTO, available from - <http://www.tldp.org/docs.html#howto>. More specific - information is contained in <file:Documentation/networking/dgrs.txt>. - - To compile this driver as a module, choose M here and read - <file:Documentation/networking/net-modules.txt>. The module - will be called dgrs. - config EEPRO100 tristate "EtherExpressPro/100 support (eepro100, original Becker driver)" depends on NET_PCI && PCI @@ -1923,6 +1906,16 @@ menuconfig NETDEV_1000 bool "Ethernet (1000 Mbit)" depends on !UML default y + ---help--- + Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common + type of Local Area Network (LAN) in universities and companies. + + Say Y here to get to see options for Gigabit Ethernet drivers. + This option alone does not add any kernel code. + Note that drivers supporting both 100 and 1000 MBit may be listed + under "Ethernet (10 or 100MBit)" instead. + + If you say N, all options in this submenu will be skipped and disabled. if NETDEV_1000 @@ -1954,14 +1947,15 @@ config ACENIC_OMIT_TIGON_I The safe and default value for this is N. config DL2K - tristate "D-Link DL2000-based Gigabit Ethernet support" + tristate "DL2000/TC902x-based Gigabit Ethernet support" depends on PCI select CRC32 help - This driver supports D-Link 2000-based gigabit ethernet cards, which - includes + This driver supports DL2000/TC902x-based Gigabit ethernet cards, + which includes D-Link DGE-550T Gigabit Ethernet Adapter. D-Link DL2000-based Gigabit Ethernet Adapter. + Sundance/Tamarack TC902x Gigabit Ethernet Adapter. To compile this driver as a module, choose M here: the module will be called dl2k. @@ -2014,6 +2008,29 @@ config E1000_DISABLE_PACKET_SPLIT If in doubt, say N. +config E1000E + tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" + depends on PCI + ---help--- + This driver supports the PCI-Express Intel(R) PRO/1000 gigabit + ethernet family of adapters. For PCI or PCI-X e1000 adapters, + use the regular e1000 driver For more information on how to + identify your adapter, go to the Adapter & Driver ID Guide at: + + <http://support.intel.com/support/network/adapter/pro100/21397.htm> + + For general information and support, go to the Intel support + website at: + + <http://support.intel.com> + + More specific information on configuring the driver is in + <file:Documentation/networking/e1000e.txt>. + + To compile this driver as a module, choose M here and read + <file:Documentation/networking/net-modules.txt>. The module + will be called e1000e. + source "drivers/net/ixp2000/Kconfig" config MYRI_SBUS @@ -2098,6 +2115,19 @@ config R8169_VLAN If in doubt, say Y. 
+config SB1250_MAC + tristate "SB1250 Gigabit Ethernet support" + depends on SIBYTE_SB1xxx_SOC + select PHYLIB + ---help--- + This driver supports Gigabit Ethernet interfaces based on the + Broadcom SiByte family of System-On-a-Chip parts. They include + the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455 + and BCM1480 chips. + + To compile this driver as a module, choose M here: the module + will be called sb1250-mac. + config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" depends on PCI @@ -2122,7 +2152,7 @@ config SKGE with better performance and more complete ethtool support. It does not support the link failover and network management - features available in the hardware. + features that "portable" vendor supplied sk98lin driver does. This driver supports adapters based on the original Yukon chipset: Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T, @@ -2160,6 +2190,93 @@ config SKY2_DEBUG If unsure, say N. +config SK98LIN + tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support (DEPRECATED)" + depends on PCI + ---help--- + Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx + compliant Gigabit Ethernet Adapter. + + This driver supports the original Yukon chipset. This driver is + deprecated and will be removed from the kernel in the near future, + it has been replaced by the skge driver. skge is cleaner and + seems to work better. + + This driver does not support the newer Yukon2 chipset. A separate + driver, sky2, is provided to support Yukon2-based adapters. + + The following adapters are supported by this driver: + - 3Com 3C940 Gigabit LOM Ethernet Adapter + - 3Com 3C941 Gigabit LOM Ethernet Adapter + - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter + - Allied Telesyn AT-2970SX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter + - Allied Telesyn AT-2970TX Gigabit Ethernet Adapter + - Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter + - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter + - Allied Telesyn AT-2971T Gigabit Ethernet Adapter + - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45 + - EG1032 v2 Instant Gigabit Network Adapter + - EG1064 v2 Instant Gigabit Network Adapter + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Albatron) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Asus) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (ECS) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Epox) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Foxconn) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Gigabyte) + - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Iwill) + - Marvell 88E8050 Gigabit LOM Ethernet Adapter (Intel) + - Marvell RDK-8001 Adapter + - Marvell RDK-8002 Adapter + - Marvell RDK-8003 Adapter + - Marvell RDK-8004 Adapter + - Marvell RDK-8006 Adapter + - Marvell RDK-8007 Adapter + - Marvell RDK-8008 Adapter + - Marvell RDK-8009 Adapter + - Marvell RDK-8010 Adapter + - Marvell RDK-8011 Adapter + - Marvell RDK-8012 Adapter + - Marvell RDK-8052 Adapter + - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (32 bit) + - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (64 bit) + - N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L) + - SK-9521 10/100/1000Base-T Adapter + - SK-9521 V2.0 10/100/1000Base-T Adapter + - SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T) + - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter 
+ - SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link) + - SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX) + - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter + - SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link) + - SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX) + - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link) + - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition) + - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link) + - SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX) + - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter + - SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link) + - SMC EZ Card 1000 (SMC9452TXV.2) + + The adapters support Jumbo Frames. + The dual link adapters support link-failover and dual port features. + Both Marvell Yukon and SysKonnect SK-98xx/SK-95xx adapters support + the scatter-gather functionality with sendfile(). Please refer to + <file:Documentation/networking/sk98lin.txt> for more information about + optional driver parameters. + Questions concerning this driver may be addressed to: + <linux@syskonnect.de> + + If you want to compile this driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read <file:Documentation/kbuild/modules.txt>. The module will + be called sk98lin. This is recommended. + config VIA_VELOCITY tristate "VIA Velocity support" depends on PCI @@ -2294,6 +2411,11 @@ menuconfig NETDEV_10000 bool "Ethernet (10000 Mbit)" depends on !UML default y + ---help--- + Say Y here to get to see options for 10 Gigabit Ethernet drivers. + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. if NETDEV_10000 @@ -2354,18 +2476,42 @@ config CHELSIO_T3 config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS + select INET_LRO ---help--- This driver supports the IBM pSeries eHEA ethernet adapter. To compile the driver as a module, choose M here. The module will be called ehea. +config IXGBE + tristate "Intel(R) 10GbE PCI Express adapters support" + depends on PCI + ---help--- + This driver supports Intel(R) 10GbE PCI Express family of + adapters. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + <http://support.intel.com/support/network/adapter/pro100/21397.htm> + + For general information and support, go to the Intel support + website at: + + <http://support.intel.com> + + More specific information on configuring the driver is in + <file:Documentation/networking/ixgbe.txt>. + + To compile this driver as a module, choose M here and read + <file:Documentation/networking/net-modules.txt>. The module + will be called ixgbe. + config IXGB tristate "Intel(R) PRO/10GbE support" depends on PCI ---help--- - This driver supports Intel(R) PRO/10GbE family of - adapters. For more information on how to identify your adapter, go + This driver supports Intel(R) PRO/10GbE family of adapters for + PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver + instead. 
For more information on how to identify your adapter, go to the Adapter & Driver ID Guide at: <http://support.intel.com/support/network/adapter/pro100/21397.htm> @@ -2429,6 +2575,7 @@ config MYRI10GE depends on PCI select FW_LOADER select CRC32 + select INET_LRO ---help--- This driver supports Myricom Myri-10G Dual Protocol interface in Ethernet mode. If the eeprom on your board is not recent enough, @@ -2447,6 +2594,13 @@ config NETXEN_NIC help This enables the support for NetXen's Gigabit Ethernet card. +config NIU + tristate "Sun Neptune 10Gbit Ethernet support" + depends on PCI + help + This enables support for cards based upon Sun's + Neptune chipset. + config PASEMI_MAC tristate "PA Semi 1/10Gbit MAC" depends on PPC64 && PCI @@ -2470,6 +2624,12 @@ config MLX4_DEBUG debug_level module parameter (which can also be set after the driver is loaded through sysfs). +config TEHUTI + tristate "Tehuti Networks 10G Ethernet" + depends on PCI + help + Tehuti Networks 10G Ethernet NIC + endif # NETDEV_10000 source "drivers/net/tokenring/Kconfig" @@ -2486,6 +2646,18 @@ source "drivers/atm/Kconfig" source "drivers/s390/net/Kconfig" +config XEN_NETDEV_FRONTEND + tristate "Xen network device frontend driver" + depends on XEN + default y + help + The network device frontend driver allows the kernel to + access network devices exported exported by a virtual + machine containing a physical network device driver. The + frontend driver is intended for unprivileged guest domains; + if you are compiling a kernel for a Xen guest, you almost + certainly want to enable this. + config ISERIES_VETH tristate "iSeries Virtual Ethernet driver support" depends on PPC_ISERIES @@ -2794,7 +2966,7 @@ config PPPOATM config PPPOL2TP tristate "PPP over L2TP (EXPERIMENTAL)" - depends on EXPERIMENTAL && PPP + depends on EXPERIMENTAL && PPP && INET help Support for PPP-over-L2TP socket family. L2TP is a protocol used by ISPs and enterprises to tunnel PPP traffic over UDP @@ -2915,6 +3087,16 @@ config NETCONSOLE If you want to log kernel messages over the network, enable this. See <file:Documentation/networking/netconsole.txt> for details. +config NETCONSOLE_DYNAMIC + bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)" + depends on NETCONSOLE && SYSFS && EXPERIMENTAL + select CONFIGFS_FS + help + This option enables the ability to dynamically reconfigure target + parameters (interface, IP addresses, port numbers, MAC addresses) + at runtime through a userspace interface exported using configfs. + See <file:Documentation/networking/netconsole.txt> for details. 
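Both the 8139too hunks earlier in this section and the amd8111e hunks further down move RX polling onto the napi_struct interface: netif_napi_add() registers the poll routine, which now receives the napi_struct plus a budget and returns the amount of work done. A minimal sketch of that shape; my_priv and my_clean_rx are hypothetical names, not code from this patch:

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static int my_clean_rx(struct my_priv *priv, int budget)
{
	return 0;	/* stub: would process up to 'budget' received packets */
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);

	if (work_done < budget) {
		/* All pending RX handled: leave polled mode. A real driver
		 * re-enables its RX interrupt around this call, as the
		 * 8139too and amd8111e conversions do. */
		netif_rx_complete(priv->dev, napi);
	}

	return work_done;
}

The matching setup is netif_napi_add(dev, &priv->napi, my_poll, 64) at probe time, with napi_enable()/napi_disable() in open/close, mirroring the conversions in this patch.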
+ config NETPOLL def_bool NETCONSOLE diff --git a/drivers/net/Makefile b/drivers/net/Makefile index eb4167622a6a..d2e0f35da42e 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -3,14 +3,19 @@ # obj-$(CONFIG_E1000) += e1000/ +obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_IBM_EMAC) += ibm_emac/ +obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ +obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGB) += ixgb/ +obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_BONDING) += bonding/ obj-$(CONFIG_ATL1) += atl1/ obj-$(CONFIG_GIANFAR) += gianfar_driver.o +obj-$(CONFIG_TEHUTI) += tehuti.o gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o \ @@ -18,7 +23,7 @@ gianfar_driver-objs := gianfar.o \ gianfar_sysfs.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o -ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o +ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o ucc_geth_ethtool.o # # link order important here @@ -39,7 +44,6 @@ obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o -obj-$(CONFIG_DGRS) += dgrs.o obj-$(CONFIG_VORTEX) += 3c59x.o obj-$(CONFIG_TYPHOON) += typhoon.o obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o @@ -66,6 +70,7 @@ ps3_gelic-objs += ps3_gelic_net.o obj-$(CONFIG_TC35815) += tc35815.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o +obj-$(CONFIG_SK98LIN) += sk98lin/ obj-$(CONFIG_SKFP) += skfp/ obj-$(CONFIG_VIA_RHINE) += via-rhine.o obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o @@ -105,7 +110,7 @@ obj-$(CONFIG_E2100) += e2100.o 8390.o obj-$(CONFIG_ES3210) += es3210.o 8390.o obj-$(CONFIG_LNE390) += lne390.o 8390.o obj-$(CONFIG_NE3210) += ne3210.o 8390.o -obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o +obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o obj-$(CONFIG_NE_H8300) += ne-h8300.o @@ -127,6 +132,8 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o +obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o + obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_IFB) += ifb.o obj-$(CONFIG_MACVLAN) += macvlan.o @@ -175,6 +182,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o +obj-$(CONFIG_LGUEST_NET) += lguest_net.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o @@ -189,6 +197,7 @@ obj-$(CONFIG_MACSONIC) += macsonic.o obj-$(CONFIG_MACMACE) += macmace.o obj-$(CONFIG_MAC89x0) += mac89x0.o obj-$(CONFIG_TUN) += tun.o +obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_DL2K) += dl2k.o obj-$(CONFIG_R8169) += r8169.o @@ -198,6 +207,7 @@ obj-$(CONFIG_S2IO) += s2io.o obj-$(CONFIG_MYRI10GE) += myri10ge/ obj-$(CONFIG_SMC91X) += smc91x.o obj-$(CONFIG_SMC911X) += smc911x.o +obj-$(CONFIG_BFIN_MAC) += bfin_mac.o obj-$(CONFIG_DM9000) += dm9000.o obj-$(CONFIG_FEC_8XX) += fec_8xx/ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o @@ -231,3 +241,4 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_NETXEN_NIC) += netxen/ +obj-$(CONFIG_NIU) += niu.o diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index a45de6975bfe..18f7f815f66e 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -119,7 +119,6 @@ struct lance_private { int lance_log_rx_bufs, lance_log_tx_bufs; int rx_ring_mod_mask, tx_ring_mod_mask; - 
struct net_device_stats stats; int tpe; /* cable-selection is TPE */ int auto_select; /* cable-selection by carrier */ unsigned short busmaster_regval; @@ -294,18 +293,18 @@ static int lance_rx (struct net_device *dev) /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb (len+2); @@ -313,7 +312,7 @@ static int lance_rx (struct net_device *dev) if (skb == 0) { printk(KERN_WARNING "%s: Memory squeeze, " "deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; @@ -328,8 +327,8 @@ static int lance_rx (struct net_device *dev) skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } /* Return the packet to the pool */ @@ -364,12 +363,12 @@ static int lance_tx (struct net_device *dev) if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_ERR "%s: Carrier Lost, " @@ -388,7 +387,7 @@ static int lance_tx (struct net_device *dev) /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, " "restarting\n", dev->name); @@ -408,13 +407,13 @@ static int lance_tx (struct net_device *dev) /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; @@ -459,9 +458,9 @@ static irqreturn_t lance_interrupt (int irq, void *dev_id) /* Log misc errors. */ if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; /* Tx babble. */ + dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; /* Missed a Rx frame. */ + dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ if (csr0 & LE_C0_MERR) { printk(KERN_ERR "%s: Bus master arbitration failure, status " "%4.4x.\n", dev->name, csr0); @@ -606,7 +605,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; - lp->stats.tx_bytes += skblen; + dev->stats.tx_bytes += skblen; if (TX_BUFFS_AVAIL <= 0) netif_stop_queue(dev); @@ -621,13 +620,6 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) return status; } -static struct net_device_stats *lance_get_stats (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* taken from the depca driver */ static void lance_load_multicast (struct net_device *dev) { @@ -724,6 +716,7 @@ static int __devinit a2065_init_one(struct zorro_dev *z, unsigned long board, base_addr, mem_start; struct resource *r1, *r2; int err; + DECLARE_MAC_BUF(mac); board = z->resource.start; base_addr = board+A2065_LANCE; @@ -746,7 +739,6 @@ static int __devinit a2065_init_one(struct zorro_dev *z, return -ENOMEM; } - SET_MODULE_OWNER(dev); priv = netdev_priv(dev); r1->name = dev->name; @@ -783,7 +775,6 @@ static int __devinit a2065_init_one(struct zorro_dev *z, dev->hard_start_xmit = &lance_start_xmit; dev->tx_timeout = &lance_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->dma = 0; @@ -802,9 +793,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z, zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + "%s\n", dev->name, board, + print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c index 644c408515df..5136d94923aa 100644 --- a/drivers/net/ac3200.c +++ b/drivers/net/ac3200.c @@ -103,8 +103,6 @@ static int __init do_ac3200_probe(struct net_device *dev) int irq = dev->irq; int mem_start = dev->mem_start; - SET_MODULE_OWNER(dev); - if (ioaddr > 0x1ff) /* Check a single specified location. */ return ac_probe1(ioaddr, dev); else if (ioaddr > 0) /* Don't probe at all. */ @@ -148,6 +146,7 @@ out: static int __init ac_probe1(int ioaddr, struct net_device *dev) { int i, retval; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -169,10 +168,11 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev) inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3)); #endif - printk("AC3200 in EISA slot %d, node", ioaddr/0x1000); - for(i = 0; i < 6; i++) - printk(" %02x", dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i)); + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i); + printk(KERN_DEBUG "AC3200 in EISA slot %d, node %s", + ioaddr/0x1000, print_mac(mac, dev->dev_addr)); #if 0 /* Check the vendor ID/prefix. 
Redundant after checking the EISA ID */ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0 diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index b78a4e5ceeb2..6c192650d349 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -406,7 +406,7 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl); #define DEF_STAT (2 * TICKS_PER_SEC) -static int link[ACE_MAX_MOD_PARMS]; +static int link_state[ACE_MAX_MOD_PARMS]; static int trace[ACE_MAX_MOD_PARMS]; static int tx_coal_tick[ACE_MAX_MOD_PARMS]; static int rx_coal_tick[ACE_MAX_MOD_PARMS]; @@ -419,7 +419,7 @@ MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver"); -module_param_array(link, int, NULL, 0); +module_param_array_named(link, link_state, int, NULL, 0); module_param_array(trace, int, NULL, 0); module_param_array(tx_coal_tick, int, NULL, 0); module_param_array(max_tx_desc, int, NULL, 0); @@ -465,7 +465,6 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev, return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); ap = dev->priv; @@ -894,6 +893,7 @@ static int __devinit ace_init(struct net_device *dev) int board_idx, ecode = 0; short i; unsigned char cache_size; + DECLARE_MAC_BUF(mac); ap = netdev_priv(dev); regs = ap->regs; @@ -987,36 +987,32 @@ static int __devinit ace_init(struct net_device *dev) mac1 = 0; for(i = 0; i < 4; i++) { - int tmp; + int t; mac1 = mac1 << 8; - tmp = read_eeprom_byte(dev, 0x8c+i); - if (tmp < 0) { + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { ecode = -EIO; goto init_error; } else - mac1 |= (tmp & 0xff); + mac1 |= (t & 0xff); } mac2 = 0; for(i = 4; i < 8; i++) { - int tmp; + int t; mac2 = mac2 << 8; - tmp = read_eeprom_byte(dev, 0x8c+i); - if (tmp < 0) { + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { ecode = -EIO; goto init_error; } else - mac2 |= (tmp & 0xff); + mac2 |= (t & 0xff); } writel(mac1, ®s->MacAddrHi); writel(mac2, ®s->MacAddrLo); - printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", - (mac1 >> 8) & 0xff, mac1 & 0xff, (mac2 >> 24) &0xff, - (mac2 >> 16) & 0xff, (mac2 >> 8) & 0xff, mac2 & 0xff); - dev->dev_addr[0] = (mac1 >> 8) & 0xff; dev->dev_addr[1] = mac1 & 0xff; dev->dev_addr[2] = (mac2 >> 24) & 0xff; @@ -1024,6 +1020,8 @@ static int __devinit ace_init(struct net_device *dev) dev->dev_addr[4] = (mac2 >> 8) & 0xff; dev->dev_addr[5] = mac2 & 0xff; + printk("MAC: %s\n", print_mac(mac, dev->dev_addr)); + /* * Looks like this is necessary to deal with on all architectures, * even this %$#%$# N440BX Intel based thing doesn't get it right. 
@@ -1307,10 +1305,10 @@ static int __devinit ace_init(struct net_device *dev) writel(TX_RING_BASE, ®s->WinBase); if (ACE_IS_TIGON_I(ap)) { - ap->tx_ring = (struct tx_desc *) regs->Window; + ap->tx_ring = (__force struct tx_desc *) regs->Window; for (i = 0; i < (TIGON_I_TX_RING_ENTRIES * sizeof(struct tx_desc)) / sizeof(u32); i++) - writel(0, (void __iomem *)ap->tx_ring + i * 4); + writel(0, (__force void __iomem *)ap->tx_ring + i * 4); set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE); } else { @@ -1396,8 +1394,8 @@ static int __devinit ace_init(struct net_device *dev) /* * Override link default parameters */ - if ((board_idx >= 0) && link[board_idx]) { - int option = link[board_idx]; + if ((board_idx >= 0) && link_state[board_idx]) { + int option = link_state[board_idx]; tmp = LNK_ENABLE; @@ -2385,8 +2383,9 @@ static int ace_close(struct net_device *dev) if (mapping) { if (ACE_IS_TIGON_I(ap)) { - struct tx_desc __iomem *tx - = (struct tx_desc __iomem *) &ap->tx_ring[i]; + /* NB: TIGON_1 is special, tx_ring is in io space */ + struct tx_desc __iomem *tx; + tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; writel(0, &tx->addr.addrhi); writel(0, &tx->addr.addrlo); writel(0, &tx->flagsize); @@ -2446,7 +2445,7 @@ ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, #endif if (ACE_IS_TIGON_I(ap)) { - struct tx_desc __iomem *io = (struct tx_desc __iomem *) desc; + struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; writel(addr >> 32, &io->addr.addrhi); writel(addr & 0xffffffff, &io->addr.addrlo); writel(flagsize, &io->flagsize); @@ -2938,7 +2937,7 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz * This operation requires the NIC to be halted and is performed with * interrupts disabled and with the spinlock hold. 
*/ -int __devinit ace_load_firmware(struct net_device *dev) +static int __devinit ace_load_firmware(struct net_device *dev) { struct ace_private *ap = netdev_priv(dev); struct ace_regs __iomem *regs = ap->regs; @@ -3128,12 +3127,6 @@ static int __devinit read_eeprom_byte(struct net_device *dev, int result = 0; short i; - if (!dev) { - printk(KERN_ERR "No device!\n"); - result = -ENODEV; - goto out; - } - /* * Don't take interrupts on this CPU will bit banging * the %#%#@$ I2C device diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index a61b2f89fc33..1cc74ec88a58 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -709,7 +709,8 @@ static int amd8111e_tx(struct net_device *dev) lp->tx_complete_idx++; /*COAL update tx coalescing parameters */ lp->coal_conf.tx_packets++; - lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count; + lp->coal_conf.tx_bytes += + le16_to_cpu(lp->tx_ring[tx_index].buff_count); if (netif_queue_stopped(dev) && lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ @@ -723,9 +724,10 @@ static int amd8111e_tx(struct net_device *dev) #ifdef CONFIG_AMD8111E_NAPI /* This function handles the driver receive operation in polling mode */ -static int amd8111e_rx_poll(struct net_device *dev, int * budget) +static int amd8111e_rx_poll(struct napi_struct *napi, int budget) { - struct amd8111e_priv *lp = netdev_priv(dev); + struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); + struct net_device *dev = lp->amd8111e_net_dev; int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; void __iomem *mmio = lp->mmio; struct sk_buff *skb,*new_skb; @@ -737,7 +739,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) #if AMD8111E_VLAN_TAG_USED short vtag; #endif - int rx_pkt_limit = dev->quota; + int rx_pkt_limit = budget; unsigned long flags; do{ @@ -838,21 +840,14 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) } while(intr0 & RINT0); /* Receive descriptor is empty now */ - dev->quota -= num_rx_pkt; - *budget -= num_rx_pkt; - spin_lock_irqsave(&lp->lock, flags); - netif_rx_complete(dev); + __netif_rx_complete(dev, napi); writel(VAL0|RINTEN0, mmio + INTEN0); writel(VAL2 | RDMD0, mmio + CMD0); spin_unlock_irqrestore(&lp->lock, flags); - return 0; rx_not_empty: - /* Do not call a netif_rx_complete */ - dev->quota -= num_rx_pkt; - *budget -= num_rx_pkt; - return 1; + return num_rx_pkt; } #else @@ -1287,11 +1282,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) /* Check if Receive Interrupt has occurred. */ #ifdef CONFIG_AMD8111E_NAPI if(intr0 & RINT0){ - if(netif_rx_schedule_prep(dev)){ + if(netif_rx_schedule_prep(dev, &lp->napi)){ /* Disable receive interupts */ writel(RINTEN0, mmio + INTEN0); /* Schedule a polling routine */ - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &lp->napi); } else if (intren0 & RINTEN0) { printk("************Driver bug! 
\ @@ -1345,6 +1340,8 @@ static int amd8111e_close(struct net_device * dev) struct amd8111e_priv *lp = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&lp->napi); + spin_lock_irq(&lp->lock); amd8111e_disable_interrupt(lp); @@ -1375,12 +1372,15 @@ static int amd8111e_open(struct net_device * dev ) dev->name, dev)) return -EAGAIN; + napi_enable(&lp->napi); + spin_lock_irq(&lp->lock); amd8111e_init_hw_default(lp); if(amd8111e_restart(dev)){ spin_unlock_irq(&lp->lock); + napi_disable(&lp->napi); if (dev->irq) free_irq(dev->irq, dev); return -ENOMEM; @@ -1405,7 +1405,7 @@ This function checks if there is any transmit descriptors available to queue mo static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) { int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; - if(lp->tx_skbuff[tx_index] != 0) + if (lp->tx_skbuff[tx_index]) return -1; else return 0; @@ -1442,7 +1442,7 @@ static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev) lp->tx_dma_addr[tx_index] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); lp->tx_ring[tx_index].buff_phy_addr = - (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]); + cpu_to_le32(lp->tx_dma_addr[tx_index]); /* Set FCS and LTINT bits */ wmb(); @@ -1935,6 +1935,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, unsigned long reg_addr,reg_len; struct amd8111e_priv* lp; struct net_device* dev; + DECLARE_MAC_BUF(mac); err = pci_enable_device(pdev); if(err){ @@ -1983,7 +1984,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, goto err_free_reg; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #if AMD8111E_VLAN_TAG_USED @@ -1999,7 +1999,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, spin_lock_init(&lp->lock); lp->mmio = ioremap(reg_addr, reg_len); - if (lp->mmio == 0) { + if (!lp->mmio) { printk(KERN_ERR "amd8111e: Cannot map device registers, " "exiting\n"); err = -ENOMEM; @@ -2008,7 +2008,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, /* Initializing MAC address */ for(i = 0; i < ETH_ADDR_LEN; i++) - dev->dev_addr[i] =readb(lp->mmio + PADR + i); + dev->dev_addr[i] = readb(lp->mmio + PADR + i); /* Setting user defined parametrs */ lp->ext_phy_option = speed_duplex[card_idx]; @@ -2031,8 +2031,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, dev->tx_timeout = amd8111e_tx_timeout; dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; #ifdef CONFIG_AMD8111E_NAPI - dev->poll = amd8111e_rx_poll; - dev->weight = 32; + netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = amd8111e_poll; @@ -2078,11 +2077,10 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, /* display driver and device information */ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; - printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERS); - printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version); - for (i = 0; i < 6; i++) - printk("%2.2x%c",dev->dev_addr[i],i == 5 ? 
' ' : ':'); - printk( "\n"); + printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", + dev->name,MODULE_VERS); + printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %s\n", + dev->name, chip_version, print_mac(mac, dev->dev_addr)); if (lp->ext_phy_id) printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n", dev->name, lp->ext_phy_id, lp->ext_phy_addr); diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h index e65080a5994a..28c60a71ed50 100644 --- a/drivers/net/amd8111e.h +++ b/drivers/net/amd8111e.h @@ -655,32 +655,32 @@ typedef enum { struct amd8111e_tx_dr{ - u16 buff_count; /* Size of the buffer pointed by this descriptor */ + __le16 buff_count; /* Size of the buffer pointed by this descriptor */ - u16 tx_flags; + __le16 tx_flags; - u16 tag_ctrl_info; + __le16 tag_ctrl_info; - u16 tag_ctrl_cmd; + __le16 tag_ctrl_cmd; - u32 buff_phy_addr; + __le32 buff_phy_addr; - u32 reserved; + __le32 reserved; }; struct amd8111e_rx_dr{ - u32 reserved; + __le32 reserved; - u16 msg_count; /* Received message len */ + __le16 msg_count; /* Received message len */ - u16 tag_ctrl_info; + __le16 tag_ctrl_info; - u16 buff_count; /* Len of the buffer pointed by descriptor. */ + __le16 buff_count; /* Len of the buffer pointed by descriptor. */ - u16 rx_flags; + __le16 rx_flags; - u32 buff_phy_addr; + __le32 buff_phy_addr; }; struct amd8111e_link_config{ @@ -763,6 +763,8 @@ struct amd8111e_priv{ /* Reg memory mapped address */ void __iomem *mmio; + struct napi_struct napi; + spinlock_t lock; /* Guard lock */ unsigned long rx_idx, tx_idx; /* The next free ring entry */ unsigned long tx_complete_idx; diff --git a/drivers/net/apne.c b/drivers/net/apne.c index 954191119d35..c12cbdf368b1 100644 --- a/drivers/net/apne.c +++ b/drivers/net/apne.c @@ -148,7 +148,6 @@ struct net_device * __init apne_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); /* disable pcmcia irq for readtuple */ pcmcia_disable_irq(); @@ -205,6 +204,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) int neX000, ctron; #endif static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (ei_debug && version_printed++ == 0) printk(version); @@ -247,7 +247,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) {0x00, NE_EN0_RSARHI}, {E8390_RREAD+E8390_START, NE_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) { + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { outb(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -317,12 +317,12 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; - } - printk("\n%s: %s found.\n", dev->name, name); + printk(" %s\n", print_mac(mac, dev->dev_addr)); + + printk("%s: %s found.\n", dev->name, name); ei_status.name = name; ei_status.tx_start_page = start_page; diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index da6ffa8cd81e..92c3a4cf0bb1 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -194,10 +194,6 @@ static void cops_timeout(struct net_device *dev); static void cops_rx (struct net_device *dev); static int cops_send_packet (struct sk_buff *skb, struct net_device *dev); static void set_multicast_list (struct net_device *dev); -static int cops_hard_header (struct sk_buff 
*skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len); - static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int cops_close (struct net_device *dev); static struct net_device_stats *cops_get_stats (struct net_device *dev); @@ -235,8 +231,6 @@ struct net_device * __init cops_probe(int unit) base_addr = dev->base_addr = io; } - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) { /* Check a single specified location. */ err = cops_probe1(dev, base_addr); } else if (base_addr != 0) { /* Don't probe at all. */ @@ -333,7 +327,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; lp = netdev_priv(dev); - memset(lp, 0, sizeof(struct cops_local)); spin_lock_init(&lp->lock); /* Copy local board variable to lp struct. */ @@ -342,7 +335,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->hard_start_xmit = cops_send_packet; dev->tx_timeout = cops_timeout; dev->watchdog_timeo = HZ * 2; - dev->hard_header = cops_hard_header; + dev->get_stats = cops_get_stats; dev->open = cops_open; dev->stop = cops_close; @@ -947,19 +940,6 @@ static void set_multicast_list(struct net_device *dev) } /* - * Another Dummy function to keep the Appletalk layer happy. - */ - -static int cops_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) -{ - if(cops_debug >= 3) - printk("%s: cops_hard_header executed. Wow!\n", dev->name); - return 0; -} - -/* * System ioctls for the COPS LocalTalk card. */ diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index f22e46dfd770..1071144edd66 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -65,7 +65,6 @@ static struct net_device * __init ipddp_init(void) if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); strcpy(dev->name, "ipddp%d"); if (version_printed++ == 0) @@ -117,7 +116,7 @@ static struct net_device_stats *ipddp_get_stats(struct net_device *dev) */ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 paddr = ((struct rtable*)skb->dst)->rt_gateway; + __be32 paddr = ((struct rtable*)skb->dst)->rt_gateway; struct ddpehdr *ddp; struct ipddp_route *rt; struct atalk_addr *our_addr; diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h index 52072fb0c610..531519da99a3 100644 --- a/drivers/net/appletalk/ipddp.h +++ b/drivers/net/appletalk/ipddp.h @@ -14,7 +14,7 @@ struct ipddp_route { struct net_device *dev; /* Carrier device */ - __u32 ip; /* IP address */ + __be32 ip; /* IP address */ struct atalk_addr at; /* Gateway appletalk address */ int flags; struct ipddp_route *next; diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 6a6cbd331a16..6ab2c2d4d673 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -870,15 +870,6 @@ static void set_multicast_list(struct net_device *dev) /* Actually netatalk needs fixing! 
*/ } -static int ltpc_hard_header (struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, unsigned len) -{ - if(debug & DEBUG_VERBOSE) - printk("ltpc_hard_header called for device %s\n", - dev->name); - return 0; -} - static int ltpc_poll_counter; static void ltpc_poll(unsigned long l) @@ -1046,8 +1037,6 @@ struct net_device * __init ltpc_probe(void) if (!dev) goto out; - SET_MODULE_OWNER(dev); - /* probe for the I/O port address */ if (io != 0x240 && request_region(0x220,8,"ltpc")) { @@ -1143,7 +1132,6 @@ struct net_device * __init ltpc_probe(void) /* Fill in the fields of the device structure with ethernet-generic values. */ dev->hard_start_xmit = ltpc_xmit; - dev->hard_header = ltpc_hard_header; dev->get_stats = ltpc_get_stats; /* add the ltpc-specific things */ diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 681e20b8466f..c59c8067de99 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -102,8 +102,8 @@ static int arcnet_close(struct net_device *dev); static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); static void arcnet_timeout(struct net_device *dev); static int arcnet_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len); + unsigned short type, const void *daddr, + const void *saddr, unsigned len); static int arcnet_rebuild_header(struct sk_buff *skb); static struct net_device_stats *arcnet_get_stats(struct net_device *dev); static int go_tx(struct net_device *dev); @@ -317,11 +317,17 @@ static int choose_mtu(void) return mtu == 65535 ? XMTU : mtu; } +static const struct header_ops arcnet_header_ops = { + .create = arcnet_header, + .rebuild = arcnet_rebuild_header, +}; + /* Setup a struct device for ARCnet. 
*/ static void arcdev_setup(struct net_device *dev) { dev->type = ARPHRD_ARCNET; + dev->header_ops = &arcnet_header_ops; dev->hard_header_len = sizeof(struct archdr); dev->mtu = choose_mtu(); @@ -342,8 +348,6 @@ static void arcdev_setup(struct net_device *dev) dev->hard_start_xmit = arcnet_send_packet; dev->tx_timeout = arcnet_timeout; dev->get_stats = arcnet_get_stats; - dev->hard_header = arcnet_header; - dev->rebuild_header = arcnet_rebuild_header; } struct net_device *alloc_arcdev(char *name) @@ -488,10 +492,10 @@ static int arcnet_close(struct net_device *dev) static int arcnet_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { - struct arcnet_local *lp = dev->priv; + const struct arcnet_local *lp = netdev_priv(dev); uint8_t _daddr, proto_num; struct ArcProto *proto; diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 1f0302735416..6599f1046c7b 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -398,8 +398,6 @@ static int __init com90io_init(void) if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - dev->base_addr = io; dev->irq = irq; if (dev->irq == 2) diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 2de8877ece29..dab185bc51f1 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -34,7 +34,7 @@ #define VERSION "arcnet: RFC1051 \"simple standard\" (`s') encapsulation support loaded.\n" -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev); +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev); static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int build_header(struct sk_buff *skb, struct net_device *dev, @@ -86,7 +86,7 @@ MODULE_LICENSE("GPL"); * * With ARCnet we have to convert everything to Ethernet-style stuff. */ -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) { struct arcnet_local *lp = dev->priv; struct archdr *pkt = (struct archdr *) skb->data; diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 460a095000c2..6d6d95cc4404 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -34,7 +34,7 @@ MODULE_LICENSE("GPL"); #define VERSION "arcnet: RFC1201 \"standard\" (`a') encapsulation support loaded.\n" -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev); +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev); static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int build_header(struct sk_buff *skb, struct net_device *dev, @@ -88,7 +88,7 @@ module_exit(arcnet_rfc1201_exit); * * With ARCnet we have to convert everything to Ethernet-style stuff. 
*/ -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) { struct archdr *pkt = (struct archdr *) skb->data; struct arc_rfc1201 *soft = &pkt->soft.rfc1201; @@ -456,7 +456,7 @@ static void load_pkt(struct net_device *dev, struct arc_hardware *hard, excsoft.proto = soft->proto; excsoft.split_flag = 0xff; - excsoft.sequence = 0xffff; + excsoft.sequence = htons(0xffff); hard->offset[0] = 0; ofs = 512 - softlen; diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index bc5a38a6705f..3fa3bccd1adb 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -166,6 +166,7 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, struct net_device *dev; struct ariadne_private *priv; int err; + DECLARE_MAC_BUF(mac); r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); if (!r1) @@ -183,7 +184,6 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, return -ENOMEM; } - SET_MODULE_OWNER(dev); priv = netdev_priv(dev); r1->name = dev->name; @@ -217,9 +217,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: Ariadne at 0x%08lx, Ethernet Address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + "%s\n", dev->name, board, + print_mac(mac, dev->dev_addr)); return 0; } @@ -615,21 +614,17 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Fill in a Tx ring entry */ #if 0 - printk(KERN_DEBUG "TX pkt type 0x%04x from ", ((u_short *)skb->data)[6]); - { - int i; - u_char *ptr = &((u_char *)skb->data)[6]; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); - } - printk(" to "); - { - int i; - u_char *ptr = (u_char *)skb->data; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); - } - printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len); +{ + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG "TX pkt type 0x%04x from %s to %s " + " data 0x%08x len %d\n", + ((u_short *)skb->data)[6], + print_mac(mac, ((const u8 *)skb->data)+6), + print_mac(mac, (const u8 *)skb->data), + (int)skb->data, (int)skb->len); +} #endif local_irq_save(flags); @@ -749,22 +744,22 @@ static int ariadne_rx(struct net_device *dev) skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len); skb->protocol=eth_type_trans(skb,dev); #if 0 +{ + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "RX pkt type 0x%04x from ", ((u_short *)skb->data)[6]); { - int i; u_char *ptr = &((u_char *)skb->data)[6]; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); + printk("%s", print_mac(mac, ptr)); } printk(" to "); { - int i; u_char *ptr = (u_char *)skb->data; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); + printk("%s", print_mac(mac, ptr)); } printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len); +} #endif netif_rx(skb); diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 2143eeb7a2b0..ba6bd03a015f 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -414,7 +414,7 @@ static void am79c961_setmulticastlist (struct net_device *dev) /* * Update the multicast hash table */ - for (i = 0; i < sizeof(multi_hash) / sizeof(multi_hash[0]); i++) + for (i = 0; i < ARRAY_SIZE(multi_hash); i++) write_rreg(dev->base_addr, i + LADRL, multi_hash[i]); /* @@ -741,12 +741,10 @@ static int __init am79c961_probe(struct platform_device *pdev) ret = 
register_netdev(dev); if (ret == 0) { - printk(KERN_INFO "%s: ether address ", dev->name); - - /* Retrive and print the ethernet address. */ - for (i = 0; i < 6; i++) - printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]); + DECLARE_MAC_BUF(mac); + printk(KERN_INFO "%s: ether address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index ef2cc80256a3..25b114a4e2b1 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -485,6 +485,7 @@ static void update_mac_address(struct net_device *dev) static int set_mac_address(struct net_device *dev, void* addr) { struct sockaddr *address = addr; + DECLARE_MAC_BUF(mac); if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; @@ -492,9 +493,8 @@ static int set_mac_address(struct net_device *dev, void* addr) memcpy(dev->dev_addr, address->sa_data, dev->addr_len); update_mac_address(dev); - printk("%s: Setting MAC address to %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk("%s: Setting MAC address to %s\n", dev->name, + print_mac(mac, dev->dev_addr)); return 0; } @@ -979,6 +979,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add struct at91_private *lp; unsigned int val; int res; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct at91_private)); if (!dev) @@ -986,7 +987,6 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add dev->base_addr = AT91_VA_BASE_EMAC; dev->irq = AT91RM9200_ID_EMAC; - SET_MODULE_OWNER(dev); /* Install the interrupt handler */ if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { @@ -1082,12 +1082,11 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add } /* Display ethernet banner */ - printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%02x:%02x:%02x:%02x:%02x:%02x)\n", - dev->name, (uint) dev->base_addr, dev->irq, - at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", - at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n", + dev->name, (uint) dev->base_addr, dev->irq, + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", + print_mac(mac, dev->dev_addr)); if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? 
"(Fiber)" : "(Copper)"); else if (phy_type == MII_LXT971A_ID) diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index f6ece1d43f6e..7f016f3d5bf0 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -169,6 +169,9 @@ struct ep93xx_priv spinlock_t tx_pending_lock; unsigned int tx_pending; + struct net_device *dev; + struct napi_struct napi; + struct net_device_stats stats; struct mii_if_info mii; @@ -190,15 +193,11 @@ static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) return &(ep->stats); } -static int ep93xx_rx(struct net_device *dev, int *budget) +static int ep93xx_rx(struct net_device *dev, int processed, int budget) { struct ep93xx_priv *ep = netdev_priv(dev); - int rx_done; - int processed; - rx_done = 0; - processed = 0; - while (*budget > 0) { + while (processed < budget) { int entry; struct ep93xx_rstat *rstat; u32 rstat0; @@ -211,10 +210,8 @@ static int ep93xx_rx(struct net_device *dev, int *budget) rstat0 = rstat->rstat0; rstat1 = rstat->rstat1; - if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) { - rx_done = 1; + if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) break; - } rstat->rstat0 = 0; rstat->rstat1 = 0; @@ -275,8 +272,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget) err: ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); processed++; - dev->quota--; - (*budget)--; } if (processed) { @@ -284,7 +279,7 @@ err: wrw(ep, REG_RXSTSENQ, processed); } - return !rx_done; + return processed; } static int ep93xx_have_more_rx(struct ep93xx_priv *ep) @@ -293,36 +288,32 @@ static int ep93xx_have_more_rx(struct ep93xx_priv *ep) return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); } -static int ep93xx_poll(struct net_device *dev, int *budget) +static int ep93xx_poll(struct napi_struct *napi, int budget) { - struct ep93xx_priv *ep = netdev_priv(dev); - - /* - * @@@ Have to stop polling if device is downed while we - * are polling. 
- */ + struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); + struct net_device *dev = ep->dev; + int rx = 0; poll_some_more: - if (ep93xx_rx(dev, budget)) - return 1; - - netif_rx_complete(dev); - - spin_lock_irq(&ep->rx_lock); - wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); - if (ep93xx_have_more_rx(ep)) { - wrl(ep, REG_INTEN, REG_INTEN_TX); - wrl(ep, REG_INTSTSP, REG_INTSTS_RX); + rx = ep93xx_rx(dev, rx, budget); + if (rx < budget) { + int more = 0; + + spin_lock_irq(&ep->rx_lock); + __netif_rx_complete(dev, napi); + wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); + if (ep93xx_have_more_rx(ep)) { + wrl(ep, REG_INTEN, REG_INTEN_TX); + wrl(ep, REG_INTSTSP, REG_INTSTS_RX); + more = 1; + } spin_unlock_irq(&ep->rx_lock); - if (netif_rx_reschedule(dev, 0)) + if (more && netif_rx_reschedule(dev, napi)) goto poll_some_more; - - return 0; } - spin_unlock_irq(&ep->rx_lock); - return 0; + return rx; } static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) @@ -426,9 +417,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id) if (status & REG_INTSTS_RX) { spin_lock(&ep->rx_lock); - if (likely(__netif_rx_schedule_prep(dev))) { + if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) { wrl(ep, REG_INTEN, REG_INTEN_TX); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &ep->napi); } spin_unlock(&ep->rx_lock); } @@ -648,7 +639,10 @@ static int ep93xx_open(struct net_device *dev) dev->dev_addr[4], dev->dev_addr[5]); } + napi_enable(&ep->napi); + if (ep93xx_start_hw(dev)) { + napi_disable(&ep->napi); ep93xx_free_buffers(ep); return -EIO; } @@ -662,6 +656,7 @@ static int ep93xx_open(struct net_device *dev) err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); if (err) { + napi_disable(&ep->napi); ep93xx_stop_hw(dev); ep93xx_free_buffers(ep); return err; @@ -678,6 +673,7 @@ static int ep93xx_close(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); + napi_disable(&ep->napi); netif_stop_queue(dev); wrl(ep, REG_GIINTMSK, 0); @@ -788,14 +784,12 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) dev->get_stats = ep93xx_get_stats; dev->ethtool_ops = &ep93xx_ethtool_ops; - dev->poll = ep93xx_poll; dev->hard_start_xmit = ep93xx_xmit; dev->open = ep93xx_open; dev->stop = ep93xx_close; dev->do_ioctl = ep93xx_ioctl; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - dev->weight = 64; return dev; } @@ -847,6 +841,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev) goto err_out; } ep = netdev_priv(dev); + ep->dev = dev; + netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); platform_set_drvdata(pdev, dev); diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index f21148e7b579..3bb9e293e2ef 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c @@ -36,7 +36,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> @@ -75,7 +74,7 @@ static void ether1_timeout(struct net_device *dev); /* ------------------------------------------------------------------------- */ -static char version[] __initdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; +static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; #define BUS_16 16 #define BUS_8 8 @@ -997,6 +996,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) { struct net_device *dev; int i, ret = 0; + DECLARE_MAC_BUF(mac); ether1_banner(); @@ -1010,7 +1010,6 @@ 
ether1_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &ec->dev); dev->irq = ec->irq; @@ -1045,12 +1044,9 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) if (ret) goto free; - printk(KERN_INFO "%s: ether1 in slot %d, ", - dev->name, ec->slot_no); + printk(KERN_INFO "%s: ether1 in slot %d, %s\n", + dev->name, ec->slot_no, print_mac(mac, dev->dev_addr)); - for (i = 0; i < 6; i++) - printk ("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); - ecard_set_drvdata(ec, dev); return 0; diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index da713500654d..67e96ae85035 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c @@ -51,7 +51,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> @@ -69,7 +68,7 @@ #include <asm/ecard.h> #include <asm/io.h> -static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; +static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; #include "ether3.h" @@ -464,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev) if (dev->flags & IFF_PROMISC) { /* promiscuous mode */ priv(dev)->regs.config1 |= CFG1_RECVPROMISC; - } else if (dev->flags & IFF_ALLMULTI) { + } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; } else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; @@ -776,7 +775,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct ether3_data *data = id->data; struct net_device *dev; - int i, bus_type, ret; + int bus_type, ret; + DECLARE_MAC_BUF(mac); ether3_banner(); @@ -790,7 +790,6 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &ec->dev); priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); @@ -860,9 +859,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) if (ret) goto free; - printk("%s: %s in slot %d, ", dev->name, data->name, ec->slot_no); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + printk("%s: %s in slot %d, %s\n", + dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr)); ecard_set_drvdata(ec, dev); return 0; diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 769ba69451f4..00081d2b9cd5 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c @@ -31,7 +31,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> @@ -649,6 +648,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) struct net_device *dev; struct etherh_priv *eh; int i, ret; + DECLARE_MAC_BUF(mac); etherh_banner(); @@ -662,7 +662,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &ec->dev); dev->open = etherh_open; @@ -747,11 +746,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) if (ret) goto free; - printk(KERN_INFO "%s: %s in slot %d, ", - dev->name, data->name, ec->slot_no); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
'\n' : ':'); + printk(KERN_INFO "%s: %s in slot %d, %s\n", + dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr)); ecard_set_drvdata(ec, dev); diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index bed8e0ebaf19..b032c1bf492f 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -109,7 +109,6 @@ typedef unsigned char uchar; /* Information that need to be kept for each board. */ struct net_local { - struct net_device_stats stats; spinlock_t lock; unsigned char mc_filter[8]; uint jumpered:1; /* Set iff the board has jumper config. */ @@ -164,7 +163,6 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t net_interrupt(int irq, void *dev_id); static void net_rx(struct net_device *dev); static int net_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void net_tx_timeout (struct net_device *dev); @@ -225,8 +223,6 @@ struct net_device * __init at1700_probe(int unit) dev->irq = irq; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = at1700_probe1(dev, io); } else if (io != 0) { /* Don't probe at all. */ @@ -269,6 +265,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; int slot, ret = -ENODEV; struct net_local *lp = netdev_priv(dev); + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -392,16 +389,15 @@ found: if (is_at1700) { for(i = 0; i < 3; i++) { unsigned short eeprom_val = read_eeprom(ioaddr, 4+i); - printk("%04x", eeprom_val); ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val); } } else { for(i = 0; i < 6; i++) { unsigned char val = inb(ioaddr + SAPROM + i); - printk("%02x", val); dev->dev_addr[i] = val; } } + printk("%s", print_mac(mac, dev->dev_addr)); /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals, rather than 150 ohm shielded twisted pair compensation. @@ -458,7 +454,6 @@ found: dev->open = net_open; dev->stop = net_close; dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; dev->set_multicast_list = &set_rx_mode; dev->tx_timeout = net_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -573,7 +568,7 @@ static void net_tx_timeout (struct net_device *dev) dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE), inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START), inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* ToDo: We should try to restart the adaptor... */ outw(0xffff, ioaddr + MODE24); outw (0xffff, ioaddr + TX_STATUS); @@ -693,10 +688,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) printk("%s: 16 Collision occur during Txing.\n", dev->name); /* Cancel sending a packet. */ outb(0x03, ioaddr + COL16CNTL); - lp->stats.collisions++; + dev->stats.collisions++; } if (status & 0x82) { - lp->stats.tx_packets++; + dev->stats.tx_packets++; /* The Tx queue has any packets and is not being transferred a packet from the host, start transmitting. */ @@ -721,7 +716,6 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) static void net_rx(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int boguscount = 5; @@ -740,11 +734,11 @@ net_rx(struct net_device *dev) #endif if ((status & 0xF0) != 0x20) { /* There was an error. 
*/ - lp->stats.rx_errors++; - if (status & 0x08) lp->stats.rx_length_errors++; - if (status & 0x04) lp->stats.rx_frame_errors++; - if (status & 0x02) lp->stats.rx_crc_errors++; - if (status & 0x01) lp->stats.rx_over_errors++; + dev->stats.rx_errors++; + if (status & 0x08) dev->stats.rx_length_errors++; + if (status & 0x04) dev->stats.rx_frame_errors++; + if (status & 0x02) dev->stats.rx_crc_errors++; + if (status & 0x01) dev->stats.rx_over_errors++; } else { /* Malloc up new buffer. */ struct sk_buff *skb; @@ -755,7 +749,7 @@ net_rx(struct net_device *dev) /* Prime the FIFO and then flush the packet. */ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); outb(0x05, ioaddr + RX_CTRL); - lp->stats.rx_errors++; + dev->stats.rx_errors++; break; } skb = dev_alloc_skb(pkt_len+3); @@ -765,7 +759,7 @@ net_rx(struct net_device *dev) /* Prime the FIFO and then flush the packet. */ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); outb(0x05, ioaddr + RX_CTRL); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } skb_reserve(skb,2); @@ -774,8 +768,8 @@ net_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } if (--boguscount <= 0) break; @@ -824,17 +818,6 @@ static int net_close(struct net_device *dev) return 0; } -/* Get the current statistics. - This may be called with the card open or closed. - There are no on-chip counters, so this function is trivial. -*/ -static struct net_device_stats * -net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - /* Set the multicast/promiscuous mode for this adaptor. */ diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index dfa8b9ba4c80..ebf1a3a88e15 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -224,7 +224,6 @@ struct lance_private { int dirty_tx; /* Ring entries to be freed. 
*/ /* copy function */ void *(*memcpy_f)( void *, const void *, size_t ); - struct net_device_stats stats; /* This must be long for set_bit() */ long tx_full; spinlock_t devlock; @@ -263,7 +262,7 @@ struct lance_addr { (highest byte stripped) */ }; -#define N_LANCE_ADDR (sizeof(lance_addr_list)/sizeof(*lance_addr_list)) +#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list) /* Definitions for the Lance */ @@ -347,7 +346,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); -static struct net_device_stats *lance_get_stats( struct net_device *dev ); static void set_multicast_list( struct net_device *dev ); static int lance_set_mac_address( struct net_device *dev, void *addr ); static void lance_tx_timeout (struct net_device *dev); @@ -390,7 +388,6 @@ struct net_device * __init atarilance_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); for( i = 0; i < N_LANCE_ADDR; ++i ) { if (lance_probe1( dev, &lance_addr_list[i] )) { @@ -470,6 +467,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, int i; static int did_version; unsigned short save1, save2; + DECLARE_MAC_BUF(mac); PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", (long)memaddr, (long)ioaddr )); @@ -598,8 +596,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, i = IO->mem; break; } - for( i = 0; i < 6; ++i ) - printk( "%02x%s", dev->dev_addr[i], (i < 5) ? ":" : "\n" ); + printk("%s\n", print_mac(mac, dev->dev_addr)); if (lp->cardtype == OLD_RIEBL) { printk( "%s: Warning: This is a default ethernet address!\n", dev->name ); @@ -632,7 +629,6 @@ static unsigned long __init lance_probe1( struct net_device *dev, dev->open = &lance_open; dev->hard_start_xmit = &lance_start_xmit; dev->stop = &lance_close; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &set_multicast_list; dev->set_mac_address = &lance_set_mac_address; @@ -640,13 +636,6 @@ static unsigned long __init lance_probe1( struct net_device *dev, dev->tx_timeout = lance_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - -#if 0 - dev->start = 0; -#endif - - memset( &lp->stats, 0, sizeof(lp->stats) ); - return( 1 ); } @@ -754,7 +743,7 @@ static void lance_tx_timeout (struct net_device *dev) * little endian mode. */ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); - lp->stats.tx_errors++; + dev->stats.tx_errors++; #ifndef final_version { int i; DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n", @@ -790,6 +779,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) int entry, len; struct lance_tx_head *head; unsigned long flags; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, DREG )); @@ -812,17 +803,13 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) /* Fill in a Tx ring entry */ if (lance_debug >= 3) { - u_char *p; - int i; - printk( "%s: TX pkt type 0x%04x from ", dev->name, - ((u_short *)skb->data)[6]); - for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = (u_char *)skb->data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? 
":" : "" ); - printk(" data at 0x%08x len %d\n", (int)skb->data, - (int)skb->len ); + printk( "%s: TX pkt type 0x%04x from " + "%s to %s" + " data at 0x%08x len %d\n", + dev->name, ((u_short *)skb->data)[6], + print_mac(mac, &skb->data[6]), + print_mac(mac2, skb->data), + (int)skb->data, (int)skb->len ); } /* We're not prepared for the int until the last flags are set/reset. And @@ -842,7 +829,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) head->misc = 0; lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dev_kfree_skb( skb ); lp->cur_tx++; while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { @@ -913,13 +900,13 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id ) if (status & TMD1_ERR) { /* There was an major error, log it. */ int err_status = MEM->tx_head[entry].misc; - lp->stats.tx_errors++; - if (err_status & TMD3_RTRY) lp->stats.tx_aborted_errors++; - if (err_status & TMD3_LCAR) lp->stats.tx_carrier_errors++; - if (err_status & TMD3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++; if (err_status & TMD3_UFLO) { /* Ackk! On FIFO errors the Tx unit is turned off! */ - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; /* Remove this verbosity later! */ DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n", dev->name, csr0 )); @@ -928,8 +915,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id ) } } else { if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF)) - lp->stats.collisions++; - lp->stats.tx_packets++; + dev->stats.collisions++; + dev->stats.tx_packets++; } /* XXX MSch: free skb?? */ @@ -956,8 +943,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id ) } /* Log misc errors. */ - if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */ - if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & CSR0_MERR) { DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " "status %04x.\n", dev->name, csr0 )); @@ -998,11 +985,11 @@ static int lance_rx( struct net_device *dev ) buffers it's possible for a jabber packet to use two buffers, with only the last correctly noting the error. */ if (status & RMD1_ENP) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet.*/ - if (status & RMD1_FRAM) lp->stats.rx_frame_errors++; - if (status & RMD1_OFLO) lp->stats.rx_over_errors++; - if (status & RMD1_CRC) lp->stats.rx_crc_errors++; - if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; head->flag &= (RMD1_ENP|RMD1_STP); } else { /* Malloc up new buffer, compatible with net-3. 
*/ @@ -1011,7 +998,7 @@ static int lance_rx( struct net_device *dev ) if (pkt_len < 60) { printk( "%s: Runt packet!\n", dev->name ); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { skb = dev_alloc_skb( pkt_len+2 ); @@ -1024,7 +1011,7 @@ static int lance_rx( struct net_device *dev ) break; if (i > RX_RING_SIZE - 2) { - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; head->flag |= RMD1_OWN_CHIP; lp->cur_rx++; } @@ -1032,19 +1019,18 @@ static int lance_rx( struct net_device *dev ) } if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head), *p; - printk( "%s: RX pkt type 0x%04x from ", dev->name, - ((u_short *)data)[6]); - for( p = &data[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " + u_char *data = PKTBUF_ADDR(head); + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s ", + "data %02x %02x %02x %02x %02x %02x %02x %02x " "len %d\n", + dev->name, ((u_short *)data)[6], + print_mac(mac, &data[6]), print_mac(mac2, data), data[15], data[16], data[17], data[18], data[19], data[20], data[21], data[22], - pkt_len ); + pkt_len); } skb_reserve( skb, 2 ); /* 16 byte align */ @@ -1053,8 +1039,8 @@ static int lance_rx( struct net_device *dev ) skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } @@ -1091,14 +1077,6 @@ static int lance_close( struct net_device *dev ) } -static struct net_device_stats *lance_get_stats( struct net_device *dev ) - -{ struct lance_private *lp = (struct lance_private *)dev->priv; - - return &lp->stats; -} - - /* Set or clear the multicast filter for this adaptor. 
num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/atl1/atl1_ethtool.c b/drivers/net/atl1/atl1_ethtool.c index 1f616c5c1473..68a83be843ab 100644 --- a/drivers/net/atl1/atl1_ethtool.c +++ b/drivers/net/atl1/atl1_ethtool.c @@ -88,9 +88,14 @@ static void atl1_get_ethtool_stats(struct net_device *netdev, } -static int atl1_get_stats_count(struct net_device *netdev) +static int atl1_get_sset_count(struct net_device *netdev, int sset) { - return ARRAY_SIZE(atl1_gstrings_stats); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } } static int atl1_get_settings(struct net_device *netdev, @@ -489,15 +494,12 @@ const struct ethtool_ops atl1_ethtool_ops = { .get_pauseparam = atl1_get_pauseparam, .set_pauseparam = atl1_set_pauseparam, .get_rx_csum = atl1_get_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, .get_link = ethtool_op_get_link, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_strings = atl1_get_strings, .nway_reset = atl1_nway_reset, .get_ethtool_stats = atl1_get_ethtool_stats, - .get_stats_count = atl1_get_stats_count, - .get_tso = ethtool_op_get_tso, + .get_sset_count = atl1_get_sset_count, .set_tso = ethtool_op_set_tso, }; diff --git a/drivers/net/atl1/atl1_hw.c b/drivers/net/atl1/atl1_hw.c index ef886bdeac13..9d3bd22e3a82 100644 --- a/drivers/net/atl1/atl1_hw.c +++ b/drivers/net/atl1/atl1_hw.c @@ -603,7 +603,7 @@ static struct atl1_spi_flash_dev flash_table[] = { static void atl1_init_flash_opcode(struct atl1_hw *hw) { - if (hw->flash_vendor >= sizeof(flash_table) / sizeof(flash_table[0])) + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) hw->flash_vendor = 0; /* ATMEL */ /* Init OP table */ diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h index 100c09c66e64..939aa0f53f6e 100644 --- a/drivers/net/atl1/atl1_hw.h +++ b/drivers/net/atl1/atl1_hw.h @@ -680,11 +680,6 @@ void atl1_check_options(struct atl1_adapter *adapter); #define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ #define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ -/* The size (in bytes) of a ethernet packet */ -#define ENET_HEADER_SIZE 14 -#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */ -#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */ -#define ETHERNET_FCS_SIZE 4 #define MAX_JUMBO_FRAME_SIZE 0x2800 #define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ @@ -929,8 +924,8 @@ enum atl1_dma_req_block { atl1_dma_req_128 = 0, atl1_dma_req_256 = 1, atl1_dma_req_512 = 2, - atl1_dam_req_1024 = 3, - atl1_dam_req_2048 = 4, + atl1_dma_req_1024 = 3, + atl1_dma_req_2048 = 4, atl1_dma_req_4096 = 5 }; diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index fd1e156f1747..35b0a7dd4ef4 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -59,6 +59,7 @@ #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#include <linux/if_ether.h> #include <linux/irqreturn.h> #include <linux/workqueue.h> #include <linux/timer.h> @@ -75,7 +76,6 @@ #include <linux/compiler.h> #include <linux/delay.h> #include <linux/mii.h> -#include <linux/interrupt.h> #include <net/checksum.h> #include <asm/atomic.h> @@ -120,8 +120,8 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + 
ETHERNET_FCS_SIZE; - hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->wol = 0; adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; @@ -314,7 +314,7 @@ err_nomem: return -ENOMEM; } -void atl1_init_ring_ptrs(struct atl1_adapter *adapter) +static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; @@ -688,9 +688,9 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1_adapter *adapter = netdev_priv(netdev); int old_mtu = netdev->mtu; - int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); return -EINVAL; @@ -908,8 +908,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter) /* config DMA Engine */ value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | - ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) - << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; value |= (u32) hw->dma_ord; if (atl1_rcb_128 == hw->rcb_value) @@ -917,7 +917,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter) iowrite32(value, hw->hw_addr + REG_DMA_CTRL); /* config CMB / SMB */ - value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16); + value = (hw->cmb_tpd > adapter->tpd_ring.count) ? 
+ hw->cmb_tpd : adapter->tpd_ring.count; + value <<= 16; + value |= hw->cmb_rrd; iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); @@ -1334,7 +1337,7 @@ rrd_ok: skb = buffer_info->skb; length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); - skb_put(skb, length - ETHERNET_FCS_SIZE); + skb_put(skb, length - ETH_FCS_LEN); /* Receive Checksum Offload */ atl1_rx_checksum(adapter, rrd, skb); @@ -1364,7 +1367,6 @@ rrd_ok: if (count) { u32 tpd_next_to_use; u32 rfd_next_to_use; - u32 rrd_next_to_clean; spin_lock(&adapter->mb_lock); @@ -1422,7 +1424,7 @@ static void atl1_intr_tx(struct atl1_adapter *adapter) netif_wake_queue(adapter->netdev); } -static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) +static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) { u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 next_to_use = atomic_read(&tpd_ring->next_to_use); @@ -1453,7 +1455,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); ipofst = skb_network_offset(skb); - if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ + if (ipofst != ETH_HLEN) /* 802.3 frame */ tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; tso->tsopl |= (iph->ihl & @@ -1509,7 +1511,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, unsigned int f; u16 tpd_next_to_use; u16 proto_hdr_len; - u16 i, m, len12; + u16 len12; first_buf_len -= skb->data_len; nr_frags = skb_shinfo(skb)->nr_frags; @@ -1533,6 +1535,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, tpd_next_to_use = 0; if (first_buf_len > proto_hdr_len) { + int i, m; + len12 = first_buf_len - proto_hdr_len; m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; @@ -1700,15 +1704,13 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } } - local_irq_save(flags); - if (!spin_trylock(&adapter->lock)) { + if (!spin_trylock_irqsave(&adapter->lock, flags)) { /* Can't get lock - tell upper layer to requeue */ - local_irq_restore(flags); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); return NETDEV_TX_LOCKED; } - if (tpd_avail(&adapter->tpd_ring) < count) { + if (atl1_tpd_avail(&adapter->tpd_ring) < count) { /* not enough descriptors */ netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->lock, flags); @@ -2201,21 +2203,26 @@ static int __devinit atl1_probe(struct pci_dev *pdev, struct net_device *netdev; struct atl1_adapter *adapter; static int cards_found = 0; - bool pci_using_64 = true; int err; err = pci_enable_device(pdev); if (err) return err; - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + /* + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. 
+ */ + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (err) { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (err) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto err_dma; - } - pci_using_64 = false; + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_dma; } /* Mark all PCI regions associated with PCI device * pdev as being reserved by owner atl1_driver_name @@ -2234,7 +2241,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, err = -ENOMEM; goto err_alloc_etherdev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -2280,7 +2286,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, netdev->ethtool_ops = &atl1_ethtool_ops; adapter->bd_number = cards_found; - adapter->pci_using_64 = pci_using_64; /* setup the private structure */ err = atl1_sw_init(adapter); @@ -2297,9 +2302,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, */ /* netdev->features |= NETIF_F_TSO; */ - if (pci_using_64) - netdev->features |= NETIF_F_HIGHDMA; - netdev->features |= NETIF_F_LLTX; /* diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 82d78ff8399b..62f09e59d9c4 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c @@ -171,7 +171,6 @@ static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,}; struct net_local { spinlock_t lock; struct net_device *next_module; - struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; @@ -205,7 +204,6 @@ static irqreturn_t atp_interrupt(int irq, void *dev_id); static void net_rx(struct net_device *dev); static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); static int net_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); static void set_rx_mode_8002(struct net_device *dev); static void set_rx_mode_8012(struct net_device *dev); static void tx_timeout(struct net_device *dev); @@ -250,6 +248,7 @@ static int __init atp_probe1(long ioaddr) struct net_local *lp; int saved_ctrl_reg, status, i; int res; + DECLARE_MAC_BUF(mac); outb(0xff, ioaddr + PAR_DATA); /* Save the original value of the Control register, in case we guessed @@ -299,7 +298,6 @@ static int __init atp_probe1(long ioaddr) dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); /* Find the IRQ used by triggering an interrupt. */ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */ @@ -325,10 +323,9 @@ static int __init atp_probe1(long ioaddr) printk(KERN_INFO "%s", version); #endif - printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM " - "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr, - dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, " + "SAPROM %s.\n", + dev->name, dev->base_addr, dev->irq, print_mac(mac, dev->dev_addr)); /* Reset the ethernet hardware and activate the printer pass-through. */ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); @@ -349,7 +346,6 @@ static int __init atp_probe1(long ioaddr) dev->open = net_open; dev->stop = net_close; dev->hard_start_xmit = atp_send_packet; - dev->get_stats = net_get_stats; dev->set_multicast_list = lp->chip_type == RTL8002 ? 
&set_rx_mode_8002 : &set_rx_mode_8012; dev->tx_timeout = tx_timeout; @@ -539,18 +535,17 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad static void tx_timeout(struct net_device *dev) { - struct net_local *np = netdev_priv(dev); long ioaddr = dev->base_addr; printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name, inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem" : "IRQ conflict"); - np->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adapter. */ hardware_init(dev); dev->trans_start = jiffies; netif_wake_queue(dev); - np->stats.tx_errors++; + dev->stats.tx_errors++; } static int atp_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -630,7 +625,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) /* We acknowledged the normal Rx interrupt, so if the interrupt is still outstanding we must have a Rx error. */ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */ - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; /* Set to no-accept mode long enough to remove a packet. */ write_reg_high(ioaddr, CMR2, CMR2h_OFF); net_rx(dev); @@ -650,9 +645,9 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) and reinitialize the adapter. */ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); if (status & (ISR_TxErr<<3)) { - lp->stats.collisions++; + dev->stats.collisions++; if (++lp->re_tx > 15) { - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; hardware_init(dev); break; } @@ -661,7 +656,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit); } else { /* Finish up the transmit. */ - lp->stats.tx_packets++; + dev->stats.tx_packets++; lp->pac_cnt_in_tx_buf--; if ( lp->saved_tx_size) { trigger_send(ioaddr, lp->saved_tx_size); @@ -679,7 +674,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) "%ld jiffies status %02x CMR1 %02x.\n", dev->name, num_tx_since_rx, jiffies - dev->last_rx, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); - lp->stats.rx_missed_errors++; + dev->stats.rx_missed_errors++; hardware_init(dev); num_tx_since_rx = 0; break; @@ -736,13 +731,13 @@ static void atp_timed_checker(unsigned long data) struct net_local *lp = netdev_priv(atp_timed_dev); write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]); if (i == 2) - lp->stats.tx_errors++; + dev->stats.tx_errors++; else if (i == 3) - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; else if (i == 4) - lp->stats.collisions++; + dev->stats.collisions++; else - lp->stats.rx_errors++; + dev->stats.rx_errors++; } #endif } @@ -766,14 +761,14 @@ static void net_rx(struct net_device *dev) printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad, rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr); if ((rx_head.rx_status & 0x77) != 0x01) { - lp->stats.rx_errors++; - if (rx_head.rx_status & 0x0004) lp->stats.rx_frame_errors++; - else if (rx_head.rx_status & 0x0002) lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++; + else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++; if (net_debug > 3) printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n", dev->name, rx_head.rx_status); if (rx_head.rx_status & 0x0020) { - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE); write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); } else if (rx_head.rx_status & 0x0050) @@ -788,7 +783,7 @@ static void net_rx(struct net_device 
*dev) if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; goto done; } @@ -797,8 +792,8 @@ static void net_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } done: write_reg(ioaddr, CMR1, CMR1_NextPkt); @@ -850,15 +845,6 @@ net_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats * -net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - /* * Set or clear the multicast filter for this adapter. */ diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index e86b3691765b..b46c5d8a77bd 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -90,7 +90,6 @@ static int au1000_rx(struct net_device *); static irqreturn_t au1000_interrupt(int, void *); static void au1000_tx_timeout(struct net_device *); static void set_rx_mode(struct net_device *); -static struct net_device_stats *au1000_get_stats(struct net_device *); static int au1000_ioctl(struct net_device *, struct ifreq *, int); static int mdio_read(struct net_device *, int, int); static void mdio_write(struct net_device *, int, int, u16); @@ -772,7 +771,6 @@ static struct net_device * au1000_probe(int port_num) dev->open = au1000_open; dev->hard_start_xmit = au1000_tx; dev->stop = au1000_close; - dev->get_stats = au1000_get_stats; dev->set_multicast_list = &set_rx_mode; dev->do_ioctl = &au1000_ioctl; SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); @@ -1038,7 +1036,7 @@ static void __exit au1000_cleanup_module(void) static void update_tx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - struct net_device_stats *ps = &aup->stats; + struct net_device_stats *ps = &dev->stats; if (status & TX_FRAME_ABORTED) { if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { @@ -1094,7 +1092,7 @@ static void au1000_tx_ack(struct net_device *dev) static int au1000_tx(struct sk_buff *skb, struct net_device *dev) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - struct net_device_stats *ps = &aup->stats; + struct net_device_stats *ps = &dev->stats; volatile tx_dma_t *ptxd; u32 buff_stat; db_dest_t *pDB; @@ -1148,7 +1146,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) static inline void update_rx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - struct net_device_stats *ps = &aup->stats; + struct net_device_stats *ps = &dev->stats; ps->rx_packets++; if (status & RX_MCAST_FRAME) @@ -1201,7 +1199,7 @@ static int au1000_rx(struct net_device *dev) printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); - aup->stats.rx_dropped++; + dev->stats.rx_dropped++; continue; } skb_reserve(skb, 2); /* 16 byte IP header align */ @@ -1324,18 +1322,5 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); } -static struct net_device_stats *au1000_get_stats(struct net_device *dev) -{ - struct au1000_private *aup = (struct au1000_private *) dev->priv; - - if (au1000_debug > 4) - printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev); - - if (netif_device_present(dev)) { - return 
&aup->stats; - } - return 0; -} - module_init(au1000_init_module); module_exit(au1000_cleanup_module); diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h index 52fe00dd6d24..f3baeaa12854 100644 --- a/drivers/net/au1000_eth.h +++ b/drivers/net/au1000_eth.h @@ -115,6 +115,5 @@ struct au1000_private { u32 vaddr; /* virtual address of rx/tx buffers */ dma_addr_t dma_addr; /* dma address of rx/tx buffers */ - struct net_device_stats stats; spinlock_t lock; /* Serialise access to device */ }; diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 1d882360b34d..9fe0517cf893 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -24,6 +24,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> +#include <linux/eeprom_93cx6.h> #include <net/ax88796.h> @@ -580,9 +581,39 @@ static const struct ethtool_ops ax_ethtool_ops = { .set_settings = ax_set_settings, .nway_reset = ax_nway_reset, .get_link = ax_get_link, - .get_perm_addr = ethtool_op_get_perm_addr, }; +#ifdef CONFIG_AX88796_93CX6 +static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + eeprom->reg_data_in = reg & AX_MEMR_EEI; + eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */ + eeprom->reg_data_clock = reg & AX_MEMR_EECLK; + eeprom->reg_chip_select = reg & AX_MEMR_EECS; +} + +static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS); + + if (eeprom->reg_data_in) + reg |= AX_MEMR_EEI; + if (eeprom->reg_data_clock) + reg |= AX_MEMR_EECLK; + if (eeprom->reg_chip_select) + reg |= AX_MEMR_EECS; + + ei_outb(reg, ei_local->mem + AX_MEMR); + udelay(10); +} +#endif + /* setup code */ static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) @@ -641,6 +672,23 @@ static int ax_init_dev(struct net_device *dev, int first_init) memcpy(dev->dev_addr, SA_prom, 6); } +#ifdef CONFIG_AX88796_93CX6 + if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) { + unsigned char mac_addr[6]; + struct eeprom_93cx6 eeprom; + + eeprom.data = ei_local; + eeprom.register_read = ax_eeprom_register_read; + eeprom.register_write = ax_eeprom_register_write; + eeprom.width = PCI_EEPROM_WIDTH_93C56; + + eeprom_93cx6_multiread(&eeprom, 0, + (__le16 __force *)mac_addr, + sizeof(mac_addr) >> 1); + + memcpy(dev->dev_addr, mac_addr, 6); + } +#endif if (ax->plat->wordlength == 2) { /* We must set the 8390 for word mode. */ ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG); @@ -819,11 +867,12 @@ static int ax_probe(struct platform_device *pdev) } ei_status.mem = ioremap(res->start, size); - dev->base_addr = (long)ei_status.mem; + dev->base_addr = (unsigned long)ei_status.mem; if (ei_status.mem == NULL) { - dev_err(&pdev->dev, "Cannot ioremap area (%08zx,%08zx)\n", - res->start, res->end); + dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n", + (unsigned long long)res->start, + (unsigned long long)res->end); ret = -ENXIO; goto exit_req; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 96fb0ec905a7..3d247f3f4a3c 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -1,8 +1,11 @@ -/* b44.c: Broadcom 4400 device driver. +/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. * * Copyright (C) 2002 David S. 
Miller (davem@redhat.com) - * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) * Copyright (C) 2006 Broadcom Corporation. + * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de> * * Distribute under GPL. */ @@ -21,17 +24,18 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/dma-mapping.h> +#include <linux/ssb/ssb.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> + #include "b44.h" #define DRV_MODULE_NAME "b44" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.01" -#define DRV_MODULE_RELDATE "Jun 16, 2006" +#define DRV_MODULE_VERSION "2.0" #define B44_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ @@ -85,10 +89,10 @@ #define B44_ETHIPV4UDP_HLEN 42 static char version[] __devinitdata = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n"; -MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller"); -MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver"); +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller"); +MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -96,18 +100,28 @@ static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ module_param(b44_debug, int, 0); MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); -static struct pci_device_id b44_pci_tbl[] = { - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { } /* terminate list with empty entry */ -}; +#ifdef CONFIG_B44_PCI +static const struct pci_device_id b44_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, + { 0 } /* terminate list with empty entry */ +}; MODULE_DEVICE_TABLE(pci, b44_pci_tbl); +static struct pci_driver b44_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_pci_tbl, +}; +#endif /* CONFIG_B44_PCI */ + +static const struct ssb_device_id b44_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); + static void b44_halt(struct b44 *); static void b44_init_rings(struct b44 *); @@ -119,6 +133,7 @@ static void b44_init_hw(struct b44 *, int); static int dma_desc_align_mask; static int dma_desc_sync_size; +static int instance; static const char b44_gstrings[][ETH_GSTRING_LEN] = { #define _B44(x...) 
# x, @@ -126,35 +141,35 @@ B44_STAT_REG_DECLARE #undef _B44 }; -static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, - dma_addr_t dma_base, - unsigned long offset, - enum dma_data_direction dir) +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) { - dma_sync_single_range_for_device(&pdev->dev, dma_base, - offset & dma_desc_align_mask, - dma_desc_sync_size, dir); + dma_sync_single_range_for_device(sdev->dev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); } -static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev, - dma_addr_t dma_base, - unsigned long offset, - enum dma_data_direction dir) +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) { - dma_sync_single_range_for_cpu(&pdev->dev, dma_base, - offset & dma_desc_align_mask, - dma_desc_sync_size, dir); + dma_sync_single_range_for_cpu(sdev->dev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); } static inline unsigned long br32(const struct b44 *bp, unsigned long reg) { - return readl(bp->regs + reg); + return ssb_read32(bp->sdev, reg); } static inline void bw32(const struct b44 *bp, unsigned long reg, unsigned long val) { - writel(val, bp->regs + reg); + ssb_write32(bp->sdev, reg, val); } static int b44_wait_bit(struct b44 *bp, unsigned long reg, @@ -182,117 +197,29 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg, return 0; } -/* Sonics SiliconBackplane support routines. ROFL, you should see all the - * buzz words used on this company's website :-) - * - * All of these routines must be invoked with bp->lock held and - * interrupts disabled. - */ - -#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */ -#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */ - -static u32 ssb_get_core_rev(struct b44 *bp) -{ - return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK); -} - -static u32 ssb_pci_setup(struct b44 *bp, u32 cores) -{ - u32 bar_orig, pci_rev, val; - - pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig); - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR); - pci_rev = ssb_get_core_rev(bp); - - val = br32(bp, B44_SBINTVEC); - val |= cores; - bw32(bp, B44_SBINTVEC, val); - - val = br32(bp, SSB_PCI_TRANS_2); - val |= SSB_PCI_PREF | SSB_PCI_BURST; - bw32(bp, SSB_PCI_TRANS_2, val); - - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig); - - return pci_rev; -} - -static void ssb_core_disable(struct b44 *bp) -{ - if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET) - return; - - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK)); - b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0); - b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1); - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK | - SBTMSLOW_REJECT | SBTMSLOW_RESET)); - br32(bp, B44_SBTMSLOW); - udelay(1); - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET)); - br32(bp, B44_SBTMSLOW); - udelay(1); -} - -static void ssb_core_reset(struct b44 *bp) +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) { u32 val; - ssb_core_disable(bp); - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC)); - br32(bp, B44_SBTMSLOW); - udelay(1); - - /* Clear SERR if set, this is a hw bug workaround. 
*/ - if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR) - bw32(bp, B44_SBTMSHIGH, 0); - - val = br32(bp, B44_SBIMSTATE); - if (val & (SBIMSTATE_IBE | SBIMSTATE_TO)) - bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO)); - - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC)); - br32(bp, B44_SBTMSLOW); - udelay(1); - - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK)); - br32(bp, B44_SBTMSLOW); - udelay(1); -} + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | + (index << CAM_CTRL_INDEX_SHIFT))); -static int ssb_core_unit(struct b44 *bp) -{ -#if 0 - u32 val = br32(bp, B44_SBADMATCH0); - u32 base; + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); - type = val & SBADMATCH0_TYPE_MASK; - switch (type) { - case 0: - base = val & SBADMATCH0_BS0_MASK; - break; + val = br32(bp, B44_CAM_DATA_LO); - case 1: - base = val & SBADMATCH0_BS1_MASK; - break; + data[2] = (val >> 24) & 0xFF; + data[3] = (val >> 16) & 0xFF; + data[4] = (val >> 8) & 0xFF; + data[5] = (val >> 0) & 0xFF; - case 2: - default: - base = val & SBADMATCH0_BS2_MASK; - break; - }; -#endif - return 0; -} + val = br32(bp, B44_CAM_DATA_HI); -static int ssb_is_core_up(struct b44 *bp) -{ - return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK)) - == SBTMSLOW_CLOCK); + data[0] = (val >> 8) & 0xFF; + data[1] = (val >> 0) & 0xFF; } -static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) { u32 val; @@ -328,14 +255,14 @@ static void b44_enable_ints(struct b44 *bp) bw32(bp, B44_IMASK, bp->imask); } -static int b44_readphy(struct b44 *bp, int reg, u32 *val) +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) { int err; bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | (reg << MDIO_DATA_RA_SHIFT) | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); @@ -344,29 +271,40 @@ static int b44_readphy(struct b44 *bp, int reg, u32 *val) return err; } -static int b44_writephy(struct b44 *bp, int reg, u32 val) +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) { bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | (reg << MDIO_DATA_RA_SHIFT) | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | (val & MDIO_DATA_DATA))); return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); } +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_readphy(bp, bp->phy_addr, reg, val); +} + +static inline int b44_writephy(struct b44 *bp, int reg, u32 val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_writephy(bp, bp->phy_addr, reg, val); +} + /* miilib interface */ -/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional - * due to code existing before miilib use was added to this driver. - * Someone should remove this artificial driver limitation in - * b44_{read,write}phy. bp->phy_addr itself is fine (and needed). 
- */ static int b44_mii_read(struct net_device *dev, int phy_id, int location) { u32 val; struct b44 *bp = netdev_priv(dev); - int rc = b44_readphy(bp, location, &val); + int rc = __b44_readphy(bp, phy_id, location, &val); if (rc) return 0xffffffff; return val; @@ -376,7 +314,7 @@ static void b44_mii_write(struct net_device *dev, int phy_id, int location, int val) { struct b44 *bp = netdev_priv(dev); - b44_writephy(bp, location, val); + __b44_writephy(bp, phy_id, location, val); } static int b44_phy_reset(struct b44 *bp) @@ -384,6 +322,8 @@ static int b44_phy_reset(struct b44 *bp) u32 val; int err; + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; err = b44_writephy(bp, MII_BMCR, BMCR_RESET); if (err) return err; @@ -442,11 +382,52 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) __b44_set_flow_ctrl(bp, pause_enab); } +#ifdef SSB_DRIVER_MIPS +extern char *nvram_get(char *name); +static void b44_wap54g10_workaround(struct b44 *bp) +{ + const char *str; + u32 val; + int err; + + /* + * workaround for bad hardware design in Linksys WAP54G v1.0 + * see https://dev.openwrt.org/ticket/146 + * check and reset bit "isolate" + */ + str = nvram_get("boardnum"); + if (!str) + return; + if (simple_strtoul(str, NULL, 0) == 2) { + err = __b44_readphy(bp, 0, MII_BMCR, &val); + if (err) + goto error; + if (!(val & BMCR_ISOLATE)) + return; + val &= ~BMCR_ISOLATE; + err = __b44_writephy(bp, 0, MII_BMCR, val); + if (err) + goto error; + } + return; +error: + printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n"); +} +#else +static inline void b44_wap54g10_workaround(struct b44 *bp) +{ +} +#endif + static int b44_setup_phy(struct b44 *bp) { u32 val; int err; + b44_wap54g10_workaround(bp); + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) goto out; if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, @@ -542,6 +523,19 @@ static void b44_check_phy(struct b44 *bp) { u32 bmsr, aux; + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { + bp->flags |= B44_FLAG_100_BASE_T; + bp->flags |= B44_FLAG_FULL_DUPLEX; + if (!netif_carrier_ok(bp->dev)) { + u32 val = br32(bp, B44_TX_CTRL); + val |= TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + netif_carrier_on(bp->dev); + b44_link_report(bp); + } + return; + } + if (!b44_readphy(bp, MII_BMSR, &bmsr) && !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && (bmsr != 0xffff)) { @@ -617,10 +611,10 @@ static void b44_tx(struct b44 *bp) BUG_ON(skb == NULL); - pci_unmap_single(bp->pdev, - pci_unmap_addr(rp, mapping), + dma_unmap_single(bp->sdev->dev, + rp->mapping, skb->len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); rp->skb = NULL; dev_kfree_skb_irq(skb); } @@ -657,9 +651,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) if (skb == NULL) return -ENOMEM; - mapping = pci_map_single(bp->pdev, skb->data, + mapping = dma_map_single(bp->sdev->dev, skb->data, RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); /* Hardware bug work-around, the chip is unable to do PCI DMA to/from anything above 1GB :-( */ @@ -667,18 +661,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { /* Sigh... 
*/ if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, mapping, + RX_PKT_BUF_SZ, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); if (skb == NULL) return -ENOMEM; - mapping = pci_map_single(bp->pdev, skb->data, + mapping = dma_map_single(bp->sdev->dev, skb->data, RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(mapping) || mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); dev_kfree_skb_any(skb); return -ENOMEM; } @@ -691,7 +686,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) rh->flags = 0; map->skb = skb; - pci_unmap_addr_set(map, mapping, mapping); + map->mapping = mapping; if (src_map != NULL) src_map->skb = NULL; @@ -705,9 +700,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset); if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, - dest_idx * sizeof(dp), - DMA_BIDIRECTIONAL); + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(dp), + DMA_BIDIRECTIONAL); return RX_PKT_BUF_SZ; } @@ -730,13 +725,12 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) rh = (struct rx_header *) src_map->skb->data; rh->len = 0; rh->flags = 0; - pci_unmap_addr_set(dest_map, mapping, - pci_unmap_addr(src_map, mapping)); + dest_map->mapping = src_map->mapping; if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma, - src_idx * sizeof(src_desc), - DMA_BIDIRECTIONAL); + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, + src_idx * sizeof(src_desc), + DMA_BIDIRECTIONAL); ctrl = src_desc->ctrl; if (dest_idx == (B44_RX_RING_SIZE - 1)) @@ -750,13 +744,13 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) src_map->skb = NULL; if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, - dest_idx * sizeof(dest_desc), - DMA_BIDIRECTIONAL); + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(dest_desc), + DMA_BIDIRECTIONAL); - pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr), - RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr), + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); } static int b44_rx(struct b44 *bp, int budget) @@ -772,13 +766,13 @@ static int b44_rx(struct b44 *bp, int budget) while (cons != prod && budget > 0) { struct ring_info *rp = &bp->rx_buffers[cons]; struct sk_buff *skb = rp->skb; - dma_addr_t map = pci_unmap_addr(rp, mapping); + dma_addr_t map = rp->mapping; struct rx_header *rh; u16 len; - pci_dma_sync_single_for_cpu(bp->pdev, map, + dma_sync_single_for_cpu(bp->sdev->dev, map, RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rh = (struct rx_header *) skb->data; len = le16_to_cpu(rh->len); if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || @@ -810,8 +804,8 @@ static int b44_rx(struct b44 *bp, int budget) skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); if (skb_size < 0) goto drop_it; - pci_unmap_single(bp->pdev, map, - skb_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, map, + skb_size, 
DMA_FROM_DEVICE); /* Leave out rx_header */ skb_put(skb, len + RX_PKT_OFFSET); skb_pull(skb, RX_PKT_OFFSET); @@ -848,10 +842,11 @@ static int b44_rx(struct b44 *bp, int budget) return received; } -static int b44_poll(struct net_device *netdev, int *budget) +static int b44_poll(struct napi_struct *napi, int budget) { - struct b44 *bp = netdev_priv(netdev); - int done; + struct b44 *bp = container_of(napi, struct b44, napi); + struct net_device *netdev = bp->dev; + int work_done; spin_lock_irq(&bp->lock); @@ -862,22 +857,9 @@ static int b44_poll(struct net_device *netdev, int *budget) } spin_unlock_irq(&bp->lock); - done = 1; - if (bp->istat & ISTAT_RX) { - int orig_budget = *budget; - int work_done; - - if (orig_budget > netdev->quota) - orig_budget = netdev->quota; - - work_done = b44_rx(bp, orig_budget); - - *budget -= work_done; - netdev->quota -= work_done; - - if (work_done >= orig_budget) - done = 0; - } + work_done = 0; + if (bp->istat & ISTAT_RX) + work_done += b44_rx(bp, budget); if (bp->istat & ISTAT_ERRORS) { unsigned long flags; @@ -888,15 +870,15 @@ static int b44_poll(struct net_device *netdev, int *budget) b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); netif_wake_queue(bp->dev); spin_unlock_irqrestore(&bp->lock, flags); - done = 1; + work_done = 0; } - if (done) { - netif_rx_complete(netdev); + if (work_done < budget) { + netif_rx_complete(netdev, napi); b44_enable_ints(bp); } - return (done ? 0 : 1); + return work_done; } static irqreturn_t b44_interrupt(int irq, void *dev_id) @@ -924,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id) goto irq_ack; } - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &bp->napi)) { /* NOTE: These writes are posted by the readback of * the ISTAT register below. */ bp->istat = istat; __b44_disable_ints(bp); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &bp->napi); } else { printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", dev->name); @@ -982,24 +964,25 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) goto err_out; } - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { struct sk_buff *bounce_skb; /* Chip can't handle DMA to/from >1GB, use bounce buffer */ if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); + dma_unmap_single(bp->sdev->dev, mapping, len, + DMA_TO_DEVICE); bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA); if (!bounce_skb) goto err_out; - mapping = pci_map_single(bp->pdev, bounce_skb->data, - len, PCI_DMA_TODEVICE); + mapping = dma_map_single(bp->sdev->dev, bounce_skb->data, + len, DMA_TO_DEVICE); if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, - len, PCI_DMA_TODEVICE); + dma_unmap_single(bp->sdev->dev, mapping, + len, DMA_TO_DEVICE); dev_kfree_skb_any(bounce_skb); goto err_out; } @@ -1011,7 +994,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) entry = bp->tx_prod; bp->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping); + bp->tx_buffers[entry].mapping = mapping; ctrl = (len & DESC_CTRL_LEN); ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; @@ -1022,9 +1005,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) bp->tx_ring[entry].addr = cpu_to_le32((u32) 
mapping+bp->dma_offset); if (bp->flags & B44_FLAG_TX_RING_HACK) - b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma, - entry * sizeof(bp->tx_ring[0]), - DMA_TO_DEVICE); + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, + entry * sizeof(bp->tx_ring[0]), + DMA_TO_DEVICE); entry = NEXT_TX(entry); @@ -1097,10 +1080,8 @@ static void b44_free_rings(struct b44 *bp) if (rp->skb == NULL) continue; - pci_unmap_single(bp->pdev, - pci_unmap_addr(rp, mapping), - RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); dev_kfree_skb_any(rp->skb); rp->skb = NULL; } @@ -1111,10 +1092,8 @@ static void b44_free_rings(struct b44 *bp) if (rp->skb == NULL) continue; - pci_unmap_single(bp->pdev, - pci_unmap_addr(rp, mapping), - rp->skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len, + DMA_TO_DEVICE); dev_kfree_skb_any(rp->skb); rp->skb = NULL; } @@ -1136,14 +1115,14 @@ static void b44_init_rings(struct b44 *bp) memset(bp->tx_ring, 0, B44_TX_RING_BYTES); if (bp->flags & B44_FLAG_RX_RING_HACK) - dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma, - DMA_TABLE_BYTES, - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); if (bp->flags & B44_FLAG_TX_RING_HACK) - dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma, - DMA_TABLE_BYTES, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); for (i = 0; i < bp->rx_pending; i++) { if (b44_alloc_rx_skb(bp, -1, i) < 0) @@ -1163,24 +1142,24 @@ static void b44_free_consistent(struct b44 *bp) bp->tx_buffers = NULL; if (bp->rx_ring) { if (bp->flags & B44_FLAG_RX_RING_HACK) { - dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); kfree(bp->rx_ring); } else - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES, bp->rx_ring, bp->rx_ring_dma); bp->rx_ring = NULL; bp->flags &= ~B44_FLAG_RX_RING_HACK; } if (bp->tx_ring) { if (bp->flags & B44_FLAG_TX_RING_HACK) { - dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); kfree(bp->tx_ring); } else - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES, bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; bp->flags &= ~B44_FLAG_TX_RING_HACK; @@ -1191,22 +1170,22 @@ static void b44_free_consistent(struct b44 *bp) * Must not be invoked with interrupt sources disabled and * the hardware shutdown down. Can sleep. 
*/ -static int b44_alloc_consistent(struct b44 *bp) +static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) { int size; size = B44_RX_RING_SIZE * sizeof(struct ring_info); - bp->rx_buffers = kzalloc(size, GFP_KERNEL); + bp->rx_buffers = kzalloc(size, gfp); if (!bp->rx_buffers) goto out_err; size = B44_TX_RING_SIZE * sizeof(struct ring_info); - bp->tx_buffers = kzalloc(size, GFP_KERNEL); + bp->tx_buffers = kzalloc(size, gfp); if (!bp->tx_buffers) goto out_err; size = DMA_TABLE_BYTES; - bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); + bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp); if (!bp->rx_ring) { /* Allocation may have failed due to pci_alloc_consistent insisting on use of GFP_DMA, which is more restrictive @@ -1214,13 +1193,13 @@ static int b44_alloc_consistent(struct b44 *bp) struct dma_desc *rx_ring; dma_addr_t rx_ring_dma; - rx_ring = kzalloc(size, GFP_KERNEL); + rx_ring = kzalloc(size, gfp); if (!rx_ring) goto out_err; - rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); if (dma_mapping_error(rx_ring_dma) || rx_ring_dma + size > DMA_30BIT_MASK) { @@ -1233,21 +1212,21 @@ static int b44_alloc_consistent(struct b44 *bp) bp->flags |= B44_FLAG_RX_RING_HACK; } - bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); + bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp); if (!bp->tx_ring) { - /* Allocation may have failed due to pci_alloc_consistent + /* Allocation may have failed due to dma_alloc_coherent insisting on use of GFP_DMA, which is more restrictive than necessary... */ struct dma_desc *tx_ring; dma_addr_t tx_ring_dma; - tx_ring = kzalloc(size, GFP_KERNEL); + tx_ring = kzalloc(size, gfp); if (!tx_ring) goto out_err; - tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); if (dma_mapping_error(tx_ring_dma) || tx_ring_dma + size > DMA_30BIT_MASK) { @@ -1282,7 +1261,9 @@ static void b44_clear_stats(struct b44 *bp) /* bp->lock is held. */ static void b44_chip_reset(struct b44 *bp) { - if (ssb_is_core_up(bp)) { + struct ssb_device *sdev = bp->sdev; + + if (ssb_device_is_enabled(bp->sdev)) { bw32(bp, B44_RCV_LAZY, 0); bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); @@ -1294,19 +1275,25 @@ static void b44_chip_reset(struct b44 *bp) } bw32(bp, B44_DMARX_CTRL, 0); bp->rx_prod = bp->rx_cons = 0; - } else { - ssb_pci_setup(bp, (bp->core_unit == 0 ? - SBINTVEC_ENET0 : - SBINTVEC_ENET1)); - } - - ssb_core_reset(bp); + } else + ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev); + ssb_device_enable(bp->sdev, 0); b44_clear_stats(bp); - /* Make PHY accessible. 
*/ - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | - (0x0d & MDIO_CTRL_MAXF_MASK))); + switch (sdev->bus->bustype) { + case SSB_BUSTYPE_SSB: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO) + & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (0x0d & MDIO_CTRL_MAXF_MASK))); + break; + } + br32(bp, B44_MDIO_CTRL); if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { @@ -1349,6 +1336,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p) { struct b44 *bp = netdev_priv(dev); struct sockaddr *addr = p; + u32 val; if (netif_running(dev)) return -EBUSY; @@ -1359,7 +1347,11 @@ static int b44_set_mac_addr(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); spin_lock_irq(&bp->lock); - __b44_set_mac_addr(bp); + + val = br32(bp, B44_RXCONFIG); + if (!(val & RXCONFIG_CAM_ABSENT)) + __b44_set_mac_addr(bp); + spin_unlock_irq(&bp->lock); return 0; @@ -1416,10 +1408,12 @@ static int b44_open(struct net_device *dev) struct b44 *bp = netdev_priv(dev); int err; - err = b44_alloc_consistent(bp); + err = b44_alloc_consistent(bp, GFP_KERNEL); if (err) goto out; + napi_enable(&bp->napi); + b44_init_rings(bp); b44_init_hw(bp, B44_FULL_RESET); @@ -1427,6 +1421,7 @@ static int b44_open(struct net_device *dev) err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(err < 0)) { + napi_disable(&bp->napi); b44_chip_reset(bp); b44_free_rings(bp); b44_free_consistent(bp); @@ -1445,18 +1440,6 @@ out: return err; } -#if 0 -/*static*/ void b44_dump_state(struct b44 *bp) -{ - u32 val32, val32_2, val32_3, val32_4, val32_5; - u16 val16; - - pci_read_config_word(bp->pdev, PCI_STATUS, &val16); - printk("DEBUG: PCI status [%04x] \n", val16); - -} -#endif - #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling receive - used by netconsole and other diagnostic tools @@ -1519,14 +1502,13 @@ static void b44_setup_pseudo_magicp(struct b44 *bp) u8 *pwol_pattern; u8 pwol_mask[B44_PMASK_SIZE]; - pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL); + pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); if (!pwol_pattern) { printk(KERN_ERR PFX "Memory not available for WOL\n"); return; } /* Ipv4 magic packet pattern - pattern 0.*/ - memset(pwol_pattern, 0, B44_PATTERN_SIZE); memset(pwol_mask, 0, B44_PMASK_SIZE); plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, B44_ETHIPV4UDP_HLEN); @@ -1568,10 +1550,24 @@ static void b44_setup_pseudo_magicp(struct b44 *bp) } +#ifdef CONFIG_B44_PCI +static void b44_setup_wol_pci(struct b44 *bp) +{ + u16 val; + + if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { + bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); + pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); + pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); + } +} +#else +static inline void b44_setup_wol_pci(struct b44 *bp) { } +#endif /* CONFIG_B44_PCI */ + static void b44_setup_wol(struct b44 *bp) { u32 val; - u16 pmval; bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); @@ -1595,13 +1591,7 @@ static void b44_setup_wol(struct b44 *bp) } else { b44_setup_pseudo_magicp(bp); } - - val = br32(bp, B44_SBTMSLOW); - bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE); - - pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval); - pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE); - + b44_setup_wol_pci(bp); } static int b44_close(struct net_device *dev) @@ -1610,15 +1600,12 @@ static int 
b44_close(struct net_device *dev) netif_stop_queue(dev); - netif_poll_disable(dev); + napi_disable(&bp->napi); del_timer_sync(&bp->timer); spin_lock_irq(&bp->lock); -#if 0 - b44_dump_state(bp); -#endif b44_halt(bp); b44_free_rings(bp); netif_carrier_off(dev); @@ -1627,8 +1614,6 @@ static int b44_close(struct net_device *dev) free_irq(dev->irq, dev); - netif_poll_enable(dev); - if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_init_hw(bp, B44_PARTIAL_RESET); b44_setup_wol(bp); @@ -1701,7 +1686,7 @@ static void __b44_set_rx_mode(struct net_device *dev) val = br32(bp, B44_RXCONFIG); val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); - if (dev->flags & IFF_PROMISC) { + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { val |= RXCONFIG_PROMISC; bw32(bp, B44_RXCONFIG, val); } else { @@ -1749,11 +1734,19 @@ static void b44_set_msglevel(struct net_device *dev, u32 value) static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { struct b44 *bp = netdev_priv(dev); - struct pci_dev *pci_dev = bp->pdev; + struct ssb_bus *bus = bp->sdev->bus; - strcpy (info->driver, DRV_MODULE_NAME); - strcpy (info->version, DRV_MODULE_VERSION); - strcpy (info->bus_info, pci_name(pci_dev)); + strncpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strncpy(info->version, DRV_MODULE_VERSION, sizeof(info->driver)); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + strncpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SSB: + strncpy(info->bus_info, "SSB", sizeof(info->bus_info)); + break; + } } static int b44_nway_reset(struct net_device *dev) @@ -1969,9 +1962,14 @@ static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int b44_get_stats_count(struct net_device *dev) +static int b44_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(b44_gstrings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(b44_gstrings); + default: + return -EOPNOTSUPP; + } } static void b44_get_ethtool_stats(struct net_device *dev, @@ -2032,9 +2030,8 @@ static const struct ethtool_ops b44_ethtool_ops = { .get_msglevel = b44_get_msglevel, .set_msglevel = b44_set_msglevel, .get_strings = b44_get_strings, - .get_stats_count = b44_get_stats_count, + .get_sset_count = b44_get_sset_count, .get_ethtool_stats = b44_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -2053,33 +2050,23 @@ out: return err; } -/* Read 128-bytes of EEPROM. 
*/ -static int b44_read_eeprom(struct b44 *bp, u8 *data) -{ - long i; - __le16 *ptr = (__le16 *) data; - - for (i = 0; i < 128; i += 2) - ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i)); - - return 0; -} - static int __devinit b44_get_invariants(struct b44 *bp) { - u8 eeprom[128]; - int err; + struct ssb_device *sdev = bp->sdev; + int err = 0; + u8 *addr; - err = b44_read_eeprom(bp, &eeprom[0]); - if (err) - goto out; + bp->dma_offset = ssb_dma_translation(sdev); - bp->dev->dev_addr[0] = eeprom[79]; - bp->dev->dev_addr[1] = eeprom[78]; - bp->dev->dev_addr[2] = eeprom[81]; - bp->dev->dev_addr[3] = eeprom[80]; - bp->dev->dev_addr[4] = eeprom[83]; - bp->dev->dev_addr[5] = eeprom[82]; + if (sdev->bus->bustype == SSB_BUSTYPE_SSB && + instance > 1) { + addr = sdev->bus->sprom.r1.et1mac; + bp->phy_addr = sdev->bus->sprom.r1.et1phyaddr; + } else { + addr = sdev->bus->sprom.r1.et0mac; + bp->phy_addr = sdev->bus->sprom.r1.et0phyaddr; + } + memcpy(bp->dev->dev_addr, addr, 6); if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n"); @@ -2088,103 +2075,53 @@ static int __devinit b44_get_invariants(struct b44 *bp) memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); - bp->phy_addr = eeprom[90] & 0x1f; - bp->imask = IMASK_DEF; - bp->core_unit = ssb_core_unit(bp); - bp->dma_offset = SB_PCI_DMA; - /* XXX - really required? bp->flags |= B44_FLAG_BUGGY_TXPTR; - */ + */ - if (ssb_get_core_rev(bp) >= 7) - bp->flags |= B44_FLAG_B0_ANDLATER; + if (bp->sdev->id.revision >= 7) + bp->flags |= B44_FLAG_B0_ANDLATER; -out: return err; } -static int __devinit b44_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __devinit b44_init_one(struct ssb_device *sdev, + const struct ssb_device_id *ent) { static int b44_version_printed = 0; - unsigned long b44reg_base, b44reg_len; struct net_device *dev; struct b44 *bp; - int err, i; + int err; + DECLARE_MAC_BUF(mac); + + instance++; if (b44_version_printed++ == 0) printk(KERN_INFO "%s", version); - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, " - "aborting.\n"); - return err; - } - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, - "Cannot find proper PCI device " - "base address, aborting.\n"); - err = -ENODEV; - goto err_out_disable_pdev; - } - - err = pci_request_regions(pdev, DRV_MODULE_NAME); - if (err) { - dev_err(&pdev->dev, - "Cannot obtain PCI resources, aborting.\n"); - goto err_out_disable_pdev; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK); - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n"); - goto err_out_free_res; - } - - err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK); - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n"); - goto err_out_free_res; - } - - b44reg_base = pci_resource_start(pdev, 0); - b44reg_len = pci_resource_len(pdev, 0); dev = alloc_etherdev(sizeof(*bp)); if (!dev) { - dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n"); + dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n"); err = -ENOMEM; - goto err_out_free_res; + goto out; } - SET_MODULE_OWNER(dev); - SET_NETDEV_DEV(dev,&pdev->dev); + SET_NETDEV_DEV(dev, sdev->dev); /* No interesting netdevice features in this card... 
*/ dev->features |= 0; bp = netdev_priv(dev); - bp->pdev = pdev; + bp->sdev = sdev; bp->dev = dev; bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); spin_lock_init(&bp->lock); - bp->regs = ioremap(b44reg_base, b44reg_len); - if (bp->regs == 0UL) { - dev_err(&pdev->dev, "Cannot map device registers, aborting.\n"); - err = -ENOMEM; - goto err_out_free_dev; - } - bp->rx_pending = B44_DEF_RX_RING_PENDING; bp->tx_pending = B44_DEF_TX_RING_PENDING; @@ -2196,23 +2133,34 @@ static int __devinit b44_init_one(struct pci_dev *pdev, dev->set_mac_address = b44_set_mac_addr; dev->do_ioctl = b44_ioctl; dev->tx_timeout = b44_tx_timeout; - dev->poll = b44_poll; - dev->weight = 64; + netif_napi_add(dev, &bp->napi, b44_poll, 64); dev->watchdog_timeo = B44_TX_TIMEOUT; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = b44_poll_controller; #endif dev->change_mtu = b44_change_mtu; - dev->irq = pdev->irq; + dev->irq = sdev->irq; SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); netif_carrier_off(dev); + err = ssb_bus_powerup(sdev->bus, 0); + if (err) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + goto err_out_free_dev; + } + err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK); + if (err) { + dev_err(sdev->dev, + "Required 30BIT DMA mask unsupported by the system.\n"); + goto err_out_powerdown; + } err = b44_get_invariants(bp); if (err) { - dev_err(&pdev->dev, + dev_err(sdev->dev, "Problem fetching invariants of chip, aborting.\n"); - goto err_out_iounmap; + goto err_out_powerdown; } bp->mii_if.dev = dev; @@ -2231,61 +2179,49 @@ static int __devinit b44_init_one(struct pci_dev *pdev, err = register_netdev(dev); if (err) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_iounmap; + dev_err(sdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_powerdown; } - pci_set_drvdata(pdev, dev); - - pci_save_state(bp->pdev); + ssb_set_drvdata(sdev, dev); /* Chip reset provides power to the b44 MAC & PCI cores, which * is necessary for MAC register access. */ b44_chip_reset(bp); - printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? 
'\n' : ':'); + printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; -err_out_iounmap: - iounmap(bp->regs); +err_out_powerdown: + ssb_bus_may_powerdown(sdev->bus); err_out_free_dev: free_netdev(dev); -err_out_free_res: - pci_release_regions(pdev); - -err_out_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); +out: return err; } -static void __devexit b44_remove_one(struct pci_dev *pdev) +static void __devexit b44_remove_one(struct ssb_device *sdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct b44 *bp = netdev_priv(dev); + struct net_device *dev = ssb_get_drvdata(sdev); unregister_netdev(dev); - iounmap(bp->regs); + ssb_bus_may_powerdown(sdev->bus); free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + ssb_set_drvdata(sdev, NULL); } -static int b44_suspend(struct pci_dev *pdev, pm_message_t state) +static int b44_suspend(struct ssb_device *sdev, pm_message_t state) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = ssb_get_drvdata(sdev); struct b44 *bp = netdev_priv(dev); - if (!netif_running(dev)) - return 0; + if (!netif_running(dev)) + return 0; del_timer_sync(&bp->timer); @@ -2303,33 +2239,29 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state) b44_init_hw(bp, B44_PARTIAL_RESET); b44_setup_wol(bp); } - pci_disable_device(pdev); + return 0; } -static int b44_resume(struct pci_dev *pdev) +static int b44_resume(struct ssb_device *sdev) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = ssb_get_drvdata(sdev); struct b44 *bp = netdev_priv(dev); int rc = 0; - pci_restore_state(pdev); - rc = pci_enable_device(pdev); + rc = ssb_bus_powerup(sdev->bus, 0); if (rc) { - printk(KERN_ERR PFX "%s: pci_enable_device failed\n", - dev->name); + dev_err(sdev->dev, + "Failed to powerup the bus\n"); return rc; } - pci_set_master(pdev); - if (!netif_running(dev)) return 0; rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); if (rc) { printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name); - pci_disable_device(pdev); return rc; } @@ -2348,29 +2280,53 @@ static int b44_resume(struct pci_dev *pdev) return 0; } -static struct pci_driver b44_driver = { +static struct ssb_driver b44_ssb_driver = { .name = DRV_MODULE_NAME, - .id_table = b44_pci_tbl, + .id_table = b44_ssb_tbl, .probe = b44_init_one, .remove = __devexit_p(b44_remove_one), - .suspend = b44_suspend, - .resume = b44_resume, + .suspend = b44_suspend, + .resume = b44_resume, }; +static inline int b44_pci_init(void) +{ + int err = 0; +#ifdef CONFIG_B44_PCI + err = ssb_pcihost_register(&b44_pci_driver); +#endif + return err; +} + +static inline void b44_pci_exit(void) +{ +#ifdef CONFIG_B44_PCI + ssb_pcihost_unregister(&b44_pci_driver); +#endif +} + static int __init b44_init(void) { unsigned int dma_desc_align_size = dma_get_cache_alignment(); + int err; /* Setup paramaters for syncing RX/TX DMA descriptors */ dma_desc_align_mask = ~(dma_desc_align_size - 1); dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); - return pci_register_driver(&b44_driver); + err = b44_pci_init(); + if (err) + return err; + err = ssb_driver_register(&b44_ssb_driver); + if (err) + b44_pci_exit(); + return err; } static void __exit b44_cleanup(void) { - pci_unregister_driver(&b44_driver); + ssb_driver_unregister(&b44_ssb_driver); + b44_pci_exit(); } module_init(b44_init); diff --git 
a/drivers/net/b44.h b/drivers/net/b44.h index e537e63f292e..7db0c84a7950 100644 --- a/drivers/net/b44.h +++ b/drivers/net/b44.h @@ -129,6 +129,7 @@ #define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ #define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ #define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */ #define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ #define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ #define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ @@ -227,76 +228,6 @@ #define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ #define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ -/* Silicon backplane register definitions */ -#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */ -#define SBIMSTATE_PC 0x0000000f /* Pipe Count */ -#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ -#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ -#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */ -#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */ -#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */ -#define SBIMSTATE_IBE 0x00020000 /* In Band Error */ -#define SBIMSTATE_TO 0x00040000 /* Timeout */ -#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */ -#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ -#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ -#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ -#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ -#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */ -#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ -#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ -#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */ -#define SBTMSLOW_RESET 0x00000001 /* Reset */ -#define SBTMSLOW_REJECT 0x00000002 /* Reject */ -#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */ -#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ -#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */ -#define SBTMSLOW_BE 0x80000000 /* BIST Enable */ -#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */ -#define SBTMSHIGH_SERR 0x00000001 /* S-error */ -#define SBTMSHIGH_INT 0x00000002 /* Interrupt */ -#define SBTMSHIGH_BUSY 0x00000004 /* Busy */ -#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */ -#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */ -#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */ -#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */ -#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */ -#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */ -#define SBIDHIGH_CC_SHIFT 4 -#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */ -#define SBIDHIGH_VC_SHIFT 16 - -/* SSB PCI config space registers. */ -#define SSB_PMCSR 0x44 -#define SSB_PE 0x100 -#define SSB_BAR0_WIN 0x80 -#define SSB_BAR1_WIN 0x84 -#define SSB_SPROM_CONTROL 0x88 -#define SSB_BAR1_CONTROL 0x8c - -/* SSB core and host control registers. 
*/ -#define SSB_CONTROL 0x0000UL -#define SSB_ARBCONTROL 0x0010UL -#define SSB_ISTAT 0x0020UL -#define SSB_IMASK 0x0024UL -#define SSB_MBOX 0x0028UL -#define SSB_BCAST_ADDR 0x0050UL -#define SSB_BCAST_DATA 0x0054UL -#define SSB_PCI_TRANS_0 0x0100UL -#define SSB_PCI_TRANS_1 0x0104UL -#define SSB_PCI_TRANS_2 0x0108UL -#define SSB_SPROM 0x0800UL - -#define SSB_PCI_MEM 0x00000000 -#define SSB_PCI_IO 0x00000001 -#define SSB_PCI_CFG0 0x00000002 -#define SSB_PCI_CFG1 0x00000003 -#define SSB_PCI_PREF 0x00000004 -#define SSB_PCI_BURST 0x00000008 -#define SSB_PCI_MASK0 0xfc000000 -#define SSB_PCI_MASK1 0xfc000000 -#define SSB_PCI_MASK2 0xc0000000 - /* 4400 PHY registers */ #define B44_MII_AUXCTRL 24 /* Auxiliary Control */ #define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ @@ -346,10 +277,12 @@ struct rx_header { struct ring_info { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping); + dma_addr_t mapping; }; #define B44_MCAST_TABLE_SIZE 32 +#define B44_PHY_ADDR_NO_PHY 30 +#define B44_MDC_RATIO 5000000 #define B44_STAT_REG_DECLARE \ _B44(tx_good_octets) \ @@ -410,6 +343,8 @@ B44_STAT_REG_DECLARE #undef _B44 }; +struct ssb_device; + struct b44 { spinlock_t lock; @@ -423,6 +358,8 @@ struct b44 { struct ring_info *rx_buffers; struct ring_info *tx_buffers; + struct napi_struct napi; + u32 dma_offset; u32 flags; #define B44_FLAG_B0_ANDLATER 0x00000001 @@ -450,8 +387,7 @@ struct b44 { struct net_device_stats stats; struct b44_hw_stats hw_stats; - void __iomem *regs; - struct pci_dev *pdev; + struct ssb_device *sdev; struct net_device *dev; dma_addr_t rx_ring_dma, tx_ring_dma; @@ -459,7 +395,6 @@ struct b44 { u32 rx_pending; u32 tx_pending; u8 phy_addr; - u8 core_unit; struct mii_if_info mii_if; }; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c new file mode 100644 index 000000000000..53fe7ded5d50 --- /dev/null +++ b/drivers/net/bfin_mac.c @@ -0,0 +1,1081 @@ +/* + * File: drivers/net/bfin_mac.c + * Based on: + * Maintainer: + * Bryan Wu <bryan.wu@analog.com> + * + * Original author: + * Luke Yang <luke.yang@analog.com> + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program ; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/crc32.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/platform_device.h> + +#include <asm/dma.h> +#include <linux/dma-mapping.h> + +#include <asm/blackfin.h> +#include <asm/cacheflush.h> +#include <asm/portmux.h> + +#include "bfin_mac.h" + +#define DRV_NAME "bfin_mac" +#define DRV_VERSION "1.1" +#define DRV_AUTHOR "Bryan Wu, Luke Yang" +#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver" + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(DRV_DESC); + +#if defined(CONFIG_BFIN_MAC_USE_L1) +# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size) +# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr) +#else +# define bfin_mac_alloc(dma_handle, size) \ + dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL) +# define bfin_mac_free(dma_handle, ptr) \ + dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle) +#endif + +#define PKT_BUF_SZ 1580 + +#define MAX_TIMEOUT_CNT 500 + +/* pointers to maintain transmit list */ +static struct net_dma_desc_tx *tx_list_head; +static struct net_dma_desc_tx *tx_list_tail; +static struct net_dma_desc_rx *rx_list_head; +static struct net_dma_desc_rx *rx_list_tail; +static struct net_dma_desc_rx *current_rx_ptr; +static struct net_dma_desc_tx *current_tx_ptr; +static struct net_dma_desc_tx *tx_desc; +static struct net_dma_desc_rx *rx_desc; + +static void bf537mac_disable(void); +static void bf537mac_enable(void); + +static void desc_list_free(void) +{ + struct net_dma_desc_rx *r; + struct net_dma_desc_tx *t; + int i; +#if !defined(CONFIG_BFIN_MAC_USE_L1) + dma_addr_t dma_handle = 0; +#endif + + if (tx_desc) { + t = tx_list_head; + for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { + if (t) { + if (t->skb) { + dev_kfree_skb(t->skb); + t->skb = NULL; + } + t = t->next; + } + } + bfin_mac_free(dma_handle, tx_desc); + } + + if (rx_desc) { + r = rx_list_head; + for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { + if (r) { + if (r->skb) { + dev_kfree_skb(r->skb); + r->skb = NULL; + } + r = r->next; + } + } + bfin_mac_free(dma_handle, rx_desc); + } +} + +static int desc_list_init(void) +{ + int i; + struct sk_buff *new_skb; +#if !defined(CONFIG_BFIN_MAC_USE_L1) + /* + * This dma_handle is useless in Blackfin dma_alloc_coherent(). + * The real dma handler is the return value of dma_alloc_coherent(). 
+ */ + dma_addr_t dma_handle; +#endif + + tx_desc = bfin_mac_alloc(&dma_handle, + sizeof(struct net_dma_desc_tx) * + CONFIG_BFIN_TX_DESC_NUM); + if (tx_desc == NULL) + goto init_error; + + rx_desc = bfin_mac_alloc(&dma_handle, + sizeof(struct net_dma_desc_rx) * + CONFIG_BFIN_RX_DESC_NUM); + if (rx_desc == NULL) + goto init_error; + + /* init tx_list */ + tx_list_head = tx_list_tail = tx_desc; + + for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { + struct net_dma_desc_tx *t = tx_desc + i; + struct dma_descriptor *a = &(t->desc_a); + struct dma_descriptor *b = &(t->desc_b); + + /* + * disable DMA + * read from memory WNR = 0 + * wordsize is 32 bits + * 6 half words is desc size + * large desc flow + */ + a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + a->start_addr = (unsigned long)t->packet; + a->x_count = 0; + a->next_dma_desc = b; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * disable interrupt + * 6 half words is desc size + * large desc flow + */ + b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + b->start_addr = (unsigned long)(&(t->status)); + b->x_count = 0; + + t->skb = NULL; + tx_list_tail->desc_b.next_dma_desc = a; + tx_list_tail->next = t; + tx_list_tail = t; + } + tx_list_tail->next = tx_list_head; /* tx_list is a circle */ + tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a); + current_tx_ptr = tx_list_head; + + /* init rx_list */ + rx_list_head = rx_list_tail = rx_desc; + + for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { + struct net_dma_desc_rx *r = rx_desc + i; + struct dma_descriptor *a = &(r->desc_a); + struct dma_descriptor *b = &(r->desc_b); + + /* allocate a new skb for next time receive */ + new_skb = dev_alloc_skb(PKT_BUF_SZ + 2); + if (!new_skb) { + printk(KERN_NOTICE DRV_NAME + ": init: low on mem - packet dropped\n"); + goto init_error; + } + skb_reserve(new_skb, 2); + r->skb = new_skb; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * disable interrupt + * 6 half words is desc size + * large desc flow + */ + a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + /* since RXDWA is enabled */ + a->start_addr = (unsigned long)new_skb->data - 2; + a->x_count = 0; + a->next_dma_desc = b; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * enable interrupt + * 6 half words is desc size + * large desc flow + */ + b->config = DMAEN | WNR | WDSIZE_32 | DI_EN | + NDSIZE_6 | DMAFLOW_LARGE; + b->start_addr = (unsigned long)(&(r->status)); + b->x_count = 0; + + rx_list_tail->desc_b.next_dma_desc = a; + rx_list_tail->next = r; + rx_list_tail = r; + } + rx_list_tail->next = rx_list_head; /* rx_list is a circle */ + rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a); + current_rx_ptr = rx_list_head; + + return 0; + +init_error: + desc_list_free(); + printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); + return -ENOMEM; +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ + +/* Set FER regs to MUX in Ethernet pins */ +static int setup_pin_mux(int action) +{ +#if defined(CONFIG_BFIN_MAC_RMII) + u16 pin_req[] = P_RMII0; +#else + u16 pin_req[] = P_MII0; +#endif + + if (action) { + if (peripheral_request_list(pin_req, DRV_NAME)) { + printk(KERN_ERR DRV_NAME + ": Requesting Peripherals failed\n"); + return -EFAULT; + } + } else + peripheral_free_list(pin_req); + + return 0; +} + +/* + * MII operations + */ +/* Wait until the previous MDC/MDIO transaction has completed */ +static void mdio_poll(void) +{ + int timeout_cnt = 
MAX_TIMEOUT_CNT; + + /* poll the STABUSY bit */ + while ((bfin_read_EMAC_STAADD()) & STABUSY) { + mdelay(10); + if (timeout_cnt-- < 0) { + printk(KERN_ERR DRV_NAME + ": wait MDC/MDIO transaction to complete timeout\n"); + break; + } + } +} + +/* Read an off-chip register in a PHY through the MDC/MDIO port */ +static int mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + mdio_poll(); + + /* read mode */ + bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | + SET_REGAD((u16) regnum) | + STABUSY); + + mdio_poll(); + + return (int) bfin_read_EMAC_STADAT(); +} + +/* Write an off-chip register in a PHY through the MDC/MDIO port */ +static int mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + mdio_poll(); + + bfin_write_EMAC_STADAT((u32) value); + + /* write mode */ + bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | + SET_REGAD((u16) regnum) | + STAOP | + STABUSY); + + mdio_poll(); + + return 0; +} + +static int mdiobus_reset(struct mii_bus *bus) +{ + return 0; +} + +static void bf537_adjust_link(struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + struct phy_device *phydev = lp->phydev; + unsigned long flags; + int new_state = 0; + + spin_lock_irqsave(&lp->lock, flags); + if (phydev->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != lp->old_duplex) { + u32 opmode = bfin_read_EMAC_OPMODE(); + new_state = 1; + + if (phydev->duplex) + opmode |= FDMODE; + else + opmode &= ~(FDMODE); + + bfin_write_EMAC_OPMODE(opmode); + lp->old_duplex = phydev->duplex; + } + + if (phydev->speed != lp->old_speed) { +#if defined(CONFIG_BFIN_MAC_RMII) + u32 opmode = bfin_read_EMAC_OPMODE(); + bf537mac_disable(); + switch (phydev->speed) { + case 10: + opmode |= RMII_10; + break; + case 100: + opmode &= ~(RMII_10); + break; + default: + printk(KERN_WARNING + "%s: Ack! Speed (%d) is not 10/100!\n", + DRV_NAME, phydev->speed); + break; + } + bfin_write_EMAC_OPMODE(opmode); + bf537mac_enable(); +#endif + + new_state = 1; + lp->old_speed = phydev->speed; + } + + if (!lp->old_link) { + new_state = 1; + lp->old_link = 1; + netif_schedule(dev); + } + } else if (lp->old_link) { + new_state = 1; + lp->old_link = 0; + lp->old_speed = 0; + lp->old_duplex = -1; + } + + if (new_state) { + u32 opmode = bfin_read_EMAC_OPMODE(); + phy_print_status(phydev); + pr_debug("EMAC_OPMODE = 0x%08x\n", opmode); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static int mii_probe(struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + struct phy_device *phydev = NULL; + unsigned short sysctl; + int i; + + /* Enable PHY output early */ + if (!(bfin_read_VR_CTL() & PHYCLKOE)) + bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); + + /* MDC = 2.5 MHz */ + sysctl = bfin_read_EMAC_SYSCTL(); + sysctl |= SET_MDCDIV(24); + bfin_write_EMAC_SYSCTL(sysctl); + + /* search for connect PHY device */ + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *const tmp_phydev = lp->mii_bus.phy_map[i]; + + if (!tmp_phydev) + continue; /* no PHY here... */ + + phydev = tmp_phydev; + break; /* found it */ + } + + /* now we are supposed to have a proper phydev, to attach to... 
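+	 * (if the bus scan above found no PHY at all, give up with -ENODEV
+	 * below instead of registering a half-initialised interface)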
*/ + if (!phydev) { + printk(KERN_INFO "%s: Don't found any phy device at all\n", + dev->name); + return -ENODEV; + } + +#if defined(CONFIG_BFIN_MAC_RMII) + phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, + PHY_INTERFACE_MODE_RMII); +#else + phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, + PHY_INTERFACE_MODE_MII); +#endif + + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_Pause | SUPPORTED_Asym_Pause + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + lp->old_link = 0; + lp->old_speed = 0; + lp->old_duplex = -1; + lp->phydev = phydev; + + printk(KERN_INFO "%s: attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + + return 0; +} + +/**************************************************************************/ +void setup_system_regs(struct net_device *dev) +{ + unsigned short sysctl; + + /* + * Odd word alignment for Receive Frame DMA word + * Configure checksum support and rcve frame word alignment + */ + sysctl = bfin_read_EMAC_SYSCTL(); +#if defined(BFIN_MAC_CSUM_OFFLOAD) + sysctl |= RXDWA | RXCKS; +#else + sysctl |= RXDWA; +#endif + bfin_write_EMAC_SYSCTL(sysctl); + + bfin_write_EMAC_MMC_CTL(RSTC | CROLL); + + /* Initialize the TX DMA channel registers */ + bfin_write_DMA2_X_COUNT(0); + bfin_write_DMA2_X_MODIFY(4); + bfin_write_DMA2_Y_COUNT(0); + bfin_write_DMA2_Y_MODIFY(0); + + /* Initialize the RX DMA channel registers */ + bfin_write_DMA1_X_COUNT(0); + bfin_write_DMA1_X_MODIFY(4); + bfin_write_DMA1_Y_COUNT(0); + bfin_write_DMA1_Y_MODIFY(0); +} + +static void setup_mac_addr(u8 *mac_addr) +{ + u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]); + u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]); + + /* this depends on a little-endian machine */ + bfin_write_EMAC_ADDRLO(addr_low); + bfin_write_EMAC_ADDRHI(addr_hi); +} + +static int bf537mac_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + if (netif_running(dev)) + return -EBUSY; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + setup_mac_addr(dev->dev_addr); + return 0; +} + +static void adjust_tx_list(void) +{ + int timeout_cnt = MAX_TIMEOUT_CNT; + + if (tx_list_head->status.status_word != 0 + && current_tx_ptr != tx_list_head) { + goto adjust_head; /* released something, just return; */ + } + + /* + * if nothing released, check wait condition + * current's next can not be the head, + * otherwise the dma will not stop as we want + */ + if (current_tx_ptr->next->next == tx_list_head) { + while (tx_list_head->status.status_word == 0) { + mdelay(10); + if (tx_list_head->status.status_word != 0 + || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { + goto adjust_head; + } + if (timeout_cnt-- < 0) { + printk(KERN_ERR DRV_NAME + ": wait for adjust tx list head timeout\n"); + break; + } + } + if (tx_list_head->status.status_word != 0) { + goto adjust_head; + } + } + + return; + +adjust_head: + do { + tx_list_head->desc_a.config &= ~DMAEN; + tx_list_head->status.status_word = 0; + if (tx_list_head->skb) { + dev_kfree_skb(tx_list_head->skb); + tx_list_head->skb = NULL; + } else { + printk(KERN_ERR DRV_NAME + ": no sk_buff in a transmitted frame!\n"); + } + tx_list_head = 
tx_list_head->next; + } while (tx_list_head->status.status_word != 0 + && current_tx_ptr != tx_list_head); + return; + +} + +static int bf537mac_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + unsigned int data; + + current_tx_ptr->skb = skb; + + /* + * Is skb->data always 16-bit aligned? + * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)? + */ + if ((((unsigned int)(skb->data)) & 0x02) == 2) { + /* move skb->data to current_tx_ptr payload */ + data = (unsigned int)(skb->data) - 2; + *((unsigned short *)data) = (unsigned short)(skb->len); + current_tx_ptr->desc_a.start_addr = (unsigned long)data; + /* this is important! */ + blackfin_dcache_flush_range(data, (data + (skb->len)) + 2); + + } else { + *((unsigned short *)(current_tx_ptr->packet)) = + (unsigned short)(skb->len); + memcpy((char *)(current_tx_ptr->packet + 2), skb->data, + (skb->len)); + current_tx_ptr->desc_a.start_addr = + (unsigned long)current_tx_ptr->packet; + if (current_tx_ptr->status.status_word != 0) + current_tx_ptr->status.status_word = 0; + blackfin_dcache_flush_range((unsigned int)current_tx_ptr-> + packet, + (unsigned int)(current_tx_ptr-> + packet + skb->len) + + 2); + } + + /* enable this packet's dma */ + current_tx_ptr->desc_a.config |= DMAEN; + + /* tx dma is running, just return */ + if (bfin_read_DMA2_IRQ_STATUS() & 0x08) + goto out; + + /* tx dma is not running */ + bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a)); + /* dma enabled, read from memory, size is 6 */ + bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config); + /* Turn on the EMAC tx */ + bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); + +out: + adjust_tx_list(); + current_tx_ptr = current_tx_ptr->next; + dev->trans_start = jiffies; + dev->stats.tx_packets++; + dev->stats.tx_bytes += (skb->len); + return 0; +} + +static void bf537mac_rx(struct net_device *dev) +{ + struct sk_buff *skb, *new_skb; + struct bf537mac_local *lp = netdev_priv(dev); + unsigned short len; + + /* allocate a new skb for next time receive */ + skb = current_rx_ptr->skb; + new_skb = dev_alloc_skb(PKT_BUF_SZ + 2); + if (!new_skb) { + printk(KERN_NOTICE DRV_NAME + ": rx: low on mem - packet dropped\n"); + dev->stats.rx_dropped++; + goto out; + } + /* reserve 2 bytes for RXDWA padding */ + skb_reserve(new_skb, 2); + current_rx_ptr->skb = new_skb; + current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; + + len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); + skb_put(skb, len); + blackfin_dcache_invalidate_range((unsigned long)skb->head, + (unsigned long)skb->tail); + + dev->last_rx = jiffies; + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); +#if defined(BFIN_MAC_CSUM_OFFLOAD) + skb->csum = current_rx_ptr->status.ip_payload_csum; + skb->ip_summed = CHECKSUM_PARTIAL; +#endif + + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + current_rx_ptr->status.status_word = 0x00000000; + current_rx_ptr = current_rx_ptr->next; + +out: + return; +} + +/* interrupt routine to handle rx and error signal */ +static irqreturn_t bf537mac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + int number = 0; + +get_one_packet: + if (current_rx_ptr->status.status_word == 0) { + /* no more new packet received */ + if (number == 0) { + if (current_rx_ptr->next->status.status_word != 0) { + current_rx_ptr = current_rx_ptr->next; + goto real_rx; + } + } + bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() | + 
DMA_DONE | DMA_ERR); + return IRQ_HANDLED; + } + +real_rx: + bf537mac_rx(dev); + number++; + goto get_one_packet; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bf537mac_poll(struct net_device *dev) +{ + disable_irq(IRQ_MAC_RX); + bf537mac_interrupt(IRQ_MAC_RX, dev); + enable_irq(IRQ_MAC_RX); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static void bf537mac_disable(void) +{ + unsigned int opmode; + + opmode = bfin_read_EMAC_OPMODE(); + opmode &= (~RE); + opmode &= (~TE); + /* Turn off the EMAC */ + bfin_write_EMAC_OPMODE(opmode); +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static void bf537mac_enable(void) +{ + u32 opmode; + + pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__); + + /* Set RX DMA */ + bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); + bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config); + + /* Wait MII done */ + mdio_poll(); + + /* We enable only RX here */ + /* ASTP : Enable Automatic Pad Stripping + PR : Promiscuous Mode for test + PSF : Receive frames with total length less than 64 bytes. + FDMODE : Full Duplex Mode + LB : Internal Loopback for test + RE : Receiver Enable */ + opmode = bfin_read_EMAC_OPMODE(); + if (opmode & FDMODE) + opmode |= PSF; + else + opmode |= DRO | DC | PSF; + opmode |= RE; + +#if defined(CONFIG_BFIN_MAC_RMII) + opmode |= RMII; /* For Now only 100MBit are supported */ +#ifdef CONFIG_BF_REV_0_2 + opmode |= TE; +#endif +#endif + /* Turn on the EMAC rx */ + bfin_write_EMAC_OPMODE(opmode); +} + +/* Our watchdog timed out. Called by the networking layer */ +static void bf537mac_timeout(struct net_device *dev) +{ + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + bf537mac_disable(); + + /* reset tx queue */ + tx_list_tail = tx_list_head->next; + + bf537mac_enable(); + + /* We can accept TX packets again */ + dev->trans_start = jiffies; + netif_wake_queue(dev); +} + +/* + * This routine will, depending on the values passed to it, + * either make it accept multicast packets, go into + * promiscuous mode (for TCPDUMP and cousins) or accept + * a select set of multicast packets + */ +static void bf537mac_set_multicast_list(struct net_device *dev) +{ + u32 sysctl; + + if (dev->flags & IFF_PROMISC) { + printk(KERN_INFO "%s: set to promisc mode\n", dev->name); + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= RAF; + bfin_write_EMAC_OPMODE(sysctl); + } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { + /* accept all multicast */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= PAM; + bfin_write_EMAC_OPMODE(sysctl); + } else { + /* clear promisc or multicast mode */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl &= ~(RAF | PAM); + bfin_write_EMAC_OPMODE(sysctl); + } +} + +/* + * this puts the device in an inactive state + */ +static void bf537mac_shutdown(struct net_device *dev) +{ + /* Turn off the EMAC */ + bfin_write_EMAC_OPMODE(0x00000000); + /* Turn off the EMAC RX DMA */ + bfin_write_DMA1_CONFIG(0x0000); + bfin_write_DMA2_CONFIG(0x0000); +} + +/* + * Open and Initialize the interface + * + * Set up everything, reset the card, etc.. + */ +static int bf537mac_open(struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + int retval; + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + /* + * Check that the address is valid. If its not, refuse + * to bring the device up. 
The user must specify an + * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx + */ + if (!is_valid_ether_addr(dev->dev_addr)) { + printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); + return -EINVAL; + } + + /* initial rx and tx list */ + retval = desc_list_init(); + + if (retval) + return retval; + + phy_start(lp->phydev); + setup_system_regs(dev); + bf537mac_disable(); + bf537mac_enable(); + + pr_debug("hardware init finished\n"); + netif_start_queue(dev); + netif_carrier_on(dev); + + return 0; +} + +/* + * + * this makes the board clean up everything that it can + * and not talk to the outside world. Caused by + * an 'ifconfig ethX down' + */ +static int bf537mac_close(struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + phy_stop(lp->phydev); + + /* clear everything */ + bf537mac_shutdown(dev); + + /* free the rx/tx buffers */ + desc_list_free(); + + return 0; +} + +static int __init bf537mac_probe(struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + int retval; + int i; + + /* Grab the MAC address in the MAC */ + *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); + *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); + + /* probe mac */ + /*todo: how to proble? which is revision_register */ + bfin_write_EMAC_ADDRLO(0x12345678); + if (bfin_read_EMAC_ADDRLO() != 0x12345678) { + pr_debug("can't detect bf537 mac!\n"); + retval = -ENODEV; + goto err_out; + } + + /* set the GPIO pins to Ethernet mode */ + retval = setup_pin_mux(1); + if (retval) + return retval; + + /*Is it valid? (Did bootloader initialize it?) */ + if (!is_valid_ether_addr(dev->dev_addr)) { + /* Grab the MAC from the board somehow - this is done in the + arch/blackfin/mach-bf537/boards/eth_mac.c */ + get_bf537_ether_addr(dev->dev_addr); + } + + /* If still not valid, get a random one */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + } + + setup_mac_addr(dev->dev_addr); + + /* MDIO bus initial */ + lp->mii_bus.priv = dev; + lp->mii_bus.read = mdiobus_read; + lp->mii_bus.write = mdiobus_write; + lp->mii_bus.reset = mdiobus_reset; + lp->mii_bus.name = "bfin_mac_mdio"; + lp->mii_bus.id = 0; + lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + for (i = 0; i < PHY_MAX_ADDR; ++i) + lp->mii_bus.irq[i] = PHY_POLL; + + mdiobus_register(&lp->mii_bus); + + retval = mii_probe(dev); + if (retval) + return retval; + + /* Fill in the fields of the device structure with ethernet values. */ + ether_setup(dev); + + dev->open = bf537mac_open; + dev->stop = bf537mac_close; + dev->hard_start_xmit = bf537mac_hard_start_xmit; + dev->set_mac_address = bf537mac_set_mac_address; + dev->tx_timeout = bf537mac_timeout; + dev->set_multicast_list = bf537mac_set_multicast_list; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = bf537mac_poll; +#endif + + spin_lock_init(&lp->lock); + + /* now, enable interrupts */ + /* register irq handler */ + if (request_irq + (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, + "BFIN537_MAC_RX", dev)) { + printk(KERN_WARNING DRV_NAME + ": Unable to attach BlackFin MAC RX interrupt\n"); + return -EBUSY; + } + + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format.. 
*/ + printk(KERN_INFO "%s: Version %s, %s\n", + DRV_NAME, DRV_VERSION, DRV_DESC); + } + +err_out: + return retval; +} + +static int bfin_mac_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + + ndev = alloc_etherdev(sizeof(struct bf537mac_local)); + if (!ndev) { + printk(KERN_WARNING DRV_NAME ": could not allocate device\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + platform_set_drvdata(pdev, ndev); + + if (bf537mac_probe(ndev) != 0) { + platform_set_drvdata(pdev, NULL); + free_netdev(ndev); + printk(KERN_WARNING DRV_NAME ": not found\n"); + return -ENODEV; + } + + return 0; +} + +static int bfin_mac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(ndev); + + free_irq(IRQ_MAC_RX, ndev); + + free_netdev(ndev); + + setup_pin_mux(0); + + return 0; +} + +#ifdef CONFIG_PM +static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg) +{ + struct net_device *net_dev = platform_get_drvdata(pdev); + + if (netif_running(net_dev)) + bf537mac_close(net_dev); + + return 0; +} + +static int bfin_mac_resume(struct platform_device *pdev) +{ + struct net_device *net_dev = platform_get_drvdata(pdev); + + if (netif_running(net_dev)) + bf537mac_open(net_dev); + + return 0; +} +#else +#define bfin_mac_suspend NULL +#define bfin_mac_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver bfin_mac_driver = { + .probe = bfin_mac_probe, + .remove = bfin_mac_remove, + .resume = bfin_mac_resume, + .suspend = bfin_mac_suspend, + .driver = { + .name = DRV_NAME, + }, +}; + +static int __init bfin_mac_init(void) +{ + return platform_driver_register(&bfin_mac_driver); +} + +module_init(bfin_mac_init); + +static void __exit bfin_mac_cleanup(void) +{ + platform_driver_unregister(&bfin_mac_driver); +} + +module_exit(bfin_mac_cleanup); diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h new file mode 100644 index 000000000000..3a107ad75381 --- /dev/null +++ b/drivers/net/bfin_mac.h @@ -0,0 +1,95 @@ +/* + * File: drivers/net/bfin_mac.c + * Based on: + * Maintainer: + * Bryan Wu <bryan.wu@analog.com> + * + * Original author: + * Luke Yang <luke.yang@analog.com> + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program ; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#define BFIN_MAC_CSUM_OFFLOAD + +struct dma_descriptor { + struct dma_descriptor *next_dma_desc; + unsigned long start_addr; + unsigned short config; + unsigned short x_count; +}; + +struct status_area_rx { +#if defined(BFIN_MAC_CSUM_OFFLOAD) + unsigned short ip_hdr_csum; /* ip header checksum */ + /* ip payload(udp or tcp or others) checksum */ + unsigned short ip_payload_csum; +#endif + unsigned long status_word; /* the frame status word */ +}; + +struct status_area_tx { + unsigned long status_word; /* the frame status word */ +}; + +/* use two descriptors for a packet */ +struct net_dma_desc_rx { + struct net_dma_desc_rx *next; + struct sk_buff *skb; + struct dma_descriptor desc_a; + struct dma_descriptor desc_b; + struct status_area_rx status; +}; + +/* use two descriptors for a packet */ +struct net_dma_desc_tx { + struct net_dma_desc_tx *next; + struct sk_buff *skb; + struct dma_descriptor desc_a; + struct dma_descriptor desc_b; + unsigned char packet[1560]; + struct status_area_tx status; +}; + +struct bf537mac_local { + /* + * these are things that the kernel wants me to keep, so users + * can find out semi-useless statistics of how well the card is + * performing + */ + struct net_device_stats stats; + + unsigned char Mac[6]; /* MAC address of the board */ + spinlock_t lock; + + /* MII and PHY stuffs */ + int old_link; /* used by bf537_adjust_link */ + int old_speed; + int old_duplex; + + struct phy_device *phydev; + struct mii_bus mii_bus; +}; + +extern void get_bf537_ether_addr(char *addr); diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 9b8d7d9dbe86..a42bd19646d3 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -19,6 +19,7 @@ #include <linux/spinlock.h> #include <linux/crc32.h> #include <linux/bitrev.h> +#include <linux/ethtool.h> #include <asm/prom.h> #include <asm/dbdma.h> #include <asm/io.h> @@ -74,7 +75,6 @@ struct bmac_data { int tx_fill; int tx_empty; unsigned char tx_fullup; - struct net_device_stats stats; struct timer_list tx_timeout; int timeout_active; int sleeping; @@ -144,7 +144,6 @@ static unsigned char *bmac_emergency_rxbuf; static int bmac_open(struct net_device *dev); static int bmac_close(struct net_device *dev); static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *bmac_stats(struct net_device *dev); static void bmac_set_multicast(struct net_device *dev); static void bmac_reset_and_enable(struct net_device *dev); static void bmac_start_chip(struct net_device *dev); @@ -667,7 +666,7 @@ static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) bp->tx_bufs[bp->tx_fill] = skb; bp->tx_fill = i; - bp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dbdma_continue(td); @@ -706,8 +705,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) nb = RX_BUFLEN - residual - 2; if (nb < (ETHERMINPACKET - ETHERCRC)) { skb = NULL; - bp->stats.rx_length_errors++; - bp->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; } else { skb = bp->rx_bufs[i]; bp->rx_bufs[i] = NULL; @@ -718,10 +717,10 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - ++bp->stats.rx_packets; - bp->stats.rx_bytes += nb; + ++dev->stats.rx_packets; + dev->stats.rx_bytes += nb; } else { - ++bp->stats.rx_dropped; + ++dev->stats.rx_dropped; } dev->last_rx = jiffies; if ((skb = bp->rx_bufs[i]) == NULL) { @@ -784,7 +783,7 @@ static irqreturn_t bmac_txdma_intr(int irq, 
void *dev_id) } if (bp->tx_bufs[bp->tx_empty]) { - ++bp->stats.tx_packets; + ++dev->stats.tx_packets; dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); } bp->tx_bufs[bp->tx_empty] = NULL; @@ -806,13 +805,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static struct net_device_stats *bmac_stats(struct net_device *dev) -{ - struct bmac_data *p = netdev_priv(dev); - - return &p->stats; -} - #ifndef SUNHME_MULTICAST /* Real fast bit-reversal algorithm, 6-bit values */ static int reverse6[64] = { @@ -1079,17 +1071,17 @@ static irqreturn_t bmac_misc_intr(int irq, void *dev_id) } /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ /* bmac_txdma_intr_inner(irq, dev_id); */ - /* if (status & FrameReceived) bp->stats.rx_dropped++; */ - if (status & RxErrorMask) bp->stats.rx_errors++; - if (status & RxCRCCntExp) bp->stats.rx_crc_errors++; - if (status & RxLenCntExp) bp->stats.rx_length_errors++; - if (status & RxOverFlow) bp->stats.rx_over_errors++; - if (status & RxAlignCntExp) bp->stats.rx_frame_errors++; - - /* if (status & FrameSent) bp->stats.tx_dropped++; */ - if (status & TxErrorMask) bp->stats.tx_errors++; - if (status & TxUnderrun) bp->stats.tx_fifo_errors++; - if (status & TxNormalCollExp) bp->stats.collisions++; + /* if (status & FrameReceived) dev->stats.rx_dropped++; */ + if (status & RxErrorMask) dev->stats.rx_errors++; + if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; + if (status & RxLenCntExp) dev->stats.rx_length_errors++; + if (status & RxOverFlow) dev->stats.rx_over_errors++; + if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; + + /* if (status & FrameSent) dev->stats.tx_dropped++; */ + if (status & TxErrorMask) dev->stats.tx_errors++; + if (status & TxUnderrun) dev->stats.tx_fifo_errors++; + if (status & TxNormalCollExp) dev->stats.collisions++; return IRQ_HANDLED; } @@ -1246,6 +1238,17 @@ static void bmac_reset_and_enable(struct net_device *dev) } spin_unlock_irqrestore(&bp->lock, flags); } +static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct bmac_data *bp = netdev_priv(dev); + strcpy(info->driver, "bmac"); + strcpy(info->bus_info, bp->mdev->ofdev.dev.bus_id); +} + +static const struct ethtool_ops bmac_ethtool_ops = { + .get_drvinfo = bmac_get_drvinfo, + .get_link = ethtool_op_get_link, +}; static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) { @@ -1255,6 +1258,7 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i unsigned char addr[6]; struct net_device *dev; int is_bmac_plus = ((int)match->data) != 0; + DECLARE_MAC_BUF(mac); if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); @@ -1279,7 +1283,6 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i } bp = netdev_priv(dev); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); macio_set_drvdata(mdev, dev); @@ -1311,8 +1314,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i dev->open = bmac_open; dev->stop = bmac_close; + dev->ethtool_ops = &bmac_ethtool_ops; dev->hard_start_xmit = bmac_output; - dev->get_stats = bmac_stats; dev->set_multicast_list = bmac_set_multicast; dev->set_mac_address = bmac_set_address; @@ -1365,9 +1368,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i goto err_out_irq2; } - printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? 
"+": "")); - for (j = 0; j < 6; ++j) - printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]); + printk(KERN_INFO "%s: BMAC%s at %s", + dev->name, (is_bmac_plus ? "+" : ""), print_mac(mac, dev->dev_addr)); XXDEBUG((", base_addr=%#0lx", dev->base_addr)); printk("\n"); @@ -1530,7 +1532,7 @@ static void bmac_tx_timeout(unsigned long data) XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", bp->tx_empty, bp->tx_fill, bp->tx_fullup)); i = bp->tx_empty; - ++bp->stats.tx_errors; + ++dev->stats.tx_errors; if (i != bp->tx_fill) { dev_kfree_skb(bp->tx_bufs[i]); bp->tx_bufs[i] = NULL; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d23861c8658c..78ed633ceb82 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -52,10 +52,12 @@ #include "bnx2_fw.h" #include "bnx2_fw2.h" +#define FW_BUF_SIZE 0x8000 + #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.6.2" -#define DRV_MODULE_RELDATE "July 6, 2007" +#define DRV_MODULE_VERSION "1.6.7" +#define DRV_MODULE_RELDATE "October 10, 2007" #define RUN_AT(x) (jiffies + (x)) @@ -126,91 +128,102 @@ static struct pci_device_id bnx2_pci_tbl[] = { static struct flash_spec flash_table[] = { +#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE) +#define NONBUFFERED_FLAGS (BNX2_NV_WREN) /* Slow EEPROM */ {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, - 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - slow"}, /* Expansion entry 0001 */ {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0001"}, /* Saifun SA25F010 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, "Non-buffered flash (128kB)"}, /* Saifun SA25F020 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, "Non-buffered flash (256kB)"}, /* Expansion entry 0100 */ {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0100"}, /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, - 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, /* Entry 0110: ST M45PE20 (non-buffered flash)*/ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, - 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, /* Saifun SA25F005 (non-buffered flash) */ /* strap, cfg1, & 
write1 need updates */ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, "Non-buffered flash (64kB)"}, /* Fast EEPROM */ {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, - 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - fast"}, /* Expansion entry 1001 */ {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1001"}, /* Expansion entry 1010 */ {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1010"}, /* ATMEL AT45DB011B (buffered flash) */ {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, "Buffered flash (128kB)"}, /* Expansion entry 1100 */ {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1100"}, /* Expansion entry 1101 */ {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1101"}, /* Ateml Expansion entry 1110 */ {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, 0, "Entry 1110 (Atmel)"}, /* ATMEL AT45DB021B (buffered flash) */ {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, "Buffered flash (256kB)"}, }; +static struct flash_spec flash_5709 = { + .flags = BNX2_NV_BUFFERED, + .page_bits = BCM5709_FLASH_PAGE_BITS, + .page_size = BCM5709_FLASH_PAGE_SIZE, + .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, + .total_size = BUFFERED_FLASH_TOTAL_SIZE*2, + .name = "5709 Buffered flash (256kB)", +}; + MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); static inline u32 bnx2_tx_avail(struct bnx2 *bp) @@ -417,7 +430,7 @@ bnx2_netif_stop(struct bnx2 *bp) { bnx2_disable_int_sync(bp); if (netif_running(bp->dev)) { - netif_poll_disable(bp->dev); + napi_disable(&bp->napi); netif_tx_disable(bp->dev); bp->dev->trans_start = jiffies; /* prevent tx timeout */ } @@ -429,7 +442,7 @@ bnx2_netif_start(struct bnx2 *bp) if (atomic_dec_and_test(&bp->intr_sem)) { if (netif_running(bp->dev)) { netif_wake_queue(bp->dev); - netif_poll_enable(bp->dev); + napi_enable(&bp->napi); bnx2_enable_int(bp); } } @@ -2540,7 +2553,7 @@ bnx2_msi(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(dev); + netif_rx_schedule(dev, &bp->napi); return IRQ_HANDLED; } @@ 
-2557,7 +2570,7 @@ bnx2_msi_1shot(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(dev); + netif_rx_schedule(dev, &bp->napi); return IRQ_HANDLED; } @@ -2593,9 +2606,9 @@ bnx2_interrupt(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &bp->napi)) { bp->last_status_idx = sblk->status_idx; - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &bp->napi); } return IRQ_HANDLED; @@ -2620,10 +2633,8 @@ bnx2_has_work(struct bnx2 *bp) return 0; } -static int -bnx2_poll(struct net_device *dev, int *budget) +static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget) { - struct bnx2 *bp = netdev_priv(dev); struct status_block *sblk = bp->status_blk; u32 status_attn_bits = sblk->status_attn_bits; u32 status_attn_bits_ack = sblk->status_attn_bits_ack; @@ -2641,44 +2652,54 @@ bnx2_poll(struct net_device *dev, int *budget) REG_RD(bp, BNX2_HC_COMMAND); } - if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) + if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) bnx2_tx_int(bp); - if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) { - int orig_budget = *budget; - int work_done; + if (sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) + work_done += bnx2_rx_int(bp, budget - work_done); - if (orig_budget > dev->quota) - orig_budget = dev->quota; + return work_done; +} - work_done = bnx2_rx_int(bp, orig_budget); - *budget -= work_done; - dev->quota -= work_done; - } +static int bnx2_poll(struct napi_struct *napi, int budget) +{ + struct bnx2 *bp = container_of(napi, struct bnx2, napi); + int work_done = 0; + struct status_block *sblk = bp->status_blk; - bp->last_status_idx = bp->status_blk->status_idx; - rmb(); + while (1) { + work_done = bnx2_poll_work(bp, work_done, budget); + + if (unlikely(work_done >= budget)) + break; - if (!bnx2_has_work(bp)) { - netif_rx_complete(dev); - if (likely(bp->flags & USING_MSI_FLAG)) { + /* bp->last_status_idx is used below to tell the hw how + * much work has been processed, so we must read it before + * checking for more work. 
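+	 * The rmb() that follows keeps this read ordered ahead of the
+	 * status-block fields examined by bnx2_has_work().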
+ */ + bp->last_status_idx = sblk->status_idx; + rmb(); + if (likely(!bnx2_has_work(bp))) { + netif_rx_complete(bp->dev, napi); + if (likely(bp->flags & USING_MSI_FLAG)) { + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bp->last_status_idx); + break; + } REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); - return 0; - } - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT | - bp->last_status_idx); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bp->last_status_idx); - return 0; + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bp->last_status_idx); + break; + } } - return 1; + return work_done; } /* Called with rtnl_lock from vlan functions and also netif_tx_lock @@ -2756,92 +2777,6 @@ bnx2_set_rx_mode(struct net_device *dev) spin_unlock_bh(&bp->phy_lock); } -#define FW_BUF_SIZE 0x8000 - -static int -bnx2_gunzip_init(struct bnx2 *bp) -{ - if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL) - goto gunzip_nomem1; - - if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL) - goto gunzip_nomem2; - - bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); - if (bp->strm->workspace == NULL) - goto gunzip_nomem3; - - return 0; - -gunzip_nomem3: - kfree(bp->strm); - bp->strm = NULL; - -gunzip_nomem2: - vfree(bp->gunzip_buf); - bp->gunzip_buf = NULL; - -gunzip_nomem1: - printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for " - "uncompression.\n", bp->dev->name); - return -ENOMEM; -} - -static void -bnx2_gunzip_end(struct bnx2 *bp) -{ - kfree(bp->strm->workspace); - - kfree(bp->strm); - bp->strm = NULL; - - if (bp->gunzip_buf) { - vfree(bp->gunzip_buf); - bp->gunzip_buf = NULL; - } -} - -static int -bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen) -{ - int n, rc; - - /* check gzip header */ - if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) - return -EINVAL; - - n = 10; - -#define FNAME 0x8 - if (zbuf[3] & FNAME) - while ((zbuf[n++] != 0) && (n < len)); - - bp->strm->next_in = zbuf + n; - bp->strm->avail_in = len - n; - bp->strm->next_out = bp->gunzip_buf; - bp->strm->avail_out = FW_BUF_SIZE; - - rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); - if (rc != Z_OK) - return rc; - - rc = zlib_inflate(bp->strm, Z_FINISH); - - *outlen = FW_BUF_SIZE - bp->strm->avail_out; - *outbuf = bp->gunzip_buf; - - if ((rc != Z_OK) && (rc != Z_STREAM_END)) - printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n", - bp->dev->name, bp->strm->msg); - - zlib_inflateEnd(bp->strm); - - if (rc == Z_STREAM_END) - return 0; - - return rc; -} - static void load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len, u32 rv2p_proc) @@ -2891,19 +2826,13 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) /* Load the Text area. 
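	 * (fw->gz_text holds a gzip-compressed image; it is inflated into the
	 * caller-supplied fw->text buffer with zlib_inflate_blob() and then
	 * written to the chip word by word below.)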
*/ offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); if (fw->gz_text) { - u32 text_len; - void *text; + int j; - rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text, - &text_len); - if (rc) + rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text, + fw->gz_text_len); + if (rc < 0) return rc; - fw->text = text; - } - if (fw->gz_text) { - int j; - for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j])); } @@ -2921,21 +2850,21 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) /* Load the SBSS area. */ offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); - if (fw->sbss) { + if (fw->sbss_len) { int j; for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { - REG_WR_IND(bp, offset, fw->sbss[j]); + REG_WR_IND(bp, offset, 0); } } /* Load the BSS area. */ offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); - if (fw->bss) { + if (fw->bss_len) { int j; for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { - REG_WR_IND(bp, offset, fw->bss[j]); + REG_WR_IND(bp, offset, 0); } } @@ -2968,27 +2897,24 @@ bnx2_init_cpus(struct bnx2 *bp) { struct cpu_reg cpu_reg; struct fw_info *fw; - int rc = 0; + int rc; void *text; - u32 text_len; - - if ((rc = bnx2_gunzip_init(bp)) != 0) - return rc; /* Initialize the RV2P processor. */ - rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text, - &text_len); - if (rc) + text = vmalloc(FW_BUF_SIZE); + if (!text) + return -ENOMEM; + rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1)); + if (rc < 0) goto init_cpu_err; - load_rv2p_fw(bp, text, text_len, RV2P_PROC1); + load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1); - rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text, - &text_len); - if (rc) + rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2)); + if (rc < 0) goto init_cpu_err; - load_rv2p_fw(bp, text, text_len, RV2P_PROC2); + load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2); /* Initialize the RX Processor. 
*/ cpu_reg.mode = BNX2_RXP_CPU_MODE; @@ -3009,6 +2935,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_rxp_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3032,6 +2959,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_txp_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3055,6 +2983,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_tpat_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3078,6 +3007,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_com_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3099,12 +3029,13 @@ bnx2_init_cpus(struct bnx2 *bp) if (CHIP_NUM(bp) == CHIP_NUM_5709) { fw = &bnx2_cp_fw_09; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; } init_cpu_err: - bnx2_gunzip_end(bp); + vfree(text); return rc; } @@ -3289,7 +3220,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp) val = REG_RD(bp, BNX2_MISC_CFG); REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); - if (!bp->flash_info->buffered) { + if (bp->flash_info->flags & BNX2_NV_WREN) { int j; REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); @@ -3349,7 +3280,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) u32 cmd; int j; - if (bp->flash_info->buffered) + if (bp->flash_info->flags & BNX2_NV_BUFFERED) /* Buffered flash, no erase needed */ return 0; @@ -3392,8 +3323,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) /* Build the command word. */ cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; - /* Calculate an offset of a buffered flash. */ - if (bp->flash_info->buffered) { + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { offset = ((offset / bp->flash_info->page_size) << bp->flash_info->page_bits) + (offset % bp->flash_info->page_size); @@ -3439,8 +3370,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) /* Build the command word. */ cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; - /* Calculate an offset of a buffered flash. */ - if (bp->flash_info->buffered) { + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { offset = ((offset / bp->flash_info->page_size) << bp->flash_info->page_bits) + (offset % bp->flash_info->page_size); @@ -3478,15 +3409,19 @@ static int bnx2_init_nvram(struct bnx2 *bp) { u32 val; - int j, entry_count, rc; + int j, entry_count, rc = 0; struct flash_spec *flash; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + bp->flash_info = &flash_5709; + goto get_flash_size; + } + /* Determine the selected interface. 
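	 * (The strap value read from BNX2_NVM_CFG1 below is matched against
	 * the flash_table[] entries; 5709 chips were already handled above
	 * with the fixed flash_5709 descriptor.)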
*/ val = REG_RD(bp, BNX2_NVM_CFG1); - entry_count = sizeof(flash_table) / sizeof(struct flash_spec); + entry_count = ARRAY_SIZE(flash_table); - rc = 0; if (val & 0x40000000) { /* Flash interface has been reconfigured */ @@ -3542,6 +3477,7 @@ bnx2_init_nvram(struct bnx2 *bp) return -ENODEV; } +get_flash_size: val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2); val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; if (val) @@ -3706,7 +3642,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, buf = align_buf; } - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); if (flash_buffer == NULL) { rc = -ENOMEM; @@ -3739,7 +3675,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, bnx2_enable_nvram_access(bp); cmd_flags = BNX2_NVM_COMMAND_FIRST; - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { int j; /* Read the whole page into the buffer @@ -3767,7 +3703,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write back the buffer data from page_start to * data_start */ i = 0; - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { /* Erase the page */ if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) goto nvram_write_end; @@ -3791,7 +3727,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write the new data from data_start to data_end */ for (addr = data_start; addr < data_end; addr += 4, i += 4) { if ((addr == page_end - 4) || - ((bp->flash_info->buffered) && + ((bp->flash_info->flags & BNX2_NV_BUFFERED) && (addr == data_end - 4))) { cmd_flags |= BNX2_NVM_COMMAND_LAST; @@ -3808,7 +3744,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write back the buffer data from data_end * to page_end */ - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { for (addr = data_end; addr < page_end; addr += 4, i += 4) { @@ -3856,12 +3792,6 @@ bnx2_init_remote_phy(struct bnx2 *bp) return; if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) { - if (netif_running(bp->dev)) { - val = BNX2_DRV_ACK_CAP_SIGNATURE | - BNX2_FW_CAP_REMOTE_PHY_CAPABLE; - REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, - val); - } bp->phy_flags |= REMOTE_PHY_CAP_FLAG; val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); @@ -3869,6 +3799,22 @@ bnx2_init_remote_phy(struct bnx2 *bp) bp->phy_port = PORT_FIBRE; else bp->phy_port = PORT_TP; + + if (netif_running(bp->dev)) { + u32 sig; + + if (val & BNX2_LINK_STATUS_LINK_UP) { + bp->link_up = 1; + netif_carrier_on(bp->dev); + } else { + bp->link_up = 0; + netif_carrier_off(bp->dev); + } + sig = BNX2_DRV_ACK_CAP_SIGNATURE | + BNX2_FW_CAP_REMOTE_PHY_CAPABLE; + REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, + sig); + } } } @@ -3877,6 +3823,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) { u32 val; int i, rc = 0; + u8 old_port; /* Wait for the current PCI transaction to complete before * issuing a reset. */ @@ -3918,11 +3865,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) /* Chip reset. */ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + /* Reading back any register after chip reset will hang the + * bus on 5706 A0 and A1. The msleep below provides plenty + * of margin for write posting. 
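+	 * (20 ms is a generous margin: the reset itself is expected to take
+	 * on the order of 30 us, per the polling loop below.)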
+ */ if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1)) { - current->state = TASK_UNINTERRUPTIBLE; - schedule_timeout(HZ / 50); - } + (CHIP_ID(bp) == CHIP_ID_5706_A1)) + msleep(20); /* Reset takes approximate 30 usec */ for (i = 0; i < 10; i++) { @@ -3953,8 +3902,9 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) return rc; spin_lock_bh(&bp->phy_lock); + old_port = bp->phy_port; bnx2_init_remote_phy(bp); - if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port) bnx2_set_default_remote_link(bp); spin_unlock_bh(&bp->phy_lock); @@ -4107,7 +4057,7 @@ bnx2_init_chip(struct bnx2 *bp) if (CHIP_NUM(bp) == CHIP_NUM_5708) REG_WR(bp, BNX2_HC_STATS_TICKS, 0); else - REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00); + REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ if (CHIP_ID(bp) == CHIP_ID_5706_A1) @@ -4127,10 +4077,6 @@ bnx2_init_chip(struct bnx2 *bp) REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); - if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & - BNX2_PORT_FEATURE_ASF_ENABLED) - bp->flags |= ASF_ENABLE_FLAG; - /* Initialize the receive filter. */ bnx2_set_rx_mode(bp->dev); @@ -4647,6 +4593,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) bnx2_set_mac_loopback(bp); } else if (loopback_mode == BNX2_PHY_LOOPBACK) { + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return 0; + bp->loopback = PHY_LOOPBACK; bnx2_set_phy_loopback(bp); } @@ -4815,6 +4764,11 @@ bnx2_test_link(struct bnx2 *bp) { u32 bmsr; + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { + if (bp->link_up) + return 0; + return -ENODEV; + } spin_lock_bh(&bp->phy_lock); bnx2_enable_bmsr1(bp); bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); @@ -5025,6 +4979,8 @@ bnx2_open(struct net_device *dev) if (rc) return rc; + napi_enable(&bp->napi); + if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) { if (pci_enable_msi(bp->pdev) == 0) { bp->flags |= USING_MSI_FLAG; @@ -5035,6 +4991,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_request_irq(bp); if (rc) { + napi_disable(&bp->napi); bnx2_free_mem(bp); return rc; } @@ -5042,6 +4999,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_init_nic(bp); if (rc) { + napi_disable(&bp->napi); bnx2_free_irq(bp); bnx2_free_skbs(bp); bnx2_free_mem(bp); @@ -5074,6 +5032,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_request_irq(bp); if (rc) { + napi_disable(&bp->napi); bnx2_free_skbs(bp); bnx2_free_mem(bp); del_timer_sync(&bp->timer); @@ -5287,7 +5246,8 @@ bnx2_close(struct net_device *dev) while (bp->in_reset_task) msleep(1); - bnx2_netif_stop(bp); + bnx2_disable_int_sync(bp); + napi_disable(&bp->napi); del_timer_sync(&bp->timer); if (bp->flags & NO_WOL_FLAG) reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; @@ -5786,8 +5746,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) bp->stats_ticks = USEC_PER_SEC; } - if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00; - bp->stats_ticks &= 0xffff00; + if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) + bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; if (netif_running(bp->dev)) { bnx2_netif_stop(bp); @@ -6055,9 +6016,16 @@ static struct { }; static int -bnx2_self_test_count(struct net_device *dev) +bnx2_get_sset_count(struct net_device *dev, int sset) { - return BNX2_NUM_TESTS; + switch (sset) { + case ETH_SS_TEST: + return BNX2_NUM_TESTS; + case 
ETH_SS_STATS: + return BNX2_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void @@ -6131,12 +6099,6 @@ bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } -static int -bnx2_get_stats_count(struct net_device *dev) -{ - return BNX2_NUM_STATS; -} - static void bnx2_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) @@ -6244,19 +6206,14 @@ static const struct ethtool_ops bnx2_ethtool_ops = { .set_pauseparam = bnx2_set_pauseparam, .get_rx_csum = bnx2_get_rx_csum, .set_rx_csum = bnx2_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = bnx2_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = bnx2_set_tso, - .self_test_count = bnx2_self_test_count, .self_test = bnx2_self_test, .get_strings = bnx2_get_strings, .phys_id = bnx2_phys_id, - .get_stats_count = bnx2_get_stats_count, .get_ethtool_stats = bnx2_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_sset_count = bnx2_get_sset_count, }; /* Called with rtnl_lock */ @@ -6462,7 +6419,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) u32 reg; u64 dma_mask, persist_dma_mask; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); bp = netdev_priv(dev); @@ -6629,6 +6585,21 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (i != 2) bp->fw_version[j++] = '.'; } + reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE); + if (reg & BNX2_PORT_FEATURE_WOL_ENABLED) + bp->wol = 1; + + if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) { + bp->flags |= ASF_ENABLE_FLAG; + + for (i = 0; i < 30; i++) { + reg = REG_RD_IND(bp, bp->shmem_base + + BNX2_BC_STATE_CONDITION); + if (reg & BNX2_CONDITION_MFW_RUN_MASK) + break; + msleep(10); + } + } reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION); reg &= BNX2_CONDITION_MFW_RUN_MASK; if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && @@ -6672,7 +6643,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ticks_int = 18; bp->rx_ticks = 18; - bp->stats_ticks = 1000000 & 0xffff00; + bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bp->timer_interval = HZ; bp->current_interval = HZ; @@ -6688,11 +6659,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->phy_port = PORT_TP; if (bp->phy_flags & PHY_SERDES_FLAG) { bp->phy_port = PORT_FIBRE; - bp->flags |= NO_WOL_FLAG; + reg = REG_RD_IND(bp, bp->shmem_base + + BNX2_SHARED_HW_CFG_CONFIG); + if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) { + bp->flags |= NO_WOL_FLAG; + bp->wol = 0; + } if (CHIP_NUM(bp) != CHIP_NUM_5706) { bp->phy_addr = 2; - reg = REG_RD_IND(bp, bp->shmem_base + - BNX2_SHARED_HW_CFG_CONFIG); if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; } @@ -6701,13 +6675,16 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || CHIP_NUM(bp) == CHIP_NUM_5708) bp->phy_flags |= PHY_CRC_FIX_FLAG; - else if (CHIP_ID(bp) == CHIP_ID_5709_A0) + else if (CHIP_ID(bp) == CHIP_ID_5709_A0 || + CHIP_ID(bp) == CHIP_ID_5709_A1) bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG; if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || (CHIP_ID(bp) == CHIP_ID_5708_B0) || - (CHIP_ID(bp) == CHIP_ID_5708_B1)) + (CHIP_ID(bp) == CHIP_ID_5708_B1)) { bp->flags |= NO_WOL_FLAG; + bp->wol = 0; + } if (CHIP_ID(bp) == CHIP_ID_5706_A0) { bp->tx_quick_cons_trip_int = @@ -6800,8 +6777,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed = 0; struct 
net_device *dev = NULL; struct bnx2 *bp; - int rc, i; + int rc; char str[40]; + DECLARE_MAC_BUF(mac); if (version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -6831,11 +6809,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef BCM_VLAN dev->vlan_rx_register = bnx2_vlan_rx_register; #endif - dev->poll = bnx2_poll; dev->ethtool_ops = &bnx2_ethtool_ops; - dev->weight = 64; bp = netdev_priv(dev); + netif_napi_add(dev, &bp->napi, bnx2_poll, 64); #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) dev->poll_controller = poll_bnx2; @@ -6870,19 +6847,14 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " - "IRQ %d, ", + "IRQ %d, node addr %s\n", dev->name, bp->name, ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', ((CHIP_ID(bp) & 0x0ff0) >> 4), bnx2_bus_string(bp, str), dev->base_addr, - bp->pdev->irq); - - printk("node addr "); - for (i = 0; i < 6; i++) - printk("%2.2x", dev->dev_addr[i]); - printk("\n"); + bp->pdev->irq, print_mac(mac, dev->dev_addr)); return 0; } @@ -6913,6 +6885,11 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) struct bnx2 *bp = netdev_priv(dev); u32 reset_code; + /* PCI register 4 needs to be saved whether netif_running() or not. + * MSI address and data need to be saved if using MSI and + * netif_running(). + */ + pci_save_state(pdev); if (!netif_running(dev)) return 0; @@ -6928,7 +6905,6 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; bnx2_reset_chip(bp, reset_code); bnx2_free_skbs(bp); - pci_save_state(pdev); bnx2_set_power_state(bp, pci_choose_state(pdev, state)); return 0; } @@ -6939,10 +6915,10 @@ bnx2_resume(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); + pci_restore_state(pdev); if (!netif_running(dev)) return 0; - pci_restore_state(pdev); bnx2_set_power_state(bp, PCI_D0); netif_device_attach(dev); bnx2_init_nic(bp); diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index d8cd1afeb23d..1dce0d1a2581 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6433,6 +6433,11 @@ struct sw_bd { #define ST_MICRO_FLASH_PAGE_SIZE 256 #define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536 +#define BCM5709_FLASH_PAGE_BITS 8 +#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS) +#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1) +#define BCM5709_FLASH_PAGE_SIZE 256 + #define NVRAM_TIMEOUT_COUNT 30000 @@ -6449,7 +6454,10 @@ struct flash_spec { u32 config2; u32 config3; u32 write1; - u32 buffered; + u32 flags; +#define BNX2_NV_BUFFERED 0x00000001 +#define BNX2_NV_TRANSLATE 0x00000002 +#define BNX2_NV_WREN 0x00000004 u32 page_bits; u32 page_size; u32 addr_mask; @@ -6465,6 +6473,8 @@ struct bnx2 { struct net_device *dev; struct pci_dev *pdev; + struct napi_struct napi; + atomic_t intr_sem; struct status_block *status_blk; @@ -6671,9 +6681,6 @@ struct bnx2 { u32 flash_size; int status_stats_size; - - struct z_stream_s *strm; - void *gunzip_buf; }; static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset); @@ -6745,13 +6752,11 @@ struct fw_info { const u32 sbss_addr; const u32 sbss_len; const u32 sbss_index; - const u32 *sbss; /* BSS section. */ const u32 bss_addr; const u32 bss_len; const u32 bss_index; - const u32 *bss; /* Read-only section. 
*/ const u32 rodata_addr; @@ -6903,6 +6908,7 @@ struct fw_info { #define BNX2_SHARED_HW_CFG_LED_MODE_MAC 0 #define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1 0x100 #define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2 0x200 +#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX 0x8000 #define BNX2_SHARED_HW_CFG_CONFIG2 0x00000040 #define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK 0x00fff000 diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h index b49f439e0f67..a6d78243163b 100644 --- a/drivers/net/bnx2_fw.h +++ b/drivers/net/bnx2_fw.h @@ -15,7 +15,8 @@ */ static u8 bnx2_COM_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x45, 0x30, 0xe7, 0x45, 0x00, 0x03, 0xdc, 0x5a, +/* 0x1f, 0x8b, 0x08, 0x00, 0x45, 0x30, 0xe7, 0x45, 0x00, 0x03, */ + 0xdc, 0x5a, 0x6b, 0x6c, 0x1c, 0xd7, 0x75, 0x3e, 0x33, 0x3b, 0x4b, 0xae, 0xc8, 0x15, 0x35, 0xa2, 0xc6, 0xf4, 0x5a, 0xa2, 0xed, 0x5d, 0x72, 0x28, 0x12, 0x96, 0xec, 0x6e, 0x68, 0xda, 0x62, 0x8c, 0x8d, 0xb4, 0xd9, 0xa5, 0x0c, 0xa1, @@ -1047,8 +1048,6 @@ static const u32 bnx2_COM_b06FwRodata[(0x88/4) + 1] = { 0x08002bd8, 0x08002c08, 0x08002c38, 0x00000000, 0x080060cc, 0x080060cc, 0x080060cc, 0x080060cc, 0x080060cc, 0x08006100, 0x08006100, 0x08006140, 0x0800614c, 0x0800614c, 0x080060cc, 0x00000000, 0x00000000 }; -static const u32 bnx2_COM_b06FwBss[(0x88/4) + 1] = { 0x0 }; -static const u32 bnx2_COM_b06FwSbss[(0x60/4) + 1] = { 0x0 }; static struct fw_info bnx2_com_fw_06 = { .ver_major = 0x3, @@ -1071,12 +1070,10 @@ static struct fw_info bnx2_com_fw_06 = { .sbss_addr = 0x08007e00, .sbss_len = 0x60, .sbss_index = 0x0, - .sbss = bnx2_COM_b06FwSbss, .bss_addr = 0x08007e60, .bss_len = 0x88, .bss_index = 0x0, - .bss = bnx2_COM_b06FwBss, .rodata_addr = 0x08007d58, .rodata_len = 0x88, @@ -1085,8 +1082,9 @@ static struct fw_info bnx2_com_fw_06 = { }; static u8 bnx2_RXP_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x08, 0xcb, 0xa3, 0x46, 0x45, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xec, 0x5c, 0x6f, 0x6c, +/* 0x1f, 0x8b, 0x08, 0x08, 0xcb, 0xa3, 0x46, 0x45, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xec, 0x5c, 0x6f, 0x6c, 0x1c, 0xc7, 0x75, 0x7f, 0x3b, 0xbb, 0xa4, 0x4e, 0xd4, 0x91, 0x5c, 0x1e, 0x4f, 0xf4, 0x49, 0x66, 0x94, 0x5d, 0x71, 0x25, 0x5e, 0x2d, 0xc6, 0x5d, 0x31, 0x57, 0x9b, 0x08, 0xce, 0xf1, 0x79, 0xef, 0x64, 0xb1, 0x86, 0x0a, @@ -1760,9 +1758,6 @@ static u32 bnx2_RXP_b06FwRodata[(0x278/4) + 1] = { 0x08006030, 0x08006030, 0x08006018, 0x08006030, 0x08006030, 0x08006030, 0x08006024, 0x00000000, 0x00000000 }; -static u32 bnx2_RXP_b06FwBss[(0x13dc/4) + 1] = { 0x0 }; -static u32 bnx2_RXP_b06FwSbss[(0x2c/4) + 1] = { 0x0 }; - static struct fw_info bnx2_rxp_fw_06 = { .ver_major = 0x2, .ver_minor = 0x8, @@ -1784,12 +1779,10 @@ static struct fw_info bnx2_rxp_fw_06 = { .sbss_addr = 0x080069c0, .sbss_len = 0x2c, .sbss_index = 0x0, - .sbss = bnx2_RXP_b06FwSbss, .bss_addr = 0x080069f0, .bss_len = 0x13dc, .bss_index = 0x0, - .bss = bnx2_RXP_b06FwBss, .rodata_addr = 0x08006728, .rodata_len = 0x278, @@ -1798,8 +1791,9 @@ static struct fw_info bnx2_rxp_fw_06 = { }; static u8 bnx2_rv2p_proc1[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x5e, 0xd0, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xc5, 0x56, 0xcf, 0x6b, +/* 0x1f, 0x8b, 0x08, 0x08, 0x5e, 0xd0, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xc5, 0x56, 0xcf, 0x6b, 0x13, 0x51, 0x10, 0x9e, 0xec, 0x6e, 0xb2, 0xdb, 0x74, 0xbb, 0x1b, 0x2b, 0xda, 0xa0, 0xb1, 0x8d, 0x51, 0x6a, 0x7f, 0xa4, 0xb4, 0x11, 0x0f, 0x82, 0x42, 0x25, 0x3d, 0x04, 
0x54, 0x44, 0x7a, 0x28, 0x22, 0x82, 0x36, 0x8a, @@ -1877,8 +1871,9 @@ static u8 bnx2_rv2p_proc1[] = { 0x12, 0x3d, 0x80, 0x0b, 0x00, 0x00, 0x00 }; static u8 bnx2_rv2p_proc2[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x7e, 0xd1, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xcd, 0x58, 0x5b, 0x6c, +/* 0x1f, 0x8b, 0x08, 0x08, 0x7e, 0xd1, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xcd, 0x58, 0x5b, 0x6c, 0x54, 0x55, 0x14, 0x3d, 0xf3, 0xe8, 0xcc, 0x9d, 0xe9, 0xed, 0x9d, 0xf2, 0xb2, 0x03, 0xad, 0x08, 0xe5, 0xd1, 0x56, 0x29, 0xe8, 0x54, 0xab, 0x18, 0x15, 0x2c, 0x5a, 0x8c, 0x26, 0x68, 0xf0, 0xf9, 0x63, 0x14, 0x04, 0xda, @@ -2057,8 +2052,9 @@ static u8 bnx2_rv2p_proc2[] = { 0x17, 0x00, 0x00, 0x00 }; static u8 bnx2_TPAT_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x47, 0xd2, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xc5, 0x57, 0x4d, 0x68, +/* 0x1f, 0x8b, 0x08, 0x08, 0x47, 0xd2, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xc5, 0x57, 0x4d, 0x68, 0x1c, 0xe7, 0x19, 0x7e, 0xe7, 0x77, 0x47, 0x62, 0x25, 0x8d, 0x93, 0x3d, 0xac, 0x5d, 0xa5, 0x99, 0x91, 0x46, 0x3f, 0x54, 0x26, 0x9e, 0x84, 0xa5, 0x56, 0x61, 0x20, 0xe3, 0x99, 0x95, 0x2c, 0x0c, 0x05, 0x07, 0x42, 0x08, @@ -2252,8 +2248,6 @@ static u8 bnx2_TPAT_b06FwText[] = { static u32 bnx2_TPAT_b06FwData[(0x0/4) + 1] = { 0x0 }; static u32 bnx2_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 }; -static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; -static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; static struct fw_info bnx2_tpat_fw_06 = { .ver_major = 0x1, @@ -2276,12 +2270,10 @@ static struct fw_info bnx2_tpat_fw_06 = { .sbss_addr = 0x08001a60, .sbss_len = 0x34, .sbss_index = 0x0, - .sbss = bnx2_TPAT_b06FwSbss, .bss_addr = 0x08001aa0, .bss_len = 0x250, .bss_index = 0x0, - .bss = bnx2_TPAT_b06FwBss, .rodata_addr = 0x00000000, .rodata_len = 0x0, @@ -2290,8 +2282,9 @@ static struct fw_info bnx2_tpat_fw_06 = { }; static u8 bnx2_TXP_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x21, 0xd3, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xed, 0x5c, 0x6d, 0x6c, +/* 0x1f, 0x8b, 0x08, 0x08, 0x21, 0xd3, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xed, 0x5c, 0x6d, 0x6c, 0x1b, 0xf7, 0x79, 0x7f, 0xee, 0x85, 0xd2, 0x51, 0x96, 0xe9, 0x93, 0xc2, 0x78, 0x6c, 0xc0, 0xa6, 0x77, 0xd6, 0x51, 0x66, 0x20, 0xb5, 0xa0, 0x05, 0x36, 0x55, 0x87, 0x43, 0x73, 0x3e, 0x52, 0x2f, 0x4e, 0x5c, 0x57, 0x71, @@ -2708,8 +2701,6 @@ static u8 bnx2_TXP_b06FwText[] = { static u32 bnx2_TXP_b06FwData[(0x0/4) + 1] = { 0x0 }; static u32 bnx2_TXP_b06FwRodata[(0x0/4) + 1] = { 0x0 }; -static u32 bnx2_TXP_b06FwBss[(0x1c4/4) + 1] = { 0x0 }; -static u32 bnx2_TXP_b06FwSbss[(0x38/4) + 1] = { 0x0 }; static struct fw_info bnx2_txp_fw_06 = { .ver_major = 0x1, @@ -2732,12 +2723,10 @@ static struct fw_info bnx2_txp_fw_06 = { .sbss_addr = 0x08005760, .sbss_len = 0x38, .sbss_index = 0x0, - .sbss = bnx2_TXP_b06FwSbss, .bss_addr = 0x080057a0, .bss_len = 0x1c4, .bss_index = 0x0, - .bss = bnx2_TXP_b06FwBss, .rodata_addr = 0x00000000, .rodata_len = 0x0, diff --git a/drivers/net/bnx2_fw2.h b/drivers/net/bnx2_fw2.h index 2c067531f031..5bd52bead9bf 100644 --- a/drivers/net/bnx2_fw2.h +++ b/drivers/net/bnx2_fw2.h @@ -15,7 +15,8 @@ */ static u8 bnx2_COM_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xdc, 0x5b, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 
0x45, 0x00, 0x03, */ + 0xdc, 0x5b, 0x6d, 0x70, 0x5c, 0xd5, 0x79, 0x7e, 0xef, 0xd9, 0xbb, 0xf2, 0x5a, 0x92, 0xe5, 0x6b, 0x79, 0x23, 0x16, 0x4b, 0xc0, 0xae, 0x75, 0x6d, 0x69, 0xb0, 0x43, 0x16, 0xa1, 0x80, 0x9a, 0xd9, 0xc0, 0xb2, 0x2b, 0x33, 0x9e, 0x0c, @@ -1045,8 +1046,6 @@ static const u32 bnx2_COM_b09FwRodata[(0x88/4) + 1] = { 0x08002b3c, 0x08002b6c, 0x08002b9c, 0x00000000, 0x0800604c, 0x0800604c, 0x0800604c, 0x0800604c, 0x0800604c, 0x08006078, 0x08006078, 0x080060b8, 0x080060c4, 0x080060c4, 0x0800604c, 0x00000000, 0x00000000 }; -static const u32 bnx2_COM_b09FwBss[(0x88/4) + 1] = { 0x0 }; -static const u32 bnx2_COM_b09FwSbss[(0x60/4) + 1] = { 0x0 }; static struct fw_info bnx2_com_fw_09 = { .ver_major = 0x3, @@ -1069,12 +1068,10 @@ static struct fw_info bnx2_com_fw_09 = { .sbss_addr = 0x08007e60, .sbss_len = 0x60, .sbss_index = 0x0, - .sbss = bnx2_COM_b09FwSbss, .bss_addr = 0x08007ec0, .bss_len = 0x88, .bss_index = 0x0, - .bss = bnx2_COM_b09FwBss, .rodata_addr = 0x08007dc0, .rodata_len = 0x88, @@ -1083,7 +1080,8 @@ static struct fw_info bnx2_com_fw_09 = { }; static u8 bnx2_CP_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0f, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xbd, 0x7d, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0f, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xbd, 0x7d, 0x0d, 0x74, 0x5c, 0x57, 0x7d, 0xe7, 0xff, 0xdd, 0x19, 0x49, 0x63, 0x59, 0x96, 0x9f, 0xe5, 0x89, 0x32, 0x51, 0x84, 0x3d, 0x23, 0x3d, 0xd9, 0x22, 0x12, 0xe1, 0xc5, 0x11, 0xac, 0xda, 0x2a, 0xe9, 0x30, 0x92, 0x3f, 0x12, @@ -2241,8 +2239,6 @@ static const u32 bnx2_CP_b09FwRodata[(0x118/4) + 1] = { 0x080032e8, 0x08003300, 0x08003320, 0x08003358, 0x08003338, 0x08003338, 0x080050d4, 0x080050d4, 0x080050d4, 0x080050d4, 0x080050d4, 0x080050fc, 0x080050fc, 0x08005124, 0x08005174, 0x08005144, 0x00000000 }; -static const u32 bnx2_CP_b09FwBss[(0x3b0/4) + 1] = { 0x0 }; -static const u32 bnx2_CP_b09FwSbss[(0xa1/4) + 1] = { 0x0 }; static struct fw_info bnx2_cp_fw_09 = { .ver_major = 0x3, @@ -2265,12 +2261,10 @@ static struct fw_info bnx2_cp_fw_09 = { .sbss_addr = 0x08007024, .sbss_len = 0xa1, .sbss_index = 0x0, - .sbss = bnx2_CP_b09FwSbss, .bss_addr = 0x080070d0, .bss_len = 0x3b0, .bss_index = 0x0, - .bss = bnx2_CP_b09FwBss, .rodata_addr = 0x08006ee8, .rodata_len = 0x118, @@ -2279,7 +2273,8 @@ static struct fw_info bnx2_cp_fw_09 = { }; static u8 bnx2_RXP_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xec, 0x5c, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xec, 0x5c, 0x5d, 0x6c, 0x1c, 0xd7, 0x75, 0x3e, 0xf3, 0x43, 0x6a, 0x49, 0xf1, 0x67, 0xb8, 0x5c, 0xb1, 0x2b, 0x99, 0x96, 0x77, 0xc9, 0x91, 0xc8, 0x58, 0x8a, 0x31, 0xa2, 0x09, 0x5b, 0x48, 0x17, 0xf6, 0x76, 0x76, 0x25, 0xb1, 0xb1, @@ -2950,8 +2945,6 @@ static const u32 bnx2_RXP_b09FwRodata[(0x278/4) + 1] = { 0x08006058, 0x08006070, 0x08006070, 0x08006070, 0x08006058, 0x08006070, 0x08006070, 0x08006070, 0x08006058, 0x08006070, 0x08006070, 0x08006070, 0x08006064, 0x00000000, 0x00000000 }; -static const u32 bnx2_RXP_b09FwBss[(0x13dc/4) + 1] = { 0x0 }; -static const u32 bnx2_RXP_b09FwSbss[(0x20/4) + 1] = { 0x0 }; static struct fw_info bnx2_rxp_fw_09 = { .ver_major = 0x3, @@ -2974,12 +2967,10 @@ static struct fw_info bnx2_rxp_fw_09 = { .sbss_addr = 0x08006a00, .sbss_len = 0x20, .sbss_index = 0x0, - .sbss = bnx2_RXP_b09FwSbss, .bss_addr = 0x08006a20, .bss_len = 0x13dc, .bss_index = 0x0, - .bss = bnx2_RXP_b09FwBss, .rodata_addr = 0x08006768, .rodata_len = 0x278, @@ -2988,7 +2979,8 @@ static struct fw_info bnx2_rxp_fw_09 = { }; static u8 bnx2_TPAT_b09FwText[] = { - 0x1f, 
0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xcd, 0x58, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xcd, 0x58, 0x5d, 0x68, 0x1c, 0xd7, 0x15, 0x3e, 0xf3, 0xb7, 0x3b, 0x52, 0x24, 0xeb, 0x5a, 0xd9, 0xa6, 0xeb, 0xa0, 0x34, 0x33, 0xda, 0x91, 0xac, 0x22, 0x13, 0x4f, 0x9d, 0x25, 0x16, 0x65, 0x21, 0x93, 0xd9, 0x91, 0xac, 0x98, 0x3c, @@ -3241,8 +3233,6 @@ static u8 bnx2_TPAT_b09FwText[] = { static const u32 bnx2_TPAT_b09FwData[(0x0/4) + 1] = { 0x0 }; static const u32 bnx2_TPAT_b09FwRodata[(0x0/4) + 1] = { 0x0 }; -static const u32 bnx2_TPAT_b09FwBss[(0x850/4) + 1] = { 0x0 }; -static const u32 bnx2_TPAT_b09FwSbss[(0x2c/4) + 1] = { 0x0 }; static struct fw_info bnx2_tpat_fw_09 = { .ver_major = 0x3, @@ -3265,12 +3255,10 @@ static struct fw_info bnx2_tpat_fw_09 = { .sbss_addr = 0x08002088, .sbss_len = 0x2c, .sbss_index = 0x0, - .sbss = bnx2_TPAT_b09FwSbss, .bss_addr = 0x080020c0, .bss_len = 0x850, .bss_index = 0x0, - .bss = bnx2_TPAT_b09FwBss, .rodata_addr = 0x00000000, .rodata_len = 0x0, @@ -3279,7 +3267,8 @@ static struct fw_info bnx2_tpat_fw_09 = { }; static u8 bnx2_TXP_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xcd, 0x7c, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xcd, 0x7c, 0x6f, 0x70, 0x5b, 0xd7, 0x95, 0xdf, 0x79, 0xef, 0x81, 0x24, 0x48, 0xd1, 0xd4, 0x13, 0x17, 0x56, 0x60, 0x87, 0x71, 0x00, 0xf1, 0x81, 0x66, 0x42, 0xae, 0x04, 0x2b, 0x4c, 0xc2, 0x6d, 0xd1, 0xf8, 0x05, 0x00, 0x29, 0x48, @@ -4055,8 +4044,6 @@ static const u32 bnx2_TXP_b09FwRodata[(0x30/4) + 1] = { 0x08004060, 0x0800408c, 0x080040d4, 0x080040d4, 0x08003f60, 0x08003f8c, 0x08003f8c, 0x080040d4, 0x080040d4, 0x080040d4, 0x08003ff4, 0x00000000, 0x00000000 }; -static const u32 bnx2_TXP_b09FwBss[(0xa20/4) + 1] = { 0x0 }; -static const u32 bnx2_TXP_b09FwSbss[(0x8c/4) + 1] = { 0x0 }; static struct fw_info bnx2_txp_fw_09 = { .ver_major = 0x3, @@ -4079,12 +4066,10 @@ static struct fw_info bnx2_txp_fw_09 = { .sbss_addr = 0x08004750, .sbss_len = 0x8c, .sbss_index = 0x0, - .sbss = bnx2_TXP_b09FwSbss, .bss_addr = 0x080047e0, .bss_len = 0xa20, .bss_index = 0x0, - .bss = bnx2_TXP_b09FwBss, .rodata_addr = 0x08004638, .rodata_len = 0x30, diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index f829e4ad8b49..7a045a37056e 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -29,6 +29,7 @@ #include <linux/ethtool.h> #include <linux/if_bonding.h> #include <linux/pkt_sched.h> +#include <net/net_namespace.h> #include "bonding.h" #include "bond_3ad.h" @@ -100,7 +101,6 @@ static u16 __get_link_speed(struct port *port); static u8 __get_duplex(struct port *port); static inline void __initialize_port_locks(struct port *port); //conversions -static void __htons_lacpdu(struct lacpdu *lacpdu); static u16 __ad_timer_to_ticks(u16 timer_type, u16 Par); @@ -419,26 +419,6 @@ static inline void __initialize_port_locks(struct port *port) } //conversions -/** - * __htons_lacpdu - convert the contents of a LACPDU to network byte order - * @lacpdu: the speicifed lacpdu - * - * For each multi-byte field in the lacpdu, convert its content - */ -static void __htons_lacpdu(struct lacpdu *lacpdu) -{ - if (lacpdu) { - lacpdu->actor_system_priority = htons(lacpdu->actor_system_priority); - lacpdu->actor_key = htons(lacpdu->actor_key); - lacpdu->actor_port_priority = htons(lacpdu->actor_port_priority); - lacpdu->actor_port = htons(lacpdu->actor_port); - lacpdu->partner_system_priority = htons(lacpdu->partner_system_priority); - 
lacpdu->partner_key = htons(lacpdu->partner_key); - lacpdu->partner_port_priority = htons(lacpdu->partner_port_priority); - lacpdu->partner_port = htons(lacpdu->partner_port); - lacpdu->collector_max_delay = htons(lacpdu->collector_max_delay); - } -} /** * __ad_timer_to_ticks - convert a given timer type to AD module ticks @@ -826,11 +806,11 @@ static inline void __update_lacpdu_from_port(struct port *port) * lacpdu->actor_information_length initialized */ - lacpdu->actor_system_priority = port->actor_system_priority; + lacpdu->actor_system_priority = htons(port->actor_system_priority); lacpdu->actor_system = port->actor_system; - lacpdu->actor_key = port->actor_oper_port_key; - lacpdu->actor_port_priority = port->actor_port_priority; - lacpdu->actor_port = port->actor_port_number; + lacpdu->actor_key = htons(port->actor_oper_port_key); + lacpdu->actor_port_priority = htons(port->actor_port_priority); + lacpdu->actor_port = htons(port->actor_port_number); lacpdu->actor_state = port->actor_oper_port_state; /* lacpdu->reserved_3_1 initialized @@ -838,11 +818,11 @@ static inline void __update_lacpdu_from_port(struct port *port) * lacpdu->partner_information_length initialized */ - lacpdu->partner_system_priority = port->partner_oper_system_priority; + lacpdu->partner_system_priority = htons(port->partner_oper_system_priority); lacpdu->partner_system = port->partner_oper_system; - lacpdu->partner_key = port->partner_oper_key; - lacpdu->partner_port_priority = port->partner_oper_port_priority; - lacpdu->partner_port = port->partner_oper_port_number; + lacpdu->partner_key = htons(port->partner_oper_key); + lacpdu->partner_port_priority = htons(port->partner_oper_port_priority); + lacpdu->partner_port = htons(port->partner_oper_port_number); lacpdu->partner_state = port->partner_oper_port_state; /* lacpdu->reserved_3_2 initialized @@ -854,9 +834,6 @@ static inline void __update_lacpdu_from_port(struct port *port) * terminator_length initialized * reserved_50[50] initialized */ - - /* Convert all non u8 parameters to Big Endian for transmit */ - __htons_lacpdu(lacpdu); } ////////////////////////////////////////////////////////////////////////////////////// @@ -1833,7 +1810,7 @@ static void ad_initialize_lacpdu(struct lacpdu *lacpdu) } lacpdu->tlv_type_collector_info = 0x03; lacpdu->collector_information_length= 0x10; - lacpdu->collector_max_delay = AD_COLLECTOR_MAX_DELAY; + lacpdu->collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY); for (index=0; index<=11; index++) { lacpdu->reserved_12[index]=0; } @@ -2448,6 +2425,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac struct slave *slave = NULL; int ret = NET_RX_DROP; + if (dev->nd_net != &init_net) + goto out; + if (!(dev->flags & IFF_MASTER)) goto out; diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 6ad5ad6e65d5..862952fa6fd9 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -108,7 +108,7 @@ typedef enum { typedef struct ad_header { struct mac_addr destination_address; struct mac_addr source_address; - u16 length_type; + __be16 length_type; } ad_header_t; // Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) @@ -117,25 +117,25 @@ typedef struct lacpdu { u8 version_number; u8 tlv_type_actor_info; // = actor information(type/length/value) u8 actor_information_length; // = 20 - u16 actor_system_priority; + __be16 actor_system_priority; struct mac_addr actor_system; - u16 actor_key; - u16 
actor_port_priority; - u16 actor_port; + __be16 actor_key; + __be16 actor_port_priority; + __be16 actor_port; u8 actor_state; u8 reserved_3_1[3]; // = 0 u8 tlv_type_partner_info; // = partner information u8 partner_information_length; // = 20 - u16 partner_system_priority; + __be16 partner_system_priority; struct mac_addr partner_system; - u16 partner_key; - u16 partner_port_priority; - u16 partner_port; + __be16 partner_key; + __be16 partner_port_priority; + __be16 partner_port; u8 partner_state; u8 reserved_3_2[3]; // = 0 u8 tlv_type_collector_info; // = collector information u8 collector_information_length; // = 16 - u16 collector_max_delay; + __be16 collector_max_delay; u8 reserved_12[12]; u8 tlv_type_terminator; // = terminator u8 terminator_length; // = 0 diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 92c3b6f6a8e7..aea2217c56eb 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -87,20 +87,20 @@ static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; struct learning_pkt { u8 mac_dst[ETH_ALEN]; u8 mac_src[ETH_ALEN]; - u16 type; + __be16 type; u8 padding[ETH_ZLEN - ETH_HLEN]; }; struct arp_pkt { - u16 hw_addr_space; - u16 prot_addr_space; + __be16 hw_addr_space; + __be16 prot_addr_space; u8 hw_addr_len; u8 prot_addr_len; - u16 op_code; + __be16 op_code; u8 mac_src[ETH_ALEN]; /* sender hardware address */ - u32 ip_src; /* sender IP address */ + __be32 ip_src; /* sender IP address */ u8 mac_dst[ETH_ALEN]; /* target hardware address */ - u32 ip_dst; /* target IP address */ + __be32 ip_dst; /* target IP address */ }; #pragma pack() @@ -345,6 +345,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct struct arp_pkt *arp = (struct arp_pkt *)skb->data; int res = NET_RX_DROP; + if (bond_dev->nd_net != &init_net) + goto out; + if (!(bond_dev->flags & IFF_MASTER)) goto out; @@ -579,7 +582,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla } /* mark all clients using src_ip to be updated */ -static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip) +static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; @@ -1264,7 +1267,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) struct ethhdr *eth_data; struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *tx_slave = NULL; - static const u32 ip_bcast = 0xffffffff; + static const __be32 ip_bcast = htonl(0xffffffff); int hash_size = 0; int do_tx_balance = 1; u32 hash_index = 0; @@ -1308,8 +1311,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) hash_size = sizeof(ipv6_hdr(skb)->daddr); break; case ETH_P_IPX: - if (ipx_hdr(skb)->ipx_checksum != - __constant_htons(IPX_NO_CHECKSUM)) { + if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { /* something is wrong with this packet */ do_tx_balance = 0; break; diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index 28f2a2fd1b5a..fd8726429890 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -60,8 +60,8 @@ struct tlb_client_info { * ------------------------------------------------------------------------- */ struct rlb_client_info { - u32 ip_src; /* the server IP address */ - u32 ip_dst; /* the client IP address */ + __be32 ip_src; /* the server IP address */ + __be32 ip_dst; /* the client IP address */ u8 
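The bond_3ad and bond_alb changes above drop the separate __htons_lacpdu() pass: multi-byte wire-format fields in the LACPDU, ARP and learning-packet structures are now typed __be16/__be32 and converted with htons()/htonl() at the point of assignment, which lets sparse check byte order. A minimal sketch of the idiom, with illustrative struct and field names:

/* Illustrative only: annotate on-the-wire fields as __be16 and convert
 * where the value is written, instead of a bulk conversion pass later. */
struct foo_pdu {
	__be16 system_priority;		/* big endian on the wire */
	__be16 port_number;
};

static void foo_fill_pdu(struct foo_pdu *pdu, u16 prio, u16 port)
{
	pdu->system_priority = htons(prio);
	pdu->port_number = htons(port);
}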
mac_dst[ETH_ALEN]; /* the client MAC address */ u32 next; /* The next Hash table entry index */ u32 prev; /* The previous Hash table entry index */ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index cb9cb3013f42..64bfec32e2a6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -75,6 +75,7 @@ #include <linux/if_vlan.h> #include <linux/if_bonding.h> #include <net/route.h> +#include <net/net_namespace.h> #include "bonding.h" #include "bond_3ad.h" #include "bond_alb.h" @@ -143,7 +144,7 @@ static struct proc_dir_entry *bond_proc_dir = NULL; #endif extern struct rw_semaphore bonding_rwsem; -static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; +static __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; static int arp_ip_count = 0; static int bond_mode = BOND_MODE_ROUNDROBIN; static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; @@ -613,38 +614,20 @@ down: static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; - static int (* ioctl)(struct net_device *, struct ifreq *, int); - struct ifreq ifr; struct ethtool_cmd etool; + int res; /* Fake speed and duplex */ slave->speed = SPEED_100; slave->duplex = DUPLEX_FULL; - if (slave_dev->ethtool_ops) { - int res; - - if (!slave_dev->ethtool_ops->get_settings) { - return -1; - } - - res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); - if (res < 0) { - return -1; - } - - goto verify; - } + if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings) + return -1; - ioctl = slave_dev->do_ioctl; - strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ); - etool.cmd = ETHTOOL_GSET; - ifr.ifr_data = (char*)&etool; - if (!ioctl || (IOCTL(slave_dev, &ifr, SIOCETHTOOL) < 0)) { + res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); + if (res < 0) return -1; - } -verify: switch (etool.speed) { case SPEED_10: case SPEED_100: @@ -690,7 +673,6 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de static int (* ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; - struct ethtool_value etool; if (bond->params.use_carrier) { return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; @@ -721,9 +703,10 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de } } - /* try SIOCETHTOOL ioctl, some drivers cache ETHTOOL_GLINK */ - /* for a period of time so we attempt to get link status */ - /* from it last if the above MII ioctls fail... */ + /* + * Some drivers cache ETHTOOL_GLINK for a period of time so we only + * attempt to get link status from it if the above MII ioctls fail. + */ if (slave_dev->ethtool_ops) { if (slave_dev->ethtool_ops->get_link) { u32 link; @@ -734,23 +717,9 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de } } - if (ioctl) { - strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ); - etool.cmd = ETHTOOL_GLINK; - ifr.ifr_data = (char*)&etool; - if (IOCTL(slave_dev, &ifr, SIOCETHTOOL) == 0) { - if (etool.data == 1) { - return BMSR_LSTATUS; - } else { - dprintk("SIOCETHTOOL shows link down\n"); - return 0; - } - } - } - /* * If reporting, report that either there's no dev->do_ioctl, - * or both SIOCGMIIREG and SIOCETHTOOL failed (meaning that we + * or both SIOCGMIIREG and get_link failed (meaning that we * cannot report link status). If not reporting, pretend * we're ok. 
*/ @@ -1234,43 +1203,35 @@ static int bond_sethwaddr(struct net_device *bond_dev, return 0; } -#define BOND_INTERSECT_FEATURES \ - (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO) +#define BOND_VLAN_FEATURES \ + (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \ + NETIF_F_HW_VLAN_FILTER) /* * Compute the common dev->feature set available to all slaves. Some - * feature bits are managed elsewhere, so preserve feature bits set on - * master device that are not part of the examined set. + * feature bits are managed elsewhere, so preserve those feature bits + * on the master device. */ static int bond_compute_features(struct bonding *bond) { - unsigned long features = BOND_INTERSECT_FEATURES; struct slave *slave; struct net_device *bond_dev = bond->dev; + unsigned long features = bond_dev->features; unsigned short max_hard_header_len = ETH_HLEN; int i; + features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); + features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | + NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; + bond_for_each_slave(bond, slave, i) { - features &= (slave->dev->features & BOND_INTERSECT_FEATURES); + features = netdev_compute_features(features, + slave->dev->features); if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; } - if ((features & NETIF_F_SG) && - !(features & NETIF_F_ALL_CSUM)) - features &= ~NETIF_F_SG; - - /* - * features will include NETIF_F_TSO (NETIF_F_UFO) iff all - * slave devices support NETIF_F_TSO (NETIF_F_UFO), which - * implies that all slaves also support scatter-gather - * (NETIF_F_SG), which implies that features also includes - * NETIF_F_SG. So no need to check whether we have an - * illegal combination of NETIF_F_{TSO,UFO} and - * !NETIF_F_SG - */ - - features |= (bond_dev->features & ~BOND_INTERSECT_FEATURES); + features |= (bond_dev->features & BOND_VLAN_FEATURES); bond_dev->features = features; bond_dev->hard_header_len = max_hard_header_len; @@ -1643,6 +1604,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) struct slave *slave, *oldcurrent; struct sockaddr addr; int mac_addr_differ; + DECLARE_MAC_BUF(mac); /* slave is not a slave or master is not master of this slave */ if (!(slave_dev->flags & IFF_SLAVE) || @@ -1670,19 +1632,13 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) ETH_ALEN); if (!mac_addr_differ && (bond->slave_cnt > 1)) { printk(KERN_WARNING DRV_NAME - ": %s: Warning: the permanent HWaddr of %s " - "- %02X:%02X:%02X:%02X:%02X:%02X - is " - "still in use by %s. Set the HWaddr of " - "%s to a different address to avoid " - "conflicts.\n", + ": %s: Warning: the permanent HWaddr of %s - " + "%s - is still in use by %s. 
" + "Set the HWaddr of %s to a different address " + "to avoid conflicts.\n", bond_dev->name, slave_dev->name, - slave->perm_hwaddr[0], - slave->perm_hwaddr[1], - slave->perm_hwaddr[2], - slave->perm_hwaddr[3], - slave->perm_hwaddr[4], - slave->perm_hwaddr[5], + print_mac(mac, slave->perm_hwaddr), bond_dev->name, slave_dev->name); } @@ -2270,7 +2226,7 @@ out: } -static u32 bond_glean_dev_ip(struct net_device *dev) +static __be32 bond_glean_dev_ip(struct net_device *dev) { struct in_device *idev; struct in_ifaddr *ifa; @@ -2313,7 +2269,7 @@ static int bond_has_ip(struct bonding *bond) return 0; } -static int bond_has_this_ip(struct bonding *bond, u32 ip) +static int bond_has_this_ip(struct bonding *bond, __be32 ip) { struct vlan_entry *vlan, *vlan_next; @@ -2337,7 +2293,7 @@ static int bond_has_this_ip(struct bonding *bond, u32 ip) * switches in VLAN mode (especially if ports are configured as * "native" to a VLAN) might not pass non-tagged frames. */ -static void bond_arp_send(struct net_device *slave_dev, int arp_op, u32 dest_ip, u32 src_ip, unsigned short vlan_id) +static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id) { struct sk_buff *skb; @@ -2365,7 +2321,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, u32 dest_ip, static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { int i, vlan_id, rv; - u32 *targets = bond->params.arp_targets; + __be32 *targets = bond->params.arp_targets; struct vlan_entry *vlan, *vlan_next; struct net_device *vlan_dev; struct flowi fl; @@ -2470,10 +2426,10 @@ static void bond_send_gratuitous_arp(struct bonding *bond) } } -static void bond_validate_arp(struct bonding *bond, struct slave *slave, u32 sip, u32 tip) +static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) { int i; - u32 *targets = bond->params.arp_targets; + __be32 *targets = bond->params.arp_targets; targets = bond->params.arp_targets; for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { @@ -2495,7 +2451,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack struct slave *slave; struct bonding *bond; unsigned char *arp_ptr; - u32 sip, tip; + __be32 sip, tip; + + if (dev->nd_net != &init_net) + goto out; if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) goto out; @@ -3042,6 +3001,7 @@ static void bond_info_show_master(struct seq_file *seq) if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; + DECLARE_MAC_BUF(mac); seq_puts(seq, "\n802.3ad info\n"); seq_printf(seq, "LACP rate: %s\n", @@ -3061,13 +3021,8 @@ static void bond_info_show_master(struct seq_file *seq) ad_info.actor_key); seq_printf(seq, "\tPartner Key: %d\n", ad_info.partner_key); - seq_printf(seq, "\tPartner Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", - ad_info.partner_system[0], - ad_info.partner_system[1], - ad_info.partner_system[2], - ad_info.partner_system[3], - ad_info.partner_system[4], - ad_info.partner_system[5]); + seq_printf(seq, "\tPartner Mac Address: %s\n", + print_mac(mac, ad_info.partner_system)); } } } @@ -3075,6 +3030,7 @@ static void bond_info_show_master(struct seq_file *seq) static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave) { struct bonding *bond = seq->private; + DECLARE_MAC_BUF(mac); seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); seq_printf(seq, "MII Status: %s\n", @@ -3083,10 +3039,8 @@ static void bond_info_show_slave(struct seq_file *seq, 
const struct slave *slave slave->link_failure_count); seq_printf(seq, - "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", - slave->perm_hwaddr[0], slave->perm_hwaddr[1], - slave->perm_hwaddr[2], slave->perm_hwaddr[3], - slave->perm_hwaddr[4], slave->perm_hwaddr[5]); + "Permanent HW addr: %s\n", + print_mac(mac, slave->perm_hwaddr)); if (bond->params.mode == BOND_MODE_8023AD) { const struct aggregator *agg @@ -3184,7 +3138,7 @@ static void bond_create_proc_dir(void) { int len = strlen(DRV_NAME); - for (bond_proc_dir = proc_net->subdir; bond_proc_dir; + for (bond_proc_dir = init_net.proc_net->subdir; bond_proc_dir; bond_proc_dir = bond_proc_dir->next) { if ((bond_proc_dir->namelen == len) && !memcmp(bond_proc_dir->name, DRV_NAME, len)) { @@ -3193,7 +3147,7 @@ static void bond_create_proc_dir(void) } if (!bond_proc_dir) { - bond_proc_dir = proc_mkdir(DRV_NAME, proc_net); + bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net); if (bond_proc_dir) { bond_proc_dir->owner = THIS_MODULE; } else { @@ -3228,7 +3182,7 @@ static void bond_destroy_proc_dir(void) bond_proc_dir->owner = NULL; } } else { - remove_proc_entry(DRV_NAME, proc_net); + remove_proc_entry(DRV_NAME, init_net.proc_net); bond_proc_dir = NULL; } } @@ -3335,6 +3289,9 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v { struct net_device *event_dev = (struct net_device *)ptr; + if (event_dev->nd_net != &init_net) + return NOTIFY_DONE; + dprintk("event_dev: %s, event: %lx\n", (event_dev ? event_dev->name : "None"), event); @@ -3470,14 +3427,14 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, { struct ethhdr *data = (struct ethhdr *)skb->data; struct iphdr *iph = ip_hdr(skb); - u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); + __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); int layer4_xor = 0; if (skb->protocol == __constant_htons(ETH_P_IP)) { if (!(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) && (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)) { - layer4_xor = htons((*layer4hdr ^ *(layer4hdr + 1))); + layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; @@ -3752,7 +3709,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd } down_write(&(bonding_rwsem)); - slave_dev = dev_get_by_name(ifr->ifr_slave); + slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave); dprintk("slave_dev=%p: \n", slave_dev); @@ -4235,10 +4192,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, } static const struct ethtool_ops bond_ethtool_ops = { - .get_tx_csum = ethtool_op_get_tx_csum, - .get_tso = ethtool_op_get_tso, - .get_ufo = ethtool_op_get_ufo, - .get_sg = ethtool_op_get_sg, .get_drvinfo = bond_ethtool_get_drvinfo, }; @@ -4568,7 +4521,7 @@ static int bond_check_params(struct bond_params *params) arp_ip_target[arp_ip_count]); arp_interval = 0; } else { - u32 ip = in_aton(arp_ip_target[arp_ip_count]); + __be32 ip = in_aton(arp_ip_target[arp_ip_count]); arp_target[arp_ip_count] = ip; } } @@ -4707,8 +4660,6 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond goto out_netdev; } - SET_MODULE_OWNER(bond_dev); - res = register_netdevice(bond_dev); if (res < 0) { goto out_bond; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 60cccf2aa959..6f49ca7e9b66 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -31,10 +31,10 @@ #include <linux/inetdevice.h> #include 
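Many hunks in this diff (bnx2, bonding, cassini, cris, cs89x0) replace open-coded %02x:%02x:... loops with the DECLARE_MAC_BUF()/print_mac() helpers available in this kernel generation (later kernels use the %pM printk format instead). The pattern, as a small sketch with a placeholder function name:

static void foo_log_hwaddr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);	/* on-stack "xx:xx:xx:xx:xx:xx" buffer */

	printk(KERN_INFO "%s: MAC address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}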
<linux/in.h> #include <linux/sysfs.h> -#include <linux/string.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/rtnetlink.h> +#include <net/net_namespace.h> /* #define BONDING_DEBUG 1 */ #include "bonding.h" @@ -299,7 +299,7 @@ static ssize_t bonding_store_slaves(struct device *d, read_unlock_bh(&bond->lock); printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n", bond->dev->name, ifname); - dev = dev_get_by_name(ifname); + dev = dev_get_by_name(&init_net, ifname); if (!dev) { printk(KERN_INFO DRV_NAME ": %s: Interface %s does not exist!\n", @@ -682,16 +682,16 @@ static ssize_t bonding_store_arp_targets(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - u32 newtarget; + __be32 newtarget; int i = 0, done = 0, ret = count; struct bonding *bond = to_bond(d); - u32 *targets; + __be32 *targets; targets = bond->params.arp_targets; newtarget = in_aton(buf + 1); /* look for adds */ if (buf[0] == '+') { - if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) { + if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { printk(KERN_ERR DRV_NAME ": %s: invalid ARP target %u.%u.%u.%u specified for addition\n", bond->dev->name, NIPQUAD(newtarget)); @@ -727,7 +727,7 @@ static ssize_t bonding_store_arp_targets(struct device *d, } else if (buf[0] == '-') { - if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) { + if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { printk(KERN_ERR DRV_NAME ": %s: invalid ARP target %d.%d.%d.%d specified for removal\n", bond->dev->name, NIPQUAD(newtarget)); @@ -1361,17 +1361,14 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d, { int count = 0; struct bonding *bond = to_bond(d); + DECLARE_MAC_BUF(mac); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; if (!bond_3ad_get_active_agg_info(bond, &ad_info)) { - count = sprintf(buf,"%02x:%02x:%02x:%02x:%02x:%02x\n", - ad_info.partner_system[0], - ad_info.partner_system[1], - ad_info.partner_system[2], - ad_info.partner_system[3], - ad_info.partner_system[4], - ad_info.partner_system[5]) + 1; + count = sprintf(buf,"%s\n", + print_mac(mac, ad_info.partner_system)) + + 1; } } else diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 6dcbd25e3ef0..2a6af7d23728 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -132,7 +132,7 @@ struct bond_params { int downdelay; int lacp_fast; char primary[IFNAMSIZ]; - u32 arp_targets[BOND_MAX_ARP_TARGETS]; + __be32 arp_targets[BOND_MAX_ARP_TARGETS]; }; struct bond_parm_tbl { @@ -142,7 +142,7 @@ struct bond_parm_tbl { struct vlan_entry { struct list_head vlan_list; - u32 vlan_ip; + __be32 vlan_ip; unsigned short vlan_id; }; @@ -193,7 +193,7 @@ struct bonding { struct list_head bond_list; struct dev_mc_list *mc_list; int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); - u32 master_ip; + __be32 master_ip; u16 flags; struct ad_bond_info ad_info; struct alb_bond_info alb_info; diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c index 7845eaf6f29f..88edb986691a 100644 --- a/drivers/net/bsd_comp.c +++ b/drivers/net/bsd_comp.c @@ -395,20 +395,18 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp) * Allocate the main control structure for this instance. */ maxmaxcode = MAXCODE(bits); - db = kmalloc(sizeof (struct bsd_db), + db = kzalloc(sizeof (struct bsd_db), GFP_KERNEL); if (!db) { return NULL; } - memset (db, 0, sizeof(struct bsd_db)); /* * Allocate space for the dictionary. 
This may be more than one page in * length. */ - db->dict = (struct bsd_dict *) vmalloc (hsize * - sizeof (struct bsd_dict)); + db->dict = vmalloc(hsize * sizeof(struct bsd_dict)); if (!db->dict) { bsd_free (db); @@ -427,8 +425,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp) */ else { - db->lens = (unsigned short *) vmalloc ((maxmaxcode + 1) * - sizeof (db->lens[0])); + db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0])); if (!db->lens) { bsd_free (db); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index f6e4030c73d1..563bf5f6fa2a 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2485,7 +2485,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &cp->napi); #else cas_rx_ringN(cp, ring, 0); #endif @@ -2536,7 +2536,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &cp->napi); #else cas_rx_ringN(cp, 1, 0); #endif @@ -2592,7 +2592,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) if (status & INTR_RX_DONE) { #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &cp->napi); #else cas_rx_ringN(cp, 0, 0); #endif @@ -2607,9 +2607,10 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) #ifdef USE_NAPI -static int cas_poll(struct net_device *dev, int *budget) +static int cas_poll(struct napi_struct *napi, int budget) { - struct cas *cp = netdev_priv(dev); + struct cas *cp = container_of(napi, struct cas, napi); + struct net_device *dev = cp->dev; int i, enable_intr, todo, credits; u32 status = readl(cp->regs + REG_INTR_STATUS); unsigned long flags; @@ -2620,20 +2621,18 @@ static int cas_poll(struct net_device *dev, int *budget) /* NAPI rx packets. 
we spread the credits across all of the * rxc rings - */ - todo = min(*budget, dev->quota); - - /* to make sure we're fair with the work we loop through each + * + * to make sure we're fair with the work we loop through each * ring N_RX_COMP_RING times with a request of - * todo / N_RX_COMP_RINGS + * budget / N_RX_COMP_RINGS */ enable_intr = 1; credits = 0; for (i = 0; i < N_RX_COMP_RINGS; i++) { int j; for (j = 0; j < N_RX_COMP_RINGS; j++) { - credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS); - if (credits >= todo) { + credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); + if (credits >= budget) { enable_intr = 0; goto rx_comp; } @@ -2641,9 +2640,6 @@ static int cas_poll(struct net_device *dev, int *budget) } rx_comp: - *budget -= credits; - dev->quota -= credits; - /* final rx completion */ spin_lock_irqsave(&cp->lock, flags); if (status) @@ -2674,11 +2670,10 @@ rx_comp: #endif spin_unlock_irqrestore(&cp->lock, flags); if (enable_intr) { - netif_rx_complete(dev); + netif_rx_complete(dev, napi); cas_unmask_intr(cp); - return 0; } - return 1; + return credits; } #endif @@ -4351,6 +4346,9 @@ static int cas_open(struct net_device *dev) goto err_spare; } +#ifdef USE_NAPI + napi_enable(&cp->napi); +#endif /* init hw */ cas_lock_all_save(cp, flags); cas_clean_rings(cp); @@ -4376,6 +4374,9 @@ static int cas_close(struct net_device *dev) unsigned long flags; struct cas *cp = netdev_priv(dev); +#ifdef USE_NAPI + napi_enable(&cp->napi); +#endif /* Make sure we don't get distracted by suspend/resume */ mutex_lock(&cp->pm_mutex); @@ -4771,9 +4772,14 @@ static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, cas_read_regs(cp, p, regs->len / sizeof(u32)); } -static int cas_get_stats_count(struct net_device *dev) +static int cas_get_sset_count(struct net_device *dev, int sset) { - return CAS_NUM_STAT_KEYS; + switch (sset) { + case ETH_SS_STATS: + return CAS_NUM_STAT_KEYS; + default: + return -EOPNOTSUPP; + } } static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -4817,7 +4823,7 @@ static const struct ethtool_ops cas_ethtool_ops = { .set_msglevel = cas_set_msglevel, .get_regs_len = cas_get_regs_len, .get_regs = cas_get_regs, - .get_stats_count = cas_get_stats_count, + .get_sset_count = cas_get_sset_count, .get_strings = cas_get_strings, .get_ethtool_stats = cas_get_ethtool_stats, }; @@ -4876,6 +4882,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, int i, err, pci_using_dac; u16 pci_cmd; u8 orig_cacheline_size = 0, cas_cacheline_size = 0; + DECLARE_MAC_BUF(mac); if (cas_version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -4899,7 +4906,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev, err = -ENOMEM; goto err_out_disable_pdev; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); err = pci_request_regions(pdev, dev->name); @@ -5062,8 +5068,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, dev->watchdog_timeo = CAS_TX_TIMEOUT; dev->change_mtu = cas_change_mtu; #ifdef USE_NAPI - dev->poll = cas_poll; - dev->weight = 64; + netif_napi_add(dev, &cp->napi, cas_poll, 64); #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = cas_netpoll; @@ -5085,16 +5090,12 @@ static int __devinit cas_init_one(struct pci_dev *pdev, i = readl(cp->regs + REG_BIM_CFG); printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " - "Ethernet[%d] ", dev->name, + "Ethernet[%d] %s\n", dev->name, (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", (i & BIM_CFG_32BIT) ? "32" : "64", (i & BIM_CFG_66MHZ) ? 
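The cassini hunks above, like the bnx2 and Chelsio ones elsewhere in this diff, convert from the old dev->poll/dev->weight/dev->quota interface to a struct napi_struct embedded in the driver's private data. A minimal sketch of the converted poll handler and its registration, using placeholder foo_* names and the 2.6.24-era netif_rx_complete(dev, napi) signature:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done;

	work_done = foo_rx(fp, budget);		/* process up to 'budget' packets */
	if (work_done < budget) {
		netif_rx_complete(fp->dev, napi);	/* done: leave polling mode */
		foo_enable_rx_irq(fp);
	}
	return work_done;
}

/*
 * Registration replaces the old dev->poll/dev->weight fields:
 *   probe: netif_napi_add(dev, &fp->napi, foo_poll, 64);
 *   open:  napi_enable(&fp->napi);
 *   close: napi_disable(&fp->napi);
 */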
"66" : "33", - (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, + print_mac(mac, dev->dev_addr)); pci_set_drvdata(pdev, dev); cp->hw_running = 1; diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h index a970804487c7..2f93f83342d2 100644 --- a/drivers/net/cassini.h +++ b/drivers/net/cassini.h @@ -4280,6 +4280,8 @@ struct cas { int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; int rx_last[N_RX_DESC_RINGS]; + struct napi_struct napi; + /* Set when chip is actually in operational state * (ie. not power managed) */ int hw_running; diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile index 743ad8b41b5e..57a4b262fd3f 100644 --- a/drivers/net/chelsio/Makefile +++ b/drivers/net/chelsio/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb.o -cxgb-$(CONFIG_CHELSIO_T1_1G) += mac.o mv88e1xxx.o vsc7326.o +cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \ mv88x201x.o my3126.o $(cxgb-y) diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 8ba702c8b560..846ca5383d3c 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h @@ -278,6 +278,7 @@ struct adapter { struct peespi *espi; struct petp *tp; + struct napi_struct napi; struct port_info port[MAX_NPORTS]; struct delayed_work stats_update_task; struct timer_list stats_update_timer; @@ -371,6 +372,7 @@ extern void t1_interrupts_enable(adapter_t *adapter); extern void t1_interrupts_disable(adapter_t *adapter); extern void t1_interrupts_clear(adapter_t *adapter); extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); +extern void t1_elmer0_ext_intr(adapter_t *adapter); extern int t1_slow_intr_handler(adapter_t *adapter); extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 231ce43b97cf..2dbf8dc116c6 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c @@ -255,8 +255,11 @@ static int cxgb_open(struct net_device *dev) struct adapter *adapter = dev->priv; int other_ports = adapter->open_device_map & PORT_MASK; - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + napi_enable(&adapter->napi); + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { + napi_disable(&adapter->napi); return err; + } __set_bit(dev->if_port, &adapter->open_device_map); link_start(&adapter->port[dev->if_port]); @@ -274,6 +277,7 @@ static int cxgb_close(struct net_device *dev) struct cmac *mac = p->mac; netif_stop_queue(dev); + napi_disable(&adapter->napi); mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); netif_carrier_off(dev); @@ -435,9 +439,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->bus_info, pci_name(adapter->pdev)); } -static int get_stats_count(struct net_device *dev) +static int get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(stats_strings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -790,17 +799,14 @@ static const struct ethtool_ops t1_ethtool_ops = { .set_pauseparam = set_pauseparam, .get_rx_csum = get_rx_csum, .set_rx_csum = set_rx_csum, - .get_tx_csum = 
ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_link = ethtool_op_get_link, .get_strings = get_strings, - .get_stats_count = get_stats_count, + .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, - .get_tso = ethtool_op_get_tso, .set_tso = set_tso, }; @@ -1032,7 +1038,6 @@ static int __devinit init_one(struct pci_dev *pdev, goto out_free_dev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); if (!adapter) { @@ -1113,8 +1118,7 @@ static int __devinit init_one(struct pci_dev *pdev, netdev->poll_controller = t1_netpoll; #endif #ifdef CONFIG_CHELSIO_T1_NAPI - netdev->weight = 64; - netdev->poll = t1_poll; + netif_napi_add(netdev, &adapter->napi, t1_poll, 64); #endif SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); diff --git a/drivers/net/chelsio/mac.c b/drivers/net/chelsio/mac.c deleted file mode 100644 index 1d972825eac3..000000000000 --- a/drivers/net/chelsio/mac.c +++ /dev/null @@ -1,368 +0,0 @@ -/* $Date: 2005/10/22 00:42:59 $ $RCSfile: mac.c,v $ $Revision: 1.32 $ */ -#include "gmac.h" -#include "regs.h" -#include "fpga_defs.h" - -#define MAC_CSR_INTERFACE_GMII 0x0 -#define MAC_CSR_INTERFACE_TBI 0x1 -#define MAC_CSR_INTERFACE_MII 0x2 -#define MAC_CSR_INTERFACE_RMII 0x3 - -/* Chelsio's MAC statistics. */ -struct mac_statistics { - - /* Transmit */ - u32 TxFramesTransmittedOK; - u32 TxReserved1; - u32 TxReserved2; - u32 TxOctetsTransmittedOK; - u32 TxFramesWithDeferredXmissions; - u32 TxLateCollisions; - u32 TxFramesAbortedDueToXSCollisions; - u32 TxFramesLostDueToIntMACXmitError; - u32 TxReserved3; - u32 TxMulticastFrameXmittedOK; - u32 TxBroadcastFramesXmittedOK; - u32 TxFramesWithExcessiveDeferral; - u32 TxPAUSEMACCtrlFramesTransmitted; - - /* Receive */ - u32 RxFramesReceivedOK; - u32 RxFrameCheckSequenceErrors; - u32 RxAlignmentErrors; - u32 RxOctetsReceivedOK; - u32 RxFramesLostDueToIntMACRcvError; - u32 RxMulticastFramesReceivedOK; - u32 RxBroadcastFramesReceivedOK; - u32 RxInRangeLengthErrors; - u32 RxTxOutOfRangeLengthField; - u32 RxFrameTooLongErrors; - u32 RxPAUSEMACCtrlFramesReceived; -}; - -static int static_aPorts[] = { - FPGA_GMAC_INTERRUPT_PORT0, - FPGA_GMAC_INTERRUPT_PORT1, - FPGA_GMAC_INTERRUPT_PORT2, - FPGA_GMAC_INTERRUPT_PORT3 -}; - -struct _cmac_instance { - u32 index; -}; - -static int mac_intr_enable(struct cmac *mac) -{ - u32 mac_intr; - - if (t1_is_asic(mac->adapter)) { - /* ASIC */ - - /* We don't use the on chip MAC for ASIC products. */ - } else { - /* FPGA */ - - /* Set parent gmac interrupt. */ - mac_intr = readl(mac->adapter->regs + A_PL_ENABLE); - mac_intr |= FPGA_PCIX_INTERRUPT_GMAC; - writel(mac_intr, mac->adapter->regs + A_PL_ENABLE); - - mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - mac_intr |= static_aPorts[mac->instance->index]; - writel(mac_intr, - mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - } - - return 0; -} - -static int mac_intr_disable(struct cmac *mac) -{ - u32 mac_intr; - - if (t1_is_asic(mac->adapter)) { - /* ASIC */ - - /* We don't use the on chip MAC for ASIC products. */ - } else { - /* FPGA */ - - /* Set parent gmac interrupt. 
*/ - mac_intr = readl(mac->adapter->regs + A_PL_ENABLE); - mac_intr &= ~FPGA_PCIX_INTERRUPT_GMAC; - writel(mac_intr, mac->adapter->regs + A_PL_ENABLE); - - mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - mac_intr &= ~(static_aPorts[mac->instance->index]); - writel(mac_intr, - mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - } - - return 0; -} - -static int mac_intr_clear(struct cmac *mac) -{ - u32 mac_intr; - - if (t1_is_asic(mac->adapter)) { - /* ASIC */ - - /* We don't use the on chip MAC for ASIC products. */ - } else { - /* FPGA */ - - /* Set parent gmac interrupt. */ - writel(FPGA_PCIX_INTERRUPT_GMAC, - mac->adapter->regs + A_PL_CAUSE); - mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); - mac_intr |= (static_aPorts[mac->instance->index]); - writel(mac_intr, - mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); - } - - return 0; -} - -static int mac_get_address(struct cmac *mac, u8 addr[6]) -{ - u32 data32_lo, data32_hi; - - data32_lo = readl(mac->adapter->regs - + MAC_REG_IDLO(mac->instance->index)); - data32_hi = readl(mac->adapter->regs - + MAC_REG_IDHI(mac->instance->index)); - - addr[0] = (u8) ((data32_hi >> 8) & 0xFF); - addr[1] = (u8) ((data32_hi) & 0xFF); - addr[2] = (u8) ((data32_lo >> 24) & 0xFF); - addr[3] = (u8) ((data32_lo >> 16) & 0xFF); - addr[4] = (u8) ((data32_lo >> 8) & 0xFF); - addr[5] = (u8) ((data32_lo) & 0xFF); - return 0; -} - -static int mac_reset(struct cmac *mac) -{ - u32 data32; - int mac_in_reset, time_out = 100; - int idx = mac->instance->index; - - data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx)); - writel(data32 | F_MAC_RESET, - mac->adapter->regs + MAC_REG_CSR(idx)); - - do { - data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx)); - - mac_in_reset = data32 & F_MAC_RESET; - if (mac_in_reset) - udelay(1); - } while (mac_in_reset && --time_out); - - if (mac_in_reset) { - CH_ERR("%s: MAC %d reset timed out\n", - mac->adapter->name, idx); - return 2; - } - - return 0; -} - -static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) -{ - u32 val; - - val = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - val &= ~(F_MAC_PROMISC | F_MAC_MC_ENABLE); - val |= V_MAC_PROMISC(t1_rx_mode_promisc(rm) != 0); - val |= V_MAC_MC_ENABLE(t1_rx_mode_allmulti(rm) != 0); - writel(val, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - - return 0; -} - -static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, - int fc) -{ - u32 data32; - - data32 = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - data32 &= ~(F_MAC_HALF_DUPLEX | V_MAC_SPEED(M_MAC_SPEED) | - V_INTERFACE(M_INTERFACE) | F_MAC_TX_PAUSE_ENABLE | - F_MAC_RX_PAUSE_ENABLE); - - switch (speed) { - case SPEED_10: - case SPEED_100: - data32 |= V_INTERFACE(MAC_CSR_INTERFACE_MII); - data32 |= V_MAC_SPEED(speed == SPEED_10 ? 
0 : 1); - break; - case SPEED_1000: - data32 |= V_INTERFACE(MAC_CSR_INTERFACE_GMII); - data32 |= V_MAC_SPEED(2); - break; - } - - if (duplex >= 0) - data32 |= V_MAC_HALF_DUPLEX(duplex == DUPLEX_HALF); - - if (fc >= 0) { - data32 |= V_MAC_RX_PAUSE_ENABLE((fc & PAUSE_RX) != 0); - data32 |= V_MAC_TX_PAUSE_ENABLE((fc & PAUSE_TX) != 0); - } - - writel(data32, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - return 0; -} - -static int mac_enable(struct cmac *mac, int which) -{ - u32 val; - - val = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - if (which & MAC_DIRECTION_RX) - val |= F_MAC_RX_ENABLE; - if (which & MAC_DIRECTION_TX) - val |= F_MAC_TX_ENABLE; - writel(val, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - return 0; -} - -static int mac_disable(struct cmac *mac, int which) -{ - u32 val; - - val = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - if (which & MAC_DIRECTION_RX) - val &= ~F_MAC_RX_ENABLE; - if (which & MAC_DIRECTION_TX) - val &= ~F_MAC_TX_ENABLE; - writel(val, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - return 0; -} - -#if 0 -static int mac_set_ifs(struct cmac *mac, u32 mode) -{ - t1_write_reg_4(mac->adapter, - MAC_REG_IFS(mac->instance->index), - mode); - return 0; -} - -static int mac_enable_isl(struct cmac *mac) -{ - u32 data32 = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - data32 |= F_MAC_RX_ENABLE | F_MAC_TX_ENABLE; - t1_write_reg_4(mac->adapter, - MAC_REG_CSR(mac->instance->index), - data32); - return 0; -} -#endif - -static int mac_set_mtu(struct cmac *mac, int mtu) -{ - if (mtu > 9600) - return -EINVAL; - writel(mtu + ETH_HLEN + VLAN_HLEN, - mac->adapter->regs + MAC_REG_LARGEFRAMELENGTH(mac->instance->index)); - - return 0; -} - -static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, - int flag) -{ - struct mac_statistics st; - u32 *p = (u32 *) & st, i; - - writel(0, - mac->adapter->regs + MAC_REG_RMCNT(mac->instance->index)); - - for (i = 0; i < sizeof(st) / sizeof(u32); i++) - *p++ = readl(mac->adapter->regs - + MAC_REG_RMDATA(mac->instance->index)); - - /* XXX convert stats */ - return &mac->stats; -} - -static void mac_destroy(struct cmac *mac) -{ - kfree(mac); -} - -static struct cmac_ops chelsio_mac_ops = { - .destroy = mac_destroy, - .reset = mac_reset, - .interrupt_enable = mac_intr_enable, - .interrupt_disable = mac_intr_disable, - .interrupt_clear = mac_intr_clear, - .enable = mac_enable, - .disable = mac_disable, - .set_mtu = mac_set_mtu, - .set_rx_mode = mac_set_rx_mode, - .set_speed_duplex_fc = mac_set_speed_duplex_fc, - .macaddress_get = mac_get_address, - .statistics_update = mac_update_statistics, -}; - -static struct cmac *mac_create(adapter_t *adapter, int index) -{ - struct cmac *mac; - u32 data32; - - if (index >= 4) - return NULL; - - mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); - if (!mac) - return NULL; - - mac->ops = &chelsio_mac_ops; - mac->instance = (cmac_instance *) (mac + 1); - - mac->instance->index = index; - mac->adapter = adapter; - - data32 = readl(adapter->regs + MAC_REG_CSR(mac->instance->index)); - data32 &= ~(F_MAC_RESET | F_MAC_PROMISC | F_MAC_PROMISC | - F_MAC_LB_ENABLE | F_MAC_RX_ENABLE | F_MAC_TX_ENABLE); - data32 |= F_MAC_JUMBO_ENABLE; - writel(data32, adapter->regs + MAC_REG_CSR(mac->instance->index)); - - /* Initialize the random backoff seed. 
*/ - data32 = 0x55aa + (3 * index); - writel(data32, - adapter->regs + MAC_REG_GMRANDBACKOFFSEED(mac->instance->index)); - - /* Check to see if the mac address needs to be set manually. */ - data32 = readl(adapter->regs + MAC_REG_IDLO(mac->instance->index)); - if (data32 == 0 || data32 == 0xffffffff) { - /* - * Add a default MAC address if we can't read one. - */ - writel(0x43FFFFFF - index, - adapter->regs + MAC_REG_IDLO(mac->instance->index)); - writel(0x0007, - adapter->regs + MAC_REG_IDHI(mac->instance->index)); - } - - (void) mac_set_mtu(mac, 1500); - return mac; -} - -const struct gmac t1_chelsio_mac_ops = { - .create = mac_create -}; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index e4f874a70fe5..ffa7e649a6ef 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1620,23 +1620,20 @@ static int process_pure_responses(struct adapter *adapter) * or protection from interrupts as data interrupts are off at this point and * other adapter interrupts do not interfere. */ -int t1_poll(struct net_device *dev, int *budget) +int t1_poll(struct napi_struct *napi, int budget) { - struct adapter *adapter = dev->priv; + struct adapter *adapter = container_of(napi, struct adapter, napi); + struct net_device *dev = adapter->port[0].dev; int work_done; - work_done = process_responses(adapter, min(*budget, dev->quota)); - *budget -= work_done; - dev->quota -= work_done; - - if (unlikely(responses_pending(adapter))) - return 1; - - netif_rx_complete(dev); - writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); - - return 0; + work_done = process_responses(adapter, budget); + if (likely(!responses_pending(adapter))) { + netif_rx_complete(dev, napi); + writel(adapter->sge->respQ.cidx, + adapter->regs + A_SG_SLEEPING); + } + return work_done; } /* @@ -1653,13 +1650,13 @@ irqreturn_t t1_interrupt(int irq, void *data) writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); - if (__netif_rx_schedule_prep(dev)) { + if (napi_schedule_prep(&adapter->napi)) { if (process_pure_responses(adapter)) - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &adapter->napi); else { /* no data, no NAPI needed */ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); - netif_poll_enable(dev); /* undo schedule_prep */ + napi_enable(&adapter->napi); /* undo schedule_prep */ } } return IRQ_HANDLED; diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h index d132a0ef2a22..713d9c55f24d 100644 --- a/drivers/net/chelsio/sge.h +++ b/drivers/net/chelsio/sge.h @@ -77,7 +77,7 @@ int t1_sge_configure(struct sge *, struct sge_params *); int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); void t1_sge_destroy(struct sge *); irqreturn_t t1_interrupt(int irq, void *cookie); -int t1_poll(struct net_device *, int *); +int t1_poll(struct napi_struct *, int); int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); void t1_set_vlan_accel(struct adapter *adapter, int on_off); diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 7de9a611e1f7..dc50151bed81 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c @@ -884,7 +884,7 @@ static int asic_slow_intr(adapter_t *adapter) if (cause & F_PL_INTR_PCIX) t1_pci_intr_handler(adapter); if (cause & F_PL_INTR_EXT) - t1_elmer0_ext_intr_handler(adapter); + t1_elmer0_ext_intr(adapter); /* Clear the interrupts just processed. 
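The Chelsio sge.c hunks above (t1_interrupt/t1_poll) show the interrupt-side half of the NAPI conversion: the handler calls napi_schedule_prep() and only hands work to the poll routine if there is really something to do, otherwise it undoes the scheduling. A compact sketch with placeholder foo_* names and an assumed adapter->netdev back-pointer:

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_adapter *adapter = dev_id;

	foo_ack_irq(adapter);			/* clear the interrupt cause */
	if (napi_schedule_prep(&adapter->napi)) {
		if (foo_work_pending(adapter))
			__netif_rx_schedule(adapter->netdev, &adapter->napi);
		else
			napi_enable(&adapter->napi);	/* undo schedule_prep */
	}
	return IRQ_HANDLED;
}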
*/ writel(cause, adapter->regs + A_PL_CAUSE); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 5bdf5ca85a65..314b2f68f78f 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -618,12 +618,8 @@ e100_set_mac_address(struct net_device *dev, void *p) /* show it in the log as well */ - printk(KERN_INFO "%s: changed MAC to ", dev->name); - - for (i = 0; i < 5; i++) - printk("%02X:", dev->dev_addr[i]); - - printk("%02X\n", dev->dev_addr[i]); + printk(KERN_INFO "%s: changed MAC to %s\n", + dev->name, print_mac(mac, dev->dev_addr)); spin_unlock(&np->lock); diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 9774bb1b3e80..571750975137 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -516,8 +516,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) unsigned rev_type = 0; int eeprom_buff[CHKSUM_LEN]; int retval; + DECLARE_MAC_BUF(mac); - SET_MODULE_OWNER(dev); /* Initialize the device structure. */ if (!modular) { memset(lp, 0, sizeof(*lp)); @@ -806,7 +806,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) i = cs8900_irq_map[0]; #else /* Translate the IRQ using the IRQ mapping table. */ - if (i >= sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0])) + if (i >= ARRAY_SIZE(cs8900_irq_map)) printk("\ncs89x0: invalid ISA interrupt number %d\n", i); else i = cs8900_irq_map[i]; @@ -841,11 +841,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) } /* print the ethernet address. */ - printk(", MAC"); - for (i = 0; i < ETH_ALEN; i++) - { - printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); - } + printk(", MAC %s", print_mac(mac, dev->dev_addr)); dev->open = net_open; dev->stop = net_close; @@ -1247,11 +1243,11 @@ write_irq(struct net_device *dev, int chip_type, int irq) if (chip_type == CS8900) { /* Search the mapping table for the corresponding IRQ pin. 
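
The cris and cs89x0 hunks above replace per-byte printk() loops with the DECLARE_MAC_BUF()/print_mac() helpers. A small self-contained sketch of that pairing; example_show_mac() is illustrative, not driver code.

static void example_show_mac(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);	/* stack buffer for the formatted address */

	/* print_mac() fills 'mac' with xx:xx:xx:xx:xx:xx and returns it */
	printk(KERN_INFO "%s: MAC address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}
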
*/ - for (i = 0; i != sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0]); i++) + for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) if (cs8900_irq_map[i] == irq) break; /* Not found */ - if (i == sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0])) + if (i == ARRAY_SIZE(cs8900_irq_map)) i = 3; writereg(dev, PP_CS8900_ISAINT, i); } else { @@ -1807,17 +1803,15 @@ static int set_mac_address(struct net_device *dev, void *p) int i; struct sockaddr *addr = p; - if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (net_debug) { - printk("%s: Setting MAC address to ", dev->name); - for (i = 0; i < dev->addr_len; i++) - printk(" %2.2x", dev->dev_addr[i]); - printk(".\n"); + DECLARE_MAC_BUF(mac); + printk("%s: Setting MAC address to %s.\n", + dev->name, print_mac(mac, dev->dev_addr)); } /* set the Ethernet address */ for (i=0; i < ETH_ALEN/2; i++) diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index ab72563b81ee..044261703381 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -49,9 +49,13 @@ typedef irqreturn_t(*intr_handler_t) (int, void *); struct vlan_group; +struct adapter; +struct sge_qset; struct port_info { + struct adapter *adapter; struct vlan_group *vlan_grp; + struct sge_qset *qs; const struct port_type_info *port_type; u8 port_id; u8 rx_csum_offload; @@ -171,10 +175,12 @@ enum { /* per port SGE statistics */ }; struct sge_qset { /* an SGE queue set */ + struct adapter *adap; + struct napi_struct napi; struct sge_rspq rspq; struct sge_fl fl[SGE_RXQ_PER_SET]; struct sge_txq txq[SGE_TXQ_PER_SET]; - struct net_device *netdev; /* associated net device */ + struct net_device *netdev; unsigned long txq_stopped; /* which Tx queues are stopped */ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ unsigned long port_stats[SGE_PSTAT_MAX]; @@ -219,12 +225,6 @@ struct adapter { struct delayed_work adap_check_task; struct work_struct ext_intr_handler_task; - /* - * Dummy netdevices are needed when using multiple receive queues with - * NAPI as each netdevice can service only one queue. - */ - struct net_device *dummy_netdev[SGE_QSETS - 1]; - struct dentry *debugfs_root; struct mutex mdio_lock; @@ -251,12 +251,6 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) return netdev_priv(adap->port[idx]); } -/* - * We use the spare atalk_ptr to map a net device to its SGE queue set. - * This is a macro so it can be used as l-value. 
- */ -#define dev2qset(netdev) ((netdev)->atalk_ptr) - #define OFFLOAD_DEVMAP_BIT 15 #define tdev2adap(d) container_of(d, struct adapter, tdev) @@ -282,7 +276,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, int irq_vec_idx, const struct qset_params *p, - int ntxq, struct net_device *netdev); + int ntxq, struct net_device *dev); int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, unsigned char *data); irqreturn_t t3_sge_intr_msix(int irq, void *cookie); diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h index 16378004507a..99c75d30f67a 100644 --- a/drivers/net/cxgb3/common.h +++ b/drivers/net/cxgb3/common.h @@ -97,6 +97,7 @@ enum { MAX_NPORTS = 2, /* max # of ports */ MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */ EEPROMSIZE = 8192, /* Serial EEPROM size */ + SERNUM_LEN = 16, /* Serial # length */ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */ TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ @@ -104,7 +105,7 @@ enum { PROTO_SRAM_LINES = 128, /* size of TP sram */ }; -#define MAX_RX_COALESCING_LEN 16224U +#define MAX_RX_COALESCING_LEN 12288U enum { PAUSE_RX = 1 << 0, @@ -126,8 +127,8 @@ enum { /* adapter interrupt-maintained statistics */ enum { TP_VERSION_MAJOR = 1, - TP_VERSION_MINOR = 0, - TP_VERSION_MICRO = 44 + TP_VERSION_MINOR = 1, + TP_VERSION_MICRO = 0 }; #define S_TP_VERSION_MAJOR 16 @@ -167,8 +168,8 @@ enum { }; struct sg_ent { /* SGE scatter/gather entry */ - u32 len[2]; - u64 addr[2]; + __be32 len[2]; + __be64 addr[2]; }; #ifndef SGE_NUM_GENBITS @@ -391,6 +392,7 @@ struct vpd_params { unsigned int uclk; unsigned int mdc; unsigned int mem_timing; + u8 sn[SERNUM_LEN + 1]; u8 eth_base[6]; u8 port_type[MAX_NPORTS]; unsigned short xauicfg[2]; @@ -436,6 +438,7 @@ enum { /* chip revisions */ T3_REV_A = 0, T3_REV_B = 2, T3_REV_B2 = 3, + T3_REV_C = 4, }; struct trace_params { @@ -507,9 +510,11 @@ struct cmac { unsigned int tx_xcnt; u64 tx_mcnt; unsigned int rx_xcnt; + unsigned int rx_ocnt; u64 rx_mcnt; unsigned int toggle_cnt; unsigned int txen; + u64 rx_pause; struct mac_stats stats; }; @@ -679,14 +684,15 @@ const struct adapter_info *t3_get_adapter_info(unsigned int board_id); int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data); int t3_seeprom_wp(struct adapter *adapter, int enable); -int t3_check_tpsram_version(struct adapter *adapter); +int t3_get_tp_version(struct adapter *adapter, u32 *vers); +int t3_check_tpsram_version(struct adapter *adapter, int *must_load); int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size); int t3_set_proto_sram(struct adapter *adap, u8 *data); int t3_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); int t3_get_fw_version(struct adapter *adapter, u32 *vers); -int t3_check_fw_version(struct adapter *adapter); +int t3_check_fw_version(struct adapter *adapter, int *must_load); int t3_init_hw(struct adapter *adapter, u32 fw_params); void mac_prep(struct cmac *mac, struct adapter *adapter, int index); void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h 
b/drivers/net/cxgb3/cxgb3_ctl_defs.h index 2095ddacff78..6c4f32066919 100644 --- a/drivers/net/cxgb3/cxgb3_ctl_defs.h +++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h @@ -33,27 +33,29 @@ #define _CXGB3_OFFLOAD_CTL_DEFS_H enum { - GET_MAX_OUTSTANDING_WR, - GET_TX_MAX_CHUNK, - GET_TID_RANGE, - GET_STID_RANGE, - GET_RTBL_RANGE, - GET_L2T_CAPACITY, - GET_MTUS, - GET_WR_LEN, - GET_IFF_FROM_MAC, - GET_DDP_PARAMS, - GET_PORTS, - - ULP_ISCSI_GET_PARAMS, - ULP_ISCSI_SET_PARAMS, - - RDMA_GET_PARAMS, - RDMA_CQ_OP, - RDMA_CQ_SETUP, - RDMA_CQ_DISABLE, - RDMA_CTRL_QP_SETUP, - RDMA_GET_MEM, + GET_MAX_OUTSTANDING_WR = 0, + GET_TX_MAX_CHUNK = 1, + GET_TID_RANGE = 2, + GET_STID_RANGE = 3, + GET_RTBL_RANGE = 4, + GET_L2T_CAPACITY = 5, + GET_MTUS = 6, + GET_WR_LEN = 7, + GET_IFF_FROM_MAC = 8, + GET_DDP_PARAMS = 9, + GET_PORTS = 10, + + ULP_ISCSI_GET_PARAMS = 11, + ULP_ISCSI_SET_PARAMS = 12, + + RDMA_GET_PARAMS = 13, + RDMA_CQ_OP = 14, + RDMA_CQ_SETUP = 15, + RDMA_CQ_DISABLE = 16, + RDMA_CTRL_QP_SETUP = 17, + RDMA_GET_MEM = 18, + + GET_RX_PAGE_INFO = 50, }; /* @@ -161,4 +163,12 @@ struct rdma_ctrlqp_setup { unsigned long long base_addr; unsigned int size; }; + +/* + * Offload TX/RX page information. + */ +struct ofld_page_info { + unsigned int page_size; /* Page size, should be a power of 2 */ + unsigned int num; /* Number of pages */ +}; #endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */ diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h index 483a594210a7..45e92164c260 100644 --- a/drivers/net/cxgb3/cxgb3_defs.h +++ b/drivers/net/cxgb3/cxgb3_defs.h @@ -79,9 +79,17 @@ static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t, unsigned int tid) { + union listen_entry *e; + if (tid < t->stid_base || tid >= t->stid_base + t->nstids) return NULL; - return &(stid2entry(t, tid)->t3c_tid); + + e = stid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; } /* @@ -90,9 +98,17 @@ static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t, static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t, unsigned int tid) { + union active_open_entry *e; + if (tid < t->atid_base || tid >= t->atid_base + t->natids) return NULL; - return &(atid2entry(t, tid)->t3c_tid); + + e = atid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; } int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 6fd1e5241833..61ffc925eae7 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -339,46 +339,17 @@ static void setup_rss(struct adapter *adap) V_RRCPLCPUSIZE(6), cpus, rspq_map); } -/* - * If we have multiple receive queues per port serviced by NAPI we need one - * netdevice per queue as NAPI operates on netdevices. We already have one - * netdevice, namely the one associated with the interface, so we use dummy - * ones for any additional queues. Note that these netdevices exist purely - * so that NAPI has something to work with, they do not represent network - * ports and are not registered. 
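
Much of the cxgb3_main.c churn below is one mechanical substitution: struct port_info, the netdev private area (extended with an adapter back-pointer in adapter.h above), replaces direct use of dev->priv. The recurring shape, pulled out for reference; example_dev_to_adapter() is a hypothetical wrapper, while the exported helper this patch actually adds for offload users is dev2t3cdev(), further below.

static struct adapter *example_dev_to_adapter(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);	/* per-port private data */

	return pi->adapter;	/* back-pointer introduced by this patch */
}
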
- */ -static int init_dummy_netdevs(struct adapter *adap) +static void init_napi(struct adapter *adap) { - int i, j, dummy_idx = 0; - struct net_device *nd; - - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - const struct port_info *pi = netdev_priv(dev); - - for (j = 0; j < pi->nqsets - 1; j++) { - if (!adap->dummy_netdev[dummy_idx]) { - nd = alloc_netdev(0, "", ether_setup); - if (!nd) - goto free_all; + int i; - nd->priv = adap; - nd->weight = 64; - set_bit(__LINK_STATE_START, &nd->state); - adap->dummy_netdev[dummy_idx] = nd; - } - strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name); - dummy_idx++; - } - } - return 0; + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; -free_all: - while (--dummy_idx >= 0) { - free_netdev(adap->dummy_netdev[dummy_idx]); - adap->dummy_netdev[dummy_idx] = NULL; + if (qs->adap) + netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, + 64); } - return -ENOMEM; } /* @@ -389,20 +360,18 @@ free_all: static void quiesce_rx(struct adapter *adap) { int i; - struct net_device *dev; - for_each_port(adap, i) { - dev = adap->port[i]; - while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) - msleep(1); - } + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_disable(&adap->sge.qs[i].napi); +} - for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) { - dev = adap->dummy_netdev[i]; - if (dev) - while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) - msleep(1); - } +static void enable_all_napi(struct adapter *adap) +{ + int i; + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_enable(&adap->sge.qs[i].napi); } /** @@ -415,7 +384,7 @@ static void quiesce_rx(struct adapter *adap) */ static int setup_sge_qsets(struct adapter *adap) { - int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0; + int i, j, err, irq_idx = 0, qset_idx = 0; unsigned int ntxq = SGE_TXQ_PER_SET; if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) @@ -423,15 +392,14 @@ static int setup_sge_qsets(struct adapter *adap) for_each_port(adap, i) { struct net_device *dev = adap->port[i]; - const struct port_info *pi = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); + pi->qs = &adap->sge.qs[pi->first_qset]; for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { err = t3_sge_alloc_qset(adap, qset_idx, 1, (adap->flags & USING_MSIX) ? qset_idx + 1 : irq_idx, - &adap->params.sge.qset[qset_idx], ntxq, - j == 0 ? dev : - adap-> dummy_netdev[dummy_dev_idx++]); + &adap->params.sge.qset[qset_idx], ntxq, dev); if (err) { t3_free_sge_resources(adap); return err; @@ -482,7 +450,8 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr, #define CXGB3_SHOW(name, val_expr) \ static ssize_t format_##name(struct net_device *dev, char *buf) \ { \ - struct adapter *adap = dev->priv; \ + struct port_info *pi = netdev_priv(dev); \ + struct adapter *adap = pi->adapter; \ return sprintf(buf, "%u\n", val_expr); \ } \ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ @@ -493,7 +462,8 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ static ssize_t set_nfilters(struct net_device *dev, unsigned int val) { - struct adapter *adap = dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; int min_tids = is_offload(adap) ? 
MC5_MIN_TIDS : 0; if (adap->flags & FULL_INIT_DONE) @@ -515,7 +485,8 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr, static ssize_t set_nservers(struct net_device *dev, unsigned int val) { - struct adapter *adap = dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; if (adap->flags & FULL_INIT_DONE) return -EBUSY; @@ -556,9 +527,10 @@ static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr, char *buf, int sched) { - ssize_t len; + struct port_info *pi = netdev_priv(to_net_dev(d)); + struct adapter *adap = pi->adapter; unsigned int v, addr, bpt, cpt; - struct adapter *adap = to_net_dev(d)->priv; + ssize_t len; addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; rtnl_lock(); @@ -581,10 +553,11 @@ static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr, static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr, const char *buf, size_t len, int sched) { + struct port_info *pi = netdev_priv(to_net_dev(d)); + struct adapter *adap = pi->adapter; + unsigned int val; char *endp; ssize_t ret; - unsigned int val; - struct adapter *adap = to_net_dev(d)->priv; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -721,6 +694,7 @@ static void bind_qsets(struct adapter *adap) } #define FW_FNAME "t3fw-%d.%d.%d.bin" +#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin" static int upgrade_fw(struct adapter *adap) { @@ -739,6 +713,74 @@ static int upgrade_fw(struct adapter *adap) } ret = t3_load_fw(adap, fw->data, fw->size); release_firmware(fw); + + if (ret == 0) + dev_info(dev, "successful upgrade to firmware %d.%d.%d\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); + else + dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); + + return ret; +} + +static inline char t3rev2char(struct adapter *adapter) +{ + char rev = 0; + + switch(adapter->params.rev) { + case T3_REV_B: + case T3_REV_B2: + rev = 'b'; + break; + case T3_REV_C: + rev = 'c'; + break; + } + return rev; +} + +static int update_tpsram(struct adapter *adap) +{ + const struct firmware *tpsram; + char buf[64]; + struct device *dev = &adap->pdev->dev; + int ret; + char rev; + + rev = t3rev2char(adap); + if (!rev) + return 0; + + snprintf(buf, sizeof(buf), TPSRAM_NAME, rev, + TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); + + ret = request_firmware(&tpsram, buf, dev); + if (ret < 0) { + dev_err(dev, "could not load TP SRAM: unable to load %s\n", + buf); + return ret; + } + + ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); + if (ret) + goto release_tpsram; + + ret = t3_set_proto_sram(adap, tpsram->data); + if (ret == 0) + dev_info(dev, + "successful update of protocol engine " + "to %d.%d.%d\n", + TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); + else + dev_err(dev, "failed to update of protocol engine %d.%d.%d\n", + TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); + if (ret) + dev_err(dev, "loading protocol SRAM failed\n"); + +release_tpsram: + release_firmware(tpsram); + return ret; } @@ -754,30 +796,36 @@ static int upgrade_fw(struct adapter *adap) */ static int cxgb_up(struct adapter *adap) { - int err = 0; + int err; + int must_load; if (!(adap->flags & FULL_INIT_DONE)) { - err = t3_check_fw_version(adap); - if (err == -EINVAL) + err = t3_check_fw_version(adap, &must_load); + if (err == -EINVAL) { err = upgrade_fw(adap); - if (err) - goto out; + if 
(err && must_load) + goto out; + } - err = init_dummy_netdevs(adap); - if (err) - goto out; + err = t3_check_tpsram_version(adap, &must_load); + if (err == -EINVAL) { + err = update_tpsram(adap); + if (err && must_load) + goto out; + } err = t3_init_hw(adap, 0); if (err) goto out; t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); - + err = setup_sge_qsets(adap); if (err) goto out; setup_rss(adap); + init_napi(adap); adap->flags |= FULL_INIT_DONE; } @@ -804,6 +852,7 @@ static int cxgb_up(struct adapter *adap) adap->name, adap))) goto irq_err; + enable_all_napi(adap); t3_sge_start(adap); t3_intr_enable(adap); @@ -858,10 +907,11 @@ static void schedule_chk_task(struct adapter *adap) static int offload_open(struct net_device *dev) { - struct adapter *adapter = dev->priv; - struct t3cdev *tdev = T3CDEV(dev); + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct t3cdev *tdev = dev2t3cdev(dev); int adap_up = adapter->open_device_map & PORT_MASK; - int err = 0; + int err; if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) return 0; @@ -924,13 +974,15 @@ static int offload_close(struct t3cdev *tdev) static int cxgb_open(struct net_device *dev) { - int err; - struct adapter *adapter = dev->priv; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; int other_ports = adapter->open_device_map & PORT_MASK; + int err; - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { + quiesce_rx(adapter); return err; + } set_bit(pi->port_id, &adapter->open_device_map); if (is_offload(adapter) && !ofld_disable) { @@ -951,17 +1003,17 @@ static int cxgb_open(struct net_device *dev) static int cxgb_close(struct net_device *dev) { - struct adapter *adapter = dev->priv; - struct port_info *p = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; - t3_port_intr_disable(adapter, p->port_id); + t3_port_intr_disable(adapter, pi->port_id); netif_stop_queue(dev); - p->phy.ops->power_down(&p->phy, 1); + pi->phy.ops->power_down(&pi->phy, 1); netif_carrier_off(dev); - t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); + t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); spin_lock(&adapter->work_lock); /* sync with update task */ - clear_bit(p->port_id, &adapter->open_device_map); + clear_bit(pi->port_id, &adapter->open_device_map); spin_unlock(&adapter->work_lock); if (!(adapter->open_device_map & PORT_MASK)) @@ -976,13 +1028,13 @@ static int cxgb_close(struct net_device *dev) static struct net_device_stats *cxgb_get_stats(struct net_device *dev) { - struct adapter *adapter = dev->priv; - struct port_info *p = netdev_priv(dev); - struct net_device_stats *ns = &p->netstats; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct net_device_stats *ns = &pi->netstats; const struct mac_stats *pstats; spin_lock(&adapter->stats_lock); - pstats = t3_mac_update_stats(&p->mac); + pstats = t3_mac_update_stats(&pi->mac); spin_unlock(&adapter->stats_lock); ns->tx_bytes = pstats->tx_octets; @@ -1015,14 +1067,16 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev) static u32 get_msglevel(struct net_device *dev) { - struct adapter *adapter = dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; return adapter->msg_enable; } static void set_msglevel(struct net_device *dev, u32 val) { - struct adapter 
*adapter = dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; adapter->msg_enable = val; } @@ -1077,9 +1131,14 @@ static char stats_strings[][ETH_GSTRING_LEN] = { }; -static int get_stats_count(struct net_device *dev) +static int get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(stats_strings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } } #define T3_REGMAP_SIZE (3 * 1024) @@ -1096,10 +1155,13 @@ static int get_eeprom_len(struct net_device *dev) static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; u32 fw_vers = 0; - struct adapter *adapter = dev->priv; + u32 tp_vers = 0; t3_get_fw_version(adapter, &fw_vers); + t3_get_tp_version(adapter, &tp_vers); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); @@ -1108,11 +1170,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->fw_version, "N/A"); else { snprintf(info->fw_version, sizeof(info->fw_version), - "%s %u.%u.%u", + "%s %u.%u.%u TP %u.%u.%u", G_FW_VERSION_TYPE(fw_vers) ? "T" : "N", G_FW_VERSION_MAJOR(fw_vers), G_FW_VERSION_MINOR(fw_vers), - G_FW_VERSION_MICRO(fw_vers)); + G_FW_VERSION_MICRO(fw_vers), + G_TP_VERSION_MAJOR(tp_vers), + G_TP_VERSION_MINOR(tp_vers), + G_TP_VERSION_MICRO(tp_vers)); } } @@ -1136,8 +1201,8 @@ static unsigned long collect_sge_port_stats(struct adapter *adapter, static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - struct adapter *adapter = dev->priv; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; const struct mac_stats *s; spin_lock(&adapter->stats_lock); @@ -1205,7 +1270,8 @@ static inline void reg_block_dump(struct adapter *ap, void *buf, static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { - struct adapter *ap = dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *ap = pi->adapter; /* * Version scheme: @@ -1246,8 +1312,9 @@ static int restart_autoneg(struct net_device *dev) static int cxgb3_phys_id(struct net_device *dev, u32 data) { + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; int i; - struct adapter *adapter = dev->priv; if (data == 0) data = 2; @@ -1408,8 +1475,8 @@ static int set_rx_csum(struct net_device *dev, u32 data) static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { - const struct adapter *adapter = dev->priv; - const struct port_info *pi = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; e->rx_max_pending = MAX_RX_BUFFERS; @@ -1425,10 +1492,10 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { - int i; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; struct qset_params *q; - struct adapter *adapter = dev->priv; - const struct port_info *pi = netdev_priv(dev); + int i; if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || @@ -1457,7 +1524,8 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { - struct adapter *adapter 
= dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; struct qset_params *qsp = &adapter->params.sge.qset[0]; struct sge_qset *qs = &adapter->sge.qs[0]; @@ -1471,7 +1539,8 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { - struct adapter *adapter = dev->priv; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; struct qset_params *q = adapter->params.sge.qset; c->rx_coalesce_usecs = q->coalesce_usecs; @@ -1481,8 +1550,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, u8 * data) { + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; int i, err = 0; - struct adapter *adapter = dev->priv; u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); if (!buf) @@ -1501,10 +1571,11 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 * data) { - u8 *buf; - int err = 0; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; u32 aligned_offset, aligned_len, *p; - struct adapter *adapter = dev->priv; + u8 *buf; + int err; if (eeprom->magic != EEPROM_MAGIC) return -EINVAL; @@ -1568,22 +1639,18 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_pauseparam = set_pauseparam, .get_rx_csum = get_rx_csum, .set_rx_csum = set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_link = ethtool_op_get_link, .get_strings = get_strings, .phys_id = cxgb3_phys_id, .nway_reset = restart_autoneg, - .get_stats_count = get_stats_count, + .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, .get_wol = get_wol, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, - .get_perm_addr = ethtool_op_get_perm_addr }; static int in_range(int val, int lo, int hi) @@ -1593,9 +1660,10 @@ static int in_range(int val, int lo, int hi) static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) { - int ret; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; u32 cmd; - struct adapter *adapter = dev->priv; + int ret; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; @@ -1701,7 +1769,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) } case CHELSIO_SET_QSET_NUM:{ struct ch_reg edata; - struct port_info *pi = netdev_priv(dev); unsigned int i, first_qset = 0, other_qsets = 0; if (!capable(CAP_NET_ADMIN)) @@ -1733,7 +1800,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) } case CHELSIO_GET_QSET_NUM:{ struct ch_reg edata; - struct port_info *pi = netdev_priv(dev); edata.cmd = CHELSIO_GET_QSET_NUM; edata.val = pi->nqsets; @@ -1924,10 +1990,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { - int ret, mmd; - struct adapter *adapter = dev->priv; - struct port_info *pi = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(req); + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret, mmd; switch (cmd) { case SIOCGMIIPHY: @@ -1995,9 +2061,9 @@ static int 
cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) static int cxgb_change_mtu(struct net_device *dev, int new_mtu) { - int ret; - struct adapter *adapter = dev->priv; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; if (new_mtu < 81) /* accommodate SACK */ return -EINVAL; @@ -2014,8 +2080,8 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) static int cxgb_set_mac_addr(struct net_device *dev, void *p) { - struct adapter *adapter = dev->priv; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) @@ -2051,8 +2117,8 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) { - struct adapter *adapter = dev->priv; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; pi->vlan_grp = grp; if (adapter->params.rev > 0) @@ -2071,8 +2137,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) #ifdef CONFIG_NET_POLL_CONTROLLER static void cxgb_netpoll(struct net_device *dev) { - struct adapter *adapter = dev->priv; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; int qidx; for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) { @@ -2089,42 +2155,6 @@ static void cxgb_netpoll(struct net_device *dev) } #endif -#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin" -int update_tpsram(struct adapter *adap) -{ - const struct firmware *tpsram; - char buf[64]; - struct device *dev = &adap->pdev->dev; - int ret; - char rev; - - rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a'; - - snprintf(buf, sizeof(buf), TPSRAM_NAME, rev, - TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); - - ret = request_firmware(&tpsram, buf, dev); - if (ret < 0) { - dev_err(dev, "could not load TP SRAM: unable to load %s\n", - buf); - return ret; - } - - ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); - if (ret) - goto release_tpsram; - - ret = t3_set_proto_sram(adap, tpsram->data); - if (ret) - dev_err(dev, "loading protocol SRAM failed\n"); - -release_tpsram: - release_firmware(tpsram); - - return ret; -} - - /* * Periodic accumulation of MAC statistics. */ @@ -2271,6 +2301,10 @@ void t3_fatal_err(struct adapter *adapter) if (adapter->flags & FULL_INIT_DONE) { t3_sge_stop(adapter); + t3_write_reg(adapter, A_XGM_TX_CTRL, 0); + t3_write_reg(adapter, A_XGM_RX_CTRL, 0); + t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); + t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); t3_intr_disable(adapter); } CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); @@ -2330,10 +2364,12 @@ static void __devinit print_port_info(struct adapter *adap, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? 
" MSI" : ""); if (adap->name == dev->name && adap->params.vpd.mclk) - printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n", + printk(KERN_INFO + "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", adap->name, t3_mc7_size(&adap->cm) >> 20, t3_mc7_size(&adap->pmtx) >> 20, - t3_mc7_size(&adap->pmrx) >> 20); + t3_mc7_size(&adap->pmrx) >> 20, + adap->params.vpd.sn); } } @@ -2429,11 +2465,11 @@ static int __devinit init_one(struct pci_dev *pdev, goto out_free_dev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); adapter->port[i] = netdev; pi = netdev_priv(netdev); + pi->adapter = adapter; pi->rx_csum_offload = 1; pi->nqsets = 1; pi->first_qset = i; @@ -2443,7 +2479,6 @@ static int __devinit init_one(struct pci_dev *pdev, netdev->irq = pdev->irq; netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len - 1; - netdev->priv = adapter; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; netdev->features |= NETIF_F_LLTX; if (pci_using_dac) @@ -2463,23 +2498,15 @@ static int __devinit init_one(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = cxgb_netpoll; #endif - netdev->weight = 64; SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); } - pci_set_drvdata(pdev, adapter->port[0]); + pci_set_drvdata(pdev, adapter); if (t3_prep_adapter(adapter, ai, 1) < 0) { err = -ENODEV; goto out_free_dev; } - - err = t3_check_tpsram_version(adapter); - if (err == -EINVAL) - err = update_tpsram(adapter); - - if (err) - goto out_free_dev; /* * The card is now ready to go. If any errors occur during device @@ -2548,11 +2575,10 @@ out_release_regions: static void __devexit remove_one(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); + struct adapter *adapter = pci_get_drvdata(pdev); - if (dev) { + if (adapter) { int i; - struct adapter *adapter = dev->priv; t3_sge_stop(adapter); sysfs_remove_group(&adapter->port[0]->dev.kobj, @@ -2572,12 +2598,6 @@ static void __devexit remove_one(struct pci_dev *pdev) t3_free_sge_resources(adapter); cxgb_disable_msi(adapter); - for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++) - if (adapter->dummy_netdev[i]) { - free_netdev(adapter->dummy_netdev[i]); - adapter->dummy_netdev[i] = NULL; - } - for_each_port(adapter, i) if (adapter->port[i]) free_netdev(adapter->port[i]); diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index ebcf35e4cf5b..bd25421bc12a 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -57,7 +57,7 @@ static DEFINE_RWLOCK(adapter_list_lock); static LIST_HEAD(adapter_list); static const unsigned int MAX_ATIDS = 64 * 1024; -static const unsigned int ATID_BASE = 0x100000; +static const unsigned int ATID_BASE = 0x10000; static inline int offload_activated(struct t3cdev *tdev) { @@ -222,32 +222,32 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) int ret = 0; switch (req) { - case RDMA_GET_PARAMS:{ - struct rdma_info *req = data; + case RDMA_GET_PARAMS: { + struct rdma_info *rdma = data; struct pci_dev *pdev = adapter->pdev; - req->udbell_physbase = pci_resource_start(pdev, 2); - req->udbell_len = pci_resource_len(pdev, 2); - req->tpt_base = + rdma->udbell_physbase = pci_resource_start(pdev, 2); + rdma->udbell_len = pci_resource_len(pdev, 2); + rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); - req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); - req->pbl_base = + rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); + rdma->pbl_base = t3_read_reg(adapter, 
A_ULPTX_PBL_LLIMIT); - req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); - req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); - req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); - req->kdb_addr = adapter->regs + A_SG_KDOORBELL; - req->pdev = pdev; + rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); + rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); + rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); + rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; + rdma->pdev = pdev; break; } case RDMA_CQ_OP:{ unsigned long flags; - struct rdma_cq_op *req = data; + struct rdma_cq_op *rdma = data; /* may be called in any context */ spin_lock_irqsave(&adapter->sge.reg_lock, flags); - ret = t3_sge_cqcntxt_op(adapter, req->id, req->op, - req->credits); + ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, + rdma->credits); spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); break; } @@ -274,15 +274,15 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) break; } case RDMA_CQ_SETUP:{ - struct rdma_cq_setup *req = data; + struct rdma_cq_setup *rdma = data; spin_lock_irq(&adapter->sge.reg_lock); ret = - t3_sge_init_cqcntxt(adapter, req->id, - req->base_addr, req->size, + t3_sge_init_cqcntxt(adapter, rdma->id, + rdma->base_addr, rdma->size, ASYNC_NOTIF_RSPQ, - req->ovfl_mode, req->credits, - req->credit_thres); + rdma->ovfl_mode, rdma->credits, + rdma->credit_thres); spin_unlock_irq(&adapter->sge.reg_lock); break; } @@ -292,13 +292,13 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) spin_unlock_irq(&adapter->sge.reg_lock); break; case RDMA_CTRL_QP_SETUP:{ - struct rdma_ctrlqp_setup *req = data; + struct rdma_ctrlqp_setup *rdma = data; spin_lock_irq(&adapter->sge.reg_lock); ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ, - req->base_addr, req->size, + rdma->base_addr, rdma->size, FW_RI_TID_START, 1, 0); spin_unlock_irq(&adapter->sge.reg_lock); break; @@ -317,6 +317,8 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) struct iff_mac *iffmacp; struct ddp_params *ddpp; struct adap_ports *ports; + struct ofld_page_info *rx_page_info; + struct tp_params *tp = &adapter->params.tp; int i; switch (req) { @@ -382,6 +384,11 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) if (!offload_running(adapter)) return -EAGAIN; return cxgb_rdma_ctl(adapter, req, data); + case GET_RX_PAGE_INFO: + rx_page_info = data; + rx_page_info->page_size = tp->rx_pg_size; + rx_page_info->num = tp->rx_num_pgs; + break; default: return -EOPNOTSUPP; } @@ -593,6 +600,16 @@ int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client, EXPORT_SYMBOL(cxgb3_alloc_stid); +/* Get the t3cdev associated with a net_device */ +struct t3cdev *dev2t3cdev(struct net_device *dev) +{ + const struct port_info *pi = netdev_priv(dev); + + return (struct t3cdev *)pi->adapter; +} + +EXPORT_SYMBOL(dev2t3cdev); + static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb) { struct cpl_smt_write_rpl *rpl = cplhdr(skb); @@ -677,10 +694,19 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) { struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); - t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); - if (t3c_tid->ctx && t3c_tid->client->handlers && + if 
(unlikely(tid >= t->ntids)) { + printk("%s: passive open TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_stid(t, stid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] (dev, skb, t3c_tid->ctx); @@ -699,7 +725,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) * the buffer. */ static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, - int gfp) + gfp_t gfp) { if (likely(!skb_cloned(skb))) { BUG_ON(skb->len < len); @@ -762,16 +788,25 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) { struct cpl_act_establish *req = cplhdr(skb); unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); - t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); + if (unlikely(tid >= t->ntids)) { + printk("%s: active establish TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_atid(t, atid); if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] (dev, skb, t3c_tid->ctx); } else { printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, CPL_PASS_ACCEPT_REQ); + dev->name, CPL_ACT_ESTABLISH); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } } @@ -925,7 +960,7 @@ void cxgb_neigh_update(struct neighbour *neigh) struct net_device *dev = neigh->dev; if (dev && (is_offloading(dev))) { - struct t3cdev *tdev = T3CDEV(dev); + struct t3cdev *tdev = dev2t3cdev(dev); BUG_ON(!tdev); t3_l2t_update(tdev, neigh); @@ -973,9 +1008,9 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) "device ignored.\n", __FUNCTION__); return; } - tdev = T3CDEV(olddev); + tdev = dev2t3cdev(olddev); BUG_ON(!tdev); - if (tdev != T3CDEV(newdev)) { + if (tdev != dev2t3cdev(newdev)) { printk(KERN_WARNING "%s: Redirect to different " "offload device ignored.\n", __FUNCTION__); return; diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h index f15446a32efc..7a379138b5a6 100644 --- a/drivers/net/cxgb3/cxgb3_offload.h +++ b/drivers/net/cxgb3/cxgb3_offload.h @@ -51,6 +51,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter); void cxgb3_set_dummy_ops(struct t3cdev *dev); +struct t3cdev *dev2t3cdev(struct net_device *dev); + /* * Client registration. Users of T3 driver must register themselves. 
* The T3 driver will call the add function of every client for each T3 diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index aa80313c922e..5e1bc0dec5f1 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -172,6 +172,14 @@ #define A_SG_INT_CAUSE 0x5c +#define S_HIPIODRBDROPERR 11 +#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR) +#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U) + +#define S_LOPIODRBDROPERR 10 +#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) +#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) + #define S_RSPQDISABLED 3 #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) #define F_RSPQDISABLED V_RSPQDISABLED(1U) @@ -1318,6 +1326,7 @@ #define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT) #define A_PM1_RX_CFG 0x5c0 +#define A_PM1_RX_MODE 0x5c4 #define A_PM1_RX_INT_ENABLE 0x5d8 @@ -1386,6 +1395,7 @@ #define A_PM1_RX_INT_CAUSE 0x5dc #define A_PM1_TX_CFG 0x5e0 +#define A_PM1_TX_MODE 0x5e4 #define A_PM1_TX_INT_ENABLE 0x5f8 diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index a2cfd68ac757..994b5d6404df 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -79,7 +79,7 @@ enum { }; struct tx_desc { - u64 flit[TX_DESC_FLITS]; + __be64 flit[TX_DESC_FLITS]; }; struct rx_desc { @@ -544,7 +544,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, * as HW contexts, packet buffers, and descriptor rings. Traffic to the * queue set must be quiesced prior to calling this. */ -void t3_free_qset(struct adapter *adapter, struct sge_qset *q) +static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) { int i; struct pci_dev *pdev = adapter->pdev; @@ -591,9 +591,6 @@ void t3_free_qset(struct adapter *adapter, struct sge_qset *q) q->rspq.desc, q->rspq.phys_addr); } - if (q->netdev) - q->netdev->atalk_ptr = NULL; - memset(q, 0, sizeof(*q)); } @@ -907,8 +904,8 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, const struct sge_txq *q, const struct sg_ent *sgl, unsigned int flits, unsigned int sgl_flits, - unsigned int gen, unsigned int wr_hi, - unsigned int wr_lo) + unsigned int gen, __be32 wr_hi, + __be32 wr_lo) { struct work_request_hdr *wrp = (struct work_request_hdr *)d; struct tx_sw_desc *sd = &q->sdesc[pidx]; @@ -1073,8 +1070,8 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int ndesc, pidx, credits, gen, compl; const struct port_info *pi = netdev_priv(dev); - struct adapter *adap = dev->priv; - struct sge_qset *qs = dev2qset(dev); + struct adapter *adap = pi->adapter; + struct sge_qset *qs = pi->qs; struct sge_txq *q = &qs->txq[TXQ_ETH]; /* @@ -1182,8 +1179,8 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) * * Writes a packet as immediate data into a Tx descriptor. The packet * contains a work request at its beginning. We must write the packet - * carefully so the SGE doesn't read accidentally before it's written in - * its entirety. + * carefully so the SGE doesn't read it accidentally before it's written + * in its entirety. 
*/ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, unsigned int len, unsigned int gen) @@ -1191,7 +1188,11 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, struct work_request_hdr *from = (struct work_request_hdr *)skb->data; struct work_request_hdr *to = (struct work_request_hdr *)d; - memcpy(&to[1], &from[1], len - sizeof(*from)); + if (likely(!skb->data_len)) + memcpy(&to[1], &from[1], len - sizeof(*from)); + else + skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); + to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | V_WR_BCNTLFLT(len & 7)); wmb(); @@ -1261,7 +1262,7 @@ static inline void reclaim_completed_tx_imm(struct sge_txq *q) static inline int immediate(const struct sk_buff *skb) { - return skb->len <= WR_LEN && !skb->data_len; + return skb->len <= WR_LEN; } /** @@ -1326,12 +1327,12 @@ static void restart_ctrlq(unsigned long data) struct sk_buff *skb; struct sge_qset *qs = (struct sge_qset *)data; struct sge_txq *q = &qs->txq[TXQ_CTRL]; - struct adapter *adap = qs->netdev->priv; spin_lock(&q->lock); again:reclaim_completed_tx_imm(q); - while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) { + while (q->in_use < q->size && + (skb = __skb_dequeue(&q->sendq)) != NULL) { write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); @@ -1353,7 +1354,7 @@ static void restart_ctrlq(unsigned long data) } spin_unlock(&q->lock); - t3_write_reg(adap, A_SG_KDOORBELL, + t3_write_reg(qs->adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } @@ -1467,12 +1468,13 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, */ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) { - unsigned int flits, cnt = skb_shinfo(skb)->nr_frags; + unsigned int flits, cnt; - if (skb->len <= WR_LEN && cnt == 0) + if (skb->len <= WR_LEN) return 1; /* packet fits as immediate data */ flits = skb_transport_offset(skb) / 8; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; if (skb->tail != skb->transport_header) cnt++; return flits_to_desc(flits + sgl_len(cnt)); @@ -1531,7 +1533,8 @@ static void restart_offloadq(unsigned long data) struct sk_buff *skb; struct sge_qset *qs = (struct sge_qset *)data; struct sge_txq *q = &qs->txq[TXQ_OFLD]; - struct adapter *adap = qs->netdev->priv; + const struct port_info *pi = netdev_priv(qs->netdev); + struct adapter *adap = pi->adapter; spin_lock(&q->lock); again:reclaim_completed_tx(adap, q); @@ -1636,8 +1639,7 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) else { struct sge_qset *qs = rspq_to_qset(q); - if (__netif_rx_schedule_prep(qs->netdev)) - __netif_rx_schedule(qs->netdev); + napi_schedule(&qs->napi); q->rx_head = skb; } q->rx_tail = skb; @@ -1673,33 +1675,30 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev, * receive handler. Batches need to be of modest size as we do prefetches * on the packets in each. 
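
write_imm() above now copes with non-linear skbs, which is why immediate() just below it can drop its !skb->data_len test. The distinction being handled, in isolation; example_copy_payload(), dst, hdr_len and body_len are hypothetical names.

static void example_copy_payload(const struct sk_buff *skb, void *dst,
				 int hdr_len, int body_len)
{
	if (!skb->data_len)		/* all data in the linear area */
		memcpy(dst, skb->data + hdr_len, body_len);
	else				/* skb_copy_bits() also walks paged frags */
		skb_copy_bits(skb, hdr_len, dst, body_len);
}
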
*/ -static int ofld_poll(struct net_device *dev, int *budget) +static int ofld_poll(struct napi_struct *napi, int budget) { - struct adapter *adapter = dev->priv; - struct sge_qset *qs = dev2qset(dev); + struct sge_qset *qs = container_of(napi, struct sge_qset, napi); struct sge_rspq *q = &qs->rspq; - int work_done, limit = min(*budget, dev->quota), avail = limit; + struct adapter *adapter = qs->adap; + int work_done = 0; - while (avail) { + while (work_done < budget) { struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; int ngathered; spin_lock_irq(&q->lock); head = q->rx_head; if (!head) { - work_done = limit - avail; - *budget -= work_done; - dev->quota -= work_done; - __netif_rx_complete(dev); + napi_complete(napi); spin_unlock_irq(&q->lock); - return 0; + return work_done; } tail = q->rx_tail; q->rx_head = q->rx_tail = NULL; spin_unlock_irq(&q->lock); - for (ngathered = 0; avail && head; avail--) { + for (ngathered = 0; work_done < budget && head; work_done++) { prefetch(head->data); skbs[ngathered] = head; head = head->next; @@ -1721,10 +1720,8 @@ static int ofld_poll(struct net_device *dev, int *budget) } deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); } - work_done = limit - avail; - *budget -= work_done; - dev->quota -= work_done; - return 1; + + return work_done; } /** @@ -2068,49 +2065,47 @@ static inline int is_pure_response(const struct rsp_desc *r) /** * napi_rx_handler - the NAPI handler for Rx processing - * @dev: the net device + * @napi: the napi instance * @budget: how many packets we can process in this round * * Handler for new data events when using NAPI. */ -static int napi_rx_handler(struct net_device *dev, int *budget) +static int napi_rx_handler(struct napi_struct *napi, int budget) { - struct adapter *adap = dev->priv; - struct sge_qset *qs = dev2qset(dev); - int effective_budget = min(*budget, dev->quota); - - int work_done = process_responses(adap, qs, effective_budget); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done >= effective_budget) - return 1; + struct sge_qset *qs = container_of(napi, struct sge_qset, napi); + struct adapter *adap = qs->adap; + int work_done = process_responses(adap, qs, budget); - netif_rx_complete(dev); + if (likely(work_done < budget)) { + napi_complete(napi); - /* - * Because we don't atomically flush the following write it is - * possible that in very rare cases it can reach the device in a way - * that races with a new response being written plus an error interrupt - * causing the NAPI interrupt handler below to return unhandled status - * to the OS. To protect against this would require flushing the write - * and doing both the write and the flush with interrupts off. Way too - * expensive and unjustifiable given the rarity of the race. - * - * The race cannot happen at all with MSI-X. - */ - t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | - V_NEWTIMER(qs->rspq.next_holdoff) | - V_NEWINDEX(qs->rspq.cidx)); - return 0; + /* + * Because we don't atomically flush the following + * write it is possible that in very rare cases it can + * reach the device in a way that races with a new + * response being written plus an error interrupt + * causing the NAPI interrupt handler below to return + * unhandled status to the OS. To protect against + * this would require flushing the write and doing + * both the write and the flush with interrupts off. + * Way too expensive and unjustifiable given the + * rarity of the race. + * + * The race cannot happen at all with MSI-X. 
+ */ + t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | + V_NEWTIMER(qs->rspq.next_holdoff) | + V_NEWINDEX(qs->rspq.cidx)); + } + return work_done; } /* * Returns true if the device is already scheduled for polling. */ -static inline int napi_is_scheduled(struct net_device *dev) +static inline int napi_is_scheduled(struct napi_struct *napi) { - return test_bit(__LINK_STATE_RX_SCHED, &dev->state); + return test_bit(NAPI_STATE_SCHED, &napi->state); } /** @@ -2193,8 +2188,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); return 0; } - if (likely(__netif_rx_schedule_prep(qs->netdev))) - __netif_rx_schedule(qs->netdev); + napi_schedule(&qs->napi); return 1; } @@ -2205,7 +2199,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) irqreturn_t t3_sge_intr_msix(int irq, void *cookie) { struct sge_qset *qs = cookie; - struct adapter *adap = qs->netdev->priv; + struct adapter *adap = qs->adap; struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); @@ -2221,15 +2215,14 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie) * The MSI-X interrupt handler for an SGE response queue for the NAPI case * (i.e., response queue serviced by NAPI polling). */ -irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) +static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) { struct sge_qset *qs = cookie; - struct adapter *adap = qs->netdev->priv; struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); - if (handle_responses(adap, q) < 0) + if (handle_responses(qs->adap, q) < 0) q->unhandled_irqs++; spin_unlock(&q->lock); return IRQ_HANDLED; @@ -2272,11 +2265,13 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie) return IRQ_HANDLED; } -static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q) +static int rspq_check_napi(struct sge_qset *qs) { - if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) { - if (likely(__netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); + struct sge_rspq *q = &qs->rspq; + + if (!napi_is_scheduled(&qs->napi) && + is_new_response(&q->desc[q->cidx], q)) { + napi_schedule(&qs->napi); return 1; } return 0; @@ -2289,7 +2284,7 @@ static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q) * one SGE response queue per port in this mode and protect all response * queues with queue 0's lock. 
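
Several handlers above collapse the old __netif_rx_schedule_prep()/__netif_rx_schedule() pair on a net_device into a single napi_schedule() on the queue set's napi_struct. The call is usable from hard-IRQ context and does nothing if the instance is already scheduled; roughly (paraphrasing the core helper, not driver code):

static inline void example_napi_schedule(struct napi_struct *n)
{
	/* napi_schedule_prep() essentially test_and_set_bit()s NAPI_STATE_SCHED */
	if (napi_schedule_prep(n))
		__napi_schedule(n);	/* add n to this CPU's poll list */
}
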
*/ -irqreturn_t t3_intr_msi_napi(int irq, void *cookie) +static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) { int new_packets; struct adapter *adap = cookie; @@ -2297,10 +2292,9 @@ irqreturn_t t3_intr_msi_napi(int irq, void *cookie) spin_lock(&q->lock); - new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q); + new_packets = rspq_check_napi(&adap->sge.qs[0]); if (adap->params.nports == 2) - new_packets += rspq_check_napi(adap->sge.qs[1].netdev, - &adap->sge.qs[1].rspq); + new_packets += rspq_check_napi(&adap->sge.qs[1]); if (!new_packets && t3_slow_intr_handler(adap) == 0) q->unhandled_irqs++; @@ -2403,9 +2397,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie) static irqreturn_t t3b_intr_napi(int irq, void *cookie) { u32 map; - struct net_device *dev; struct adapter *adap = cookie; - struct sge_rspq *q0 = &adap->sge.qs[0].rspq; + struct sge_qset *qs0 = &adap->sge.qs[0]; + struct sge_rspq *q0 = &qs0->rspq; t3_write_reg(adap, A_PL_CLI, 0); map = t3_read_reg(adap, A_SG_DATA_INTR); @@ -2418,18 +2412,11 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie) if (unlikely(map & F_ERRINTR)) t3_slow_intr_handler(adap); - if (likely(map & 1)) { - dev = adap->sge.qs[0].netdev; - - if (likely(__netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); - } - if (map & 2) { - dev = adap->sge.qs[1].netdev; + if (likely(map & 1)) + napi_schedule(&qs0->napi); - if (likely(__netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); - } + if (map & 2) + napi_schedule(&adap->sge.qs[1].napi); spin_unlock(&q0->lock); return IRQ_HANDLED; @@ -2476,6 +2463,10 @@ void t3_sge_err_intr_handler(struct adapter *adapter) "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); } + if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) + CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", + status & F_HIPIODRBDROPERR ? "high" : "lo"); + t3_write_reg(adapter, A_SG_INT_CAUSE, status); if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED)) t3_fatal_err(adapter); @@ -2508,7 +2499,7 @@ static void sge_timer_cb(unsigned long data) { spinlock_t *lock; struct sge_qset *qs = (struct sge_qset *)data; - struct adapter *adap = qs->netdev->priv; + struct adapter *adap = qs->adap; if (spin_trylock(&qs->txq[TXQ_ETH].lock)) { reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]); @@ -2519,9 +2510,9 @@ static void sge_timer_cb(unsigned long data) spin_unlock(&qs->txq[TXQ_OFLD].lock); } lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock : - &adap->sge.qs[0].rspq.lock; + &adap->sge.qs[0].rspq.lock; if (spin_trylock_irq(lock)) { - if (!napi_is_scheduled(qs->netdev)) { + if (!napi_is_scheduled(&qs->napi)) { u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); if (qs->fl[0].credits < qs->fl[0].size) @@ -2555,12 +2546,9 @@ static void sge_timer_cb(unsigned long data) */ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) { - if (!qs->netdev) - return; - qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ qs->rspq.polling = p->polling; - qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll; + qs->napi.poll = p->polling ? 
napi_rx_handler : ofld_poll; } /** @@ -2580,7 +2568,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) */ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, int irq_vec_idx, const struct qset_params *p, - int ntxq, struct net_device *netdev) + int ntxq, struct net_device *dev) { int i, ret = -ENOMEM; struct sge_qset *q = &adapter->sge.qs[id]; @@ -2701,16 +2689,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, } spin_unlock(&adapter->sge.reg_lock); - q->netdev = netdev; - t3_update_qset_coalesce(q, p); - /* - * We use atalk_ptr as a backpointer to a qset. In case a device is - * associated with multiple queue sets only the first one sets - * atalk_ptr. - */ - if (netdev->atalk_ptr == NULL) - netdev->atalk_ptr = q; + q->adap = adapter; + q->netdev = dev; + t3_update_qset_coalesce(q, p); refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h index 514869e26a76..29b6c800b238 100644 --- a/drivers/net/cxgb3/sge_defs.h +++ b/drivers/net/cxgb3/sge_defs.h @@ -106,6 +106,10 @@ #define V_CQ_GEN(x) ((x) << S_CQ_GEN) #define F_CQ_GEN V_CQ_GEN(1U) +#define S_CQ_ERR 30 +#define V_CQ_ERR(x) ((x) << S_CQ_ERR) +#define F_CQ_ERR V_CQ_ERR(1U) + #define S_CQ_OVERFLOW_MODE 31 #define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE) #define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index dd3149d94ba8..d4ee00d32219 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -119,9 +119,9 @@ void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, * Reads registers that are accessed indirectly through an address/data * register pair. */ -void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, - unsigned int data_reg, u32 *vals, unsigned int nregs, - unsigned int start_idx) +static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, + unsigned int nregs, unsigned int start_idx) { while (nregs--) { t3_write_reg(adap, addr_reg, start_idx); @@ -505,7 +505,7 @@ struct t3_vpd { u8 vpdr_len[2]; VPD_ENTRY(pn, 16); /* part number */ VPD_ENTRY(ec, 16); /* EC level */ - VPD_ENTRY(sn, 16); /* serial number */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ VPD_ENTRY(na, 12); /* MAC address base */ VPD_ENTRY(cclk, 6); /* core clock */ VPD_ENTRY(mclk, 6); /* mem clock */ @@ -648,6 +648,7 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); + memcpy(p->sn, vpd.sn_data, SERNUM_LEN); /* Old eeproms didn't have port information */ if (adapter->params.rev == 0 && !vpd.port0_data[0]) { @@ -848,16 +849,15 @@ static int t3_write_flash(struct adapter *adapter, unsigned int addr, } /** - * t3_check_tpsram_version - read the tp sram version + * t3_get_tp_version - read the tp sram version * @adapter: the adapter + * @vers: where to place the version * - * Reads the protocol sram version from serial eeprom. + * Reads the protocol sram version from sram. 
*/ -int t3_check_tpsram_version(struct adapter *adapter) +int t3_get_tp_version(struct adapter *adapter, u32 *vers) { int ret; - u32 vers; - unsigned int major, minor; /* Get version loaded in SRAM */ t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0); @@ -866,7 +866,32 @@ int t3_check_tpsram_version(struct adapter *adapter) if (ret) return ret; - vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1); + *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1); + + return 0; +} + +/** + * t3_check_tpsram_version - read the tp sram version + * @adapter: the adapter + * @must_load: set to 1 if loading a new microcode image is required + * + * Reads the protocol sram version from flash. + */ +int t3_check_tpsram_version(struct adapter *adapter, int *must_load) +{ + int ret; + u32 vers; + unsigned int major, minor; + + if (adapter->params.rev == T3_REV_A) + return 0; + + *must_load = 1; + + ret = t3_get_tp_version(adapter, &vers); + if (ret) + return ret; major = G_TP_VERSION_MAJOR(vers); minor = G_TP_VERSION_MINOR(vers); @@ -874,6 +899,16 @@ int t3_check_tpsram_version(struct adapter *adapter) if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) return 0; + if (major != TP_VERSION_MAJOR) + CH_ERR(adapter, "found wrong TP version (%u.%u), " + "driver needs version %d.%d\n", major, minor, + TP_VERSION_MAJOR, TP_VERSION_MINOR); + else { + *must_load = 0; + CH_ERR(adapter, "found wrong TP version (%u.%u), " + "driver compiled for version %d.%d\n", major, minor, + TP_VERSION_MAJOR, TP_VERSION_MINOR); + } return -EINVAL; } @@ -925,16 +960,18 @@ int t3_get_fw_version(struct adapter *adapter, u32 *vers) /** * t3_check_fw_version - check if the FW is compatible with this driver * @adapter: the adapter - * + * @must_load: set to 1 if loading a new FW image is required + * Checks if an adapter's FW is compatible with the driver. Returns 0 * if the versions are compatible, a negative error otherwise. 
*/ -int t3_check_fw_version(struct adapter *adapter) +int t3_check_fw_version(struct adapter *adapter, int *must_load) { int ret; u32 vers; unsigned int type, major, minor; + *must_load = 1; ret = t3_get_fw_version(adapter, &vers); if (ret) return ret; @@ -947,9 +984,17 @@ int t3_check_fw_version(struct adapter *adapter) minor == FW_VERSION_MINOR) return 0; - CH_ERR(adapter, "found wrong FW version(%u.%u), " - "driver needs version %u.%u\n", major, minor, - FW_VERSION_MAJOR, FW_VERSION_MINOR); + if (major != FW_VERSION_MAJOR) + CH_ERR(adapter, "found wrong FW version(%u.%u), " + "driver needs version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + else { + *must_load = 0; + CH_WARN(adapter, "found wrong FW minor version(%u.%u), " + "driver compiled for version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + } + return -EINVAL; } @@ -1313,6 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter) {0} }; + if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR) + CH_ALERT(adapter, "PEX error code 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK, pcie_intr_info, adapter->irq_stats)) t3_fatal_err(adapter); @@ -1764,6 +1813,8 @@ void t3_intr_clear(struct adapter *adapter) for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); + if (is_pcie(adapter)) + t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ } @@ -1819,6 +1870,8 @@ void t3_port_intr_clear(struct adapter *adapter, int idx) phy->ops->intr_clear(phy); } +#define SG_CONTEXT_CMD_ATTEMPTS 100 + /** * t3_sge_write_context - write an SGE context * @adapter: the adapter @@ -1838,7 +1891,7 @@ static int t3_sge_write_context(struct adapter *adapter, unsigned int id, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -1995,7 +2048,8 @@ int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, base_addr >>= 32; t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) | - V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode)); + V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | + V_CQ_ERR(ovfl_mode)); t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | V_CQ_CREDIT_THRES(credit_thres)); return t3_sge_write_context(adapter, id, F_CQ); @@ -2023,7 +2077,7 @@ int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2047,7 +2101,7 @@ int t3_sge_disable_fl(struct adapter *adapter, unsigned int id) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2071,7 +2125,7 @@ int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2095,7 +2149,7 @@ int 
t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2120,7 +2174,7 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | V_CONTEXT(id) | F_CQ); if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1, &val)) + 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) return -EIO; if (op >= 2 && op < 7) { @@ -2130,7 +2184,8 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, - F_CONTEXT_CMD_BUSY, 0, 5, 1)) + F_CONTEXT_CMD_BUSY, 0, + SG_CONTEXT_CMD_ATTEMPTS, 1)) return -EIO; return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); } @@ -2156,7 +2211,7 @@ static int t3_sge_read_context(unsigned int type, struct adapter *adapter, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id)); if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, - 5, 1)) + SG_CONTEXT_CMD_ATTEMPTS, 1)) return -EIO; data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0); data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1); @@ -3188,6 +3243,8 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params) t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN); t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); + t3_write_reg(adapter, A_PM1_RX_MODE, 0); + t3_write_reg(adapter, A_PM1_TX_MODE, 0); init_hw_for_avail_ports(adapter, adapter->params.nports); t3_sge_init(adapter, &adapter->params.sge); @@ -3350,7 +3407,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai) * Older PCIe cards lose their config space during reset, PCI-X * ones don't. 
*/ -int t3_reset_adapter(struct adapter *adapter) +static int t3_reset_adapter(struct adapter *adapter) { int i, save_and_restore_pcie = adapter->params.rev < T3_REV_B2 && is_pcie(adapter); diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h index fa4099bc0416..77fcc1a4984e 100644 --- a/drivers/net/cxgb3/t3cdev.h +++ b/drivers/net/cxgb3/t3cdev.h @@ -42,9 +42,6 @@ #define T3CNAMSIZ 16 -/* Get the t3cdev associated with a net_device */ -#define T3CDEV(netdev) (struct t3cdev *)(netdev->priv) - struct cxgb3_client; enum t3ctype { diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h index eb508bf8022a..ef1c6339c806 100644 --- a/drivers/net/cxgb3/version.h +++ b/drivers/net/cxgb3/version.h @@ -39,6 +39,6 @@ /* Firmware version */ #define FW_VERSION_MAJOR 4 -#define FW_VERSION_MINOR 3 +#define FW_VERSION_MINOR 6 #define FW_VERSION_MICRO 0 #endif /* __CHELSIO_VERSION_H */ diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c index c302b1a30cba..eeb766aeced9 100644 --- a/drivers/net/cxgb3/xgmac.c +++ b/drivers/net/cxgb3/xgmac.c @@ -142,7 +142,7 @@ int t3_mac_reset(struct cmac *mac) return 0; } -int t3b2_mac_reset(struct cmac *mac) +static int t3b2_mac_reset(struct cmac *mac) { struct adapter *adap = mac->adapter; unsigned int oft = mac->offset; @@ -437,12 +437,13 @@ int t3_mac_enable(struct cmac *mac, int which) struct mac_stats *s = &mac->stats; if (which & MAC_DIRECTION_TX) { - t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401); t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx); + t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx); mac->tx_mcnt = s->tx_frames; mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, @@ -451,9 +452,11 @@ int t3_mac_enable(struct cmac *mac, int which) A_XGM_TX_SPI4_SOP_EOP_CNT + oft))); mac->rx_mcnt = s->rx_frames; + mac->rx_pause = s->rx_pause; mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_RX_SPI4_SOP_EOP_CNT + oft))); + mac->rx_ocnt = s->rx_fifo_ovfl; mac->txen = F_TXEN; mac->toggle_cnt = 0; } @@ -464,24 +467,19 @@ int t3_mac_enable(struct cmac *mac, int which) int t3_mac_disable(struct cmac *mac, int which) { - int idx = macidx(mac); struct adapter *adap = mac->adapter; - int val; if (which & MAC_DIRECTION_TX) { t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f); - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); - t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx); mac->txen = 0; } if (which & MAC_DIRECTION_RX) { + int val = F_MAC_RESET_; + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, F_PCS_RESET_, 0); msleep(100); t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0); - val = F_MAC_RESET_; if (is_10G(adap)) val |= F_PCS_RESET_; else if (uses_xaui(adap)) @@ -507,7 +505,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac) tx_xcnt = 1; /* By default tx_xcnt is making progress */ tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */ rx_xcnt = 1; /* By default rx_xcnt is making progress */ - if (tx_mcnt == mac->tx_mcnt) { + if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) { tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_TX_SPI4_SOP_EOP_CNT + mac->offset))); @@ -524,10 +522,7 @@ int t3b2_mac_watchdog_task(struct cmac 
*mac) goto rxcheck; } - if (((tx_tcnt != mac->tx_tcnt) && - (tx_xcnt == 0) && (mac->tx_xcnt == 0)) || - ((mac->tx_mcnt == tx_mcnt) && - (tx_xcnt != 0) && (mac->tx_xcnt != 0))) { + if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { if (mac->toggle_cnt > 4) { status = 2; goto out; @@ -541,11 +536,14 @@ int t3b2_mac_watchdog_task(struct cmac *mac) } rxcheck: - if (rx_mcnt != mac->rx_mcnt) + if (rx_mcnt != mac->rx_mcnt) { rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_RX_SPI4_SOP_EOP_CNT + - mac->offset))); - else + mac->offset))) + + (s->rx_fifo_ovfl - + mac->rx_ocnt); + mac->rx_ocnt = s->rx_fifo_ovfl; + } else goto out; if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && @@ -560,6 +558,7 @@ out: mac->tx_mcnt = s->tx_frames; mac->rx_xcnt = rx_xcnt; mac->rx_mcnt = s->rx_frames; + mac->rx_pause = s->rx_pause; if (status == 1) { t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ diff --git a/drivers/net/de600.c b/drivers/net/de600.c index dae97b860daa..cb849b091f98 100644 --- a/drivers/net/de600.c +++ b/drivers/net/de600.c @@ -154,11 +154,6 @@ static int de600_close(struct net_device *dev) return 0; } -static struct net_device_stats *get_stats(struct net_device *dev) -{ - return (struct net_device_stats *)(dev->priv); -} - static inline void trigger_interrupt(struct net_device *dev) { de600_put_command(FLIP_IRQ); @@ -308,7 +303,7 @@ static int de600_tx_intr(struct net_device *dev, int irq_status) if (!(irq_status & TX_FAILED16)) { tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; ++free_tx_pages; - ((struct net_device_stats *)(dev->priv))->tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); } @@ -375,8 +370,8 @@ static void de600_rx_intr(struct net_device *dev) /* update stats */ dev->last_rx = jiffies; - ((struct net_device_stats *)(dev->priv))->rx_packets++; /* count all receives */ - ((struct net_device_stats *)(dev->priv))->rx_bytes += size; /* count all received bytes */ + dev->stats.rx_packets++; /* count all receives */ + dev->stats.rx_bytes += size; /* count all received bytes */ /* * If any worth-while packets have been received, netif_rx() @@ -389,12 +384,12 @@ static struct net_device * __init de600_probe(void) int i; struct net_device *dev; int err; + DECLARE_MAC_BUF(mac); - dev = alloc_etherdev(sizeof(struct net_device_stats)); + dev = alloc_etherdev(0); if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); if (!request_region(DE600_IO, 3, "de600")) { printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO); @@ -444,12 +439,7 @@ static struct net_device * __init de600_probe(void) goto out1; } - printk(", Ethernet Address: %02X", dev->dev_addr[0]); - for (i = 1; i < ETH_ALEN; i++) - printk(":%02X",dev->dev_addr[i]); - printk("\n"); - - dev->get_stats = get_stats; + printk(", Ethernet Address: %s\n", print_mac(mac, dev->dev_addr)); dev->open = de600_open; dev->stop = de600_close; diff --git a/drivers/net/de600.h b/drivers/net/de600.h index 1288e48ba704..e80ecbabcf4e 100644 --- a/drivers/net/de600.h +++ b/drivers/net/de600.h @@ -121,7 +121,6 @@ static u8 de600_read_byte(unsigned char type, struct net_device *dev); /* Put in the device structure. */ static int de600_open(struct net_device *dev); static int de600_close(struct net_device *dev); -static struct net_device_stats *get_stats(struct net_device *dev); static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev); /* Dispatch from interrupts. 
*/ diff --git a/drivers/net/de620.c b/drivers/net/de620.c index dc4892426174..3f5190c654cf 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c @@ -216,7 +216,6 @@ MODULE_PARM_DESC(de620_debug, "DE-620 debug level (0-2)"); /* Put in the device structure. */ static int de620_open(struct net_device *); static int de620_close(struct net_device *); -static struct net_device_stats *get_stats(struct net_device *); static void de620_set_multicast_list(struct net_device *); static int de620_start_xmit(struct sk_buff *, struct net_device *); @@ -480,16 +479,6 @@ static int de620_close(struct net_device *dev) /********************************************* * - * Return current statistics - * - */ -static struct net_device_stats *get_stats(struct net_device *dev) -{ - return (struct net_device_stats *)(dev->priv); -} - -/********************************************* - * * Set or clear the multicast filter for this adaptor. * (no real multicast implemented for the DE-620, but she can be promiscuous...) * @@ -579,7 +568,7 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev) if(!(using_txbuf == (TXBF0 | TXBF1))) netif_wake_queue(dev); - ((struct net_device_stats *)(dev->priv))->tx_packets++; + dev->stats.tx_packets++; spin_unlock_irqrestore(&de620_lock, flags); dev_kfree_skb (skb); return 0; @@ -660,7 +649,7 @@ static int de620_rx_intr(struct net_device *dev) /* You win some, you lose some. And sometimes plenty... */ adapter_init(dev); netif_wake_queue(dev); - ((struct net_device_stats *)(dev->priv))->rx_over_errors++; + dev->stats.rx_over_errors++; return 0; } @@ -680,7 +669,7 @@ static int de620_rx_intr(struct net_device *dev) next_rx_page = header_buf.Rx_NextPage; /* at least a try... */ de620_send_command(dev, W_DUMMY); de620_set_register(dev, W_NPRF, next_rx_page); - ((struct net_device_stats *)(dev->priv))->rx_over_errors++; + dev->stats.rx_over_errors++; return 0; } next_rx_page = pagelink; @@ -693,7 +682,7 @@ static int de620_rx_intr(struct net_device *dev) skb = dev_alloc_skb(size+2); if (skb == NULL) { /* Yeah, but no place to put it... */ printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); - ((struct net_device_stats *)(dev->priv))->rx_dropped++; + dev->stats.rx_dropped++; } else { /* Yep! Go get it! */ skb_reserve(skb,2); /* Align */ @@ -706,8 +695,8 @@ static int de620_rx_intr(struct net_device *dev) netif_rx(skb); /* deliver it "upstairs" */ dev->last_rx = jiffies; /* count all receives */ - ((struct net_device_stats *)(dev->priv))->rx_packets++; - ((struct net_device_stats *)(dev->priv))->rx_bytes += size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; } } @@ -818,13 +807,12 @@ struct net_device * __init de620_probe(int unit) struct net_device *dev; int err = -ENOMEM; int i; + DECLARE_MAC_BUF(mac); - dev = alloc_etherdev(sizeof(struct net_device_stats)); + dev = alloc_etherdev(0); if (!dev) goto out; - SET_MODULE_OWNER(dev); - spin_lock_init(&de620_lock); /* @@ -866,13 +854,14 @@ struct net_device * __init de620_probe(int unit) } /* else, got it! */ - printk(", Ethernet Address: %2.2X", - dev->dev_addr[0] = nic_data.NodeID[0]); + dev->dev_addr[0] = nic_data.NodeID[0]; for (i = 1; i < ETH_ALEN; i++) { - printk(":%2.2X", dev->dev_addr[i] = nic_data.NodeID[i]); + dev->dev_addr[i] = nic_data.NodeID[i]; dev->broadcast[i] = 0xff; } + printk(", Ethernet Address: %s", print_mac(mac, dev->dev_addr)); + printk(" (%dk RAM,", (nic_data.RAM_Size) ? 
(nic_data.RAM_Size >> 2) : 64); @@ -881,7 +870,6 @@ struct net_device * __init de620_probe(int unit) else printk(" UTP)\n"); - dev->get_stats = get_stats; dev->open = de620_open; dev->stop = de620_close; dev->hard_start_xmit = de620_start_xmit; diff --git a/drivers/net/declance.c b/drivers/net/declance.c index b2577f40124e..00e0194bfef0 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -258,8 +258,6 @@ struct lance_private { int rx_new, tx_new; int rx_old, tx_old; - struct net_device_stats stats; - unsigned short busmaster_regval; struct timer_list multicast_timer; @@ -583,22 +581,22 @@ static int lance_rx(struct net_device *dev) /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; skb = dev_alloc_skb(len + 2); @@ -606,7 +604,7 @@ static int lance_rx(struct net_device *dev) if (skb == 0) { printk("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; *rds_ptr(rd, mblength, lp->type) = 0; *rds_ptr(rd, rmd1, lp->type) = ((lp->rx_buf_ptr_lnc[entry] >> 16) & @@ -614,7 +612,7 @@ static int lance_rx(struct net_device *dev) lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; return 0; } - lp->stats.rx_bytes += len; + dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ @@ -625,7 +623,7 @@ static int lance_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } /* Return the packet to the pool */ @@ -660,14 +658,14 @@ static void lance_tx(struct net_device *dev) if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { status = *tds_ptr(td, misc, lp->type); - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (status & LE_T3_RTY) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; printk("%s: Carrier Lost\n", dev->name); /* Stop the lance */ writereg(&ll->rap, LE_CSR0); @@ -681,7 +679,7 @@ static void lance_tx(struct net_device *dev) * transmitter, restart the adapter. */ if (status & (LE_T3_BUF | LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -702,13 +700,13 @@ static void lance_tx(struct net_device *dev) /* One collision before packet was sent. */ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. 
*/ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = (j + 1) & TX_RING_MOD_MASK; } @@ -754,10 +752,10 @@ static irqreturn_t lance_interrupt(const int irq, void *dev_id) lance_tx(dev); if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { printk("%s: Memory error, status %04x\n", dev->name, csr0); @@ -912,7 +910,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) len = ETH_ZLEN; } - lp->stats.tx_bytes += len; + dev->stats.tx_bytes += len; entry = lp->tx_new; *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); @@ -938,13 +936,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *lance_get_stats(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); @@ -1036,6 +1027,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type) int i, ret; unsigned long esar_base; unsigned char *esar; + DECLARE_MAC_BUF(mac); if (dec_lance_debug && version_printed++ == 0) printk(version); @@ -1223,28 +1215,26 @@ static int __init dec_lance_probe(struct device *bdev, const int type) */ switch (type) { case ASIC_LANCE: - printk("%s: IOASIC onboard LANCE, addr = ", name); + printk("%s: IOASIC onboard LANCE", name); break; case PMAD_LANCE: - printk("%s: PMAD-AA, addr = ", name); + printk("%s: PMAD-AA", name); break; case PMAX_LANCE: - printk("%s: PMAX onboard LANCE, addr = ", name); + printk("%s: PMAX onboard LANCE", name); break; } - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = esar[i * 4]; - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
',' : ':'); - } - printk(" irq = %d\n", dev->irq); + printk(", addr = %s, irq = %d\n", + print_mac(mac, dev->dev_addr), dev->irq); dev->open = &lance_open; dev->stop = &lance_close; dev->hard_start_xmit = &lance_start_xmit; dev->tx_timeout = &lance_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; /* lp->ll is the location of the registers for lance card */ diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index 7df23dc28190..b07613e61f53 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c @@ -200,6 +200,7 @@ /* Include files */ #include <linux/bitops.h> +#include <linux/compiler.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/eisa.h> @@ -240,8 +241,6 @@ static char version[] __devinitdata = */ #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128) -#define __unused __attribute__ ((unused)) - #ifdef CONFIG_PCI #define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type) #else @@ -375,7 +374,7 @@ static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) { - struct device __unused *bdev = bp->bus_dev; + struct device __maybe_unused *bdev = bp->bus_dev; int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; @@ -399,7 +398,7 @@ static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) { - struct device __unused *bdev = bp->bus_dev; + struct device __maybe_unused *bdev = bp->bus_dev; int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; @@ -540,7 +539,6 @@ static int __devinit dfx_register(struct device *bdev) goto err_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, bdev); bp = netdev_priv(dev); @@ -866,7 +864,7 @@ static void __devinit dfx_bus_uninit(struct net_device *dev) static void __devinit dfx_bus_config_check(DFX_board_t *bp) { - struct device __unused *bdev = bp->bus_dev; + struct device __maybe_unused *bdev = bp->bus_dev; int dfx_bus_eisa = DFX_BUS_EISA(bdev); int status; /* return code from adapter port control call */ u32 host_data; /* LW data returned from port control call */ @@ -3624,8 +3622,8 @@ static void __devexit dfx_unregister(struct device *bdev) } -static int __devinit __unused dfx_dev_register(struct device *); -static int __devexit __unused dfx_dev_unregister(struct device *); +static int __devinit __maybe_unused dfx_dev_register(struct device *); +static int __devexit __maybe_unused dfx_dev_unregister(struct device *); #ifdef CONFIG_PCI static int __devinit dfx_pci_register(struct pci_dev *, @@ -3699,7 +3697,7 @@ static struct tc_driver dfx_tc_driver = { }; #endif /* CONFIG_TC */ -static int __devinit __unused dfx_dev_register(struct device *dev) +static int __devinit __maybe_unused dfx_dev_register(struct device *dev) { int status; @@ -3709,7 +3707,7 @@ static int __devinit __unused dfx_dev_register(struct device *dev) return status; } -static int __devexit __unused dfx_dev_unregister(struct device *dev) +static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev) { put_device(dev); dfx_unregister(dev); diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 183497020bfc..ace39ec0a367 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -485,7 +485,6 @@ struct depca_private { /* Kernel-only (not device) fields */ int rx_new, tx_new; /* The next free ring entry */ int rx_old, tx_old; /* The ring entries to be free()ed. 
*/ - struct net_device_stats stats; spinlock_t lock; struct { /* Private stats counters */ u32 bins[DEPCA_PKT_STAT_SZ]; @@ -522,7 +521,6 @@ static irqreturn_t depca_interrupt(int irq, void *dev_id); static int depca_close(struct net_device *dev); static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void depca_tx_timeout(struct net_device *dev); -static struct net_device_stats *depca_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); /* @@ -575,6 +573,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) s16 nicsr; u_long ioaddr; u_long mem_start; + DECLARE_MAC_BUF(mac); /* * We are now supposed to enter this function with the @@ -634,14 +633,11 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) printk(", h/w address "); status = get_hw_addr(dev); + printk("%s", print_mac(mac, dev->dev_addr)); if (status != 0) { printk(" which has an Ethernet PROM CRC error.\n"); return -ENXIO; } - for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet address */ - printk("%2.2x:", dev->dev_addr[i]); - } - printk("%2.2x", dev->dev_addr[i]); /* Set up the maximum amount of network RAM(kB) */ netRAM = ((lp->adapter != DEPCA) ? 64 : 48); @@ -801,7 +797,6 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) dev->open = &depca_open; dev->hard_start_xmit = &depca_start_xmit; dev->stop = &depca_close; - dev->get_stats = &depca_get_stats; dev->set_multicast_list = &set_multicast_list; dev->do_ioctl = &depca_ioctl; dev->tx_timeout = depca_tx_timeout; @@ -1026,15 +1021,15 @@ static int depca_rx(struct net_device *dev) } if (status & R_ENP) { /* Valid frame status */ if (status & R_ERR) { /* There was an error. */ - lp->stats.rx_errors++; /* Update the error stats. */ + dev->stats.rx_errors++; /* Update the error stats. */ if (status & R_FRAM) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & R_OFLO) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (status & R_CRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (status & R_BUFF) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } else { short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; struct sk_buff *skb; @@ -1063,8 +1058,8 @@ static int depca_rx(struct net_device *dev) ** Update stats */ dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { lp->pktStats.bins[i]++; @@ -1087,7 +1082,7 @@ static int depca_rx(struct net_device *dev) } } else { printk("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; /* Really, deferred. */ + dev->stats.rx_dropped++; /* Really, deferred. */ break; } } @@ -1125,24 +1120,24 @@ static int depca_tx(struct net_device *dev) break; } else if (status & T_ERR) { /* An error occurred. */ status = readl(&lp->tx_ring[entry].misc); - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (status & TMD3_RTRY) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & TMD3_LCAR) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (status & TMD3_LCOL) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (status & TMD3_UFLO) - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (status & (TMD3_BUFF | TMD3_UFLO)) { /* Trigger an immediate send demand. 
*/ outw(CSR0, DEPCA_ADDR); outw(INEA | TDMD, DEPCA_DATA); } } else if (status & (T_MORE | T_ONE)) { - lp->stats.collisions++; + dev->stats.collisions++; } else { - lp->stats.tx_packets++; + dev->stats.tx_packets++; } /* Update all the pointers */ @@ -1234,15 +1229,6 @@ static int InitRestartDepca(struct net_device *dev) return status; } -static struct net_device_stats *depca_get_stats(struct net_device *dev) -{ - struct depca_private *lp = (struct depca_private *) dev->priv; - - /* Null body since there is no framing error counter */ - - return &lp->stats; -} - /* ** Set or clear the multicast filter for this adaptor. */ @@ -1855,6 +1841,7 @@ static void depca_dbg_open(struct net_device *dev) u_long ioaddr = dev->base_addr; struct depca_init *p = &lp->init_block; int i; + DECLARE_MAC_BUF(mac); if (depca_debug > 1) { /* Do not copy the shadow init block into shared memory */ @@ -1893,11 +1880,7 @@ static void depca_dbg_open(struct net_device *dev) printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base)); printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start); printk(" mode: 0x%4.4x\n", p->mode); - printk(" physical address: "); - for (i = 0; i < ETH_ALEN - 1; i++) { - printk("%2.2x:", p->phys_addr[i]); - } - printk("%2.2x\n", p->phys_addr[i]); + printk(" physical address: %s\n", print_mac(mac, p->phys_addr)); printk(" multicast hash table: "); for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) { printk("%2.2x:", p->mcast_table[i]); diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c deleted file mode 100644 index df62c0232f36..000000000000 --- a/drivers/net/dgrs.c +++ /dev/null @@ -1,1615 +0,0 @@ -/* - * Digi RightSwitch SE-X loadable device driver for Linux - * - * The RightSwitch is a 4 (EISA) or 6 (PCI) port etherswitch and - * a NIC on an internal board. - * - * Author: Rick Richardson, rick@remotepoint.com - * Derived from the SVR4.2 (UnixWare) driver for the same card. - * - * Copyright 1995-1996 Digi International Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For information on purchasing a RightSwitch SE-4 or SE-6 - * board, please contact Digi's sales department at 1-612-912-3444 - * or 1-800-DIGIBRD. Outside the U.S., please check our Web page - * at http://www.dgii.com for sales offices worldwide. - * - * OPERATION: - * When compiled as a loadable module, this driver can operate - * the board as either a 4/6 port switch with a 5th or 7th port - * that is a conventional NIC interface as far as the host is - * concerned, OR as 4/6 independent NICs. To select multi-NIC - * mode, add "nicmode=1" on the insmod load line for the driver. - * - * This driver uses the "dev" common ethernet device structure - * and a private "priv" (dev->priv) structure that contains - * mostly DGRS-specific information and statistics. To keep - * the code for both the switch mode and the multi-NIC mode - * as similar as possible, I have introduced the concept of - * "dev0"/"priv0" and "devN"/"privN" pointer pairs in subroutines - * where needed. The first pair of pointers points to the - * "dev" and "priv" structures of the zeroth (0th) device - * interface associated with a board. The second pair of - * pointers points to the current (Nth) device interface - * for the board: the one for which we are processing data. - * - * In switch mode, the pairs of pointers are always the same, - * that is, dev0 == devN and priv0 == privN. 
This is just - * like previous releases of this driver which did not support - * NIC mode. - * - * In multi-NIC mode, the pairs of pointers may be different. - * We use the devN and privN pointers to reference just the - * name, port number, and statistics for the current interface. - * We use the dev0 and priv0 pointers to access the variables - * that control access to the board, such as board address - * and simulated 82596 variables. This is because there is - * only one "fake" 82596 that serves as the interface to - * the board. We do not want to try to keep the variables - * associated with this 82596 in sync across all devices. - * - * This scheme works well. As you will see, except for - * initialization, there is very little difference between - * the two modes as far as this driver is concerned. On the - * receive side in NIC mode, the interrupt *always* comes in on - * the 0th interface (dev0/priv0). We then figure out which - * real 82596 port it came in on from looking at the "chan" - * member that the board firmware adds at the end of each - * RBD (a.k.a. TBD). We get the channel number like this: - * int chan = ((I596_RBD *) S2H(cbp->xmit.tbdp))->chan; - * - * On the transmit side in multi-NIC mode, we specify the - * output 82596 port by setting the new "dstchan" structure - * member that is at the end of the RFD, like this: - * priv0->rfdp->dstchan = privN->chan; - * - * TODO: - * - Multi-NIC mode is not yet supported when the driver is linked - * into the kernel. - * - Better handling of multicast addresses. - * - * Fixes: - * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001 - * - fix dgrs_found_device wrt checking kmalloc return and - * rollbacking the partial steps of the whole process when - * one of the devices can't be allocated. Fix SET_MODULE_OWNER - * on the loop to use devN instead of repeated calls to dev. - * - * davej <davej@suse.de> - 9/2/2001 - * - Enable PCI device before reading ioaddr/irq - * - */ - -#include <linux/module.h> -#include <linux/eisa.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/byteorder.h> -#include <asm/uaccess.h> - -static char version[] __initdata = - "$Id: dgrs.c,v 1.13 2000/06/06 04:07:00 rick Exp $"; - -/* - * DGRS include files - */ -typedef unsigned char uchar; -#define vol volatile - -#include "dgrs.h" -#include "dgrs_es4h.h" -#include "dgrs_plx9060.h" -#include "dgrs_i82596.h" -#include "dgrs_ether.h" -#include "dgrs_asstruct.h" -#include "dgrs_bcomm.h" - -#ifdef CONFIG_PCI -static struct pci_device_id dgrs_pci_tbl[] = { - { SE6_PCI_VENDOR_ID, SE6_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, }, - { } /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(pci, dgrs_pci_tbl); -#endif - -#ifdef CONFIG_EISA -static struct eisa_device_id dgrs_eisa_tbl[] = { - { "DBI0A01" }, - { } -}; -MODULE_DEVICE_TABLE(eisa, dgrs_eisa_tbl); -#endif - -MODULE_LICENSE("GPL"); - - -/* - * Firmware. Compiled separately for local compilation, - * but #included for Linux distribution. 
- */ -#ifndef NOFW - #include "dgrs_firmware.c" -#else - extern int dgrs_firmnum; - extern char dgrs_firmver[]; - extern char dgrs_firmdate[]; - extern uchar dgrs_code[]; - extern int dgrs_ncode; -#endif - -/* - * Linux out*() is backwards from all other operating systems - */ -#define OUTB(ADDR, VAL) outb(VAL, ADDR) -#define OUTW(ADDR, VAL) outw(VAL, ADDR) -#define OUTL(ADDR, VAL) outl(VAL, ADDR) - -/* - * Macros to convert switch to host and host to switch addresses - * (assumes a local variable priv points to board dependent struct) - */ -#define S2H(A) ( ((unsigned long)(A)&0x00ffffff) + priv0->vmem ) -#define S2HN(A) ( ((unsigned long)(A)&0x00ffffff) + privN->vmem ) -#define H2S(A) ( ((char *) (A) - priv0->vmem) + 0xA3000000 ) - -/* - * Convert a switch address to a "safe" address for use with the - * PLX 9060 DMA registers and the associated HW kludge that allows - * for host access of the DMA registers. - */ -#define S2DMA(A) ( (unsigned long)(A) & 0x00ffffff) - -/* - * "Space.c" variables, now settable from module interface - * Use the name below, minus the "dgrs_" prefix. See init_module(). - */ -static int dgrs_debug = 1; -static int dgrs_dma = 1; -static int dgrs_spantree = -1; -static int dgrs_hashexpire = -1; -static uchar dgrs_ipaddr[4] = { 0xff, 0xff, 0xff, 0xff}; -static uchar dgrs_iptrap[4] = { 0xff, 0xff, 0xff, 0xff}; -static __u32 dgrs_ipxnet = -1; -static int dgrs_nicmode; - -/* - * Private per-board data structure (dev->priv) - */ -typedef struct -{ - /* - * Stuff for generic ethercard I/F - */ - struct net_device_stats stats; - - /* - * DGRS specific data - */ - char *vmem; - - struct bios_comm *bcomm; /* Firmware BIOS comm structure */ - PORT *port; /* Ptr to PORT[0] struct in VM */ - I596_SCB *scbp; /* Ptr to SCB struct in VM */ - I596_RFD *rfdp; /* Current RFD list */ - I596_RBD *rbdp; /* Current RBD list */ - - volatile int intrcnt; /* Count of interrupts */ - - /* - * SE-4 (EISA) board variables - */ - uchar is_reg; /* EISA: Value for ES4H_IS reg */ - - /* - * SE-6 (PCI) board variables - * - * The PLX "expansion rom" space is used for DMA register - * access from the host on the SE-6. These are the physical - * and virtual addresses of that space. - */ - ulong plxreg; /* Phys address of PLX chip */ - char *vplxreg; /* Virtual address of PLX chip */ - ulong plxdma; /* Phys addr of PLX "expansion rom" */ - ulong volatile *vplxdma; /* Virtual addr of "expansion rom" */ - int use_dma; /* Flag: use DMA */ - DMACHAIN *dmadesc_s; /* area for DMA chains (SW addr.) */ - DMACHAIN *dmadesc_h; /* area for DMA chains (Host Virtual) */ - - /* - * Multi-NIC mode variables - * - * All entries of the devtbl[] array are valid for the 0th - * device (i.e. eth0, but not eth1...eth5). devtbl[0] is - * valid for all devices (i.e. eth0, eth1, ..., eth5). - */ - int nports; /* Number of physical ports (4 or 6) */ - int chan; /* Channel # (1-6) for this device */ - struct net_device *devtbl[6]; /* Ptrs to N device structs */ - -} DGRS_PRIV; - - -/* - * reset or un-reset the IDT processor - */ -static void -proc_reset(struct net_device *dev0, int reset) -{ - DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv; - - if (priv0->plxreg) - { - ulong val; - val = inl(dev0->base_addr + PLX_MISC_CSR); - if (reset) - val |= SE6_RESET; - else - val &= ~SE6_RESET; - OUTL(dev0->base_addr + PLX_MISC_CSR, val); - } - else - { - OUTB(dev0->base_addr + ES4H_PC, reset ? 
ES4H_PC_RESET : 0); - } -} - -/* - * See if the board supports bus master DMA - */ -static int -check_board_dma(struct net_device *dev0) -{ - DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv; - ulong x; - - /* - * If Space.c says not to use DMA, or if it's not a PLX based - * PCI board, or if the expansion ROM space is not PCI - * configured, then return false. - */ - if (!dgrs_dma || !priv0->plxreg || !priv0->plxdma) - return (0); - - /* - * Set the local address remap register of the "expansion rom" - * area to 0x80000000 so that we can use it to access the DMA - * registers from the host side. - */ - OUTL(dev0->base_addr + PLX_ROM_BASE_ADDR, 0x80000000); - - /* - * Set the PCI region descriptor to: - * Space 0: - * disable read-prefetch - * enable READY - * enable BURST - * 0 internal wait states - * Expansion ROM: (used for host DMA register access) - * disable read-prefetch - * enable READY - * disable BURST - * 0 internal wait states - */ - OUTL(dev0->base_addr + PLX_BUS_REGION, 0x49430343); - - /* - * Now map the DMA registers into our virtual space - */ - priv0->vplxdma = (ulong *) ioremap (priv0->plxdma, 256); - if (!priv0->vplxdma) - { - printk("%s: can't *remap() the DMA regs\n", dev0->name); - return (0); - } - - /* - * Now test to see if we can access the DMA registers - * If we write -1 and get back 1FFF, then we accessed the - * DMA register. Otherwise, we probably have an old board - * and wrote into regular RAM. - */ - priv0->vplxdma[PLX_DMA0_MODE/4] = 0xFFFFFFFF; - x = priv0->vplxdma[PLX_DMA0_MODE/4]; - if (x != 0x00001FFF) { - iounmap((void *)priv0->vplxdma); - return (0); - } - - return (1); -} - -/* - * Initiate DMA using PLX part on PCI board. Spin the - * processor until completed. All addresses are physical! - * - * If pciaddr is NULL, then it's a chaining DMA, and lcladdr is - * the address of the first DMA descriptor in the chain. - * - * If pciaddr is not NULL, then it's a single DMA. - * - * In either case, "lcladdr" must have been fixed up to make - * sure the MSB isn't set using the S2DMA macro before passing - * the address to this routine. - */ -static int -do_plx_dma( - struct net_device *dev, - ulong pciaddr, - ulong lcladdr, - int len, - int to_host -) -{ - int i; - ulong csr = 0; - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - - if (pciaddr) - { - /* - * Do a single, non-chain DMA - */ - priv->vplxdma[PLX_DMA0_PCI_ADDR/4] = pciaddr; - priv->vplxdma[PLX_DMA0_LCL_ADDR/4] = lcladdr; - priv->vplxdma[PLX_DMA0_SIZE/4] = len; - priv->vplxdma[PLX_DMA0_DESCRIPTOR/4] = to_host - ? PLX_DMA_DESC_TO_HOST - : PLX_DMA_DESC_TO_BOARD; - priv->vplxdma[PLX_DMA0_MODE/4] = - PLX_DMA_MODE_WIDTH32 - | PLX_DMA_MODE_WAITSTATES(0) - | PLX_DMA_MODE_READY - | PLX_DMA_MODE_NOBTERM - | PLX_DMA_MODE_BURST - | PLX_DMA_MODE_NOCHAIN; - } - else - { - /* - * Do a chaining DMA - */ - priv->vplxdma[PLX_DMA0_MODE/4] = - PLX_DMA_MODE_WIDTH32 - | PLX_DMA_MODE_WAITSTATES(0) - | PLX_DMA_MODE_READY - | PLX_DMA_MODE_NOBTERM - | PLX_DMA_MODE_BURST - | PLX_DMA_MODE_CHAIN; - priv->vplxdma[PLX_DMA0_DESCRIPTOR/4] = lcladdr; - } - - priv->vplxdma[PLX_DMA_CSR/4] = - PLX_DMA_CSR_0_ENABLE | PLX_DMA_CSR_0_START; - - /* - * Wait for DMA to complete - */ - for (i = 0; i < 1000000; ++i) - { - /* - * Spin the host CPU for 1 usec, so we don't thrash - * the PCI bus while the PLX 9060 is doing DMA. - */ - udelay(1); - - csr = (volatile unsigned long) priv->vplxdma[PLX_DMA_CSR/4]; - - if (csr & PLX_DMA_CSR_0_DONE) - break; - } - - if ( ! (csr & PLX_DMA_CSR_0_DONE) ) - { - printk("%s: DMA done never occurred. 
DMA disabled.\n", - dev->name); - priv->use_dma = 0; - return 1; - } - return 0; -} - -/* - * dgrs_rcv_frame() - * - * Process a received frame. This is called from the interrupt - * routine, and works for both switch mode and multi-NIC mode. - * - * Note that when in multi-NIC mode, we want to always access the - * hardware using the dev and priv structures of the first port, - * so that we are using only one set of variables to maintain - * the board interface status, but we want to use the Nth port - * dev and priv structures to maintain statistics and to pass - * the packet up. - * - * Only the first device structure is attached to the interrupt. - * We use the special "chan" variable at the end of the first RBD - * to select the Nth device in multi-NIC mode. - * - * We currently do chained DMA on a per-packet basis when the - * packet is "long", and we spin the CPU a short time polling - * for DMA completion. This avoids a second interrupt overhead, - * and gives the best performance for light traffic to the host. - * - * However, a better scheme that could be implemented would be - * to see how many packets are outstanding for the host, and if - * the number is "large", create a long chain to DMA several - * packets into the host in one go. In this case, we would set - * up some state variables to let the host CPU continue doing - * other things until a DMA completion interrupt comes along. - */ -static void -dgrs_rcv_frame( - struct net_device *dev0, - DGRS_PRIV *priv0, - I596_CB *cbp -) -{ - int len; - I596_TBD *tbdp; - struct sk_buff *skb; - uchar *putp; - uchar *p; - struct net_device *devN; - DGRS_PRIV *privN; - - /* - * Determine Nth priv and dev structure pointers - */ - if (dgrs_nicmode) - { /* Multi-NIC mode */ - int chan = ((I596_RBD *) S2H(cbp->xmit.tbdp))->chan; - - devN = priv0->devtbl[chan-1]; - /* - * If devN is null, we got an interrupt before the I/F - * has been initialized. Pitch the packet. - */ - if (devN == NULL) - goto out; - privN = (DGRS_PRIV *) devN->priv; - } - else - { /* Switch mode */ - devN = dev0; - privN = priv0; - } - - if (0) printk("%s: rcv len=%ld\n", devN->name, cbp->xmit.count); - - /* - * Allocate a message block big enough to hold the whole frame - */ - len = cbp->xmit.count; - if ((skb = dev_alloc_skb(len+5)) == NULL) - { - printk("%s: dev_alloc_skb failed for rcv buffer\n", devN->name); - ++privN->stats.rx_dropped; - /* discarding the frame */ - goto out; - } - skb_reserve(skb, 2); /* Align IP header */ - -again: - putp = p = skb_put(skb, len); - - /* - * There are three modes here for doing the packet copy. - * If we have DMA, and the packet is "long", we use the - * chaining mode of DMA. If it's shorter, we use single - * DMA's. Otherwise, we use memcpy(). - */ - if (priv0->use_dma && priv0->dmadesc_h && len > 64) - { - /* - * If we can use DMA and it's a long frame, copy it using - * DMA chaining. - */ - DMACHAIN *ddp_h; /* Host virtual DMA desc. pointer */ - DMACHAIN *ddp_s; /* Switch physical DMA desc. pointer */ - uchar *phys_p; - - /* - * Get the physical address of the STREAMS buffer. - * NOTE: allocb() guarantees that the whole buffer - * is in a single page if the length < 4096. 
- */ - phys_p = (uchar *) virt_to_phys(putp); - - ddp_h = priv0->dmadesc_h; - ddp_s = priv0->dmadesc_s; - tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp); - for (;;) - { - int count; - int amt; - - count = tbdp->count; - amt = count & 0x3fff; - if (amt == 0) - break; /* For safety */ - if ( (p-putp) >= len) - { - printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp)); - proc_reset(dev0, 1); /* Freeze IDT */ - break; /* For Safety */ - } - - ddp_h->pciaddr = (ulong) phys_p; - ddp_h->lcladdr = S2DMA(tbdp->buf); - ddp_h->len = amt; - - phys_p += amt; - p += amt; - - if (count & I596_TBD_EOF) - { - ddp_h->next = PLX_DMA_DESC_TO_HOST - | PLX_DMA_DESC_EOC; - ++ddp_h; - break; - } - else - { - ++ddp_s; - ddp_h->next = PLX_DMA_DESC_TO_HOST - | (ulong) ddp_s; - tbdp = (I596_TBD *) S2H(tbdp->next); - ++ddp_h; - } - } - if (ddp_h - priv0->dmadesc_h) - { - int rc; - - rc = do_plx_dma(dev0, - 0, (ulong) priv0->dmadesc_s, len, 0); - if (rc) - { - printk("%s: Chained DMA failure\n", devN->name); - goto again; - } - } - } - else if (priv0->use_dma) - { - /* - * If we can use DMA and it's a shorter frame, copy it - * using single DMA transfers. - */ - uchar *phys_p; - - /* - * Get the physical address of the STREAMS buffer. - * NOTE: allocb() guarantees that the whole buffer - * is in a single page if the length < 4096. - */ - phys_p = (uchar *) virt_to_phys(putp); - - tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp); - for (;;) - { - int count; - int amt; - int rc; - - count = tbdp->count; - amt = count & 0x3fff; - if (amt == 0) - break; /* For safety */ - if ( (p-putp) >= len) - { - printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp)); - proc_reset(dev0, 1); /* Freeze IDT */ - break; /* For Safety */ - } - rc = do_plx_dma(dev0, (ulong) phys_p, - S2DMA(tbdp->buf), amt, 1); - if (rc) - { - memcpy(p, S2H(tbdp->buf), amt); - printk("%s: Single DMA failed\n", devN->name); - } - phys_p += amt; - p += amt; - if (count & I596_TBD_EOF) - break; - tbdp = (I596_TBD *) S2H(tbdp->next); - } - } - else - { - /* - * Otherwise, copy it piece by piece using memcpy() - */ - tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp); - for (;;) - { - int count; - int amt; - - count = tbdp->count; - amt = count & 0x3fff; - if (amt == 0) - break; /* For safety */ - if ( (p-putp) >= len) - { - printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp)); - proc_reset(dev0, 1); /* Freeze IDT */ - break; /* For Safety */ - } - memcpy(p, S2H(tbdp->buf), amt); - p += amt; - if (count & I596_TBD_EOF) - break; - tbdp = (I596_TBD *) S2H(tbdp->next); - } - } - - /* - * Pass the frame to upper half - */ - skb->protocol = eth_type_trans(skb, devN); - netif_rx(skb); - devN->last_rx = jiffies; - ++privN->stats.rx_packets; - privN->stats.rx_bytes += len; - -out: - cbp->xmit.status = I596_CB_STATUS_C | I596_CB_STATUS_OK; -} - -/* - * Start transmission of a frame - * - * The interface to the board is simple: we pretend that we are - * a fifth 82596 ethernet controller 'receiving' data, and copy the - * data into the same structures that a real 82596 would. This way, - * the board firmware handles the host 'port' the same as any other. - * - * NOTE: we do not use Bus master DMA for this routine. Turns out - * that it is not needed. Slave writes over the PCI bus are about - * as fast as DMA, due to the fact that the PLX part can do burst - * writes. The same is not true for data being read from the board. - * - * For multi-NIC mode, we tell the firmware the desired 82596 - * output port by setting the special "dstchan" member at the - * end of the traditional 82596 RFD structure. 
- */ - -static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN) -{ - DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv; - struct net_device *dev0; - DGRS_PRIV *priv0; - I596_RBD *rbdp; - int count; - int i, len, amt; - - /* - * Determine 0th priv and dev structure pointers - */ - if (dgrs_nicmode) - { - dev0 = privN->devtbl[0]; - priv0 = (DGRS_PRIV *) dev0->priv; - } - else - { - dev0 = devN; - priv0 = privN; - } - - if (dgrs_debug > 1) - printk("%s: xmit len=%d\n", devN->name, (int) skb->len); - - devN->trans_start = jiffies; - netif_start_queue(devN); - - if (priv0->rfdp->cmd & I596_RFD_EL) - { /* Out of RFD's */ - if (0) printk("%s: NO RFD's\n", devN->name); - goto no_resources; - } - - rbdp = priv0->rbdp; - count = 0; - priv0->rfdp->rbdp = (I596_RBD *) H2S(rbdp); - - i = 0; len = skb->len; - for (;;) - { - if (rbdp->size & I596_RBD_EL) - { /* Out of RBD's */ - if (0) printk("%s: NO RBD's\n", devN->name); - goto no_resources; - } - - amt = min_t(unsigned int, len, rbdp->size - count); - skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt); - i += amt; - count += amt; - len -= amt; - if (len == 0) - { - if (skb->len < 60) - rbdp->count = 60 | I596_RBD_EOF; - else - rbdp->count = count | I596_RBD_EOF; - rbdp = (I596_RBD *) S2H(rbdp->next); - goto frame_done; - } - else if (count < 32) - { - /* More data to come, but we used less than 32 - * bytes of this RBD. Keep filling this RBD. - */ - {} /* Yes, we do nothing here */ - } - else - { - rbdp->count = count; - rbdp = (I596_RBD *) S2H(rbdp->next); - count = 0; - } - } - -frame_done: - priv0->rbdp = rbdp; - if (dgrs_nicmode) - priv0->rfdp->dstchan = privN->chan; - priv0->rfdp->status = I596_RFD_C | I596_RFD_OK; - priv0->rfdp = (I596_RFD *) S2H(priv0->rfdp->next); - - ++privN->stats.tx_packets; - - dev_kfree_skb (skb); - return (0); - -no_resources: - priv0->scbp->status |= I596_SCB_RNR; /* simulate I82596 */ - return (-EAGAIN); -} - -/* - * Open the interface - */ -static int -dgrs_open( struct net_device *dev ) -{ - netif_start_queue(dev); - return (0); -} - -/* - * Close the interface - */ -static int dgrs_close( struct net_device *dev ) -{ - netif_stop_queue(dev); - return (0); -} - -/* - * Get statistics - */ -static struct net_device_stats *dgrs_get_stats( struct net_device *dev ) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - - return (&priv->stats); -} - -/* - * Set multicast list and/or promiscuous mode - */ - -static void dgrs_set_multicast_list( struct net_device *dev) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - - priv->port->is_promisc = (dev->flags & IFF_PROMISC) ? 
1 : 0; -} - -/* - * Unique ioctl's - */ -static int dgrs_ioctl(struct net_device *devN, struct ifreq *ifr, int cmd) -{ - DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv; - DGRS_IOCTL ioc; - int i; - - if (cmd != DGRSIOCTL) - return -EINVAL; - - if(copy_from_user(&ioc, ifr->ifr_data, sizeof(DGRS_IOCTL))) - return -EFAULT; - - switch (ioc.cmd) - { - case DGRS_GETMEM: - if (ioc.len != sizeof(ulong)) - return -EINVAL; - if(copy_to_user(ioc.data, &devN->mem_start, ioc.len)) - return -EFAULT; - return (0); - case DGRS_SETFILTER: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (ioc.port > privN->bcomm->bc_nports) - return -EINVAL; - if (ioc.filter >= NFILTERS) - return -EINVAL; - if (ioc.len > privN->bcomm->bc_filter_area_len) - return -EINVAL; - - /* Wait for old command to finish */ - for (i = 0; i < 1000; ++i) - { - if ( (volatile long) privN->bcomm->bc_filter_cmd <= 0 ) - break; - udelay(1); - } - if (i >= 1000) - return -EIO; - - privN->bcomm->bc_filter_port = ioc.port; - privN->bcomm->bc_filter_num = ioc.filter; - privN->bcomm->bc_filter_len = ioc.len; - - if (ioc.len) - { - if(copy_from_user(S2HN(privN->bcomm->bc_filter_area), - ioc.data, ioc.len)) - return -EFAULT; - privN->bcomm->bc_filter_cmd = BC_FILTER_SET; - } - else - privN->bcomm->bc_filter_cmd = BC_FILTER_CLR; - return(0); - default: - return -EOPNOTSUPP; - } -} - -/* - * Process interrupts - * - * dev, priv will always refer to the 0th device in Multi-NIC mode. - */ - -static irqreturn_t dgrs_intr(int irq, void *dev_id) -{ - struct net_device *dev0 = dev_id; - DGRS_PRIV *priv0 = dev0->priv; - I596_CB *cbp; - int cmd; - int i; - - ++priv0->intrcnt; - if (1) ++priv0->bcomm->bc_cnt[4]; - if (0) - { - static int cnt = 100; - if (--cnt > 0) - printk("%s: interrupt: irq %d\n", dev0->name, irq); - } - - /* - * Get 596 command - */ - cmd = priv0->scbp->cmd; - - /* - * See if RU has been restarted - */ - if ( (cmd & I596_SCB_RUC) == I596_SCB_RUC_START) - { - if (0) printk("%s: RUC start\n", dev0->name); - priv0->rfdp = (I596_RFD *) S2H(priv0->scbp->rfdp); - priv0->rbdp = (I596_RBD *) S2H(priv0->rfdp->rbdp); - priv0->scbp->status &= ~(I596_SCB_RNR|I596_SCB_RUS); - /* - * Tell upper half (halves) - */ - if (dgrs_nicmode) - { - for (i = 0; i < priv0->nports; ++i) - netif_wake_queue (priv0->devtbl[i]); - } - else - netif_wake_queue (dev0); - /* if (bd->flags & TX_QUEUED) - DL_sched(bd, bdd); */ - } - - /* - * See if any CU commands to process - */ - if ( (cmd & I596_SCB_CUC) != I596_SCB_CUC_START) - { - priv0->scbp->cmd = 0; /* Ignore all other commands */ - goto ack_intr; - } - priv0->scbp->status &= ~(I596_SCB_CNA|I596_SCB_CUS); - - /* - * Process a command - */ - cbp = (I596_CB *) S2H(priv0->scbp->cbp); - priv0->scbp->cmd = 0; /* Safe to clear the command */ - for (;;) - { - switch (cbp->nop.cmd & I596_CB_CMD) - { - case I596_CB_CMD_XMIT: - dgrs_rcv_frame(dev0, priv0, cbp); - break; - default: - cbp->nop.status = I596_CB_STATUS_C | I596_CB_STATUS_OK; - break; - } - if (cbp->nop.cmd & I596_CB_CMD_EL) - break; - cbp = (I596_CB *) S2H(cbp->nop.next); - } - priv0->scbp->status |= I596_SCB_CNA; - - /* - * Ack the interrupt - */ -ack_intr: - if (priv0->plxreg) - OUTL(dev0->base_addr + PLX_LCL2PCI_DOORBELL, 1); - - return IRQ_HANDLED; -} - -/* - * Download the board firmware - */ -static int __init -dgrs_download(struct net_device *dev0) -{ - DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv; - int is; - unsigned long i; - - static const int iv2is[16] = { - 0, 0, 0, ES4H_IS_INT3, - 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, - 0, 0, ES4H_IS_INT10, 
ES4H_IS_INT11, - ES4H_IS_INT12, 0, 0, ES4H_IS_INT15 }; - - /* - * Map in the dual port memory - */ - priv0->vmem = ioremap(dev0->mem_start, 2048*1024); - if (!priv0->vmem) - { - printk("%s: cannot map in board memory\n", dev0->name); - return -ENXIO; - } - - /* - * Hold the processor and configure the board addresses - */ - if (priv0->plxreg) - { /* PCI bus */ - proc_reset(dev0, 1); - } - else - { /* EISA bus */ - is = iv2is[dev0->irq & 0x0f]; - if (!is) - { - printk("%s: Illegal IRQ %d\n", dev0->name, dev0->irq); - iounmap(priv0->vmem); - priv0->vmem = NULL; - return -ENXIO; - } - OUTB(dev0->base_addr + ES4H_AS_31_24, - (uchar) (dev0->mem_start >> 24) ); - OUTB(dev0->base_addr + ES4H_AS_23_16, - (uchar) (dev0->mem_start >> 16) ); - priv0->is_reg = ES4H_IS_LINEAR | is | - ((uchar) (dev0->mem_start >> 8) & ES4H_IS_AS15); - OUTB(dev0->base_addr + ES4H_IS, priv0->is_reg); - OUTB(dev0->base_addr + ES4H_EC, ES4H_EC_ENABLE); - OUTB(dev0->base_addr + ES4H_PC, ES4H_PC_RESET); - OUTB(dev0->base_addr + ES4H_MW, ES4H_MW_ENABLE | 0x00); - } - - /* - * See if we can do DMA on the SE-6 - */ - priv0->use_dma = check_board_dma(dev0); - if (priv0->use_dma) - printk("%s: Bus Master DMA is enabled.\n", dev0->name); - - /* - * Load and verify the code at the desired address - */ - memcpy(priv0->vmem, dgrs_code, dgrs_ncode); /* Load code */ - if (memcmp(priv0->vmem, dgrs_code, dgrs_ncode)) - { - iounmap(priv0->vmem); - priv0->vmem = NULL; - printk("%s: download compare failed\n", dev0->name); - return -ENXIO; - } - - /* - * Configurables - */ - priv0->bcomm = (struct bios_comm *) (priv0->vmem + 0x0100); - priv0->bcomm->bc_nowait = 1; /* Tell board to make printf not wait */ - priv0->bcomm->bc_squelch = 0; /* Flag from Space.c */ - priv0->bcomm->bc_150ohm = 0; /* Flag from Space.c */ - - priv0->bcomm->bc_spew = 0; /* Debug flag from Space.c */ - priv0->bcomm->bc_maxrfd = 0; /* Debug flag from Space.c */ - priv0->bcomm->bc_maxrbd = 0; /* Debug flag from Space.c */ - - /* - * Tell board we are operating in switch mode (1) or in - * multi-NIC mode (2). - */ - priv0->bcomm->bc_host = dgrs_nicmode ? BC_MULTINIC : BC_SWITCH; - - /* - * Request memory space on board for DMA chains - */ - if (priv0->use_dma) - priv0->bcomm->bc_hostarea_len = (2048/64) * 16; - - /* - * NVRAM configurables from Space.c - */ - priv0->bcomm->bc_spantree = dgrs_spantree; - priv0->bcomm->bc_hashexpire = dgrs_hashexpire; - memcpy(priv0->bcomm->bc_ipaddr, dgrs_ipaddr, 4); - memcpy(priv0->bcomm->bc_iptrap, dgrs_iptrap, 4); - memcpy(priv0->bcomm->bc_ipxnet, &dgrs_ipxnet, 4); - - /* - * Release processor, wait 8 seconds for board to initialize - */ - proc_reset(dev0, 0); - - for (i = jiffies + 8 * HZ; time_after(i, jiffies); ) - { - barrier(); /* Gcc 2.95 needs this */ - if (priv0->bcomm->bc_status >= BC_RUN) - break; - } - - if (priv0->bcomm->bc_status < BC_RUN) - { - printk("%s: board not operating\n", dev0->name); - iounmap(priv0->vmem); - priv0->vmem = NULL; - return -ENXIO; - } - - priv0->port = (PORT *) S2H(priv0->bcomm->bc_port); - priv0->scbp = (I596_SCB *) S2H(priv0->port->scbp); - priv0->rfdp = (I596_RFD *) S2H(priv0->scbp->rfdp); - priv0->rbdp = (I596_RBD *) S2H(priv0->rfdp->rbdp); - - priv0->scbp->status = I596_SCB_CNA; /* CU is idle */ - - /* - * Get switch physical and host virtual pointers to DMA - * chaining area. NOTE: the MSB of the switch physical - * address *must* be turned off. Otherwise, the HW kludge - * that allows host access of the PLX DMA registers will - * erroneously select the PLX registers. 
- */ - priv0->dmadesc_s = (DMACHAIN *) S2DMA(priv0->bcomm->bc_hostarea); - if (priv0->dmadesc_s) - priv0->dmadesc_h = (DMACHAIN *) S2H(priv0->dmadesc_s); - else - priv0->dmadesc_h = NULL; - - /* - * Enable board interrupts - */ - if (priv0->plxreg) - { /* PCI bus */ - OUTL(dev0->base_addr + PLX_INT_CSR, - inl(dev0->base_addr + PLX_INT_CSR) - | PLX_PCI_DOORBELL_IE); /* Enable intr to host */ - OUTL(dev0->base_addr + PLX_LCL2PCI_DOORBELL, 1); - } - else - { /* EISA bus */ - } - - return (0); -} - -/* - * Probe (init) a board - */ -static int __init -dgrs_probe1(struct net_device *dev) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - unsigned long i; - int rc; - - printk("%s: Digi RightSwitch io=%lx mem=%lx irq=%d plx=%lx dma=%lx\n", - dev->name, dev->base_addr, dev->mem_start, dev->irq, - priv->plxreg, priv->plxdma); - - /* - * Download the firmware and light the processor - */ - rc = dgrs_download(dev); - if (rc) - goto err_out; - - /* - * Get ether address of board - */ - printk("%s: Ethernet address", dev->name); - memcpy(dev->dev_addr, priv->port->ethaddr, 6); - for (i = 0; i < 6; ++i) - printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]); - printk("\n"); - - if (dev->dev_addr[0] & 1) - { - printk("%s: Illegal Ethernet Address\n", dev->name); - rc = -ENXIO; - goto err_out; - } - - /* - * ACK outstanding interrupts, hook the interrupt, - * and verify that we are getting interrupts from the board. - */ - if (priv->plxreg) - OUTL(dev->base_addr + PLX_LCL2PCI_DOORBELL, 1); - - rc = request_irq(dev->irq, &dgrs_intr, IRQF_SHARED, "RightSwitch", dev); - if (rc) - goto err_out; - - priv->intrcnt = 0; - for (i = jiffies + 2*HZ + HZ/2; time_after(i, jiffies); ) - { - cpu_relax(); - if (priv->intrcnt >= 2) - break; - } - if (priv->intrcnt < 2) - { - printk(KERN_ERR "%s: Not interrupting on IRQ %d (%d)\n", - dev->name, dev->irq, priv->intrcnt); - rc = -ENXIO; - goto err_free_irq; - } - - /* - * Entry points... - */ - dev->open = &dgrs_open; - dev->stop = &dgrs_close; - dev->get_stats = &dgrs_get_stats; - dev->hard_start_xmit = &dgrs_start_xmit; - dev->set_multicast_list = &dgrs_set_multicast_list; - dev->do_ioctl = &dgrs_ioctl; - - return rc; - -err_free_irq: - free_irq(dev->irq, dev); -err_out: - return rc; -} - -static int __init -dgrs_initclone(struct net_device *dev) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - int i; - - printk("%s: Digi RightSwitch port %d ", - dev->name, priv->chan); - for (i = 0; i < 6; ++i) - printk("%c%2.2x", i ? 
':' : ' ', dev->dev_addr[i]); - printk("\n"); - - return (0); -} - -static struct net_device * __init -dgrs_found_device( - int io, - ulong mem, - int irq, - ulong plxreg, - ulong plxdma, - struct device *pdev -) -{ - DGRS_PRIV *priv; - struct net_device *dev; - int i, ret = -ENOMEM; - - dev = alloc_etherdev(sizeof(DGRS_PRIV)); - if (!dev) - goto err0; - - priv = (DGRS_PRIV *)dev->priv; - - dev->base_addr = io; - dev->mem_start = mem; - dev->mem_end = mem + 2048 * 1024 - 1; - dev->irq = irq; - priv->plxreg = plxreg; - priv->plxdma = plxdma; - priv->vplxdma = NULL; - - priv->chan = 1; - priv->devtbl[0] = dev; - - SET_MODULE_OWNER(dev); - SET_NETDEV_DEV(dev, pdev); - - ret = dgrs_probe1(dev); - if (ret) - goto err1; - - ret = register_netdev(dev); - if (ret) - goto err2; - - if ( !dgrs_nicmode ) - return dev; /* Switch mode, we are done */ - - /* - * Operating card as N separate NICs - */ - - priv->nports = priv->bcomm->bc_nports; - - for (i = 1; i < priv->nports; ++i) - { - struct net_device *devN; - DGRS_PRIV *privN; - /* Allocate new dev and priv structures */ - devN = alloc_etherdev(sizeof(DGRS_PRIV)); - ret = -ENOMEM; - if (!devN) - goto fail; - - /* Don't copy the network device structure! */ - - /* copy the priv structure of dev[0] */ - privN = (DGRS_PRIV *)devN->priv; - *privN = *priv; - - /* ... and zero out VM areas */ - privN->vmem = NULL; - privN->vplxdma = NULL; - /* ... and zero out IRQ */ - devN->irq = 0; - /* ... and base MAC address off address of 1st port */ - devN->dev_addr[5] += i; - - ret = dgrs_initclone(devN); - if (ret) - goto fail; - - SET_MODULE_OWNER(devN); - SET_NETDEV_DEV(dev, pdev); - - ret = register_netdev(devN); - if (ret) { - free_netdev(devN); - goto fail; - } - privN->chan = i+1; - priv->devtbl[i] = devN; - } - return dev; - - fail: - while (i >= 0) { - struct net_device *d = priv->devtbl[i--]; - unregister_netdev(d); - free_netdev(d); - } - - err2: - free_irq(dev->irq, dev); - err1: - free_netdev(dev); - err0: - return ERR_PTR(ret); -} - -static void __devexit dgrs_remove(struct net_device *dev) -{ - DGRS_PRIV *priv = dev->priv; - int i; - - unregister_netdev(dev); - - for (i = 1; i < priv->nports; ++i) { - struct net_device *d = priv->devtbl[i]; - if (d) { - unregister_netdev(d); - free_netdev(d); - } - } - - proc_reset(priv->devtbl[0], 1); - - if (priv->vmem) - iounmap(priv->vmem); - if (priv->vplxdma) - iounmap((uchar *) priv->vplxdma); - - if (dev->irq) - free_irq(dev->irq, dev); - - for (i = 1; i < priv->nports; ++i) { - if (priv->devtbl[i]) - unregister_netdev(priv->devtbl[i]); - } -} - -#ifdef CONFIG_PCI -static int __init dgrs_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - int err; - uint io; - uint mem; - uint irq; - uint plxreg; - uint plxdma; - - /* - * Get and check the bus-master and latency values. - * Some PCI BIOSes fail to set the master-enable bit, - * and the latency timer must be set to the maximum - * value to avoid data corruption that occurs when the - * timer expires during a transfer. Yes, it's a bug. - */ - err = pci_enable_device(pdev); - if (err) - return err; - err = pci_request_regions(pdev, "RightSwitch"); - if (err) - return err; - - pci_set_master(pdev); - - plxreg = pci_resource_start (pdev, 0); - io = pci_resource_start (pdev, 1); - mem = pci_resource_start (pdev, 2); - pci_read_config_dword(pdev, 0x30, &plxdma); - irq = pdev->irq; - plxdma &= ~15; - - /* - * On some BIOSES, the PLX "expansion rom" (used for DMA) - * address comes up as "0". 
This is probably because - * the BIOS doesn't see a valid 55 AA ROM signature at - * the "ROM" start and zeroes the address. To get - * around this problem the SE-6 is configured to ask - * for 4 MB of space for the dual port memory. We then - * must set its range back to 2 MB, and use the upper - * half for DMA register access - */ - OUTL(io + PLX_SPACE0_RANGE, 0xFFE00000L); - if (plxdma == 0) - plxdma = mem + (2048L * 1024L); - pci_write_config_dword(pdev, 0x30, plxdma + 1); - pci_read_config_dword(pdev, 0x30, &plxdma); - plxdma &= ~15; - - dev = dgrs_found_device(io, mem, irq, plxreg, plxdma, &pdev->dev); - if (IS_ERR(dev)) { - pci_release_regions(pdev); - return PTR_ERR(dev); - } - - pci_set_drvdata(pdev, dev); - return 0; -} - -static void __devexit dgrs_pci_remove(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - dgrs_remove(dev); - pci_release_regions(pdev); - free_netdev(dev); -} - -static struct pci_driver dgrs_pci_driver = { - .name = "dgrs", - .id_table = dgrs_pci_tbl, - .probe = dgrs_pci_probe, - .remove = __devexit_p(dgrs_pci_remove), -}; -#else -static struct pci_driver dgrs_pci_driver = {}; -#endif - - -#ifdef CONFIG_EISA -static int is2iv[8] __initdata = { 0, 3, 5, 7, 10, 11, 12, 15 }; - -static int __init dgrs_eisa_probe (struct device *gendev) -{ - struct net_device *dev; - struct eisa_device *edev = to_eisa_device(gendev); - uint io = edev->base_addr; - uint mem; - uint irq; - int rc = -ENODEV; /* Not EISA configured */ - - if (!request_region(io, 256, "RightSwitch")) { - printk(KERN_ERR "dgrs: eisa io 0x%x, which is busy.\n", io); - return -EBUSY; - } - - if ( ! (inb(io+ES4H_EC) & ES4H_EC_ENABLE) ) - goto err_out; - - mem = (inb(io+ES4H_AS_31_24) << 24) - + (inb(io+ES4H_AS_23_16) << 16); - - irq = is2iv[ inb(io+ES4H_IS) & ES4H_IS_INTMASK ]; - - dev = dgrs_found_device(io, mem, irq, 0L, 0L, gendev); - if (IS_ERR(dev)) { - rc = PTR_ERR(dev); - goto err_out; - } - - gendev->driver_data = dev; - return 0; - err_out: - release_region(io, 256); - return rc; -} - -static int __devexit dgrs_eisa_remove(struct device *gendev) -{ - struct net_device *dev = gendev->driver_data; - - dgrs_remove(dev); - - release_region(dev->base_addr, 256); - - free_netdev(dev); - return 0; -} - - -static struct eisa_driver dgrs_eisa_driver = { - .id_table = dgrs_eisa_tbl, - .driver = { - .name = "dgrs", - .probe = dgrs_eisa_probe, - .remove = __devexit_p(dgrs_eisa_remove), - } -}; -#endif - -/* - * Variables that can be overriden from module command line - */ -static int debug = -1; -static int dma = -1; -static int hashexpire = -1; -static int spantree = -1; -static int ipaddr[4] = { -1 }; -static int iptrap[4] = { -1 }; -static __u32 ipxnet = -1; -static int nicmode = -1; - -module_param(debug, int, 0); -module_param(dma, int, 0); -module_param(hashexpire, int, 0); -module_param(spantree, int, 0); -module_param_array(ipaddr, int, NULL, 0); -module_param_array(iptrap, int, NULL, 0); -module_param(ipxnet, int, 0); -module_param(nicmode, int, 0); -MODULE_PARM_DESC(debug, "Digi RightSwitch enable debugging (0-1)"); -MODULE_PARM_DESC(dma, "Digi RightSwitch enable BM DMA (0-1)"); -MODULE_PARM_DESC(nicmode, "Digi RightSwitch operating mode (1: switch, 2: multi-NIC)"); - -static int __init dgrs_init_module (void) -{ - int i; - int err; - - /* - * Command line variable overrides - * debug=NNN - * dma=0/1 - * spantree=0/1 - * hashexpire=NNN - * ipaddr=A,B,C,D - * iptrap=A,B,C,D - * ipxnet=NNN - * nicmode=NNN - */ - if (debug >= 0) - dgrs_debug = debug; - if (dma >= 0) - 
dgrs_dma = dma; - if (nicmode >= 0) - dgrs_nicmode = nicmode; - if (hashexpire >= 0) - dgrs_hashexpire = hashexpire; - if (spantree >= 0) - dgrs_spantree = spantree; - if (ipaddr[0] != -1) - for (i = 0; i < 4; ++i) - dgrs_ipaddr[i] = ipaddr[i]; - if (iptrap[0] != -1) - for (i = 0; i < 4; ++i) - dgrs_iptrap[i] = iptrap[i]; - if (ipxnet != -1) - dgrs_ipxnet = htonl( ipxnet ); - - if (dgrs_debug) - { - printk(KERN_INFO "dgrs: SW=%s FW=Build %d %s\nFW Version=%s\n", - version, dgrs_firmnum, dgrs_firmdate, dgrs_firmver); - } - - /* - * Find and configure all the cards - */ -#ifdef CONFIG_EISA - err = eisa_driver_register(&dgrs_eisa_driver); - if (err) - return err; -#endif - err = pci_register_driver(&dgrs_pci_driver); - if (err) - return err; - return 0; -} - -static void __exit dgrs_cleanup_module (void) -{ -#ifdef CONFIG_EISA - eisa_driver_unregister (&dgrs_eisa_driver); -#endif -#ifdef CONFIG_PCI - pci_unregister_driver (&dgrs_pci_driver); -#endif -} - -module_init(dgrs_init_module); -module_exit(dgrs_cleanup_module); diff --git a/drivers/net/dgrs.h b/drivers/net/dgrs.h deleted file mode 100644 index 6058d5301cb6..000000000000 --- a/drivers/net/dgrs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * ioctl's for the Digi Intl. RightSwitch - * - * These network driver ioctl's are a bit obtuse compared to the usual - * ioctl's for a "normal" device driver. Hey, I didn't invent it. - * - * Typical use: - * - * struct ifreq ifr; - * DGRS_IOCTL ioc; - * int x; - * - * strcpy(ifr.ifr_name, "eth1"); - * ifr.ifr_data = (caddr_t) &ioc; - * ioc.cmd = DGRS_GETMEM; - * ioc.len = sizeof(x); - * ioc.data = (caddr_t) &x; - * rc = ioctl(fd, DGRSIOCTL, &ifr); - * printf("rc=%d mem=%x\n", rc, x); - * - */ -#include <linux/sockios.h> - -#define DGRSIOCTL SIOCDEVPRIVATE - -typedef struct dgrs_ioctl { - unsigned short cmd; /* Command to run */ - unsigned short len; /* Length of the data buffer */ - unsigned char __user *data; /* Pointer to the data buffer */ - unsigned short port; /* port number for command, if needed */ - unsigned short filter; /* filter number for command, if needed */ -} DGRS_IOCTL; - -/* - * Commands for the driver - */ -#define DGRS_GETMEM 0x01 /* Get the dual port memory address */ -#define DGRS_SETFILTER 0x02 /* Set a filter */ diff --git a/drivers/net/dgrs_asstruct.h b/drivers/net/dgrs_asstruct.h deleted file mode 100644 index f0e2121770f1..000000000000 --- a/drivers/net/dgrs_asstruct.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * For declaring structures shared with assembly routines - * - * $Id: asstruct.h,v 1.1.1.1 1994/10/23 05:08:32 rick Exp $ - */ - -#ifdef ASSEMBLER - -# define MO(t,a) (a) -# define VMO(t,a) (a) - -# define BEGIN_STRUCT(x) _Off=0 -# define S1A(t,x,n) _Off=(_Off+0)&~0; x=_Off; _Off=_Off+(1*n) -# define S2A(t,x,n) _Off=(_Off+1)&~1; x=_Off; _Off=_Off+(2*n) -# define S4A(t,x,n) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+(4*n) -# define WORD(x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4 -# define WORDA(x,n) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+(4*n) -# define VWORD(x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4 -# define S1(t,x) _Off=(_Off+0)&~0; x=_Off; _Off=_Off+1 -# define S2(t,x) _Off=(_Off+1)&~1; x=_Off; _Off=_Off+2 -# define S4(t,x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4 -# define END_STRUCT(x) _Off=(_Off+3)&~3; x=_Off - -#else /* C */ - -#define VMO(t,a) (*(volatile t *)(a)) - -# define BEGIN_STRUCT(x) struct x { -# define S1(t,x) t x ; -# define S1A(t,x,n) t x[n] ; -# define S2(t,x) t x ; -# define S2A(t,x,n) t x[n] ; -# define S4(t,x) t x ; -# define S4A(t,x,n) t x[n] ; -# define END_STRUCT(x) } 
; - -#endif diff --git a/drivers/net/dgrs_bcomm.h b/drivers/net/dgrs_bcomm.h deleted file mode 100644 index 5e9c25273981..000000000000 --- a/drivers/net/dgrs_bcomm.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * The bios low-memory structure - * - * Some of the variables in here can be used to set parameters that - * are stored in NVRAM and will retain their old values the next time - * the card is brought up. To use the values stored in NVRAM, the - * parameter should be set to "all ones". This tells the firmware to - * use the NVRAM value or a suitable default. The value that is used - * will be stored back into this structure by the firmware. If the - * value of the variable is not "all ones", then that value will be - * used and will be stored into NVRAM if it isn't already there. - * The variables this applies to are the following: - * Variable Set to: Gets default of: - * bc_hashexpire -1 300 (5 minutes) - * bc_spantree -1 1 (spanning tree on) - * bc_ipaddr FF:FF:FF:FF 0 (no SNMP IP address) - * bc_ipxnet FF:FF:FF:FF 0 (no SNMP IPX net) - * bc_iptrap FF:FF:FF:FF 0 (no SNMP IP trap address) - * - * Some variables MUST have their value set after the firmware - * is loaded onto the board, but before the processor is released. - * These are: - * bc_host 0 means no host "port", run as standalone switch. - * 1 means run as a switch, with a host port. (normal) - * 2 means run as multiple NICs, not as a switch. - * -1 means run in diagnostics mode. - * bc_nowait - * bc_hostarea_len - * bc_filter_len - * - */ -BEGIN_STRUCT(bios_comm) - S4(ulong, bc_intflag) /* Count of all interrupts */ - S4(ulong, bc_lbolt) /* Count of timer interrupts */ - S4(ulong, bc_maincnt) /* Count of main loops */ - S4(ulong, bc_hashcnt) /* Count of entries in hash table */ - S4A(ulong, bc_cnt, 8) /* Misc counters, for debugging */ - S4A(ulong, bc_flag, 8) /* Misc flags, for debugging */ - S4(ulong, bc_memsize) /* Size of memory */ - S4(ulong, bc_dcache) /* Size of working dcache */ - S4(ulong, bc_icache) /* Size of working icache */ - S4(long, bc_status) /* Firmware status */ - S1A(char, bc_file, 8) /* File name of assertion failure */ - S4(ulong, bc_line) /* Line # of assertion failure */ - S4(uchar *, bc_ramstart) - S4(uchar *, bc_ramend) - S4(uchar *, bc_heapstart) /* Start of heap (end of loaded memory) */ - S4(uchar *, bc_heapend) /* End of heap */ - - /* Configurable Parameters */ - S4(long, bc_host) /* 1=Host Port, 0=No Host Port, -1=Test Mode */ - S4(long, bc_nowait) /* Don't wait for 2host circ buffer to empty*/ - S4(long, bc_150ohm) /* 0 == 100 ohm UTP, 1 == 150 ohm STP */ - S4(long, bc_squelch) /* 0 == normal squelch, 1 == reduced squelch */ - S4(ulong, bc_hashexpire) /* Expiry time in seconds for hash table */ - S4(long, bc_spantree) /* 1 == enable IEEE spanning tree */ - - S2A(ushort, bc_eaddr, 3) /* New ether address */ - S2(ushort, bc_dummy1) /* padding for DOS compilers */ - - /* Various debugging aids */ - S4(long, bc_debug) /* Debugging is turned on */ - S4(long, bc_spew) /* Spew data on port 4 for bs_spew seconds */ - S4(long, bc_spewlen) /* Length of spewed data packets */ - S4(long, bc_maxrfd) /* If != 0, max number of RFD's to allocate */ - S4(long, bc_maxrbd) /* If != 0, max number of RBD's to allocate */ - - /* Circular buffers for messages to/from host */ - S4(ulong, bc_2host_head) - S4(ulong, bc_2host_tail) - S4(ulong, bc_2host_mask) - S1A(char, bc_2host, 0x200) /* Circ buff to host */ - - S4(ulong, bc_2idt_head) - S4(ulong, bc_2idt_tail) - S4(ulong, bc_2idt_mask) - S1A(char, bc_2idt, 0x200) /* 
Circ buff to idt */ - - /* Pointers to structures for driver access */ - S4(uchar *, bc_port) /* pointer to Port[] structures */ - S4(long, bc_nports) /* Number of ports */ - S4(long, bc_portlen) /* sizeof(PORT) */ - S4(uchar *, bc_hash) /* Pointer to hash table */ - S4(long, bc_hashlen) /* sizeof(Table) */ - - /* SNMP agent addresses */ - S1A(uchar, bc_ipaddr, 4) /* IP address for SNMP */ - S1A(uchar, bc_ipxnet, 4) /* IPX net address for SNMP */ - - S4(long, bc_nohostintr) /* Do not cause periodic host interrupts */ - - S4(uchar *, bc_dmaaddr) /* Physical addr of host DMA buf for diags */ - S4(ulong, bc_dmalen) /* Length of DMA buffer 0..2048 */ - - /* - * Board memory allocated on startup for use by host, usually - * for the purposes of creating DMA chain descriptors. The - * "len" must be set before the processor is released. The - * address of the area is returned in bc_hostarea. The area - * is guaranteed to be aligned on a 16 byte boundary. - */ - S4(ulong, bc_hostarea_len) /* RW: Number of bytes to allocate */ - S4(uchar *, bc_hostarea) /* RO: Address of allocated memory */ - - /* - * Variables for communicating filters into the board - */ - S4(ulong *, bc_filter_area) /* RO: Space to put filter into */ - S4(ulong, bc_filter_area_len) /* RO: Length of area, in bytes */ - S4(long, bc_filter_cmd) /* RW: Filter command, see below */ - S4(ulong, bc_filter_len) /* RW: Actual length of filter */ - S4(ulong, bc_filter_port) /* RW: Port # for filter 0..6 */ - S4(ulong, bc_filter_num) /* RW: Filter #, 0=input, 1=output */ - - /* more SNMP agent addresses */ - S1A(uchar, bc_iptrap, 4) /* IP address for SNMP */ - - S4A(long, bc_spare, 2) /* spares */ -END_STRUCT(bios_comm) - -#define bc VMO(struct bios_comm, 0xa3000100) - -/* - * bc_status values - */ -#define BC_INIT 0 -#define BC_RUN 100 - -/* - * bc_host values - */ -#define BC_DIAGS -1 -#define BC_SASWITCH 0 -#define BC_SWITCH 1 -#define BC_MULTINIC 2 - -/* - * Values for spew (debugging) - */ -#define BC_SPEW_ENABLE 0x80000000 - -/* - * filter commands - */ -#define BC_FILTER_ERR -1 -#define BC_FILTER_OK 0 -#define BC_FILTER_SET 1 -#define BC_FILTER_CLR 2 diff --git a/drivers/net/dgrs_es4h.h b/drivers/net/dgrs_es4h.h deleted file mode 100644 index 5518fba46b2c..000000000000 --- a/drivers/net/dgrs_es4h.h +++ /dev/null @@ -1,183 +0,0 @@ -/************************************************************************/ -/* */ -/* es4h.h: Hardware definition of the ES/4h Ethernet Switch, from */ -/* both the host and the 3051's point of view. */ -/* NOTE: this name is a misnomer now that there is a PCI */ -/* board. Everything that says "es4h" should really be */ -/* "se4". But we'll keep the old name for now. */ -/* */ -/* $Id: es4h.h,v 1.10 1996/08/22 17:16:53 rick Exp $ */ -/* */ -/************************************************************************/ - -/************************************************************************/ -/* */ -/* EISA I/O Registers. These are located at 0x1000 * slot-number */ -/* plus the indicated address. I.E. 0x4000-0x4009 for slot 4. 
*/ -/* */ -/************************************************************************/ - -#define ES4H_MANUFmsb 0x00 /* Read-only */ -#define ES4H_MANUFlsb 0x01 /* Read-only */ -# define ES4H_MANUF_CODE 0x1049 /* = "DBI" */ - -#define ES4H_PRODUCT 0x02 /* Read-only */ -# define ES4H_PRODUCT_CODE 0x0A -# define EPC_PRODUCT_CODE 0x03 - -#define ES4H_REVISION 0x03 /* Read-only */ -# define ES4H_REVISION_CODE 0x01 - -#define ES4H_EC 0x04 /* EISA Control */ -# define ES4H_EC_RESET 0x04 /* WO, EISA reset */ -# define ES4H_EC_ENABLE 0x01 /* RW, EISA enable - set to */ - /* 1 before memory enable */ -#define ES4H_PC 0x05 /* Processor Control */ -# define ES4H_PC_RESET 0x04 /* RW, 3051 reset */ -# define ES4H_PC_INT 0x08 /* WO, assert 3051 intr. 3 */ - -#define ES4H_MW 0x06 /* Memory Window select and enable */ -# define ES4H_MW_ENABLE 0x80 /* WO, enable memory */ -# define ES4H_MW_SELECT_MASK 0x1f /* WO, 32k window selected */ - -#define ES4H_IS 0x07 /* Interrupt, addr select */ -# define ES4H_IS_INTMASK 0x07 /* WO, interrupt select */ -# define ES4H_IS_INTOFF 0x00 /* No IRQ */ -# define ES4H_IS_INT3 0x03 /* IRQ 3 */ -# define ES4H_IS_INT5 0x02 /* IRQ 5 */ -# define ES4H_IS_INT7 0x01 /* IRQ 7 */ -# define ES4H_IS_INT10 0x04 /* IRQ 10 */ -# define ES4H_IS_INT11 0x05 /* IRQ 11 */ -# define ES4H_IS_INT12 0x06 /* IRQ 12 */ -# define ES4H_IS_INT15 0x07 /* IRQ 15 */ -# define ES4H_IS_INTACK 0x10 /* WO, interrupt ack */ -# define ES4H_IS_INTPEND 0x10 /* RO, interrupt pending */ -# define ES4H_IS_LINEAR 0x40 /* WO, no memory windowing */ -# define ES4H_IS_AS15 0x80 /* RW, address select bit 15 */ - -#define ES4H_AS_23_16 0x08 /* Address select bits 23-16 */ -#define ES4H_AS_31_24 0x09 /* Address select bits 31-24 */ - -#define ES4H_IO_MAX 0x09 /* Size of I/O space */ - -/* - * PCI - */ -#define SE6_RESET PLX_USEROUT - -/************************************************************************/ -/* */ -/* 3051 Memory Map */ -/* */ -/* Note: 3051 has 4K I-cache, 2K D-cache. 1 cycle is 50 nsec. */ -/* */ -/************************************************************************/ -#define SE4_NPORTS 4 /* # of ethernet ports */ -#define SE6_NPORTS 6 /* # of ethernet ports */ -#define SE_NPORTS 6 /* Max # of ethernet ports */ - -#define ES4H_RAM_BASE 0x83000000 /* Base address of RAM */ -#define ES4H_RAM_SIZE 0x00200000 /* Size of RAM (2MB) */ -#define ES4H_RAM_INTBASE 0x83800000 /* Base of int-on-write RAM */ - /* a.k.a. PKT RAM */ - - /* Ethernet controllers */ - /* See: i82596.h */ -#define ES4H_ETHER0_PORT 0xA2000000 -#define ES4H_ETHER0_CMD 0xA2000100 -#define ES4H_ETHER1_PORT 0xA2000200 -#define ES4H_ETHER1_CMD 0xA2000300 -#define ES4H_ETHER2_PORT 0xA2000400 -#define ES4H_ETHER2_CMD 0xA2000500 -#define ES4H_ETHER3_PORT 0xA2000600 -#define ES4H_ETHER3_CMD 0xA2000700 -#define ES4H_ETHER4_PORT 0xA2000800 /* RS SE-6 only */ -#define ES4H_ETHER4_CMD 0xA2000900 /* RS SE-6 only */ -#define ES4H_ETHER5_PORT 0xA2000A00 /* RS SE-6 only */ -#define ES4H_ETHER5_CMD 0xA2000B00 /* RS SE-6 only */ - -#define ES4H_I8254 0xA2040000 /* 82C54 timers */ - /* See: i8254.h */ - -#define SE4_I8254_HZ (23000000/4) /* EISA clock input freq. */ -#define SE4_IDT_HZ (46000000) /* EISA CPU freq. */ -#define SE6_I8254_HZ (20000000/4) /* PCI clock input freq. */ -#define SE6_IDT_HZ (50000000) /* PCI CPU freq. */ -#define ES4H_I8254_HZ (23000000/4) /* EISA clock input freq. 
*/ - -#define ES4H_GPP 0xA2050000 /* General purpose port */ - /* - * SE-4 (EISA) GPP bits - */ -# define ES4H_GPP_C0_100 0x0001 /* WO, Chan 0: 100 ohm TP */ -# define ES4H_GPP_C0_SQE 0x0002 /* WO, Chan 0: normal squelch */ -# define ES4H_GPP_C1_100 0x0004 /* WO, Chan 1: 100 ohm TP */ -# define ES4H_GPP_C1_SQE 0x0008 /* WO, Chan 1: normal squelch */ -# define ES4H_GPP_C2_100 0x0010 /* WO, Chan 2: 100 ohm TP */ -# define ES4H_GPP_C2_SQE 0x0020 /* WO, Chan 2: normal squelch */ -# define ES4H_GPP_C3_100 0x0040 /* WO, Chan 3: 100 ohm TP */ -# define ES4H_GPP_C3_SQE 0x0080 /* WO, Chan 3: normal squelch */ -# define ES4H_GPP_SQE 0x00AA /* WO, All: normal squelch */ -# define ES4H_GPP_100 0x0055 /* WO, All: 100 ohm TP */ -# define ES4H_GPP_HOSTINT 0x0100 /* RO, cause intr. to host */ - /* Hold high > 250 nsec */ -# define SE4_GPP_EED 0x0200 /* RW, EEPROM data bit */ -# define SE4_GPP_EECS 0x0400 /* RW, EEPROM chip select */ -# define SE4_GPP_EECK 0x0800 /* RW, EEPROM clock */ - - /* - * SE-6 (PCI) GPP bits - */ -# define SE6_GPP_EED 0x0001 /* RW, EEPROM data bit */ -# define SE6_GPP_EECS 0x0002 /* RW, EEPROM chip select */ -# define SE6_GPP_EECK 0x0004 /* RW, EEPROM clock */ -# define SE6_GPP_LINK 0x00fc /* R, Link status LEDs */ - -#define ES4H_INTVEC 0xA2060000 /* RO: Interrupt Vector */ -# define ES4H_IV_DMA0 0x01 /* Chan 0 DMA interrupt */ -# define ES4H_IV_PKT0 0x02 /* Chan 0 PKT interrupt */ -# define ES4H_IV_DMA1 0x04 /* Chan 1 DMA interrupt */ -# define ES4H_IV_PKT1 0x08 /* Chan 1 PKT interrupt */ -# define ES4H_IV_DMA2 0x10 /* Chan 2 DMA interrupt */ -# define ES4H_IV_PKT2 0x20 /* Chan 2 PKT interrupt */ -# define ES4H_IV_DMA3 0x40 /* Chan 3 DMA interrupt */ -# define ES4H_IV_PKT3 0x80 /* Chan 3 PKT interrupt */ - -#define ES4H_INTACK 0xA2060000 /* WO: Interrupt Ack */ -# define ES4H_INTACK_8254 0x01 /* Real Time Clock (int 0) */ -# define ES4H_INTACK_HOST 0x02 /* Host (int 1) */ -# define ES4H_INTACK_PKT0 0x04 /* Chan 0 Pkt (int 2) */ -# define ES4H_INTACK_PKT1 0x08 /* Chan 1 Pkt (int 3) */ -# define ES4H_INTACK_PKT2 0x10 /* Chan 2 Pkt (int 4) */ -# define ES4H_INTACK_PKT3 0x20 /* Chan 3 Pkt (int 5) */ - -#define SE6_PLX 0xA2070000 /* PLX 9060, SE-6 (PCI) only */ - /* see plx9060.h */ - -#define SE6_PCI_VENDOR_ID 0x114F /* Digi PCI vendor ID */ -#define SE6_PCI_DEVICE_ID 0x0003 /* RS SE-6 device ID */ -#define SE6_PCI_ID ((SE6_PCI_DEVICE_ID<<16) | SE6_PCI_VENDOR_ID) - -/* - * IDT Interrupts - */ -#define ES4H_INT_8254 IDT_INT0 -#define ES4H_INT_HOST IDT_INT1 -#define ES4H_INT_ETHER0 IDT_INT2 -#define ES4H_INT_ETHER1 IDT_INT3 -#define ES4H_INT_ETHER2 IDT_INT4 -#define ES4H_INT_ETHER3 IDT_INT5 - -/* - * Because there are differences between the SE-4 and the SE-6, - * we assume that the following globals will be set up at init - * time in main.c to containt the appropriate constants from above - */ -extern ushort Gpp; /* Softcopy of GPP register */ -extern ushort EEck; /* Clock bit */ -extern ushort EEcs; /* CS bit */ -extern ushort EEd; /* Data bit */ -extern ulong I8254_Hz; /* i8254 input frequency */ -extern ulong IDT_Hz; /* IDT CPU frequency */ -extern int Nports; /* Number of ethernet controllers */ -extern int Nchan; /* Nports+1 */ diff --git a/drivers/net/dgrs_ether.h b/drivers/net/dgrs_ether.h deleted file mode 100644 index 7539b596bff8..000000000000 --- a/drivers/net/dgrs_ether.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * A filtering function. There are two filters/port. Filter "0" - * is the input filter, and filter "1" is the output filter. 
- */ -typedef int (FILTER_FUNC)(uchar *pktp, int pktlen, ulong *scratch, int port); -#define NFILTERS 2 - -/* - * The per port structure - */ -typedef struct -{ - int chan; /* Channel number (0-3) */ - ulong portaddr; /* address of 596 port register */ - volatile ulong *ca; /* address of 596 chan attention */ - ulong intmask; /* Interrupt mask for this port */ - ulong intack; /* Ack bit for this port */ - - uchar ethaddr[6]; /* Ethernet address of this port */ - int is_promisc; /* Port is promiscuous */ - - int debug; /* Debugging turned on */ - - I596_ISCP *iscpp; /* Uncached ISCP pointer */ - I596_SCP *scpp; /* Uncached SCP pointer */ - I596_SCB *scbp; /* Uncached SCB pointer */ - - I596_ISCP iscp; - I596_SCB scb; - - /* Command Queue */ - I596_CB *cb0; - I596_CB *cbN; - I596_CB *cb_head; - I596_CB *cb_tail; - - /* Receive Queue */ - I596_RFD *rfd0; - I596_RFD *rfdN; - I596_RFD *rfd_head; - I596_RFD *rfd_tail; - - /* Receive Buffers */ - I596_RBD *rbd0; - I596_RBD *rbdN; - I596_RBD *rbd_head; - I596_RBD *rbd_tail; - int buf_size; /* Size of an RBD buffer */ - int buf_cnt; /* Total RBD's allocated */ - - /* Rx Statistics */ - ulong cnt_rx_cnt; /* Total packets rcvd, good and bad */ - ulong cnt_rx_good; /* Total good packets rcvd */ - ulong cnt_rx_bad; /* Total of all bad packets rcvd */ - /* Subtotals can be gotten from SCB */ - ulong cnt_rx_nores; /* No resources */ - ulong cnt_rx_bytes; /* Total bytes rcvd */ - - /* Tx Statistics */ - ulong cnt_tx_queued; - ulong cnt_tx_done; - ulong cnt_tx_freed; - ulong cnt_tx_nores; /* No resources */ - - ulong cnt_tx_bad; - ulong cnt_tx_err_late; - ulong cnt_tx_err_nocrs; - ulong cnt_tx_err_nocts; - ulong cnt_tx_err_under; - ulong cnt_tx_err_maxcol; - ulong cnt_tx_collisions; - - /* Special stuff for host */ -# define rfd_freed cnt_rx_cnt - ulong rbd_freed; - int host_timer; - - /* Added after first beta */ - ulong cnt_tx_races; /* Counts race conditions */ - int spanstate; - ulong cnt_st_tx; /* send span tree pkts */ - ulong cnt_st_fail_tx; /* Failures to send span tree pkts */ - ulong cnt_st_fail_rbd;/* Failures to send span tree pkts */ - ulong cnt_st_rx; /* rcv span tree pkts */ - ulong cnt_st_rx_bad; /* bogus st packets rcvd */ - ulong cnt_rx_fwd; /* Rcvd packets that were forwarded */ - - ulong cnt_rx_mcast; /* Multicast pkts received */ - ulong cnt_tx_mcast; /* Multicast pkts transmitted */ - ulong cnt_tx_bytes; /* Bytes transmitted */ - - /* - * Packet filtering - * Filter 0: input filter - * Filter 1: output filter - */ - - ulong *filter_space[NFILTERS]; - FILTER_FUNC *filter_func[NFILTERS]; - ulong filter_cnt[NFILTERS]; - ulong filter_len[NFILTERS]; - - ulong pad[ (512-300) / 4]; -} PORT; - -/* - * Port[0] is host interface - * Port[1..SE_NPORTS] are external 10 Base T ports. Fewer may be in - * use, depending on whether this is an SE-4 or - * an SE-6. - * Port[SE_NPORTS] Pseudo-port for Spanning tree and SNMP - */ -extern PORT Port[1+SE_NPORTS+1]; - -extern int Nports; /* Number of genuine ethernet controllers */ -extern int Nchan; /* ... 
plus one for host interface */ - -extern int FirstChan; /* 0 or 1, depedning on whether host is used */ -extern int NumChan; /* 4 or 5 */ - -/* - * A few globals - */ -extern int IsPromisc; -extern int MultiNicMode; - -/* - * Functions - */ -extern void eth_xmit_spew_on(PORT *p, int cnt); -extern void eth_xmit_spew_off(PORT *p); - -extern I596_RBD *alloc_rbds(PORT *p, int num); - -extern I596_CB * eth_cb_alloc(PORT *p); diff --git a/drivers/net/dgrs_firmware.c b/drivers/net/dgrs_firmware.c deleted file mode 100644 index 8c20d4c99937..000000000000 --- a/drivers/net/dgrs_firmware.c +++ /dev/null @@ -1,9966 +0,0 @@ -static const int dgrs_firmnum = 550; -static char dgrs_firmver[] = "$Version$"; -static char dgrs_firmdate[] = "11/16/96 03:45:15"; -static unsigned char dgrs_code[] __initdata = { - 213,5,192,8,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,64,40,35,41, - 101,115,52,104,46,98,105,110,32,32,32,32, - 32,32,49,46,48,32,48,48,47,48,48,47, - 57,52,0,64,40,35,41,67,111,112,121,114, - 105,103,104,116,32,49,57,57,53,44,32,68, - 105,103,105,32,73,110,116,101,114,110,97,116, - 105,111,110,97,108,46,32,32,65,108,108,32, - 82,105,103,104,116,115,32,82,101,115,101,114, - 118,101,100,46,0,0,0,0,97,5,192,8, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,255,255,0,16,0,0,0,0, - 0,0,0,0,8,0,224,3,0,0,0,0, - 148,255,189,39,16,0,161,175,20,0,162,175, - 24,0,163,175,28,0,164,175,32,0,165,175, - 36,0,166,175,40,0,167,175,44,0,168,175, - 48,0,169,175,52,0,170,175,56,0,171,175, - 60,0,172,175,64,0,173,175,68,0,174,175, - 72,0,175,175,76,0,184,175,80,0,185,175, - 88,0,190,175,92,0,191,175,0,112,8,64, - 18,72,0,0,16,80,0,0,0,96,11,64, - 84,0,168,175,96,0,169,175,100,0,170,175, - 104,0,171,175,33,56,0,1,0,131,24,60, - 0,1,24,39,0,0,8,143,0,0,0,0, - 1,0,8,33,0,0,8,175,0,104,5,64, - 0,96,6,64,124,0,168,48,212,255,0,21, - 0,0,0,0,36,64,166,0,0,255,8,49, - 27,0,0,17,0,0,0,0,130,65,8,0, - 2,131,9,60,33,72,40,1,0,220,41,141, - 66,64,8,0,2,131,10,60,33,80,72,1, - 0,224,74,141,0,0,0,0,38,80,70,1, - 1,255,74,49,33,40,192,0,38,48,202,0, - 0,96,134,64,66,64,8,0,2,131,4,60, - 33,32,136,0,0,226,132,144,9,248,32,1, - 
0,0,0,0,104,0,166,143,0,0,0,0, - 0,96,134,64,0,104,5,64,227,255,0,16, - 0,0,0,0,104,0,168,143,96,0,169,143, - 100,0,170,143,0,0,0,0,0,96,136,64, - 19,0,32,1,17,0,64,1,20,0,162,143, - 24,0,163,143,28,0,164,143,32,0,165,143, - 36,0,166,143,40,0,167,143,44,0,168,143, - 48,0,169,143,52,0,170,143,56,0,171,143, - 60,0,172,143,64,0,173,143,68,0,174,143, - 72,0,175,143,76,0,184,143,80,0,185,143, - 88,0,190,143,92,0,191,143,0,0,0,0, - 84,0,186,143,16,0,161,143,108,0,189,39, - 8,0,64,3,16,0,0,66,0,96,26,64, - 0,0,0,0,255,255,27,60,254,0,123,55, - 0,0,0,0,36,208,91,3,0,0,0,0, - 0,96,154,64,0,0,0,0,0,112,26,64, - 0,0,0,0,16,0,0,66,0,0,0,0, - 8,0,64,3,0,0,0,0,255,255,8,36, - 133,255,0,17,0,0,0,0,1,0,8,37, - 130,255,0,21,0,0,0,0,255,255,8,36, - 33,8,0,1,126,255,40,20,0,0,0,0, - 1,0,33,36,123,255,32,20,0,0,0,0, - 255,255,2,36,120,255,72,20,0,0,0,0, - 1,0,66,36,117,255,64,20,0,0,0,0, - 255,255,3,36,114,255,104,20,0,0,0,0, - 1,0,99,36,111,255,96,20,0,0,0,0, - 255,255,4,36,108,255,136,20,0,0,0,0, - 1,0,132,36,105,255,128,20,0,0,0,0, - 255,255,5,36,102,255,168,20,0,0,0,0, - 1,0,165,36,99,255,160,20,0,0,0,0, - 255,255,6,36,96,255,200,20,0,0,0,0, - 1,0,198,36,93,255,192,20,0,0,0,0, - 255,255,7,36,90,255,232,20,0,0,0,0, - 1,0,231,36,87,255,224,20,0,0,0,0, - 255,255,9,36,84,255,40,21,0,0,0,0, - 1,0,41,37,81,255,32,21,0,0,0,0, - 255,255,10,36,78,255,72,21,0,0,0,0, - 1,0,74,37,75,255,64,21,0,0,0,0, - 255,255,11,36,72,255,104,21,0,0,0,0, - 1,0,107,37,69,255,96,21,0,0,0,0, - 255,255,12,36,66,255,136,21,0,0,0,0, - 1,0,140,37,63,255,128,21,0,0,0,0, - 255,255,13,36,60,255,168,21,0,0,0,0, - 1,0,173,37,57,255,160,21,0,0,0,0, - 255,255,14,36,54,255,200,21,0,0,0,0, - 1,0,206,37,51,255,192,21,0,0,0,0, - 255,255,15,36,48,255,232,21,0,0,0,0, - 1,0,239,37,45,255,224,21,0,0,0,0, - 255,255,24,36,42,255,8,23,0,0,0,0, - 1,0,24,39,39,255,0,23,0,0,0,0, - 255,255,16,36,36,255,8,22,0,0,0,0, - 1,0,16,38,33,255,0,22,0,0,0,0, - 255,255,17,36,30,255,40,22,0,0,0,0, - 1,0,49,38,27,255,32,22,0,0,0,0, - 255,255,18,36,24,255,72,22,0,0,0,0, - 1,0,82,38,21,255,64,22,0,0,0,0, - 255,255,19,36,18,255,104,22,0,0,0,0, - 1,0,115,38,15,255,96,22,0,0,0,0, - 255,255,20,36,12,255,136,22,0,0,0,0, - 1,0,148,38,9,255,128,22,0,0,0,0, - 255,255,21,36,6,255,168,22,0,0,0,0, - 1,0,181,38,3,255,160,22,0,0,0,0, - 255,255,22,36,0,255,200,22,0,0,0,0, - 1,0,214,38,253,254,192,22,0,0,0,0, - 255,255,23,36,250,254,232,22,0,0,0,0, - 1,0,247,38,247,254,224,22,0,0,0,0, - 255,255,26,36,244,254,72,23,0,0,0,0, - 1,0,90,39,241,254,64,23,0,0,0,0, - 255,255,27,36,238,254,104,23,0,0,0,0, - 1,0,123,39,235,254,96,23,0,0,0,0, - 255,255,28,36,232,254,136,23,0,0,0,0, - 1,0,156,39,229,254,128,23,0,0,0,0, - 255,255,29,36,226,254,168,23,0,0,0,0, - 1,0,189,39,223,254,160,23,0,0,0,0, - 255,255,30,36,220,254,200,23,0,0,0,0, - 1,0,222,39,217,254,192,23,0,0,0,0, - 255,255,31,36,214,254,232,23,0,0,0,0, - 1,0,255,39,211,254,224,23,0,0,0,0, - 0,131,24,60,0,1,24,39,0,32,1,60, - 37,192,1,3,0,96,8,64,0,0,0,0, - 1,0,1,60,37,64,1,1,0,96,136,64, - 33,16,0,0,165,165,3,60,165,165,99,52, - 0,128,1,60,0,0,35,172,0,128,9,60, - 0,0,41,141,0,0,0,0,0,96,10,64, - 0,0,0,0,8,0,1,60,36,80,65,1, - 29,0,64,21,0,0,0,0,27,0,105,20, - 0,0,0,0,0,1,2,36,0,128,1,60, - 33,8,34,0,0,0,32,172,64,16,2,0, - 1,0,1,60,1,0,33,52,43,8,65,0, - 248,255,32,20,0,0,0,0,255,255,3,36, - 0,128,1,60,0,0,35,172,0,1,2,36, - 0,128,3,60,33,24,98,0,0,0,99,140, - 0,0,0,0,7,0,96,20,0,0,0,0, - 64,16,2,0,1,0,1,60,1,0,33,52, - 43,8,65,0,245,255,32,20,0,0,0,0, - 0,96,128,64,0,0,0,0,84,0,2,175, - 0,96,8,64,0,0,0,0,3,0,1,60, - 
-	[large embedded firmware byte array (comma-separated byte values, MIPS microcode) removed from the driver; data listing elided]
0,155,66,36,98,31,192,12,24,0,162,175, - 40,0,191,143,36,0,177,143,32,0,176,143, - 8,0,224,3,48,0,189,39,56,254,189,39, - 160,1,176,175,33,128,192,0,48,1,164,175, - 33,32,224,0,64,18,5,0,2,131,3,60, - 192,246,99,36,33,16,67,0,56,1,162,175, - 64,18,16,0,33,16,67,0,40,0,168,39, - 196,1,191,175,192,1,190,175,188,1,183,175, - 184,1,182,175,180,1,181,175,176,1,180,175, - 172,1,179,175,168,1,178,175,164,1,177,175, - 64,1,162,175,12,0,160,24,96,1,168,175, - 32,133,131,143,0,0,0,0,42,16,163,0, - 19,1,64,16,1,0,2,36,5,0,0,26, - 42,16,3,2,15,1,64,16,1,0,2,36, - 3,0,176,20,33,40,0,0,86,31,192,8, - 1,0,2,36,212,68,192,12,33,48,0,0, - 6,0,65,4,104,1,162,175,33,72,64,0, - 35,72,9,0,104,1,169,175,87,30,192,8, - 112,1,160,175,1,0,8,36,112,1,168,175, - 1,0,4,36,133,29,192,12,33,40,0,0, - 237,29,192,12,33,32,0,2,24,0,169,39, - 56,1,168,143,255,0,2,36,80,1,169,175, - 108,0,8,141,43,1,163,39,72,1,168,175, - 0,0,98,160,255,255,66,36,253,255,65,4, - 255,255,99,36,64,1,169,143,0,0,0,0, - 120,0,41,141,64,1,168,143,128,1,169,175, - 124,0,8,141,64,1,169,143,136,1,168,175, - 44,0,34,141,0,0,0,0,12,0,64,172, - 44,0,34,141,0,0,0,0,16,0,64,172, - 44,0,34,141,120,1,160,175,32,0,64,172, - 44,0,34,141,88,1,160,175,24,0,64,172, - 48,1,168,143,0,0,0,0,168,0,0,25, - 40,0,169,39,144,1,169,175,88,1,168,143, - 0,0,0,0,255,0,2,49,4,0,86,36, - 60,0,194,42,2,0,64,16,0,0,0,0, - 60,0,22,36,104,1,169,143,0,0,0,0, - 2,0,32,17,0,0,0,0,104,1,182,143, - 56,1,164,143,72,1,168,143,12,0,2,36, - 2,0,2,165,80,1,169,143,0,128,194,54, - 8,0,9,173,12,0,0,173,0,0,34,173, - 255,255,8,36,4,0,40,173,144,1,168,143, - 0,0,0,0,8,0,40,173,88,1,168,143, - 96,1,169,143,208,7,5,36,98,31,192,12, - 0,0,40,173,0,128,5,52,0,128,6,52, - 128,1,164,143,0,0,0,0,129,67,192,12, - 2,0,7,36,13,0,64,20,0,0,0,0, - 88,1,165,143,2,131,4,60,15,63,192,12, - 64,141,132,36,120,1,169,143,0,0,0,0, - 1,0,41,37,20,0,34,41,117,0,64,16, - 120,1,169,175,32,31,192,8,0,0,0,0, - 128,1,168,143,64,1,169,143,8,0,2,141, - 255,255,8,36,136,0,53,141,0,0,0,0, - 50,0,72,16,33,184,0,0,1,0,4,36, - 4,0,18,36,4,0,3,36,0,0,190,142, - 8,0,166,142,112,1,169,143,255,63,212,51, - 30,0,32,17,33,184,244,2,42,16,116,0, - 27,0,64,16,33,152,96,0,144,1,168,143, - 0,0,0,0,33,136,72,2,33,128,102,0, - 15,0,128,16,0,0,0,0,0,0,2,146, - 0,0,35,146,0,0,0,0,10,0,67,16, - 33,48,192,2,2,131,4,60,92,141,132,36, - 88,1,165,143,16,0,163,175,0,0,2,146, - 33,56,64,2,15,63,192,12,20,0,162,175, - 33,32,0,0,1,0,115,38,1,0,16,38, - 1,0,49,38,42,16,116,2,235,255,64,20, - 1,0,82,38,33,24,0,0,4,0,181,142, - 0,128,194,51,217,255,64,16,0,0,0,0, - 128,1,169,143,0,0,0,0,8,0,34,141, - 0,0,0,0,25,0,128,16,18,0,87,164, - 9,0,246,18,33,48,192,2,2,131,4,60, - 140,141,132,36,88,1,165,143,0,0,0,0, - 15,63,192,12,33,56,224,2,5,31,192,8, - 0,0,0,0,64,1,168,143,0,0,0,0, - 136,0,2,141,96,1,169,143,8,0,70,140, - 0,0,34,141,0,0,198,140,0,0,0,0, - 7,0,194,16,0,0,0,0,88,1,165,143, - 2,131,4,60,15,63,192,12,184,141,132,36, - 64,1,168,143,0,0,0,0,136,0,4,141, - 152,21,192,12,0,0,0,0,64,1,169,143, - 0,0,0,0,136,0,53,173,128,1,168,143, - 8,128,2,52,0,0,0,165,2,0,2,165, - 8,0,0,173,12,0,0,165,136,1,169,143, - 8,0,2,36,2,0,34,165,4,0,40,141, - 128,1,169,143,136,1,168,175,4,0,41,141, - 64,1,168,143,128,1,169,175,120,0,9,173, - 136,1,169,143,0,0,0,0,124,0,9,173, - 88,1,168,143,48,1,169,143,1,0,8,37, - 42,16,9,1,91,255,64,20,88,1,168,175, - 64,1,168,143,0,0,0,0,44,0,3,141, - 0,0,0,0,12,0,98,140,0,0,0,0, - 5,0,64,16,0,0,0,0,12,0,101,140, - 2,131,4,60,15,63,192,12,212,141,132,36, - 64,1,169,143,0,0,0,0,44,0,35,141, - 0,0,0,0,16,0,98,140,0,0,0,0, - 
5,0,64,16,0,0,0,0,16,0,101,140, - 2,131,4,60,15,63,192,12,240,141,132,36, - 64,1,168,143,0,0,0,0,44,0,3,141, - 0,0,0,0,32,0,98,140,0,0,0,0, - 5,0,64,16,0,0,0,0,32,0,101,140, - 2,131,4,60,15,63,192,12,16,142,132,36, - 64,1,169,143,0,0,0,0,44,0,35,141, - 0,0,0,0,24,0,98,140,0,0,0,0, - 5,0,64,16,0,0,0,0,24,0,101,140, - 2,131,4,60,15,63,192,12,48,142,132,36, - 196,1,191,143,192,1,190,143,188,1,183,143, - 184,1,182,143,180,1,181,143,176,1,180,143, - 172,1,179,143,168,1,178,143,164,1,177,143, - 160,1,176,143,8,0,224,3,200,1,189,39, - 224,255,189,39,16,0,176,175,33,128,128,0, - 24,0,191,175,20,0,177,175,44,0,4,142, - 0,0,0,0,0,0,130,148,0,0,0,0, - 0,32,66,48,7,0,64,20,33,136,160,0, - 0,0,133,148,2,131,4,60,15,63,192,12, - 80,142,132,36,156,31,192,8,3,0,2,36, - 2,0,132,36,33,40,0,0,33,48,0,0, - 129,67,192,12,33,56,32,2,13,0,64,16, - 33,40,0,0,44,0,3,142,0,33,2,36, - 2,0,98,164,8,0,2,142,33,48,0,0, - 0,0,64,172,44,0,4,142,33,56,32,2, - 129,67,192,12,2,0,132,36,9,0,64,20, - 0,32,5,36,44,0,2,142,0,0,0,0, - 2,0,69,148,2,131,4,60,15,63,192,12, - 108,142,132,36,156,31,192,8,1,0,2,36, - 44,0,4,142,0,32,6,36,129,67,192,12, - 33,56,32,2,8,0,64,20,33,16,0,0, - 44,0,2,142,0,0,0,0,0,0,69,148, - 2,131,4,60,15,63,192,12,132,142,132,36, - 2,0,2,36,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 224,255,189,39,24,0,178,175,33,144,160,0, - 28,0,191,175,20,0,177,175,7,0,128,4, - 16,0,176,175,24,133,130,143,0,0,0,0, - 255,255,66,36,42,16,68,0,4,0,64,16, - 33,136,128,0,24,133,130,143,33,32,0,0, - 255,255,81,36,33,128,128,0,42,16,48,2, - 7,0,64,20,33,32,0,2,193,31,192,12, - 33,40,64,2,1,0,16,38,42,16,48,2, - 251,255,64,16,33,32,0,2,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,8,0,224,3, - 0,0,0,0,232,255,189,39,16,0,191,175, - 236,63,192,12,1,16,4,36,85,0,2,36, - 131,131,1,60,128,18,34,160,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 216,255,189,39,28,0,177,175,33,136,128,0, - 32,0,178,175,33,144,160,0,212,128,132,39, - 36,0,191,175,15,63,192,12,24,0,176,175, - 9,0,64,26,33,128,0,0,0,0,37,146, - 1,0,49,38,220,128,132,39,15,63,192,12, - 1,0,16,38,42,16,18,2,249,255,64,20, - 0,0,0,0,228,128,132,39,15,63,192,12, - 0,0,0,0,36,0,191,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,48,255,189,39,33,56,128,0, - 192,0,178,175,33,144,160,0,200,0,180,175, - 33,160,192,0,255,255,226,36,6,0,66,44, - 204,0,191,175,196,0,179,175,188,0,177,175, - 2,0,64,20,184,0,176,175,1,0,7,36, - 2,0,64,30,0,0,0,0,1,0,18,36, - 2,0,128,30,64,18,7,0,60,0,20,36, - 2,131,3,60,192,246,99,36,33,136,67,0, - 4,0,34,142,0,0,0,0,4,0,64,20, - 33,152,64,2,1,0,4,36,133,29,192,12, - 33,40,224,0,255,31,4,60,255,255,132,52, - 0,128,133,54,120,0,162,39,36,16,68,0, - 0,160,3,60,37,16,67,0,104,0,165,175, - 108,0,160,175,112,0,162,175,12,0,2,36, - 80,0,160,167,82,0,162,167,80,0,162,39, - 36,16,68,0,37,128,67,0,104,0,162,39, - 36,16,68,0,232,128,132,143,37,16,67,0, - 84,0,176,175,88,0,162,175,92,0,160,175, - 5,0,128,16,4,0,2,36,82,0,162,167, - 255,255,2,36,92,0,165,175,88,0,162,175, - 44,0,34,142,0,0,0,0,0,0,66,148, - 0,0,0,0,0,32,66,48,7,0,64,20, - 33,40,0,0,255,255,4,36,2,131,5,60, - 184,142,165,36,188,7,192,12,208,1,6,36, - 33,40,0,0,44,0,34,142,33,48,0,0, - 4,0,80,172,44,0,36,142,208,7,7,36, - 129,67,192,12,2,0,132,36,12,0,64,20, - 0,0,0,0,44,0,34,142,0,0,0,0, - 2,0,69,148,2,131,4,60,15,63,192,12, - 108,142,132,36,255,255,4,36,2,131,5,60, - 184,142,165,36,188,7,192,12,216,1,6,36, - 34,11,192,12,1,0,4,36,0,163,16,60, - 4,1,16,142,0,163,2,60,4,1,66,140, - 0,0,0,0,252,255,2,18,0,33,3,36, 
- 44,0,34,142,0,0,0,0,2,0,67,164, - 8,0,34,142,0,0,0,0,0,0,64,172, - 0,163,16,60,4,1,16,142,44,0,36,142, - 0,0,0,0,4,0,130,140,0,0,0,0, - 0,0,66,148,0,0,0,0,0,128,66,48, - 10,0,64,20,0,0,0,0,44,0,35,142, - 0,0,0,0,4,0,98,140,0,0,0,0, - 0,0,66,148,0,0,0,0,0,128,66,48, - 250,255,64,16,0,0,0,0,255,255,115,38, - 19,0,96,18,33,40,64,2,44,0,35,142, - 0,0,0,0,4,0,98,140,0,0,0,0, - 0,0,66,148,0,0,0,0,0,128,66,48, - 229,255,64,16,0,0,0,0,4,0,98,140, - 0,0,0,0,0,0,66,148,0,0,0,0, - 0,128,66,48,250,255,64,20,0,0,0,0, - 89,32,192,8,0,0,0,0,2,131,4,60, - 200,142,132,36,33,48,128,2,0,163,3,60, - 4,1,99,140,0,128,2,52,82,0,162,167, - 35,128,112,0,15,63,192,12,33,56,0,2, - 19,0,0,18,64,41,18,0,35,40,178,0, - 128,40,5,0,33,40,178,0,192,40,5,0, - 26,0,176,0,2,0,0,22,0,0,0,0, - 13,0,7,0,255,255,1,36,4,0,1,22, - 0,128,1,60,2,0,161,20,0,0,0,0, - 13,0,6,0,18,40,0,0,236,128,132,39, - 15,63,192,12,0,0,0,0,204,0,191,143, - 200,0,180,143,196,0,179,143,192,0,178,143, - 188,0,177,143,184,0,176,143,8,0,224,3, - 208,0,189,39,224,255,189,39,20,0,177,175, - 33,136,128,0,24,0,191,175,180,10,192,12, - 16,0,176,175,34,11,192,12,1,0,4,36, - 16,133,132,143,0,163,16,60,4,1,16,142, - 193,63,192,12,0,0,0,0,0,163,2,60, - 4,1,66,140,0,0,0,0,35,40,80,0, - 73,252,162,36,99,0,66,44,4,0,64,16, - 0,0,0,0,2,131,4,60,196,32,192,8, - 0,143,132,36,5,0,160,20,0,0,0,0, - 2,131,4,60,36,143,132,36,196,32,192,8, - 33,40,0,0,2,131,4,60,76,143,132,36, - 15,63,192,12,1,0,16,36,3,0,48,18, - 0,0,0,0,34,11,192,12,33,32,0,0, - 0,129,144,175,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 200,255,189,39,32,0,178,175,33,144,128,0, - 33,48,64,2,44,0,181,175,1,131,21,60, - 60,252,181,38,33,56,160,2,40,0,180,175, - 2,131,20,60,144,143,148,38,36,0,179,175, - 0,163,19,60,120,1,115,142,0,163,3,60, - 120,1,99,140,32,131,2,60,48,0,191,175, - 28,0,177,175,24,0,176,175,16,0,180,175, - 33,32,96,2,35,136,67,0,84,64,192,12, - 33,40,32,2,3,0,64,18,33,128,64,0, - 10,0,0,22,0,0,0,0,16,0,180,175, - 33,32,96,2,33,40,32,2,33,48,64,2, - 244,63,192,12,33,56,160,2,33,128,2,2, - 5,0,0,18,33,40,96,2,2,131,4,60, - 168,143,132,36,252,32,192,8,33,40,96,2, - 2,131,4,60,204,143,132,36,15,63,192,12, - 33,48,177,0,48,0,191,143,44,0,181,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 56,0,189,39,0,163,1,60,232,5,36,172, - 0,163,1,60,8,0,224,3,236,5,37,172, - 28,129,132,175,8,0,224,3,0,0,0,0, - 16,129,132,175,8,0,224,3,0,0,0,0, - 15,0,132,48,20,129,132,175,8,0,224,3, - 0,0,0,0,24,129,132,175,8,0,224,3, - 0,0,0,0,32,129,132,175,8,0,224,3, - 0,0,0,0,33,72,128,0,33,80,160,0, - 33,88,192,0,7,162,4,60,48,1,132,52, - 7,162,8,60,0,1,8,53,20,129,130,143, - 24,129,131,143,128,48,2,0,28,129,130,143, - 3,0,197,52,2,0,96,16,0,0,130,172, - 67,0,197,52,16,129,130,143,0,0,0,0, - 2,0,64,16,33,24,160,0,0,1,99,52, - 36,129,130,143,0,0,0,0,2,0,64,16, - 0,0,0,0,0,4,99,52,32,129,130,143, - 0,0,3,173,3,0,64,16,7,162,5,60, - 0,0,2,173,7,162,5,60,4,1,165,52, - 7,162,6,60,8,1,198,52,255,0,2,60, - 255,255,66,52,7,162,3,60,12,1,99,52, - 7,162,4,60,16,1,132,52,36,16,66,1, - 0,0,169,172,0,0,194,172,43,16,7,0, - 192,16,2,0,0,0,107,172,8,0,224,3, - 0,0,130,172,7,162,3,60,40,1,99,52, - 3,0,2,36,0,163,1,60,20,1,32,172, - 8,0,224,3,0,0,98,172,232,255,189,39, - 16,0,191,175,33,24,0,0,7,162,6,60, - 40,1,198,52,15,0,4,60,63,66,132,52, - 0,0,197,140,0,0,0,0,16,0,162,48, - 7,0,64,20,1,0,99,36,42,16,131,0, - 249,255,64,16,0,0,0,0,2,131,4,60, - 122,33,192,8,240,143,132,36,36,129,130,143, - 0,0,0,0,3,0,64,20,33,24,0,0, - 125,33,192,8,33,16,0,0,1,0,5,36, - 
15,0,4,60,63,66,132,52,0,163,2,60, - 20,1,66,140,0,0,0,0,247,255,69,16, - 1,0,99,36,42,16,131,0,249,255,64,16, - 0,0,0,0,0,163,5,60,20,1,165,140, - 2,131,4,60,24,144,132,36,15,63,192,12, - 0,0,0,0,1,0,2,36,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 224,255,189,39,24,0,191,175,33,72,192,0, - 255,31,3,60,255,255,99,52,33,64,0,0, - 36,32,131,0,0,160,2,60,37,32,130,0, - 36,40,163,0,16,0,32,25,37,40,162,0, - 0,0,134,144,0,0,167,144,0,0,0,0, - 7,0,199,16,1,0,165,36,2,131,4,60, - 72,144,132,36,15,63,192,12,33,40,0,1, - 157,33,192,8,1,0,2,36,1,0,8,37, - 42,16,9,1,242,255,64,20,1,0,132,36, - 33,16,0,0,24,0,191,143,32,0,189,39, - 8,0,224,3,0,0,0,0,0,163,2,60, - 232,5,66,140,152,255,189,39,80,0,180,175, - 120,0,180,143,64,0,176,175,33,128,160,0, - 68,0,177,175,33,136,192,0,72,0,178,175, - 33,144,224,0,100,0,191,175,96,0,190,175, - 92,0,183,175,88,0,182,175,84,0,181,175, - 76,0,179,175,12,0,64,16,16,0,164,175, - 0,163,2,60,236,5,66,140,0,0,0,0, - 7,0,64,16,0,0,0,0,0,163,2,60, - 236,5,66,140,0,0,0,0,1,8,66,44, - 10,0,64,20,16,0,2,60,0,163,5,60, - 232,5,165,140,0,163,6,60,236,5,198,140, - 2,131,4,60,15,63,192,12,124,144,132,36, - 7,35,192,8,0,0,0,0,16,0,168,143, - 0,0,0,0,43,16,72,0,6,0,64,16, - 0,0,0,0,2,131,4,60,15,63,192,12, - 172,144,132,36,7,35,192,8,0,0,0,0, - 224,132,130,143,0,0,0,0,11,0,64,20, - 0,0,0,0,0,163,4,60,236,5,132,140, - 13,8,192,12,0,0,0,0,255,31,3,60, - 255,255,99,52,36,16,67,0,0,160,3,60, - 37,16,67,0,224,132,130,175,228,132,130,143, - 0,0,0,0,11,0,64,20,0,0,0,0, - 0,163,4,60,236,5,132,140,13,8,192,12, - 0,0,0,0,255,31,3,60,255,255,99,52, - 36,16,67,0,0,160,3,60,37,16,67,0, - 228,132,130,175,224,132,133,143,0,163,6,60, - 232,5,198,140,228,132,135,143,2,131,4,60, - 15,63,192,12,208,144,132,36,16,129,133,143, - 20,129,134,143,2,131,4,60,15,63,192,12, - 8,145,132,36,7,162,2,60,232,0,66,52, - 0,0,83,140,1,0,3,130,105,0,2,36, - 7,0,98,20,251,255,2,60,1,0,2,36, - 36,129,130,175,4,0,2,60,0,8,66,52, - 10,34,192,8,37,152,98,2,36,129,128,175, - 255,247,66,52,36,152,98,2,7,162,2,60, - 232,0,66,52,0,0,83,172,0,0,5,130, - 114,0,2,36,3,0,162,16,82,0,2,36, - 3,0,162,20,119,0,2,36,42,34,192,8, - 33,176,0,0,3,0,162,16,87,0,2,36, - 3,0,162,20,108,0,2,36,42,34,192,8, - 1,0,22,36,3,0,162,16,76,0,2,36, - 3,0,162,20,116,0,2,36,42,34,192,8, - 2,0,22,36,118,0,162,16,84,0,2,36, - 116,0,162,16,0,0,0,0,2,131,4,60, - 15,63,192,12,52,145,132,36,7,35,192,8, - 0,0,0,0,0,0,38,130,0,0,0,0, - 12,0,192,16,99,0,2,36,3,0,194,16, - 67,0,2,36,4,0,194,20,33,152,0,0, - 5,0,19,36,61,34,192,8,5,0,21,36, - 2,131,1,60,80,155,50,160,61,34,192,8, - 33,168,0,0,33,168,0,0,5,0,19,36, - 2,131,1,60,80,155,32,160,16,0,168,143, - 0,163,18,60,236,5,82,142,0,0,0,0, - 197,0,0,17,255,255,20,37,255,255,194,38, - 2,0,87,44,2,0,30,36,33,128,160,2, - 42,16,112,2,73,0,64,20,0,0,0,0, - 42,0,224,18,5,0,2,36,13,0,2,22, - 0,0,0,0,25,0,64,26,33,136,0,0, - 224,132,130,143,0,0,0,0,33,16,81,0, - 0,0,81,160,1,0,49,38,42,16,50,2, - 249,255,64,20,33,48,64,2,105,34,192,8, - 0,0,0,0,2,131,3,60,33,24,112,0, - 80,155,99,144,0,0,0,0,9,0,64,26, - 33,136,0,0,224,132,130,143,0,0,0,0, - 33,16,81,0,1,0,49,38,0,0,67,160, - 42,16,50,2,249,255,64,20,0,0,0,0, - 33,48,64,2,0,163,4,60,232,5,132,140, - 224,132,133,143,0,0,0,0,28,33,192,12, - 1,0,7,36,76,33,192,12,0,0,0,0, - 83,33,192,12,0,0,0,0,147,0,64,20, - 0,0,0,0,3,0,192,18,33,48,64,2, - 22,0,222,22,0,0,0,0,0,163,4,60, - 232,5,132,140,228,132,133,143,0,0,0,0, - 28,33,192,12,33,56,0,0,76,33,192,12, - 0,0,0,0,83,33,192,12,0,0,0,0, - 131,0,64,20,0,0,0,0,8,0,222,22, - 0,0,0,0,224,132,132,143,228,132,133,143, - 
0,0,0,0,129,33,192,12,33,48,64,2, - 122,0,64,20,0,0,0,0,1,0,16,38, - 42,16,112,2,185,255,64,16,0,0,0,0, - 255,255,148,38,255,255,2,36,178,255,130,22, - 33,128,160,2,7,35,192,8,0,0,0,0, - 180,10,192,12,0,0,0,0,34,11,192,12, - 1,0,4,36,0,0,34,130,0,0,0,0, - 6,0,64,16,33,184,0,0,24,0,160,175, - 2,131,1,60,88,155,52,164,171,34,192,8, - 33,176,0,0,6,0,23,36,4,0,2,36, - 24,0,160,175,2,131,1,60,88,155,34,164, - 33,176,0,0,0,8,30,36,24,0,177,143, - 0,0,0,0,42,16,241,2,83,0,64,20, - 64,16,17,0,2,131,8,60,88,155,8,37, - 33,168,72,0,0,0,178,150,0,0,0,0, - 26,0,210,3,2,0,64,22,0,0,0,0, - 13,0,7,0,255,255,1,36,4,0,65,22, - 0,128,1,60,2,0,193,23,0,0,0,0, - 13,0,6,0,18,16,0,0,16,0,168,143, - 0,0,0,0,24,0,72,0,33,56,192,2, - 33,128,0,0,0,163,19,60,4,1,115,142, - 0,163,4,60,232,5,132,140,224,132,133,143, - 18,160,0,0,0,0,0,0,0,0,0,0, - 28,33,192,12,33,48,64,2,10,0,128,26, - 0,0,0,0,76,33,192,12,0,0,0,0, - 83,33,192,12,0,0,0,0,48,0,64,20, - 1,0,16,38,42,16,20,2,248,255,64,20, - 0,0,0,0,2,131,5,60,140,145,165,36, - 0,163,16,60,4,1,16,142,3,0,192,18, - 0,0,0,0,2,131,5,60,128,145,165,36, - 2,131,4,60,96,145,132,36,15,63,192,12, - 33,48,64,2,19,0,19,18,24,0,146,2, - 18,24,0,0,35,16,19,2,0,0,0,0, - 27,0,98,0,2,0,64,20,0,0,0,0, - 13,0,7,0,18,16,0,0,2,131,4,60, - 152,145,132,36,64,41,2,0,35,40,162,0, - 128,40,5,0,33,40,162,0,15,63,192,12, - 192,40,5,0,255,34,192,8,2,0,181,38, - 2,131,4,60,168,145,132,36,15,63,192,12, - 2,0,181,38,1,0,49,38,42,16,241,2, - 178,255,64,16,0,0,0,0,1,0,214,38, - 2,0,194,42,166,255,64,20,0,0,0,0, - 100,0,191,143,96,0,190,143,92,0,183,143, - 88,0,182,143,84,0,181,143,80,0,180,143, - 76,0,179,143,72,0,178,143,68,0,177,143, - 64,0,176,143,8,0,224,3,104,0,189,39, - 0,0,0,0,43,16,134,0,0,0,164,175, - 4,0,165,175,8,0,166,175,7,0,64,20, - 12,0,167,175,43,16,196,0,5,0,64,20, - 1,0,2,36,43,16,167,0,2,0,64,16, - 43,16,229,0,255,255,2,36,8,0,224,3, - 0,0,0,0,232,255,189,39,3,131,4,60, - 208,12,132,36,170,0,5,36,16,0,191,175, - 144,71,192,12,60,0,6,36,2,131,6,60, - 112,155,198,36,2,131,2,60,212,246,66,140, - 2,131,3,60,216,246,99,132,0,0,194,172, - 4,0,195,164,2,131,2,60,138,155,66,148, - 2,131,3,60,132,155,99,148,2,131,4,60, - 134,155,132,148,2,131,5,60,136,155,165,148, - 3,131,1,60,216,12,34,164,2,131,2,60, - 130,155,66,148,3,131,10,60,218,12,74,37, - 3,0,199,136,0,0,199,152,4,0,200,128, - 5,0,201,128,3,0,71,169,0,0,71,185, - 4,0,72,161,5,0,73,161,3,131,1,60, - 238,12,35,164,3,131,1,60,242,12,36,164, - 3,131,1,60,246,12,37,164,3,131,1,60, - 240,12,34,164,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,3,131,2,60, - 216,12,66,140,3,131,3,60,220,12,99,140, - 3,131,1,60,208,12,34,172,3,131,1,60, - 212,12,35,172,3,131,2,60,238,12,66,148, - 3,131,3,60,240,12,99,148,3,131,4,60, - 242,12,132,148,232,255,189,39,16,0,176,175, - 3,131,1,60,234,12,35,164,24,133,131,143, - 20,0,191,175,3,131,1,60,224,12,32,172, - 3,131,1,60,228,12,32,172,3,131,1,60, - 248,12,32,172,3,131,1,60,252,12,32,172, - 3,131,1,60,8,13,32,164,3,131,1,60, - 4,13,32,164,3,131,1,60,232,12,34,164, - 33,16,68,0,3,131,1,60,236,12,36,164, - 3,131,1,60,244,12,34,164,8,0,96,24, - 1,0,16,36,150,35,192,12,33,32,0,2, - 24,133,130,143,1,0,16,38,42,16,80,0, - 250,255,64,16,0,0,0,0,206,35,192,12, - 0,0,0,0,52,36,192,12,0,0,0,0, - 1,0,2,36,3,131,1,60,0,13,34,164, - 3,0,2,36,3,131,1,60,2,13,32,164, - 3,131,1,60,20,13,34,172,2,131,1,60, - 164,247,34,172,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,224,255,189,39, - 20,0,177,175,33,136,128,0,16,0,176,175, - 192,129,17,0,3,131,4,60,16,13,132,36, - 33,32,4,2,187,0,5,36,24,0,191,175, - 
144,71,192,12,128,0,6,36,2,131,2,60, - 140,155,66,148,100,0,3,36,3,131,1,60, - 33,8,48,0,24,13,35,172,0,18,2,0, - 37,16,34,2,3,131,1,60,33,8,48,0, - 16,13,34,164,22,36,192,12,33,32,32,2, - 4,0,2,36,64,138,17,0,3,131,1,60, - 33,8,48,0,20,13,34,172,2,131,1,60, - 33,8,49,0,164,247,34,172,3,131,1,60, - 33,8,48,0,52,13,32,172,3,131,1,60, - 33,8,48,0,56,13,32,172,3,131,1,60, - 33,8,48,0,106,13,32,164,3,131,1,60, - 33,8,48,0,110,13,32,164,3,131,1,60, - 33,8,48,0,114,13,32,164,3,131,1,60, - 33,8,48,0,120,13,32,172,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,24,133,130,143,216,255,189,39, - 20,0,177,175,1,0,17,36,36,0,191,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 55,0,64,24,16,0,176,175,3,131,20,60, - 228,12,148,38,3,131,2,60,56,13,66,36, - 128,0,83,36,124,0,82,36,128,0,16,36, - 0,0,130,142,0,0,0,0,6,0,34,22, - 33,32,32,2,0,0,64,174,101,36,192,12, - 0,0,96,174,8,36,192,8,128,0,115,38, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 244,255,134,142,248,255,135,142,20,35,192,12, - 0,0,0,0,17,0,64,20,33,32,32,2, - 3,131,3,60,33,24,112,0,48,13,99,148, - 3,131,2,60,33,16,80,0,16,13,66,148, - 0,0,0,0,8,0,98,20,0,0,0,0, - 3,131,1,60,33,8,48,0,106,13,32,164, - 101,36,192,12,33,32,32,2,8,36,192,8, - 128,0,115,38,0,0,64,174,125,36,192,12, - 0,0,96,174,128,0,115,38,128,0,82,38, - 24,133,130,143,1,0,49,38,42,16,81,0, - 210,255,64,16,128,0,16,38,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,192,33,4,0,3,131,2,60, - 28,13,66,36,33,24,130,0,3,131,5,60, - 208,12,165,140,3,131,6,60,212,12,198,140, - 0,0,101,172,4,0,102,172,12,0,66,36, - 3,131,3,60,224,12,99,140,33,16,130,0, - 3,131,1,60,33,8,36,0,36,13,35,172, - 3,131,3,60,216,12,99,140,3,131,5,60, - 220,12,165,140,0,0,67,172,4,0,69,172, - 3,131,2,60,33,16,68,0,16,13,66,148, - 3,131,1,60,33,8,36,0,8,0,224,3, - 48,13,34,164,24,133,130,143,224,255,189,39, - 20,0,177,175,1,0,17,36,24,0,191,175, - 38,0,64,24,16,0,176,175,128,0,16,36, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 18,0,64,20,0,0,0,0,3,131,3,60, - 33,24,112,0,48,13,99,148,3,131,2,60, - 33,16,80,0,16,13,66,148,0,0,0,0, - 9,0,98,20,0,0,0,0,3,131,2,60, - 33,16,80,0,20,13,66,140,0,0,0,0, - 3,0,64,16,0,0,0,0,203,36,192,12, - 33,32,32,2,24,133,130,143,1,0,49,38, - 42,16,81,0,221,255,64,16,128,0,16,38, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,192,41,4,0, - 3,131,3,60,33,24,101,0,20,13,99,140, - 4,0,2,36,16,0,98,20,240,255,189,39, - 1,0,2,36,64,26,4,0,3,131,1,60, - 33,8,37,0,20,13,34,172,2,131,1,60, - 33,8,35,0,164,247,34,172,1,0,2,36, - 3,131,1,60,33,8,37,0,110,13,34,164, - 3,131,1,60,33,8,37,0,112,13,32,164, - 8,0,224,3,16,0,189,39,224,255,189,39, - 24,0,178,175,33,144,128,0,16,0,176,175, - 192,129,18,0,28,0,191,175,20,0,177,175, - 3,131,2,60,33,16,80,0,20,13,66,140, - 0,0,0,0,18,0,64,16,4,0,17,36, - 16,0,81,16,254,255,66,36,2,0,66,44, - 4,0,64,16,64,18,18,0,161,36,192,12, - 0,0,0,0,64,18,18,0,3,131,1,60, - 33,8,48,0,20,13,49,172,2,131,1,60, - 33,8,34,0,164,247,49,172,3,131,1,60, - 33,8,48,0,110,13,32,164,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,3,131,4,60, - 208,12,132,140,3,131,5,60,212,12,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,232,255,189,39,16,0,191,175, - 20,35,192,12,0,0,0,0,10,0,64,20, - 1,0,2,36,3,131,1,60,252,12,34,172, - 1,0,2,36,3,131,1,60,4,13,34,164, - 
3,131,1,60,6,13,32,164,197,36,192,8, - 1,0,2,36,3,131,2,60,248,12,66,140, - 0,0,0,0,9,0,64,20,1,0,2,36, - 72,37,192,12,0,0,0,0,1,0,2,36, - 3,131,1,60,8,13,34,164,3,131,1,60, - 10,13,32,164,1,0,2,36,3,131,1,60, - 248,12,34,172,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,224,255,189,39, - 20,0,177,175,33,136,128,0,16,0,176,175, - 192,129,17,0,24,0,191,175,3,131,2,60, - 33,16,80,0,114,13,66,132,0,0,0,0, - 5,0,64,16,1,0,2,36,3,131,1,60, - 33,8,48,0,67,37,192,8,56,13,34,172, - 3,131,4,60,208,12,132,36,3,131,2,60, - 64,13,66,36,33,24,2,2,3,131,1,60, - 33,8,48,0,60,13,32,164,0,0,133,140, - 4,0,134,140,0,0,101,172,4,0,102,172, - 12,0,66,36,3,131,3,60,224,12,99,140, - 33,16,2,2,3,131,1,60,33,8,48,0, - 72,13,35,172,3,131,3,60,216,12,99,140, - 3,131,5,60,220,12,165,140,0,0,67,172, - 4,0,69,172,3,131,2,60,33,16,80,0, - 16,13,66,148,3,131,1,60,33,8,48,0, - 84,13,34,164,0,0,132,140,3,131,5,60, - 212,12,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,5,0,64,20,0,0,0,0, - 3,131,1,60,33,8,48,0,22,37,192,8, - 86,13,32,164,3,131,2,60,228,12,66,140, - 2,131,3,60,128,155,99,148,192,17,2,0, - 3,131,1,60,33,8,34,0,108,13,34,148, - 0,0,0,0,33,16,67,0,3,131,1,60, - 33,8,48,0,86,13,34,164,3,131,2,60, - 232,12,66,148,192,129,17,0,3,131,1,60, - 33,8,48,0,88,13,34,164,3,131,2,60, - 234,12,66,148,33,32,32,2,3,131,1,60, - 33,8,48,0,90,13,34,164,3,131,3,60, - 236,12,99,148,3,131,2,60,33,16,80,0, - 52,13,66,140,3,131,5,60,60,13,165,36, - 3,131,1,60,33,8,48,0,52,13,32,172, - 3,131,1,60,33,8,48,0,96,13,34,172, - 3,131,1,60,33,8,48,0,92,13,35,164, - 3,131,2,60,252,12,66,140,3,131,1,60, - 33,8,48,0,100,13,34,172,80,40,192,12, - 33,40,5,2,1,0,2,36,3,131,1,60, - 33,8,48,0,56,13,32,172,3,131,1,60, - 33,8,48,0,114,13,34,164,3,131,1,60, - 33,8,48,0,116,13,32,164,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,128,0,2,36, - 3,131,4,60,228,12,132,140,3,131,5,60, - 104,13,165,36,16,0,191,175,192,25,4,0, - 3,131,1,60,33,8,35,0,104,13,34,164, - 151,40,192,12,33,40,101,0,2,131,4,60, - 15,63,192,12,12,146,132,36,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,102,37,192,12, - 0,0,0,0,13,38,192,12,0,0,0,0, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,24,133,130,143,208,255,189,39, - 24,0,178,175,33,144,0,0,28,0,179,175, - 1,0,19,36,44,0,191,175,40,0,182,175, - 36,0,181,175,32,0,180,175,20,0,177,175, - 110,0,64,24,16,0,176,175,3,131,21,60, - 216,12,181,38,3,131,22,60,16,13,214,38, - 128,0,208,38,128,0,20,36,192,17,18,0, - 3,131,4,60,33,32,148,0,40,13,132,140, - 3,131,5,60,33,40,180,0,44,13,165,140, - 0,0,166,142,3,131,7,60,220,12,231,140, - 0,0,0,0,20,35,192,12,33,136,86,0, - 10,0,64,20,0,0,0,0,3,131,3,60, - 33,24,116,0,48,13,99,148,3,131,2,60, - 33,16,84,0,16,13,66,148,0,0,0,0, - 74,0,98,16,0,0,0,0,4,0,2,142, - 0,0,0,0,70,0,64,16,0,0,0,0, - 12,0,4,142,16,0,5,142,0,0,166,142, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,61,0,65,4,0,0,0,0, - 58,0,64,18,0,0,0,0,12,0,4,142, - 16,0,5,142,12,0,38,142,16,0,39,142, - 20,35,192,12,0,0,0,0,50,0,64,4, - 0,0,0,0,12,0,4,142,16,0,5,142, - 12,0,38,142,16,0,39,142,20,35,192,12, - 0,0,0,0,43,0,64,20,0,0,0,0, - 20,0,5,142,8,0,3,142,20,0,36,142, - 8,0,34,142,33,40,163,0,33,32,130,0, - 43,16,164,0,33,0,64,20,0,0,0,0, - 32,0,164,20,0,0,0,0,24,0,4,142, - 28,0,5,142,24,0,38,142,28,0,39,142, - 20,35,192,12,0,0,0,0,23,0,64,4, - 0,0,0,0,24,0,4,142,28,0,5,142, - 24,0,38,142,28,0,39,142,20,35,192,12, - 0,0,0,0,16,0,64,20,0,0,0,0, - 32,0,4,150,32,0,35,150,0,0,0,0, - 43,16,131,0,9,0,64,20,0,0,0,0, - 
8,0,131,20,0,0,0,0,0,0,2,150, - 0,0,35,150,0,0,0,0,43,16,67,0, - 2,0,64,16,0,0,0,0,33,144,96,2, - 128,0,16,38,24,133,130,143,1,0,115,38, - 42,16,83,0,154,255,64,16,128,0,148,38, - 3,131,1,60,228,12,50,172,12,0,64,22, - 192,17,18,0,3,131,2,60,216,12,66,140, - 3,131,3,60,220,12,99,140,3,131,1,60, - 208,12,34,172,3,131,1,60,212,12,35,172, - 3,131,1,60,3,38,192,8,224,12,32,172, - 3,131,3,60,33,24,98,0,28,13,99,140, - 3,131,4,60,33,32,130,0,32,13,132,140, - 3,131,1,60,208,12,35,172,3,131,1,60, - 212,12,36,172,3,131,3,60,33,24,98,0, - 36,13,99,140,3,131,1,60,33,8,34,0, - 24,13,34,140,0,0,0,0,33,24,98,0, - 3,131,1,60,224,12,35,172,44,0,191,143, - 40,0,182,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,48,0,189,39, - 24,133,130,143,208,255,189,39,36,0,181,175, - 1,0,21,36,44,0,191,175,40,0,182,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 20,0,177,175,82,0,64,24,16,0,176,175, - 3,131,22,60,216,12,214,38,3,131,2,60, - 48,13,66,36,128,0,84,36,96,0,83,36, - 124,0,82,36,120,0,81,36,128,0,16,36, - 0,0,36,142,0,0,69,142,0,0,198,142, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,55,0,64,20,0,0,0,0, - 0,0,131,150,0,0,98,150,0,0,0,0, - 50,0,98,20,0,0,0,0,3,131,4,60, - 33,32,144,0,28,13,132,140,3,131,5,60, - 33,40,176,0,32,13,165,140,248,255,198,142, - 3,131,7,60,212,12,231,140,20,35,192,12, - 0,0,0,0,37,0,64,16,0,0,0,0, - 8,0,196,142,3,131,3,60,33,24,112,0, - 36,13,99,140,0,0,0,0,43,16,131,0, - 29,0,64,20,0,0,0,0,27,0,131,20, - 0,0,0,0,0,0,196,142,3,131,5,60, - 220,12,165,140,0,0,38,142,0,0,71,142, - 20,35,192,12,0,0,0,0,16,0,64,4, - 0,0,0,0,0,0,196,142,3,131,5,60, - 220,12,165,140,0,0,38,142,0,0,71,142, - 20,35,192,12,0,0,0,0,9,0,64,20, - 0,0,0,0,0,0,99,150,0,0,130,150, - 0,0,0,0,43,16,67,0,3,0,64,20, - 0,0,0,0,22,36,192,12,33,32,160,2, - 128,0,148,38,128,0,115,38,128,0,82,38, - 128,0,49,38,24,133,130,143,1,0,181,38, - 42,16,85,0,185,255,64,16,128,0,16,38, - 44,0,191,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 48,0,189,39,216,255,189,39,3,131,4,60, - 0,13,132,36,32,0,191,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 0,0,130,132,0,0,0,0,14,0,64,16, - 0,0,0,0,3,131,2,60,2,13,66,148, - 3,131,3,60,234,12,99,148,1,0,66,36, - 3,131,1,60,2,13,34,164,255,255,66,48, - 43,16,67,0,3,0,64,20,0,0,0,0, - 11,39,192,12,0,0,128,164,3,131,4,60, - 8,13,132,36,0,0,130,132,0,0,0,0, - 14,0,64,16,0,0,0,0,3,131,2,60, - 10,13,66,148,3,131,3,60,234,12,99,148, - 1,0,66,36,3,131,1,60,10,13,34,164, - 255,255,66,48,43,16,67,0,3,0,64,20, - 0,0,0,0,24,39,192,12,0,0,128,164, - 3,131,4,60,4,13,132,36,0,0,130,132, - 0,0,0,0,14,0,64,16,0,0,0,0, - 3,131,2,60,6,13,66,148,3,131,3,60, - 244,12,99,148,1,0,66,36,3,131,1,60, - 6,13,34,164,255,255,66,48,43,16,67,0, - 3,0,64,20,0,0,0,0,37,39,192,12, - 0,0,128,164,24,133,130,143,0,0,0,0, - 78,0,64,24,1,0,17,36,3,131,2,60, - 16,13,66,36,226,0,83,36,128,0,82,36, - 128,0,16,36,3,131,2,60,33,16,80,0, - 110,13,66,132,0,0,0,0,18,0,64,16, - 0,0,0,0,3,131,2,60,33,16,80,0, - 112,13,66,148,0,0,0,0,1,0,66,36, - 96,0,66,166,3,131,3,60,236,12,99,148, - 255,255,66,48,43,16,67,0,6,0,64,20, - 0,0,0,0,3,131,1,60,33,8,48,0, - 110,13,32,164,79,39,192,12,33,32,32,2, - 3,131,2,60,33,16,80,0,106,13,66,132, - 0,0,0,0,18,0,64,16,0,0,0,0, - 3,131,2,60,33,16,80,0,108,13,66,148, - 0,0,0,0,1,0,66,36,92,0,66,166, - 3,131,3,60,232,12,99,148,255,255,66,48, - 43,16,67,0,6,0,64,20,0,0,0,0, - 3,131,1,60,33,8,48,0,106,13,32,164, - 129,39,192,12,33,32,32,2,0,0,98,134, - 
0,0,0,0,16,0,64,16,0,0,0,0, - 3,131,2,60,33,16,80,0,116,13,66,148, - 0,0,0,0,1,0,66,36,100,0,66,166, - 3,131,3,60,246,12,99,148,255,255,66,48, - 43,16,67,0,4,0,64,20,0,0,0,0, - 0,0,96,166,191,39,192,12,33,32,32,2, - 128,0,115,38,128,0,82,38,24,133,130,143, - 1,0,49,38,42,16,81,0,185,255,64,16, - 128,0,16,38,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,232,255,189,39, - 16,0,191,175,52,36,192,12,0,0,0,0, - 1,0,2,36,3,131,1,60,0,13,34,164, - 3,131,1,60,2,13,32,164,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,72,37,192,12, - 0,0,0,0,1,0,2,36,3,131,1,60, - 8,13,34,164,3,131,1,60,10,13,32,164, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,240,255,189,39,3,131,1,60, - 248,12,32,172,3,131,1,60,252,12,32,172, - 8,0,224,3,16,0,189,39,24,133,130,143, - 224,255,189,39,20,0,177,175,1,0,17,36, - 24,0,191,175,23,0,64,24,16,0,176,175, - 128,0,16,36,3,131,4,60,33,32,144,0, - 40,13,132,140,3,131,5,60,33,40,176,0, - 44,13,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,3,0,64,20,1,0,49,38, - 74,39,192,8,1,0,2,36,24,133,130,143, - 0,0,0,0,42,16,81,0,236,255,64,16, - 128,0,16,38,33,16,0,0,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,192,41,4,0, - 16,0,191,175,3,131,3,60,33,24,101,0, - 20,13,99,140,1,0,2,36,16,0,98,20, - 2,0,2,36,64,26,4,0,3,131,1,60, - 33,8,37,0,20,13,34,172,2,131,1,60, - 33,8,35,0,164,247,34,172,1,0,2,36, - 3,131,1,60,33,8,37,0,110,13,34,164, - 3,131,1,60,33,8,37,0,125,39,192,8, - 112,13,32,164,21,0,98,20,3,0,3,36, - 64,18,4,0,3,131,1,60,33,8,37,0, - 20,13,35,172,2,131,1,60,33,8,34,0, - 164,247,35,172,3,131,2,60,33,16,69,0, - 120,13,66,140,0,0,0,0,1,0,66,36, - 3,131,1,60,33,8,37,0,44,39,192,12, - 120,13,34,172,3,0,64,16,0,0,0,0, - 161,36,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 224,255,189,39,16,0,176,175,33,128,128,0, - 20,0,177,175,3,131,17,60,208,12,49,38, - 24,0,191,175,0,0,36,142,3,131,5,60, - 212,12,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,33,32,0,2,22,36,192,12, - 1,0,80,44,92,37,192,12,0,0,0,0, - 206,35,192,12,0,0,0,0,33,0,0,22, - 0,0,0,0,0,0,36,142,3,131,5,60, - 212,12,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,22,0,64,20,0,0,0,0, - 3,131,2,60,238,12,66,148,3,131,3,60, - 240,12,99,148,3,131,4,60,242,12,132,148, - 3,131,1,60,232,12,34,164,3,131,1,60, - 234,12,35,164,3,131,1,60,161,36,192,12, - 236,12,36,164,3,131,1,60,52,36,192,12, - 8,13,32,164,1,0,2,36,3,131,1,60, - 0,13,34,164,3,131,1,60,2,13,32,164, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 192,17,4,0,16,0,191,175,3,131,1,60, - 33,8,34,0,56,13,34,140,0,0,0,0, - 3,0,64,16,0,0,0,0,203,36,192,12, - 0,0,0,0,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,3,131,1,60, - 248,12,32,172,3,131,1,60,8,0,224,3, - 8,13,32,164,232,255,189,39,192,25,4,0, - 1,0,2,36,16,0,191,175,3,131,1,60, - 33,8,35,0,203,36,192,12,52,13,34,172, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,192,33,4,0,3,131,2,60, - 28,13,66,36,33,24,130,0,4,0,166,140, - 8,0,167,140,0,0,102,172,4,0,103,172, - 12,0,66,36,12,0,163,140,33,16,130,0, - 3,131,1,60,33,8,36,0,36,13,35,172, - 16,0,163,140,20,0,166,140,0,0,67,172, - 4,0,70,172,24,0,163,148,1,0,2,36, - 3,131,1,60,33,8,36,0,106,13,34,164, - 3,131,1,60,33,8,36,0,48,13,35,164, - 26,0,162,148,3,131,1,60,33,8,36,0, - 8,0,224,3,108,13,34,164,28,0,130,148, - 3,131,1,60,232,12,34,164,30,0,130,148, - 
3,131,1,60,234,12,34,164,32,0,130,148, - 3,131,1,60,236,12,34,164,40,0,130,140, - 3,131,1,60,8,0,224,3,252,12,34,172, - 224,255,189,39,16,0,176,175,33,128,160,0, - 192,25,4,0,3,131,2,60,16,13,66,36, - 20,0,177,175,33,136,98,0,24,0,191,175, - 4,0,4,142,8,0,5,142,12,0,38,142, - 16,0,39,142,20,35,192,12,0,0,0,0, - 48,0,64,4,1,0,2,36,4,0,4,142, - 8,0,5,142,12,0,38,142,16,0,39,142, - 20,35,192,12,0,0,0,0,40,0,64,20, - 33,16,0,0,12,0,4,142,20,0,35,142, - 0,0,0,0,43,16,131,0,34,0,64,20, - 1,0,2,36,32,0,131,20,33,16,0,0, - 16,0,4,142,20,0,5,142,24,0,38,142, - 28,0,39,142,20,35,192,12,0,0,0,0, - 24,0,64,4,1,0,2,36,16,0,4,142, - 20,0,5,142,24,0,38,142,28,0,39,142, - 20,35,192,12,0,0,0,0,16,0,64,20, - 33,16,0,0,16,0,4,142,20,0,5,142, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 6,0,64,20,1,0,2,36,24,0,3,150, - 32,0,34,150,0,0,0,0,43,16,67,0, - 1,0,66,56,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 44,133,130,143,216,255,189,39,20,0,177,175, - 33,136,128,0,32,0,180,175,33,160,160,0, - 36,0,191,175,28,0,179,175,24,0,178,175, - 53,0,64,16,16,0,176,175,2,131,19,60, - 192,4,115,38,33,32,96,2,54,21,192,12, - 1,0,5,36,33,128,64,0,8,0,0,22, - 64,26,17,0,2,131,2,60,33,16,67,0, - 176,247,66,140,33,24,99,2,1,0,66,36, - 143,40,192,8,240,242,98,172,8,0,4,142, - 64,146,17,0,20,242,101,38,33,40,69,2, - 172,41,192,12,33,48,128,2,33,24,64,0, - 60,0,98,40,2,0,64,16,0,242,98,38, - 60,0,3,36,33,136,66,2,33,32,32,2, - 33,40,0,2,1,0,2,36,17,0,2,162, - 0,128,98,52,0,0,2,174,6,23,192,12, - 18,0,3,166,10,0,64,20,33,32,0,2, - 2,131,2,60,33,16,82,0,172,247,66,140, - 0,0,0,0,1,0,66,36,152,21,192,12, - 236,0,34,174,143,40,192,8,0,0,0,0, - 2,131,2,60,33,16,82,0,168,247,66,140, - 0,0,0,0,1,0,66,36,232,0,34,174, - 36,0,191,143,32,0,180,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,44,133,130,143, - 216,255,189,39,20,0,177,175,33,136,128,0, - 32,0,180,175,33,160,160,0,36,0,191,175, - 28,0,179,175,24,0,178,175,53,0,64,16, - 16,0,176,175,2,131,19,60,192,4,115,38, - 33,32,96,2,54,21,192,12,1,0,5,36, - 33,128,64,0,8,0,0,22,64,26,17,0, - 2,131,2,60,33,16,67,0,176,247,66,140, - 33,24,99,2,1,0,66,36,214,40,192,8, - 240,242,98,172,8,0,4,142,64,146,17,0, - 20,242,101,38,33,40,69,2,74,42,192,12, - 33,48,128,2,33,24,64,0,60,0,98,40, - 2,0,64,16,0,242,98,38,60,0,3,36, - 33,136,66,2,33,32,32,2,33,40,0,2, - 1,0,2,36,17,0,2,162,0,128,98,52, - 0,0,2,174,6,23,192,12,18,0,3,166, - 10,0,64,20,33,32,0,2,2,131,2,60, - 33,16,82,0,172,247,66,140,0,0,0,0, - 1,0,66,36,152,21,192,12,236,0,34,174, - 214,40,192,8,0,0,0,0,2,131,2,60, - 33,16,82,0,168,247,66,140,0,0,0,0, - 1,0,66,36,232,0,34,174,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,216,255,189,39,24,0,178,175, - 33,144,128,0,20,0,177,175,33,136,160,0, - 16,0,176,175,33,128,192,0,36,0,191,175, - 32,0,180,175,28,0,179,175,4,0,36,142, - 8,0,37,142,160,133,134,143,2,131,7,60, - 180,211,231,140,20,35,192,12,0,0,0,0, - 11,0,64,20,0,0,0,0,2,131,4,60, - 144,146,132,36,15,63,192,12,33,40,0,2, - 100,129,132,39,108,129,134,39,31,21,192,12, - 6,0,5,38,92,41,192,8,0,0,0,0, - 3,131,20,60,208,12,148,38,0,0,132,142, - 3,131,5,60,212,12,165,140,3,131,6,60, - 216,12,198,140,3,131,7,60,220,12,231,140, - 0,0,0,0,20,35,192,12,192,129,18,0, - 3,131,3,60,33,24,112,0,20,13,99,140, - 0,0,0,0,80,0,96,16,1,0,83,44, - 33,32,64,2,11,40,192,12,33,40,32,2, - 50,0,64,16,33,32,64,2,223,39,192,12, - 33,40,32,2,92,37,192,12,0,0,0,0, - 206,35,192,12,0,0,0,0,0,0,132,142, - 
3,131,5,60,212,12,165,140,3,131,6,60, - 216,12,198,140,3,131,7,60,220,12,231,140, - 20,35,192,12,0,0,0,0,16,0,64,16, - 0,0,0,0,14,0,96,18,0,0,0,0, - 3,131,2,60,248,12,66,140,3,131,1,60, - 9,0,64,16,0,13,32,164,3,131,1,60, - 72,37,192,12,4,13,32,164,1,0,2,36, - 3,131,1,60,8,13,34,164,3,131,1,60, - 10,13,32,164,3,131,2,60,228,12,66,140, - 0,0,0,0,38,0,66,22,0,0,0,0, - 254,39,192,12,33,32,32,2,52,36,192,12, - 0,0,0,0,36,0,34,142,0,0,0,0, - 30,0,64,16,0,0,0,0,206,39,192,12, - 0,0,0,0,92,41,192,8,0,0,0,0, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 12,0,64,20,0,0,0,0,3,131,3,60, - 33,24,112,0,48,13,99,148,3,131,2,60, - 33,16,80,0,16,13,66,148,0,0,0,0, - 3,0,98,20,0,0,0,0,203,36,192,12, - 33,32,64,2,36,0,191,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,40,0,189,39, - 224,255,189,39,20,0,177,175,33,136,128,0, - 16,0,176,175,192,129,17,0,24,0,191,175, - 3,131,2,60,33,16,80,0,20,13,66,140, - 0,0,0,0,28,0,64,16,0,0,0,0, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 14,0,64,20,0,0,0,0,3,131,3,60, - 33,24,112,0,48,13,99,148,3,131,2,60, - 33,16,80,0,16,13,66,148,0,0,0,0, - 5,0,98,20,0,0,0,0,161,36,192,12, - 0,0,0,0,211,39,192,12,33,32,32,2, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,2,18,5,0, - 0,0,130,160,8,0,224,3,1,0,133,160, - 2,22,5,0,0,0,130,160,2,20,5,0, - 1,0,130,160,2,18,5,0,2,0,130,160, - 8,0,224,3,3,0,133,160,0,0,130,144, - 1,0,131,144,0,18,2,0,8,0,224,3, - 37,16,98,0,0,0,130,144,1,0,131,144, - 2,0,133,144,0,22,2,0,0,28,3,0, - 33,16,67,0,0,42,5,0,3,0,131,144, - 33,16,69,0,8,0,224,3,37,16,67,0, - 224,255,189,39,16,0,176,175,33,128,128,0, - 24,0,191,175,20,0,177,175,48,129,135,39, - 3,0,226,136,0,0,226,152,4,0,227,128, - 5,0,228,128,3,0,2,170,0,0,2,186, - 4,0,3,162,5,0,4,162,3,0,162,136, - 0,0,162,152,4,0,163,128,5,0,164,128, - 9,0,2,170,6,0,2,186,10,0,3,162, - 11,0,4,162,12,0,4,38,38,0,5,36, - 144,41,192,12,33,136,192,0,14,0,4,38, - 144,41,192,12,66,66,5,36,17,0,4,38, - 33,40,0,0,3,0,2,36,144,41,192,12, - 16,0,2,162,19,0,0,162,20,0,0,162, - 40,0,34,142,0,0,0,0,43,32,2,0, - 36,0,34,142,0,0,0,0,3,0,64,16, - 33,24,128,0,218,41,192,8,128,0,130,52, - 33,16,96,0,21,0,2,162,4,0,37,150, - 0,0,0,0,144,41,192,12,22,0,4,38, - 9,0,34,138,6,0,34,154,10,0,35,130, - 11,0,36,130,27,0,2,170,24,0,2,186, - 28,0,3,162,29,0,4,162,12,0,37,142, - 0,0,0,0,148,41,192,12,30,0,4,38, - 16,0,37,150,0,0,0,0,144,41,192,12, - 34,0,4,38,21,0,34,138,18,0,34,154, - 22,0,35,130,23,0,36,130,39,0,2,170, - 36,0,2,186,40,0,3,162,41,0,4,162, - 24,0,37,150,0,0,0,0,144,41,192,12, - 42,0,4,38,26,0,37,150,0,0,0,0, - 144,41,192,12,44,0,4,38,28,0,37,150, - 0,0,0,0,144,41,192,12,46,0,4,38, - 30,0,37,150,0,0,0,0,144,41,192,12, - 48,0,4,38,32,0,37,150,0,0,0,0, - 144,41,192,12,50,0,4,38,52,0,2,36, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,224,255,189,39, - 16,0,176,175,33,128,160,0,24,0,191,175, - 20,0,177,175,21,0,2,146,33,136,128,0, - 1,0,66,48,40,0,34,174,22,0,2,146, - 22,0,4,38,128,0,66,48,156,41,192,12, - 36,0,34,174,4,0,34,166,27,0,2,138, - 24,0,2,154,28,0,3,130,29,0,4,130, - 9,0,34,170,6,0,34,186,10,0,35,162, - 11,0,36,162,161,41,192,12,30,0,4,38, - 34,0,4,38,156,41,192,12,12,0,34,174, - 16,0,34,166,39,0,2,138,36,0,2,154, - 40,0,3,130,41,0,4,130,21,0,34,170, - 18,0,34,186,22,0,35,162,23,0,36,162, - 
156,41,192,12,42,0,4,38,44,0,4,38, - 156,41,192,12,24,0,34,166,46,0,4,38, - 156,41,192,12,26,0,34,166,48,0,4,38, - 156,41,192,12,28,0,34,166,50,0,4,38, - 156,41,192,12,30,0,34,166,32,0,34,166, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 16,0,176,175,33,128,128,0,20,0,191,175, - 48,129,134,39,3,0,194,136,0,0,194,152, - 4,0,195,128,5,0,196,128,3,0,2,170, - 0,0,2,186,4,0,3,162,5,0,4,162, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,164,128,9,0,2,170,6,0,2,186, - 10,0,3,162,11,0,4,162,12,0,4,38, - 144,41,192,12,7,0,5,36,14,0,4,38, - 144,41,192,12,66,66,5,36,17,0,4,38, - 33,40,0,0,3,0,2,36,144,41,192,12, - 16,0,2,162,21,0,2,36,128,0,3,36, - 19,0,0,162,20,0,3,162,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 176,255,189,39,68,0,177,175,64,0,176,175, - 33,128,160,0,72,0,191,175,14,0,3,146, - 66,0,2,36,9,0,98,20,33,136,128,0, - 15,0,2,146,0,0,0,0,6,0,67,20, - 64,26,17,0,16,0,3,146,3,0,2,36, - 11,0,98,16,0,0,0,0,64,26,17,0, - 2,131,2,60,33,16,67,0,184,247,66,140, - 0,0,0,0,1,0,66,36,2,131,1,60, - 33,8,35,0,182,42,192,8,184,247,34,172, - 20,0,3,146,0,0,0,0,5,0,96,16, - 128,0,2,36,21,0,98,16,64,26,17,0, - 179,42,192,8,0,0,0,0,16,0,164,39, - 64,26,17,0,2,131,2,60,33,16,67,0, - 180,247,66,140,0,0,0,0,1,0,66,36, - 2,131,1,60,33,8,35,0,180,247,34,172, - 17,42,192,12,33,40,0,2,33,32,32,2, - 16,0,165,39,222,40,192,12,33,48,0,2, - 182,42,192,8,0,0,0,0,2,131,2,60, - 33,16,67,0,180,247,66,140,0,0,0,0, - 1,0,66,36,2,131,1,60,33,8,35,0, - 180,247,34,172,100,41,192,12,33,32,32,2, - 182,42,192,8,0,0,0,0,112,129,132,39, - 15,63,192,12,0,0,0,0,72,0,191,143, - 68,0,177,143,64,0,176,143,8,0,224,3, - 80,0,189,39,8,0,224,3,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,162,48,8,0,64,16,255,255,198,48, - 67,40,5,0,64,16,5,0,33,16,68,0, - 0,0,66,144,0,0,0,0,203,42,192,8, - 33,48,194,0,67,40,5,0,255,255,165,36, - 255,255,2,36,6,0,162,16,255,255,3,36, - 0,0,130,148,2,0,132,36,255,255,165,36, - 252,255,163,20,33,48,194,0,255,255,195,48, - 2,20,6,0,33,48,98,0,255,255,195,48, - 2,20,6,0,33,48,98,0,8,0,224,3, - 255,255,194,48,208,255,189,39,16,0,176,175, - 33,128,128,0,28,0,179,175,33,152,160,0, - 24,0,178,175,33,144,192,0,36,0,181,175, - 33,168,0,2,32,0,180,175,33,160,0,0, - 40,0,191,175,20,0,177,175,12,0,3,142, - 0,0,2,142,0,0,0,0,35,24,98,0, - 42,16,114,0,2,0,64,16,33,136,64,2, - 33,136,96,0,13,0,32,18,33,40,96,2, - 35,144,81,2,8,0,2,142,0,0,4,142, - 33,48,32,2,80,68,192,12,33,32,68,0, - 8,0,2,142,0,0,2,142,0,0,2,142, - 33,152,113,2,33,16,81,0,0,0,2,174, - 0,0,2,142,0,0,0,0,4,0,64,18, - 33,160,130,2,4,0,16,142,233,42,192,8, - 0,0,0,0,18,0,180,166,33,16,0,2, - 40,0,191,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,48,0,189,39, - 224,255,189,39,24,0,178,175,33,144,128,0, - 2,131,4,60,192,4,132,36,36,0,165,175, - 1,0,5,36,28,0,191,175,20,0,177,175, - 54,21,192,12,16,0,176,175,33,136,64,0, - 8,0,32,22,33,40,0,0,2,131,2,60, - 176,5,66,140,0,0,0,0,1,0,66,36, - 2,131,1,60,102,43,192,8,176,5,34,172, - 0,1,3,36,8,0,48,142,8,0,2,36, - 16,0,2,166,6,0,2,36,18,0,2,162, - 4,0,2,36,14,0,3,166,19,0,2,162, - 20,0,3,166,2,131,6,60,212,4,198,36, - 3,0,194,136,0,0,194,152,4,0,195,132, - 25,0,2,170,22,0,2,186,26,0,3,166, - 2,131,1,60,195,211,34,136,176,133,130,155, - 0,0,0,0,31,0,2,170,28,0,2,186, - 32,0,4,38,144,71,192,12,6,0,6,36, - 39,0,162,139,36,0,162,155,0,0,0,0, - 41,0,2,170,38,0,2,186,132,129,133,39, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,164,128,3,0,2,170,0,0,2,186, - 4,0,3,162,5,0,4,162,2,131,5,60, - 
212,4,165,36,3,0,162,136,0,0,162,152, - 4,0,163,128,5,0,164,128,9,0,2,170, - 6,0,2,186,10,0,3,162,11,0,4,162, - 33,32,64,2,8,6,2,36,12,0,2,166, - 60,128,2,52,0,0,34,174,60,0,2,36, - 18,0,34,166,74,21,192,12,33,40,32,2, - 3,0,64,20,0,0,0,0,152,21,192,12, - 33,32,32,2,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,216,255,189,39,32,0,180,175, - 33,160,128,0,16,0,176,175,33,128,160,0, - 36,0,191,175,28,0,179,175,24,0,178,175, - 20,0,177,175,8,0,2,142,33,152,192,0, - 33,136,83,0,6,0,35,150,0,1,4,36, - 5,0,100,16,0,2,2,36,113,0,98,16, - 0,0,0,0,15,44,192,8,0,0,0,0, - 24,0,35,150,176,133,130,151,0,0,0,0, - 139,0,98,20,0,0,0,0,26,0,35,150, - 2,131,2,60,194,211,66,148,0,0,0,0, - 133,0,98,20,0,0,0,0,0,0,34,150, - 0,0,0,0,129,0,68,20,8,0,2,36, - 2,0,35,150,0,0,0,0,125,0,98,20, - 6,4,2,36,4,0,35,150,0,0,0,0, - 121,0,98,20,0,0,0,0,2,131,4,60, - 192,4,132,36,54,21,192,12,1,0,5,36, - 33,144,64,0,8,0,64,22,0,0,0,0, - 2,131,2,60,176,5,66,140,0,0,0,0, - 1,0,66,36,2,131,1,60,15,44,192,8, - 176,5,34,172,8,0,5,142,8,0,80,142, - 9,0,162,136,6,0,162,152,10,0,163,128, - 11,0,164,128,3,0,2,170,0,0,2,186, - 4,0,3,162,5,0,4,162,2,131,6,60, - 212,4,198,36,3,0,194,136,0,0,194,152, - 4,0,195,128,5,0,196,128,9,0,2,170, - 6,0,2,186,10,0,3,162,11,0,4,162, - 12,0,4,38,12,0,165,36,33,128,19,2, - 80,68,192,12,244,255,102,38,0,1,2,36, - 0,0,2,166,8,0,2,36,2,0,2,166, - 6,0,2,36,4,0,2,162,4,0,2,36, - 5,0,2,162,0,2,2,36,6,0,2,166, - 2,131,5,60,212,4,165,36,3,0,162,136, - 0,0,162,152,4,0,163,132,11,0,2,170, - 8,0,2,186,12,0,3,166,2,131,1,60, - 195,211,34,136,176,133,130,155,0,0,0,0, - 17,0,2,170,14,0,2,186,11,0,34,138, - 8,0,34,154,12,0,35,134,21,0,2,170, - 18,0,2,186,22,0,3,166,17,0,34,138, - 14,0,34,154,0,0,0,0,27,0,2,170, - 24,0,2,186,33,32,128,2,33,40,64,2, - 60,128,2,52,0,0,66,174,60,0,2,36, - 74,21,192,12,18,0,66,166,38,0,64,20, - 0,0,0,0,152,21,192,12,33,32,64,2, - 15,44,192,8,0,0,0,0,14,0,35,150, - 196,133,130,151,0,0,0,0,29,0,98,20, - 0,0,0,0,16,0,35,150,2,131,2,60, - 214,211,66,148,0,0,0,0,23,0,98,20, - 0,0,0,0,0,0,34,150,0,0,0,0, - 19,0,68,20,8,0,2,36,2,0,35,150, - 0,0,0,0,15,0,98,20,6,4,2,36, - 4,0,35,150,0,0,0,0,11,0,98,20, - 0,0,0,0,68,133,130,143,140,129,134,39, - 11,0,35,138,8,0,35,154,12,0,36,134, - 3,0,195,168,0,0,195,184,4,0,196,164, - 20,0,66,36,152,129,130,175,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,192,255,189,39,80,0,169,143, - 84,0,168,143,56,0,180,175,33,160,128,0, - 44,0,177,175,88,0,177,143,16,0,164,39, - 52,0,179,175,92,0,179,143,3,131,3,60, - 32,17,99,36,40,0,176,175,33,128,192,0, - 60,0,191,175,48,0,178,175,0,0,98,140, - 8,0,50,142,1,0,66,36,0,0,98,172, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,170,128,3,0,66,170,0,0,66,186, - 4,0,67,162,5,0,74,162,2,131,10,60, - 212,4,74,37,3,0,66,137,0,0,66,153, - 4,0,67,129,5,0,69,129,9,0,66,170, - 6,0,66,186,10,0,67,162,11,0,69,162, - 69,0,2,36,16,0,162,163,17,0,168,163, - 18,0,34,150,240,132,131,143,33,48,0,0, - 25,0,169,163,20,0,66,36,0,66,2,0, - 255,255,66,48,2,18,2,0,37,64,2,1, - 0,74,3,0,255,255,98,48,2,18,2,0, - 37,72,34,1,3,131,2,60,0,17,66,140, - 22,0,160,167,26,0,160,167,18,0,168,167, - 176,133,136,143,1,0,99,36,240,132,131,175, - 20,0,169,167,24,0,162,163,28,0,168,175, - 3,0,226,136,0,0,226,152,0,0,0,0, - 35,0,162,171,32,0,162,187,192,42,192,12, - 20,0,5,36,39,16,2,0,26,0,162,167, - 14,0,2,36,44,0,2,22,8,0,2,36, - 12,0,66,166,19,0,162,139,16,0,162,155, - 23,0,163,139,20,0,163,155,27,0,164,139, - 24,0,164,155,31,0,165,139,28,0,165,155, - 
- [removed: large embedded firmware/data table — several thousand comma-separated byte values (little-endian 32-bit machine-code words); the full byte listing is omitted here]
0,0,0,0,36,16,81,0,3,0,83,20, - 0,0,0,0,162,67,192,8,1,0,2,36, - 11,0,128,26,33,128,0,0,143,63,192,12, - 0,0,0,0,0,0,66,150,0,0,0,0, - 36,16,81,0,246,255,83,16,1,0,16,38, - 42,16,20,2,247,255,64,20,0,0,0,0, - 33,16,0,0,44,0,191,143,40,0,180,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 208,255,189,39,32,0,178,175,33,144,128,0, - 28,0,177,175,33,136,160,0,36,0,179,175, - 33,152,192,0,40,0,180,175,33,160,224,0, - 44,0,191,175,2,0,32,22,24,0,176,175, - 255,0,17,36,0,0,66,146,0,0,0,0, - 36,16,81,0,3,0,83,20,0,0,0,0, - 203,67,192,8,1,0,2,36,11,0,128,26, - 33,128,0,0,143,63,192,12,0,0,0,0, - 0,0,66,146,0,0,0,0,36,16,81,0, - 246,255,83,16,1,0,16,38,42,16,20,2, - 247,255,64,20,0,0,0,0,33,16,0,0, - 44,0,191,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,208,255,189,39, - 32,0,178,175,33,144,128,0,28,0,177,175, - 33,136,160,0,36,0,179,175,33,152,192,0, - 40,0,180,175,33,160,224,0,44,0,191,175, - 2,0,32,22,24,0,176,175,255,255,17,36, - 0,0,66,142,0,0,0,0,36,16,81,0, - 3,0,83,20,0,0,0,0,244,67,192,8, - 1,0,2,36,11,0,128,26,33,128,0,0, - 143,63,192,12,0,0,0,0,0,0,66,142, - 0,0,0,0,36,16,81,0,246,255,83,20, - 1,0,16,38,42,16,20,2,247,255,64,20, - 0,0,0,0,33,16,0,0,44,0,191,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 48,0,189,39,208,255,189,39,32,0,178,175, - 33,144,128,0,28,0,177,175,33,136,160,0, - 36,0,179,175,33,152,192,0,40,0,180,175, - 33,160,224,0,44,0,191,175,2,0,32,22, - 24,0,176,175,255,255,17,52,0,0,66,150, - 0,0,0,0,36,16,81,0,3,0,83,20, - 0,0,0,0,29,68,192,8,1,0,2,36, - 11,0,128,26,33,128,0,0,143,63,192,12, - 0,0,0,0,0,0,66,150,0,0,0,0, - 36,16,81,0,246,255,83,20,1,0,16,38, - 42,16,20,2,247,255,64,20,0,0,0,0, - 33,16,0,0,44,0,191,143,40,0,180,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 208,255,189,39,32,0,178,175,33,144,128,0, - 28,0,177,175,33,136,160,0,36,0,179,175, - 33,152,192,0,40,0,180,175,33,160,224,0, - 44,0,191,175,2,0,32,22,24,0,176,175, - 255,0,17,36,0,0,66,146,0,0,0,0, - 36,16,81,0,3,0,83,20,0,0,0,0, - 70,68,192,8,1,0,2,36,11,0,128,26, - 33,128,0,0,143,63,192,12,0,0,0,0, - 0,0,66,146,0,0,0,0,36,16,81,0, - 246,255,83,20,1,0,16,38,42,16,20,2, - 247,255,64,20,0,0,0,0,33,16,0,0, - 44,0,191,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,0,0,0,0, - 0,0,0,0,248,255,189,39,255,255,195,36, - 8,0,192,16,33,56,128,0,255,255,6,36, - 0,0,162,144,1,0,165,36,255,255,99,36, - 0,0,130,160,251,255,102,20,1,0,132,36, - 33,16,224,0,8,0,224,3,8,0,189,39, - 0,0,0,0,0,0,0,0,0,96,8,64, - 1,0,9,60,0,96,137,64,15,0,138,48, - 33,40,170,0,192,255,165,36,0,0,128,160, - 16,0,128,160,32,0,128,160,251,255,160,28, - 48,0,128,160,64,0,132,36,0,96,136,64, - 0,0,0,0,0,0,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,1,131,2,60, - 224,17,66,36,0,32,9,60,37,16,73,0, - 8,0,64,0,0,0,0,0,0,96,8,64, - 3,0,9,60,0,96,137,64,15,0,138,48, - 33,40,170,0,192,255,165,36,0,0,128,160, - 16,0,128,160,32,0,128,160,251,255,160,28, - 48,0,128,160,64,0,132,36,0,96,136,64, - 0,0,0,0,0,0,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,163,3,60,192,3,99,140, - 0,163,2,60,188,3,66,140,0,0,0,0, - 16,0,98,16,255,255,2,36,0,163,2,60, - 188,3,66,140,0,163,1,60,33,8,34,0, - 200,3,34,144,0,163,3,60,188,3,99,140, - 0,163,4,60,196,3,132,140,0,22,2,0, - 3,22,2,0,1,0,99,36,36,24,100,0, - 0,163,1,60,188,3,35,172,8,0,224,3, - 0,0,0,0,0,163,6,60,0,1,198,52, - 10,0,9,36,255,255,8,36,13,0,7,36, - 0,163,3,60,192,3,99,140,0,163,2,60, - 
188,3,66,140,0,0,0,0,17,0,98,16, - 255,255,5,36,0,163,2,60,188,3,66,140, - 0,0,0,0,33,16,70,0,200,2,66,144, - 0,0,0,0,0,22,2,0,3,46,2,0, - 0,163,2,60,188,3,66,140,0,163,3,60, - 196,3,99,140,1,0,66,36,36,16,67,0, - 0,163,1,60,188,3,34,172,11,0,169,16, - 11,0,162,40,5,0,64,16,0,0,0,0, - 228,255,168,16,0,0,0,0,206,68,192,8, - 0,0,133,160,224,255,167,16,0,0,0,0, - 206,68,192,8,0,0,133,160,208,68,192,8, - 0,0,128,160,169,68,192,8,1,0,132,36, - 8,0,224,3,0,0,0,0,0,0,0,0, - 0,0,0,0,208,255,189,39,24,0,176,175, - 33,128,128,0,36,0,179,175,33,152,160,0, - 28,0,177,175,33,136,0,0,32,0,178,175, - 33,144,0,2,5,0,64,22,40,0,191,175, - 54,0,96,22,33,16,0,0,22,69,192,8, - 0,0,32,174,0,0,67,130,0,0,0,0, - 233,68,192,8,32,0,2,36,0,0,3,130, - 32,0,2,36,253,255,98,16,1,0,16,38, - 255,255,16,38,9,0,2,36,249,255,98,16, - 1,0,16,38,255,255,16,38,0,0,3,130, - 45,0,2,36,4,0,98,20,43,0,2,36, - 1,0,16,38,250,68,192,8,1,0,17,36, - 3,0,98,20,33,32,0,2,1,0,16,38, - 33,32,0,2,44,69,192,12,16,0,165,39, - 7,0,96,18,33,24,64,0,16,0,162,143, - 0,0,0,0,2,0,80,20,0,0,0,0, - 33,16,64,2,0,0,98,174,6,0,32,18, - 0,128,2,60,43,16,67,0,5,0,64,20, - 255,127,2,60,19,69,192,8,33,16,96,0, - 5,0,97,4,255,127,2,60,7,0,32,18, - 255,255,66,52,22,69,192,8,0,128,2,60, - 33,16,96,0,2,0,32,18,0,0,0,0, - 35,16,2,0,40,0,191,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,0,0,0,0, - 0,0,0,0,0,0,0,0,208,255,130,36, - 10,0,66,44,7,0,64,20,1,0,2,36, - 191,255,130,36,6,0,66,44,3,0,64,20, - 1,0,2,36,159,255,130,36,6,0,66,44, - 8,0,224,3,0,0,0,0,248,255,189,39, - 0,0,176,175,33,56,0,0,33,72,0,0, - 33,80,0,0,33,112,0,0,5,0,128,20, - 33,200,128,0,110,0,160,20,33,16,0,0, - 163,69,192,8,0,0,192,173,0,0,131,128, - 32,0,2,36,253,255,98,16,1,0,132,36, - 255,255,132,36,9,0,2,36,249,255,98,16, - 1,0,132,36,255,255,132,36,0,0,131,128, - 43,0,2,36,3,0,98,20,45,0,2,36, - 75,69,192,8,1,0,132,36,3,0,98,20, - 0,0,0,0,1,0,132,36,1,0,14,36, - 3,0,192,16,16,0,2,36,16,0,194,20, - 0,0,0,0,0,0,131,128,48,0,2,36, - 9,0,98,20,10,0,2,36,1,0,131,128, - 88,0,2,36,3,0,98,16,120,0,2,36, - 3,0,98,20,8,0,2,36,16,0,2,36, - 2,0,132,36,2,0,192,20,0,0,0,0, - 33,48,64,0,0,0,131,128,255,255,2,36, - 27,0,70,0,2,0,192,20,0,0,0,0, - 13,0,7,0,18,64,0,0,16,192,0,0, - 0,0,0,0,0,0,0,0,42,0,96,16, - 48,0,98,44,48,0,207,36,11,0,205,40, - 87,0,204,36,55,0,203,36,5,0,64,20, - 43,16,111,0,3,0,64,16,0,0,0,0, - 130,69,192,8,208,255,99,36,30,0,160,21, - 97,0,98,44,6,0,64,20,65,0,98,44, - 43,16,108,0,3,0,64,16,65,0,98,44, - 130,69,192,8,169,255,99,36,21,0,64,20, - 43,16,107,0,19,0,64,16,0,0,0,0, - 201,255,99,36,43,16,7,1,6,0,64,20, - 1,0,9,36,6,0,232,20,24,0,230,0, - 43,16,3,3,3,0,64,16,0,0,0,0, - 1,0,10,36,24,0,230,0,1,0,132,36, - 18,128,0,0,33,56,3,2,0,0,131,128, - 0,0,0,0,220,255,96,20,48,0,98,44, - 5,0,64,17,0,0,0,0,13,0,160,16, - 255,255,2,36,163,69,192,8,0,0,164,172, - 6,0,160,16,0,0,0,0,3,0,32,21, - 0,0,0,0,160,69,192,8,0,0,185,172, - 0,0,164,172,2,0,192,17,33,16,224,0, - 35,16,2,0,0,0,176,143,8,0,224,3, - 8,0,189,39,0,0,0,0,0,0,0,0, - 200,255,189,39,16,0,176,175,7,162,16,60, - 236,0,16,54,255,240,3,60,255,255,99,52, - 63,0,132,48,36,0,181,175,128,1,149,52, - 24,0,178,175,0,1,18,36,20,0,177,175, - 7,162,17,60,236,0,49,54,44,0,183,175, - 0,4,23,60,32,0,180,175,255,251,20,60, - 255,255,148,54,40,0,182,175,0,1,22,60, - 28,0,179,175,255,254,19,60,48,0,191,175, - 0,0,2,142,0,0,0,0,36,16,67,0, - 224,133,130,175,0,0,2,174,76,63,192,12, - 255,255,115,54,224,133,130,143,0,2,3,60, - 37,16,67,0,224,133,130,175,0,0,2,174, - 36,16,85,2,5,0,64,16,0,0,0,0, - 
224,133,130,143,0,0,0,0,214,69,192,8, - 37,16,87,0,224,133,130,143,0,0,0,0, - 36,16,84,0,224,133,130,175,76,63,192,12, - 0,0,34,174,224,133,130,143,0,0,0,0, - 37,16,86,0,224,133,130,175,0,0,34,174, - 76,63,192,12,66,144,18,0,224,133,130,143, - 0,0,0,0,36,16,83,0,224,133,130,175, - 0,0,34,174,255,255,66,50,230,255,64,20, - 36,16,85,2,33,136,0,0,16,0,16,36, - 7,162,18,60,236,0,82,54,0,1,22,60, - 0,8,21,60,255,254,19,60,255,255,115,54, - 255,255,20,36,76,63,192,12,0,0,0,0, - 224,133,130,143,0,0,0,0,37,16,86,0, - 224,133,130,175,76,63,192,12,0,0,66,174, - 7,0,0,18,0,0,0,0,0,0,66,142, - 0,0,0,0,36,16,85,0,2,0,64,16, - 64,136,17,0,1,0,49,54,76,63,192,12, - 255,255,16,38,224,133,130,143,0,0,0,0, - 36,16,83,0,224,133,130,175,0,0,66,174, - 232,255,20,22,7,162,4,60,236,0,132,52, - 255,253,3,60,224,133,130,143,255,255,99,52, - 36,16,67,0,224,133,130,175,0,0,130,172, - 255,255,34,50,48,0,191,143,44,0,183,143, - 40,0,182,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,56,0,189,39, - 200,255,189,39,48,0,190,175,33,240,160,0, - 40,0,182,175,63,0,150,48,33,32,192,2, - 52,0,191,175,44,0,183,175,36,0,181,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 20,0,177,175,168,69,192,12,16,0,176,175, - 33,152,64,0,255,255,195,51,255,255,98,50, - 3,0,98,20,7,162,16,60,131,71,192,8, - 1,0,2,36,236,0,16,54,255,252,3,60, - 255,255,99,52,0,1,18,36,7,162,17,60, - 236,0,49,54,255,251,21,60,255,255,181,54, - 0,1,23,60,255,254,20,60,224,133,130,143, - 0,0,0,0,36,16,67,0,224,133,130,175, - 0,0,2,174,76,63,192,12,255,255,148,54, - 224,133,130,143,0,2,3,60,37,16,67,0, - 224,133,130,175,0,0,2,174,48,1,66,50, - 5,0,64,16,0,4,6,60,224,133,130,143, - 0,0,0,0,83,70,192,8,37,16,70,0, - 224,133,130,143,0,0,0,0,36,16,85,0, - 224,133,130,175,76,63,192,12,0,0,34,174, - 224,133,130,143,0,0,0,0,37,16,87,0, - 224,133,130,175,0,0,34,174,76,63,192,12, - 66,144,18,0,224,133,130,143,0,0,0,0, - 36,16,84,0,224,133,130,175,0,0,34,174, - 255,255,66,50,230,255,64,20,48,1,66,50, - 255,255,195,51,255,255,98,50,39,16,2,0, - 36,24,98,0,88,0,96,16,192,1,213,54, - 7,162,16,60,236,0,16,54,255,252,3,60, - 255,255,99,52,0,1,18,36,7,162,17,60, - 236,0,49,54,255,251,20,60,255,255,148,54, - 0,1,23,60,255,254,19,60,224,133,130,143, - 0,0,0,0,36,16,67,0,224,133,130,175, - 0,0,2,174,76,63,192,12,255,255,115,54, - 224,133,130,143,0,2,3,60,37,16,67,0, - 224,133,130,175,0,0,2,174,36,16,85,2, - 5,0,64,16,0,4,6,60,224,133,130,143, - 0,0,0,0,140,70,192,8,37,16,70,0, - 224,133,130,143,0,0,0,0,36,16,84,0, - 224,133,130,175,76,63,192,12,0,0,34,174, - 224,133,130,143,0,0,0,0,37,16,87,0, - 224,133,130,175,0,0,34,174,76,63,192,12, - 66,144,18,0,224,133,130,143,0,0,0,0, - 36,16,83,0,224,133,130,175,0,0,34,174, - 255,255,66,50,230,255,64,20,36,16,85,2, - 7,162,16,60,236,0,16,54,255,252,3,60, - 224,133,130,143,255,255,99,52,36,16,67,0, - 224,133,130,175,76,63,192,12,0,0,2,174, - 224,133,130,143,0,2,3,60,37,16,67,0, - 224,133,130,175,0,0,2,174,0,0,2,142, - 0,8,3,60,36,16,67,0,11,0,64,20, - 7,162,4,60,7,162,16,60,236,0,16,54, - 0,8,17,60,76,63,192,12,0,0,0,0, - 0,0,2,142,0,0,0,0,36,16,81,0, - 250,255,64,16,7,162,4,60,236,0,132,52, - 255,253,3,60,224,133,130,143,255,255,99,52, - 36,16,67,0,224,133,130,175,0,0,130,172, - 255,255,195,51,255,255,2,52,133,0,98,16, - 64,1,213,54,7,162,16,60,236,0,16,54, - 255,252,3,60,255,255,99,52,0,1,18,36, - 7,162,17,60,236,0,49,54,255,251,20,60, - 255,255,148,54,0,1,23,60,255,254,19,60, - 224,133,130,143,0,0,0,0,36,16,67,0, - 224,133,130,175,0,0,2,174,76,63,192,12, - 
255,255,115,54,224,133,130,143,0,2,3,60, - 37,16,67,0,224,133,130,175,0,0,2,174, - 36,16,85,2,5,0,64,16,0,4,6,60, - 224,133,130,143,0,0,0,0,231,70,192,8, - 37,16,70,0,224,133,130,143,0,0,0,0, - 36,16,84,0,224,133,130,175,76,63,192,12, - 0,0,34,174,224,133,130,143,0,0,0,0, - 37,16,87,0,224,133,130,175,0,0,34,174, - 76,63,192,12,66,144,18,0,224,133,130,143, - 0,0,0,0,36,16,83,0,224,133,130,175, - 0,0,34,174,255,255,66,50,230,255,64,20, - 36,16,85,2,33,160,192,3,7,162,2,60, - 236,0,66,52,0,128,17,52,7,162,16,60, - 236,0,16,54,0,4,23,60,255,251,19,60, - 255,255,115,54,0,1,21,60,255,254,18,60, - 255,255,82,54,224,133,131,143,0,2,4,60, - 37,24,100,0,224,133,131,175,0,0,67,172, - 36,16,52,2,5,0,64,16,0,0,0,0, - 224,133,130,143,0,0,0,0,20,71,192,8, - 37,16,87,0,224,133,130,143,0,0,0,0, - 36,16,83,0,224,133,130,175,76,63,192,12, - 0,0,2,174,224,133,130,143,0,0,0,0, - 37,16,85,0,224,133,130,175,0,0,2,174, - 76,63,192,12,66,136,17,0,224,133,130,143, - 0,0,0,0,36,16,82,0,224,133,130,175, - 0,0,2,174,255,255,34,50,230,255,64,20, - 36,16,52,2,7,162,16,60,236,0,16,54, - 255,252,3,60,224,133,130,143,255,255,99,52, - 36,16,67,0,224,133,130,175,76,63,192,12, - 0,0,2,174,224,133,130,143,0,2,3,60, - 37,16,67,0,224,133,130,175,0,0,2,174, - 0,0,2,142,0,8,3,60,36,16,67,0, - 11,0,64,20,7,162,4,60,7,162,16,60, - 236,0,16,54,0,8,17,60,76,63,192,12, - 0,0,0,0,0,0,2,142,0,0,0,0, - 36,16,81,0,250,255,64,16,7,162,4,60, - 236,0,132,52,255,253,3,60,224,133,130,143, - 255,255,99,52,36,16,67,0,224,133,130,175, - 0,0,130,172,7,162,16,60,236,0,16,54, - 255,252,3,60,255,255,99,52,0,1,18,36, - 7,162,17,60,236,0,49,54,0,4,23,60, - 255,251,20,60,255,255,148,54,0,1,21,60, - 255,254,19,60,224,133,130,143,0,0,0,0, - 36,16,67,0,224,133,130,175,0,0,2,174, - 76,63,192,12,255,255,115,54,224,133,130,143, - 0,2,3,60,37,16,67,0,224,133,130,175, - 0,0,2,174,0,1,66,50,5,0,64,16, - 0,0,0,0,224,133,130,143,0,0,0,0, - 108,71,192,8,37,16,87,0,224,133,130,143, - 0,0,0,0,36,16,84,0,224,133,130,175, - 76,63,192,12,0,0,34,174,224,133,130,143, - 0,0,0,0,37,16,85,0,224,133,130,175, - 0,0,34,174,76,63,192,12,66,144,18,0, - 224,133,130,143,0,0,0,0,36,16,83,0, - 224,133,130,175,0,0,34,174,255,255,66,50, - 230,255,64,20,0,1,66,50,168,69,192,12, - 33,32,192,2,38,16,194,3,255,255,66,48, - 1,0,66,44,52,0,191,143,48,0,190,143, - 44,0,183,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 56,0,189,39,0,0,0,0,248,255,189,39, - 255,255,195,36,6,0,192,16,33,16,128,0, - 255,255,6,36,0,0,133,160,255,255,99,36, - 253,255,102,20,1,0,132,36,8,0,189,39, - 8,0,224,3,0,0,0,0,159,71,192,8, - 33,24,0,0,1,0,99,36,0,0,130,128, - 0,0,0,0,252,255,64,20,1,0,132,36, - 8,0,224,3,33,16,96,0,0,0,0,0, - 0,0,0,0,0,0,0,0,255,255,198,36, - 10,0,192,16,0,0,0,0,0,0,131,128, - 0,0,162,128,0,0,0,0,5,0,98,20, - 0,0,0,0,1,0,132,36,255,255,198,36, - 248,255,192,20,1,0,165,36,0,0,131,144, - 0,0,162,144,0,0,0,0,8,0,224,3, - 35,16,98,0,0,0,0,0,0,0,0,0, - 0,0,0,0,196,71,192,8,240,255,189,39, - 0,0,163,128,3,22,2,0,8,0,67,20, - 0,0,0,0,1,0,132,36,1,0,165,36, - 0,0,130,128,0,0,131,144,0,0,0,0, - 246,255,64,20,0,22,3,0,0,0,131,144, - 0,0,162,144,0,0,0,0,35,16,98,0, - 8,0,224,3,16,0,189,39,0,0,0,0, - 255,255,198,36,9,0,192,4,33,16,0,0, - 0,0,130,144,0,0,0,0,5,0,69,16, - 33,16,128,0,255,255,198,36,250,255,193,4, - 1,0,132,36,33,16,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,2,0,192,20,255,255,2,36, - 1,0,2,36,8,0,224,3,0,0,226,172, - 232,255,189,39,20,0,191,175,16,0,176,175, - 40,0,176,143,33,32,192,0,4,0,5,142, - 
0,0,0,0,15,86,192,12,255,255,230,48, - 3,0,64,16,255,255,2,36,243,71,192,8, - 0,0,2,174,0,0,0,174,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 208,255,189,39,40,0,191,175,33,24,128,0, - 64,0,162,143,32,0,160,175,36,0,162,175, - 12,0,66,148,0,0,0,0,2,0,64,20, - 33,32,160,0,120,5,2,36,255,255,66,48, - 16,0,162,175,1,131,2,60,148,31,66,36, - 20,0,162,175,1,131,2,60,128,31,66,36, - 24,0,162,175,32,0,162,39,28,0,162,175, - 166,85,192,12,33,40,96,0,32,0,162,143, - 40,0,191,143,48,0,189,39,8,0,224,3, - 0,0,0,0,0,0,0,0,88,0,131,148, - 4,0,2,36,9,0,98,20,0,0,0,0, - 33,72,192,8,116,0,132,36,33,16,69,0, - 128,16,2,0,8,0,131,140,0,0,0,0, - 46,72,192,8,33,16,67,0,104,0,132,36, - 12,0,128,16,33,16,0,0,4,0,130,140, - 0,0,0,0,42,16,162,0,243,255,64,20, - 0,17,5,0,4,0,130,140,12,0,132,140, - 0,0,0,0,247,255,128,20,35,40,162,0, - 33,16,0,0,8,0,224,3,0,0,0,0, - 88,0,131,148,4,0,2,36,3,0,98,20, - 33,48,0,0,55,72,192,8,116,0,132,36, - 104,0,132,36,8,0,130,140,0,0,0,0, - 43,16,162,0,14,0,64,16,255,255,2,36, - 92,72,192,8,0,0,0,0,35,24,163,0, - 0,17,3,0,35,16,67,0,0,26,2,0, - 33,16,67,0,0,28,2,0,33,16,67,0, - 35,16,2,0,131,16,2,0,92,72,192,8, - 33,16,194,0,18,0,128,16,0,0,0,0, - 4,0,131,140,0,0,0,0,0,17,3,0, - 33,16,67,0,128,16,2,0,8,0,131,140, - 0,0,0,0,33,16,67,0,43,16,162,0, - 233,255,64,20,0,0,0,0,4,0,130,140, - 12,0,132,140,0,0,0,0,241,255,128,20, - 33,48,194,0,255,255,2,36,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 8,0,130,36,144,0,163,140,120,132,135,39, - 2,0,96,16,20,0,137,36,33,56,96,0, - 0,0,72,140,4,0,68,140,132,72,192,8, - 0,0,0,0,30,0,0,25,0,0,0,0, - 4,0,227,140,0,0,0,0,4,0,98,140, - 0,0,0,0,55,0,64,16,255,255,2,36, - 0,0,135,140,0,0,98,140,0,0,0,0, - 8,0,71,16,0,0,0,0,8,0,99,36, - 4,0,98,140,0,0,0,0,248,255,64,20, - 255,255,2,36,168,72,192,8,0,0,0,0, - 0,0,130,140,0,0,0,0,4,0,34,173, - 4,0,103,140,255,255,8,37,4,0,132,36, - 0,0,226,148,0,0,0,0,1,0,66,48, - 226,255,64,16,0,0,0,0,0,0,226,148, - 0,0,0,0,64,0,66,48,27,0,64,20, - 255,255,2,36,0,0,226,148,0,0,0,0, - 1,0,66,48,17,0,64,16,0,0,0,0, - 13,0,192,16,1,0,2,36,64,0,162,140, - 0,0,0,0,9,0,64,20,1,0,2,36, - 28,0,226,140,4,0,163,140,0,0,0,0, - 36,16,67,0,3,0,64,20,1,0,2,36, - 168,72,192,8,255,255,2,36,164,72,192,8, - 0,0,34,165,0,0,32,165,8,0,40,173, - 12,0,36,173,16,0,39,173,33,16,0,0, - 8,0,224,3,0,0,0,0,200,255,189,39, - 48,0,191,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,152,128,0, - 33,128,160,0,33,144,192,0,0,0,4,142, - 0,0,0,0,48,0,130,40,2,0,64,16, - 8,0,113,38,48,0,4,36,0,0,36,174, - 9,50,192,12,128,32,4,0,3,0,64,20, - 4,0,34,174,214,72,192,8,255,255,2,36, - 144,0,66,142,120,132,132,39,2,0,64,16, - 0,0,0,0,33,32,64,0,16,0,160,175, - 20,0,179,175,24,0,178,175,0,0,5,142, - 4,0,6,142,0,0,0,0,221,72,192,12, - 33,56,32,2,33,128,64,0,4,0,0,26, - 0,0,0,0,0,0,34,142,214,72,192,8, - 0,0,0,0,110,86,192,12,33,32,32,2, - 33,16,0,2,48,0,191,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,56,0,189,39,184,255,189,39, - 68,0,191,175,64,0,190,175,60,0,183,175, - 56,0,182,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,144,128,0,33,176,160,0, - 33,136,224,0,88,0,183,143,92,0,179,143, - 96,0,190,143,32,0,226,38,0,0,36,142, - 0,0,0,0,42,16,130,0,22,0,64,16, - 33,160,192,0,4,0,132,36,9,50,192,12, - 128,32,4,0,33,128,64,0,3,0,0,22, - 33,32,0,2,153,73,192,8,255,255,2,36, - 0,0,38,142,4,0,37,142,0,0,0,0, - 80,68,192,12,128,48,6,0,4,0,36,142, - 61,50,192,12,0,0,0,0,4,0,48,174, - 0,0,34,142,0,0,0,0,4,0,66,36, - 0,0,34,174,0,0,66,150,0,0,0,0, - 1,0,66,48,96,0,64,20,0,0,0,0, - 
33,0,192,30,0,0,0,0,4,0,80,142, - 0,0,0,0,4,0,2,142,0,0,0,0, - 108,0,64,16,128,160,23,0,1,0,242,38, - 4,0,34,142,0,0,0,0,33,16,130,2, - 0,0,3,142,0,0,0,0,0,0,67,172, - 0,0,2,142,0,0,0,0,24,0,98,174, - 16,0,178,175,20,0,179,175,24,0,190,175, - 4,0,4,142,33,40,0,0,33,48,0,0, - 221,72,192,12,33,56,32,2,112,0,64,20, - 8,0,16,38,4,0,2,142,0,0,0,0, - 234,255,64,20,33,16,0,0,153,73,192,8, - 0,0,0,0,4,0,80,142,0,0,0,0, - 4,0,2,142,0,0,0,0,76,0,64,16, - 128,168,23,0,1,0,242,38,0,0,3,142, - 0,0,132,142,0,0,0,0,43,16,100,0, - 42,0,64,20,0,0,0,0,19,0,100,20, - 255,255,197,38,4,0,34,142,0,0,0,0, - 33,16,162,2,0,0,67,172,0,0,2,142, - 0,0,0,0,24,0,98,174,16,0,178,175, - 20,0,179,175,24,0,190,175,4,0,4,142, - 4,0,134,38,221,72,192,12,33,56,32,2, - 25,0,64,16,8,0,16,38,153,73,192,8, - 0,0,0,0,0,0,130,142,0,0,0,0, - 43,16,67,0,17,0,64,16,33,40,0,0, - 4,0,34,142,0,0,0,0,33,16,162,2, - 0,0,67,172,0,0,2,142,0,0,0,0, - 24,0,98,174,16,0,178,175,20,0,179,175, - 24,0,190,175,4,0,4,142,33,48,0,0, - 221,72,192,12,33,56,32,2,52,0,64,20, - 0,0,0,0,8,0,16,38,4,0,2,142, - 0,0,0,0,205,255,64,20,33,16,0,0, - 153,73,192,8,0,0,0,0,0,0,66,150, - 0,0,0,0,64,0,66,48,40,0,64,20, - 33,16,0,0,3,0,66,146,0,0,0,0, - 1,0,66,48,11,0,64,16,33,24,64,2, - 64,0,194,143,0,0,0,0,9,0,64,20, - 0,0,0,0,28,0,98,140,4,0,195,143, - 0,0,0,0,36,16,67,0,3,0,64,20, - 0,0,0,0,153,73,192,8,33,16,0,0, - 14,0,192,26,33,16,246,2,0,0,34,174, - 4,0,36,142,128,128,23,0,33,32,4,2, - 33,40,128,2,80,68,192,12,128,48,22,0, - 28,0,118,174,4,0,34,142,0,0,0,0, - 33,128,2,2,149,73,192,8,32,0,112,174, - 0,0,55,174,28,0,96,174,32,0,96,174, - 36,0,114,174,1,0,2,36,20,0,98,166, - 1,0,2,36,68,0,191,143,64,0,190,143, - 60,0,183,143,56,0,182,143,52,0,181,143, - 48,0,180,143,44,0,179,143,40,0,178,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 72,0,189,39,3,0,160,28,33,16,0,0, - 0,0,224,172,1,0,2,36,8,0,224,3, - 0,0,0,0,208,255,189,39,44,0,191,175, - 40,0,178,175,36,0,177,175,32,0,176,175, - 33,144,128,0,33,136,224,0,64,0,176,143, - 0,0,0,0,6,0,160,24,24,0,160,175, - 17,0,2,146,0,0,0,0,18,0,66,52, - 200,73,192,8,17,0,2,162,33,32,32,2, - 33,40,0,2,1,0,6,36,253,76,192,12, - 24,0,167,39,36,0,2,142,16,0,176,175, - 8,0,66,140,33,32,64,2,1,0,5,36, - 24,0,166,39,9,248,64,0,33,56,32,2, - 44,0,191,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,48,0,189,39, - 224,255,189,39,28,0,191,175,24,0,178,175, - 20,0,177,175,33,144,128,0,33,136,160,0, - 31,0,81,18,16,0,176,175,4,0,80,142, - 0,0,0,0,4,0,2,142,0,0,0,0, - 10,0,64,16,0,0,0,0,4,0,4,142, - 0,0,0,0,206,73,192,12,33,40,32,2, - 8,0,16,38,4,0,2,142,0,0,0,0, - 248,255,64,20,0,0,0,0,0,0,66,150, - 0,0,0,0,32,0,66,48,4,0,64,16, - 0,0,0,0,4,0,68,142,61,50,192,12, - 0,0,0,0,0,0,66,150,0,0,0,0, - 16,0,66,48,3,0,64,16,0,0,0,0, - 61,50,192,12,33,32,64,2,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,120,132,131,39, - 2,0,128,16,0,0,0,0,33,24,128,0, - 0,0,167,140,4,0,165,140,26,74,192,8, - 0,0,0,0,28,0,224,24,0,0,0,0, - 4,0,99,140,0,0,0,0,4,0,98,140, - 0,0,0,0,11,0,64,16,0,0,0,0, - 0,0,164,140,0,0,98,140,0,0,0,0, - 9,0,68,16,0,0,0,0,8,0,99,36, - 4,0,98,140,0,0,0,0,248,255,64,20, - 0,0,0,0,0,0,192,172,33,74,192,8, - 2,0,2,36,4,0,99,140,255,255,231,36, - 4,0,165,36,0,0,98,148,0,0,0,0, - 1,0,66,48,228,255,64,16,0,0,0,0, - 0,0,195,172,42,16,7,0,8,0,224,3, - 0,0,0,0,208,255,189,39,44,0,191,175, - 40,0,182,175,36,0,181,175,32,0,180,175, - 28,0,179,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,168,192,0,0,0,162,140, - 0,0,0,0,3,0,64,28,33,48,0,0, - 220,74,192,8,5,0,2,36,120,132,147,39, - 
2,0,128,16,0,0,224,172,33,152,128,0, - 0,0,177,140,4,0,180,140,84,74,192,8, - 0,0,0,0,29,0,32,26,0,0,0,0, - 4,0,102,142,0,0,0,0,4,0,194,140, - 0,0,0,0,23,0,64,16,0,0,0,0, - 0,0,131,142,0,0,194,140,0,0,0,0, - 6,0,67,16,0,0,0,0,8,0,198,36, - 4,0,194,140,0,0,0,0,248,255,64,20, - 0,0,0,0,4,0,194,140,0,0,0,0, - 9,0,64,16,0,0,0,0,255,255,49,38, - 4,0,148,38,33,152,64,0,0,0,98,150, - 0,0,0,0,1,0,66,48,227,255,64,16, - 0,0,0,0,29,0,32,22,0,0,0,0, - 0,0,99,150,0,0,0,0,1,0,98,48, - 11,0,64,16,4,0,98,48,123,0,64,16, - 3,0,2,36,0,0,162,150,0,0,0,0, - 1,0,66,48,118,0,64,16,4,0,2,36, - 0,0,243,172,219,74,192,8,4,0,213,172, - 0,0,98,150,0,0,0,0,4,0,66,48, - 110,0,64,16,3,0,2,36,0,0,162,150, - 0,0,0,0,1,0,66,48,105,0,64,20, - 4,0,2,36,0,0,243,172,219,74,192,8, - 4,0,213,172,0,0,98,150,0,0,0,0, - 1,0,66,48,97,0,64,20,2,0,2,36, - 2,0,34,42,23,0,64,20,33,144,160,2, - 56,0,22,36,9,50,192,12,16,0,4,36, - 33,128,64,0,40,0,0,18,128,16,17,0, - 33,16,84,0,252,255,66,140,0,0,0,0, - 0,0,2,174,4,0,18,174,8,0,0,174, - 12,0,0,174,9,50,192,12,8,0,4,36, - 33,144,64,0,24,0,64,18,255,255,49,38, - 0,0,86,166,2,0,34,42,236,255,64,16, - 4,0,80,174,4,0,102,142,0,0,0,0, - 4,0,194,140,0,0,0,0,6,0,64,16, - 1,0,17,36,8,0,198,36,4,0,194,140, - 0,0,0,0,252,255,64,20,1,0,49,38, - 1,0,36,38,9,50,192,12,192,32,4,0, - 33,128,64,0,12,0,0,22,33,32,64,2, - 173,74,192,8,0,0,0,0,0,0,18,142, - 0,0,0,0,61,50,192,12,33,32,0,2, - 33,32,64,2,206,73,192,12,33,40,160,2, - 220,74,192,8,1,0,2,36,4,0,102,142, - 0,0,0,0,192,74,192,8,33,168,0,2, - 4,0,194,140,0,0,0,0,14,0,64,16, - 0,0,0,0,0,0,194,140,4,0,195,140, - 0,0,2,174,4,0,3,174,8,0,198,36, - 8,0,16,38,255,255,49,38,0,0,194,140, - 0,0,131,142,0,0,0,0,43,16,67,0, - 240,255,64,20,0,0,0,0,0,0,130,142, - 0,0,0,0,0,0,2,174,4,0,18,174, - 8,0,4,38,33,40,192,0,80,68,192,12, - 192,48,17,0,4,0,102,142,4,0,117,174, - 0,0,98,150,0,0,0,0,32,0,66,48, - 3,0,64,16,0,0,0,0,61,50,192,12, - 33,32,192,0,0,0,98,150,0,0,0,0, - 32,0,66,52,0,0,98,166,33,16,0,0, - 44,0,191,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 48,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,0,0,162,140,0,0,0,0, - 85,0,64,24,33,16,0,0,120,132,144,39, - 2,0,128,16,0,0,0,0,33,128,128,0, - 0,0,164,140,4,0,165,140,0,0,0,0, - 0,0,168,140,0,0,2,150,0,0,0,0, - 33,24,64,0,1,0,66,48,34,0,64,20, - 33,56,0,2,32,0,128,24,8,0,98,48, - 4,0,6,142,5,0,64,16,0,0,0,0, - 12,0,194,140,0,0,0,0,3,0,64,16, - 0,0,0,0,33,56,0,2,0,0,168,140, - 0,0,195,140,0,0,162,140,0,0,0,0, - 10,0,98,16,0,0,0,0,0,0,163,140, - 4,0,194,140,0,0,0,0,20,0,64,16, - 8,0,198,36,0,0,194,140,0,0,0,0, - 249,255,67,20,0,0,0,0,255,255,132,36, - 4,0,208,140,0,0,0,0,0,0,3,150, - 0,0,0,0,1,0,98,48,224,255,64,16, - 4,0,165,36,36,0,128,20,33,16,0,0, - 0,0,2,150,0,0,0,0,2,0,66,48, - 3,0,64,20,0,0,0,0,65,75,192,8, - 33,16,0,0,4,0,230,140,0,0,0,0, - 0,0,194,140,0,0,0,0,7,0,72,16, - 0,0,0,0,8,0,198,36,0,0,194,140, - 0,0,0,0,253,255,72,20,8,0,198,36, - 248,255,198,36,4,0,199,140,0,0,0,0, - 10,0,224,16,33,32,224,0,8,0,194,140, - 12,0,195,140,0,0,194,172,4,0,195,172, - 8,0,198,36,4,0,194,140,0,0,0,0, - 248,255,64,20,33,32,224,0,206,73,192,12, - 33,40,0,2,33,16,0,2,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 192,255,189,39,56,0,191,175,52,0,181,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,136,128,0, - 33,144,160,0,33,152,192,0,33,160,224,0, - 80,0,181,143,0,0,0,0,36,0,162,142, - 0,0,0,0,20,0,80,140,33,32,128,2, - 48,72,192,12,33,40,160,2,16,0,3,142, - 
0,0,0,0,16,0,163,175,20,0,180,175, - 24,0,162,175,0,0,2,142,1,0,4,36, - 33,40,32,2,33,48,64,2,9,248,64,0, - 33,56,96,2,6,0,64,20,33,32,128,2, - 17,0,162,146,0,0,0,0,1,0,66,52, - 113,75,192,8,17,0,162,162,33,40,160,2, - 59,77,192,12,33,48,64,0,56,0,191,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,64,0,189,39,192,255,189,39, - 60,0,191,175,56,0,178,175,52,0,177,175, - 48,0,176,175,33,136,224,0,80,0,178,143, - 36,0,160,175,36,0,66,142,0,0,0,0, - 20,0,67,140,2,0,80,144,0,0,0,0, - 255,0,2,50,254,255,71,36,70,0,226,44, - 110,0,64,16,128,16,7,0,2,131,1,60, - 33,8,34,0,160,151,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,16,0,177,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,2,0,3,36,16,0,67,162, - 251,75,192,8,40,0,66,174,16,0,177,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,40,0,162,175,16,0,80,162, - 40,0,162,143,0,0,0,0,251,75,192,8, - 40,0,66,174,32,0,162,39,16,0,162,175, - 20,0,177,175,36,0,162,39,24,0,162,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,33,48,64,0,16,0,80,162, - 17,0,66,146,0,0,0,0,2,0,66,52, - 17,0,66,162,36,0,162,143,0,0,0,0, - 43,16,2,0,40,0,66,166,44,0,70,174, - 32,0,162,151,0,0,0,0,33,16,194,0, - 48,0,66,174,255,75,192,8,52,0,64,166, - 16,0,177,175,36,0,162,39,20,0,162,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,33,128,64,0,8,0,0,22, - 33,32,32,2,16,0,160,175,33,40,64,2, - 33,48,0,0,226,76,192,12,33,56,0,0, - 255,75,192,8,0,0,0,0,36,0,162,143, - 0,0,0,0,16,0,162,175,0,0,6,142, - 4,0,7,142,0,0,0,0,226,76,192,12, - 33,40,64,2,36,0,162,143,0,0,0,0, - 35,0,64,16,0,0,0,0,61,50,192,12, - 33,32,0,2,255,75,192,8,0,0,0,0, - 5,0,2,36,251,75,192,8,16,0,66,162, - 16,0,177,175,40,0,162,39,20,0,162,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,33,48,64,0,7,0,192,16, - 64,0,2,36,3,0,194,136,0,0,194,152, - 0,0,0,0,43,0,162,171,40,0,162,187, - 64,0,2,36,16,0,66,162,40,0,162,143, - 0,0,0,0,251,75,192,8,40,0,66,174, - 5,0,2,36,96,0,34,174,17,0,66,146, - 0,0,0,0,2,0,66,52,17,0,66,162, - 60,0,191,143,56,0,178,143,52,0,177,143, - 48,0,176,143,8,0,224,3,64,0,189,39, - 80,255,189,39,168,0,191,175,164,0,179,175, - 160,0,178,175,156,0,177,175,152,0,176,175, - 33,152,128,0,33,136,224,0,192,0,178,143, - 0,0,0,0,36,0,66,142,0,0,0,0, - 20,0,67,140,0,0,0,0,16,0,98,140, - 0,0,0,0,16,0,162,175,20,0,177,175, - 4,0,98,140,0,0,0,0,9,248,64,0, - 24,0,167,39,33,128,64,0,6,0,0,22, - 33,32,32,2,17,0,66,146,0,0,0,0, - 18,0,66,52,45,76,192,8,17,0,66,162, - 33,40,64,2,33,48,0,2,253,76,192,12, - 24,0,167,39,16,0,178,175,33,32,96,2, - 33,40,0,2,24,0,166,39,122,75,192,12, - 33,56,32,2,168,0,191,143,164,0,179,143, - 160,0,178,143,156,0,177,143,152,0,176,143, - 8,0,224,3,176,0,189,39,192,255,189,39, - 56,0,191,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,152,128,0,33,160,160,0, - 33,168,192,0,33,136,224,0,80,0,178,143, - 0,0,0,0,36,0,66,142,0,0,0,0, - 20,0,80,140,33,32,32,2,48,72,192,12, - 33,40,64,2,16,0,3,142,0,0,0,0, - 16,0,163,175,20,0,177,175,24,0,162,175, - 0,0,2,142,33,32,0,0,33,40,96,2, - 33,48,128,2,9,248,64,0,33,56,160,2, - 5,0,64,16,33,32,32,2,200,76,192,12, - 33,40,64,2,95,76,192,8,0,0,0,0, - 16,0,178,175,33,32,96,2,33,40,128,2, - 33,48,160,2,122,75,192,12,33,56,32,2, - 56,0,191,143,52,0,181,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,64,0,189,39, - 192,255,189,39,56,0,191,175,52,0,181,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,152,128,0, - 33,160,160,0,33,168,192,0,80,0,177,143, - 
0,0,0,0,36,0,34,142,0,0,0,0, - 20,0,82,140,2,0,66,144,0,0,0,0, - 254,255,67,36,70,0,98,44,57,0,64,16, - 33,128,224,0,128,16,3,0,2,131,1,60, - 33,8,34,0,184,152,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,33,32,0,2, - 48,72,192,12,33,40,32,2,40,0,35,142, - 0,0,0,0,16,0,163,175,20,0,176,175, - 173,76,192,8,24,0,162,175,33,32,0,2, - 48,72,192,12,33,40,32,2,44,0,35,142, - 0,0,0,0,16,0,163,175,48,0,35,142, - 44,0,36,142,0,0,0,0,35,24,100,0, - 170,76,192,8,255,255,99,48,33,32,0,2, - 48,72,192,12,33,40,32,2,40,0,35,142, - 0,0,0,0,16,0,163,175,44,0,35,142, - 0,0,0,0,171,76,192,8,20,0,163,175, - 33,32,0,2,48,72,192,12,33,40,32,2, - 40,0,35,38,16,0,163,175,4,0,3,36, - 20,0,163,175,24,0,176,175,28,0,162,175, - 12,0,66,142,33,32,96,2,33,40,128,2, - 16,0,71,142,0,0,0,0,9,248,64,0, - 33,48,160,2,184,76,192,8,0,0,0,0, - 5,0,2,36,96,0,2,174,17,0,34,146, - 0,0,0,0,2,0,66,52,17,0,34,162, - 56,0,191,143,52,0,181,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,64,0,189,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 224,255,189,39,24,0,191,175,20,0,177,175, - 16,0,176,175,33,128,128,0,64,0,2,142, - 0,0,0,0,7,0,64,20,33,136,160,0, - 2,0,2,36,48,72,192,12,96,0,2,174, - 1,0,66,36,217,76,192,8,100,0,2,174, - 129,0,2,36,16,0,34,162,17,0,34,146, - 0,0,0,0,2,0,66,52,17,0,34,162, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 20,0,191,175,16,0,176,175,33,128,128,0, - 33,64,160,0,33,32,192,0,33,40,224,0, - 40,0,162,143,17,0,3,145,0,0,0,0, - 2,0,99,52,17,0,3,161,6,0,3,36, - 4,0,64,16,16,0,3,161,40,0,4,173, - 249,76,192,8,44,0,5,173,80,86,192,12, - 40,0,6,37,2,0,64,16,5,0,2,36, - 96,0,2,174,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,200,255,189,39, - 48,0,191,175,44,0,183,175,40,0,182,175, - 36,0,181,175,32,0,180,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,176,128,0,33,144,160,0,33,152,192,0, - 37,0,96,18,33,184,224,0,28,0,84,38, - 8,0,67,142,28,0,66,142,0,0,0,0, - 35,128,98,0,42,16,83,0,23,0,64,16, - 33,168,19,2,9,50,192,12,128,32,21,0, - 33,136,64,0,8,0,32,22,128,128,16,0, - 5,0,2,36,96,0,194,174,17,0,66,146, - 0,0,0,0,2,0,66,52,48,77,192,8, - 17,0,66,162,33,32,32,2,12,0,69,142, - 0,0,0,0,80,68,192,12,33,48,0,2, - 110,86,192,12,8,0,68,38,12,0,81,174, - 33,128,48,2,4,0,144,174,4,0,132,142, - 33,40,224,2,80,68,192,12,128,48,19,0, - 0,0,147,174,8,0,85,174,48,0,191,143, - 44,0,183,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 56,0,189,39,224,255,189,39,24,0,191,175, - 20,0,177,175,16,0,176,175,33,136,128,0, - 5,0,192,24,33,128,160,0,3,0,2,36, - 96,0,34,174,95,77,192,8,100,0,38,174, - 19,0,195,36,19,0,98,44,9,0,64,16, - 128,16,3,0,2,131,1,60,33,8,34,0, - 208,153,34,140,0,0,0,0,8,0,64,0, - 0,0,0,0,89,77,192,8,2,0,6,36, - 89,77,192,8,5,0,6,36,89,77,192,8, - 3,0,6,36,89,77,192,8,1,0,6,36, - 35,48,6,0,96,0,38,174,33,32,32,2, - 48,72,192,12,33,40,0,2,1,0,66,36, - 100,0,34,174,17,0,2,146,0,0,0,0, - 1,0,66,52,17,0,2,162,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,200,255,189,39,52,0,191,175, - 48,0,190,175,44,0,183,175,40,0,182,175, - 36,0,181,175,32,0,180,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,152,128,0,33,168,160,0,33,160,192,0, - 76,0,182,143,80,0,183,143,84,0,190,143, - 92,0,177,143,0,0,0,0,209,83,192,12, - 33,144,224,0,33,128,64,0,3,0,0,22, - 4,0,2,36,168,77,192,8,33,16,0,0, - 88,0,2,166,64,0,19,174,33,32,64,2, - 72,0,165,143,0,0,0,0,80,86,192,12, - 92,0,6,38,255,255,3,36,23,0,67,16, - 0,0,0,0,3,0,194,138,0,0,194,154, - 
0,0,0,0,103,0,2,170,100,0,2,186, - 104,0,23,174,108,0,30,174,88,0,168,143, - 0,0,0,0,112,0,8,174,72,0,0,166, - 76,0,20,174,255,255,162,50,33,16,130,2, - 80,0,2,174,84,0,0,166,9,0,32,18, - 120,0,17,174,224,83,192,12,33,32,32,2, - 6,0,64,20,124,0,2,174,167,83,192,12, - 33,32,0,2,168,77,192,8,33,16,0,0, - 124,0,0,174,33,16,0,2,52,0,191,143, - 48,0,190,143,44,0,183,143,40,0,182,143, - 36,0,181,143,32,0,180,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,56,0,189,39,224,255,189,39, - 28,0,191,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,144,128,0,92,0,68,142, - 128,86,192,12,0,0,0,0,96,0,68,142, - 0,0,0,0,128,86,192,12,33,128,64,0, - 100,0,68,142,0,0,0,0,128,86,192,12, - 33,136,64,0,33,128,17,2,6,0,16,38, - 33,16,80,0,90,0,66,166,191,79,192,12, - 104,0,68,38,255,255,67,48,90,0,68,150, - 128,0,98,44,5,0,64,20,2,0,130,36, - 0,1,98,44,2,0,64,20,3,0,130,36, - 4,0,130,36,33,16,67,0,90,0,66,166, - 80,0,66,142,76,0,67,142,90,0,80,150, - 64,0,68,142,0,0,0,0,128,86,192,12, - 35,136,67,0,255,255,67,48,90,0,68,150, - 0,0,0,0,128,0,130,44,9,0,64,20, - 0,1,130,44,4,0,64,16,0,0,0,0, - 33,24,112,0,237,77,192,8,6,0,99,36, - 33,24,112,0,237,77,192,8,7,0,99,36, - 33,24,112,0,5,0,99,36,255,255,36,50, - 128,0,130,44,5,0,64,20,1,0,130,36, - 0,1,130,44,2,0,64,20,2,0,130,36, - 3,0,130,36,33,16,98,0,2,0,66,166, - 2,0,67,150,2,0,68,150,0,0,0,0, - 128,0,130,44,6,0,64,20,1,0,99,36, - 0,1,130,44,4,0,64,20,2,0,98,36, - 3,78,192,8,3,0,98,36,1,0,98,36, - 0,0,66,166,0,0,66,150,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,216,255,189,39, - 32,0,191,175,28,0,179,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,152,128,0, - 171,86,192,12,92,0,100,38,104,0,100,142, - 0,0,0,0,128,86,192,12,33,144,64,0, - 108,0,100,142,0,0,0,0,128,86,192,12, - 33,136,64,0,112,0,100,142,0,0,0,0, - 153,86,192,12,33,128,64,0,10,0,82,38, - 33,136,50,2,33,128,17,2,4,0,16,38, - 33,16,80,0,90,0,98,166,191,79,192,12, - 116,0,100,38,255,255,67,48,90,0,100,150, - 128,0,98,44,5,0,64,20,2,0,130,36, - 0,1,98,44,2,0,64,20,3,0,130,36, - 4,0,130,36,33,16,67,0,90,0,98,166, - 80,0,98,142,76,0,99,142,90,0,113,150, - 64,0,100,142,0,0,0,0,128,86,192,12, - 35,128,67,0,255,255,67,48,90,0,100,150, - 0,0,0,0,128,0,130,44,9,0,64,20, - 0,1,130,44,4,0,64,16,0,0,0,0, - 33,24,113,0,74,78,192,8,6,0,99,36, - 33,24,113,0,74,78,192,8,7,0,99,36, - 33,24,113,0,5,0,99,36,255,255,4,50, - 128,0,130,44,5,0,64,20,1,0,130,36, - 0,1,130,44,2,0,64,20,2,0,130,36, - 3,0,130,36,33,16,98,0,2,0,98,166, - 2,0,99,150,2,0,100,150,0,0,0,0, - 128,0,130,44,6,0,64,20,1,0,99,36, - 0,1,130,44,4,0,64,20,2,0,112,36, - 96,78,192,8,3,0,112,36,1,0,112,36, - 255,255,2,50,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,208,255,189,39, - 40,0,191,175,36,0,179,175,32,0,178,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,152,192,0,33,144,224,0,33,136,160,0, - 16,0,4,36,32,0,5,36,1,131,6,60, - 56,97,198,36,242,86,192,12,33,56,0,2, - 255,255,36,50,1,131,5,60,56,97,165,36, - 44,87,192,12,33,48,0,2,16,0,176,175, - 2,0,4,36,33,40,0,0,1,131,7,60, - 56,97,231,36,94,87,192,12,33,48,96,2, - 8,0,71,142,4,0,66,142,0,0,0,0, - 35,56,226,0,1,131,2,60,56,97,66,36, - 16,0,162,175,20,0,176,175,4,0,4,36, - 33,40,0,0,4,0,70,142,0,0,0,0, - 194,87,192,12,255,255,231,48,40,0,191,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 216,255,189,39,36,0,191,175,32,0,178,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,136,160,0,88,0,4,150,160,0,5,36, - 1,131,6,60,56,97,198,36,242,86,192,12, - 
33,56,32,2,90,0,4,150,1,131,5,60, - 56,97,165,36,44,87,192,12,33,48,32,2, - 16,0,177,175,6,0,4,36,33,40,0,0, - 1,131,7,60,56,97,231,36,15,88,192,12, - 92,0,6,38,1,131,18,60,56,97,82,38, - 16,0,178,175,20,0,177,175,33,32,0,0, - 64,0,5,36,100,0,6,38,194,87,192,12, - 4,0,7,36,16,0,177,175,2,0,4,36, - 33,40,0,0,104,0,6,142,0,0,0,0, - 94,87,192,12,33,56,64,2,16,0,177,175, - 2,0,4,36,33,40,0,0,108,0,6,142, - 0,0,0,0,94,87,192,12,33,56,64,2, - 16,0,177,175,3,0,4,36,64,0,5,36, - 112,0,6,142,0,0,0,0,143,87,192,12, - 33,56,64,2,116,0,4,38,7,79,192,12, - 33,40,32,2,36,0,191,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,216,255,189,39,32,0,191,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,136,160,0,88,0,4,150,160,0,5,36, - 1,131,6,60,56,97,198,36,242,86,192,12, - 33,56,32,2,90,0,4,150,1,131,5,60, - 56,97,165,36,44,87,192,12,33,48,32,2, - 16,0,177,175,2,0,4,36,92,0,6,142, - 1,131,7,60,56,97,231,36,94,87,192,12, - 33,40,0,0,16,0,177,175,2,0,4,36, - 96,0,6,142,1,131,7,60,56,97,231,36, - 94,87,192,12,33,40,0,0,16,0,177,175, - 2,0,4,36,100,0,6,142,1,131,7,60, - 56,97,231,36,94,87,192,12,33,40,0,0, - 104,0,4,38,7,79,192,12,33,40,32,2, - 32,0,191,143,28,0,177,143,24,0,176,143, - 8,0,224,3,40,0,189,39,200,255,189,39, - 52,0,191,175,48,0,180,175,44,0,179,175, - 40,0,178,175,36,0,177,175,32,0,176,175, - 33,144,128,0,33,136,160,0,16,0,4,36, - 32,0,5,36,1,131,6,60,56,97,198,36, - 242,86,192,12,33,56,32,2,0,0,68,150, - 1,131,5,60,56,97,165,36,44,87,192,12, - 33,48,32,2,155,0,64,18,0,0,0,0, - 1,131,20,60,56,97,148,38,8,0,80,142, - 0,0,0,0,145,0,0,18,0,0,0,0, - 4,0,66,142,0,0,0,0,141,0,64,24, - 33,152,0,0,16,0,4,36,32,0,5,36, - 33,48,128,2,242,86,192,12,33,56,32,2, - 4,0,4,150,33,40,128,2,44,87,192,12, - 33,48,32,2,16,0,177,175,6,0,4,36, - 33,40,0,0,8,0,6,38,15,88,192,12, - 33,56,128,2,16,0,3,146,65,0,2,36, - 47,0,98,16,66,0,98,40,18,0,64,16, - 5,0,2,36,88,0,98,16,6,0,98,40, - 7,0,64,16,2,0,2,36,30,0,98,16, - 4,0,2,36,51,0,98,16,4,0,4,36, - 174,79,192,8,1,0,115,38,6,0,2,36, - 68,0,98,16,64,0,2,36,78,0,98,16, - 33,32,0,0,174,79,192,8,1,0,115,38, - 68,0,2,36,47,0,98,16,69,0,98,40, - 7,0,64,16,66,0,2,36,24,0,98,16, - 67,0,2,36,25,0,98,16,3,0,4,36, - 174,79,192,8,1,0,115,38,131,0,98,40, - 83,0,64,16,128,0,98,40,68,0,64,16, - 0,0,0,0,174,79,192,8,1,0,115,38, - 16,0,177,175,2,0,4,36,40,0,6,142, - 1,131,7,60,56,97,231,36,94,87,192,12, - 33,40,0,0,174,79,192,8,1,0,115,38, - 16,0,177,175,111,79,192,8,1,0,4,36, - 16,0,177,175,111,79,192,8,2,0,4,36, - 16,0,177,175,40,0,6,142,1,131,7,60, - 56,97,231,36,143,87,192,12,64,0,5,36, - 174,79,192,8,1,0,115,38,48,0,7,142, - 44,0,2,142,0,0,0,0,35,56,226,0, - 16,0,180,175,20,0,177,175,134,79,192,8, - 33,40,0,0,48,0,7,142,44,0,2,142, - 0,0,0,0,35,56,226,0,16,0,180,175, - 20,0,177,175,4,0,4,36,64,0,5,36, - 44,0,6,142,0,0,0,0,194,87,192,12, - 255,255,231,48,174,79,192,8,1,0,115,38, - 16,0,177,175,6,0,4,36,33,40,0,0, - 1,131,7,60,56,97,231,36,15,88,192,12, - 40,0,6,38,174,79,192,8,1,0,115,38, - 5,0,4,36,164,79,192,8,33,40,0,0, - 16,0,180,175,20,0,177,175,64,0,5,36, - 40,0,6,38,194,87,192,12,4,0,7,36, - 174,79,192,8,1,0,115,38,16,0,4,146, - 16,0,5,146,31,0,132,48,224,0,165,48, - 1,131,6,60,56,97,198,36,242,86,192,12, - 33,56,32,2,33,32,0,0,1,131,5,60, - 56,97,165,36,44,87,192,12,33,48,32,2, - 1,0,115,38,4,0,66,142,0,0,0,0, - 42,16,98,2,117,255,64,20,68,0,16,38, - 12,0,82,142,0,0,0,0,105,255,64,22, - 0,0,0,0,52,0,191,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,56,0,189,39, - 200,255,189,39,52,0,191,175,48,0,182,175, - 
44,0,181,175,40,0,180,175,36,0,179,175, - 32,0,178,175,28,0,177,175,24,0,176,175, - 33,168,128,0,33,152,160,2,113,0,160,18, - 33,144,0,0,4,0,22,36,8,0,112,142, - 0,0,0,0,104,0,0,18,0,0,0,0, - 4,0,98,142,0,0,0,0,100,0,64,24, - 33,160,0,0,171,86,192,12,8,0,4,38, - 255,255,67,48,128,0,98,44,5,0,64,20, - 2,0,113,36,0,1,98,44,2,0,64,20, - 3,0,113,36,4,0,113,36,16,0,3,146, - 64,0,2,36,50,0,98,16,65,0,98,40, - 16,0,64,16,68,0,2,36,34,0,118,16, - 5,0,98,40,5,0,64,16,2,0,2,36, - 20,0,98,16,255,255,36,50,22,80,192,8, - 0,0,0,0,5,0,2,36,35,0,98,16, - 6,0,2,36,29,0,98,16,255,255,36,50, - 22,80,192,8,0,0,0,0,19,0,98,16, - 68,0,98,40,12,0,64,20,131,0,98,40, - 28,0,64,16,128,0,98,40,27,0,64,20, - 255,255,36,50,22,80,192,8,18,0,0,166, - 40,0,4,142,128,86,192,12,0,0,0,0, - 21,80,192,8,18,0,2,166,40,0,4,142, - 153,86,192,12,0,0,0,0,21,80,192,8, - 18,0,2,166,48,0,2,142,44,0,3,142, - 0,0,0,0,35,16,67,0,21,80,192,8, - 18,0,2,166,171,86,192,12,40,0,4,38, - 21,80,192,8,18,0,2,166,21,80,192,8, - 18,0,0,166,18,0,22,166,255,255,36,50, - 18,0,3,150,0,0,0,0,128,0,98,44, - 6,0,64,20,1,0,132,36,0,1,98,44, - 4,0,64,20,2,0,98,36,33,80,192,8, - 3,0,98,36,1,0,98,36,33,16,130,0, - 4,0,2,166,4,0,4,150,0,0,0,0, - 1,0,132,36,4,0,3,150,0,0,0,0, - 128,0,98,44,6,0,64,20,255,255,69,50, - 0,1,98,44,4,0,64,20,2,0,162,36, - 49,80,192,8,3,0,162,36,1,0,162,36, - 33,144,68,0,1,0,148,38,4,0,98,142, - 0,0,0,0,42,16,130,2,158,255,64,20, - 68,0,16,38,12,0,115,142,0,0,0,0, - 146,255,96,22,0,0,0,0,0,0,178,166, - 255,255,66,50,52,0,191,143,48,0,182,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,56,0,189,39,224,255,189,39, - 24,0,191,175,20,0,177,175,16,0,176,175, - 33,128,128,0,171,86,192,12,8,0,4,38, - 255,255,67,48,128,0,98,44,5,0,64,20, - 2,0,113,36,0,1,98,44,2,0,64,20, - 3,0,113,36,4,0,113,36,16,0,3,146, - 64,0,2,36,52,0,98,16,65,0,98,40, - 17,0,64,16,4,0,2,36,37,0,98,16, - 0,0,0,0,5,0,98,40,5,0,64,16, - 2,0,2,36,22,0,98,16,255,255,36,50, - 145,80,192,8,0,0,0,0,5,0,2,36, - 36,0,98,16,6,0,2,36,30,0,98,16, - 255,255,36,50,145,80,192,8,0,0,0,0, - 68,0,2,36,20,0,98,16,0,0,0,0, - 68,0,98,40,12,0,64,20,131,0,98,40, - 28,0,64,16,128,0,98,40,27,0,64,20, - 255,255,36,50,145,80,192,8,18,0,0,166, - 40,0,4,142,128,86,192,12,0,0,0,0, - 144,80,192,8,18,0,2,166,40,0,4,142, - 153,86,192,12,0,0,0,0,144,80,192,8, - 18,0,2,166,48,0,2,142,44,0,3,142, - 0,0,0,0,143,80,192,8,35,16,67,0, - 171,86,192,12,40,0,4,38,144,80,192,8, - 18,0,2,166,144,80,192,8,18,0,0,166, - 4,0,2,36,18,0,2,166,255,255,36,50, - 18,0,3,150,0,0,0,0,128,0,98,44, - 6,0,64,20,1,0,132,36,0,1,98,44, - 4,0,64,20,2,0,98,36,156,80,192,8, - 3,0,98,36,1,0,98,36,33,16,130,0, - 4,0,2,166,4,0,3,150,4,0,4,150, - 0,0,0,0,128,0,130,44,6,0,64,20, - 1,0,99,36,0,1,130,44,4,0,64,20, - 2,0,98,36,170,80,192,8,3,0,98,36, - 1,0,98,36,255,255,66,48,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,16,0,191,175, - 64,0,130,140,0,0,0,0,12,0,64,20, - 33,16,0,0,88,0,131,148,4,0,2,36, - 5,0,98,16,0,0,0,0,180,77,192,12, - 0,0,0,0,193,80,192,8,255,255,66,48, - 11,78,192,12,0,0,0,0,255,255,66,48, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,224,255,189,39,24,0,191,175, - 20,0,177,175,16,0,176,175,33,128,128,0, - 176,80,192,12,33,136,160,0,33,32,0,2, - 33,40,32,2,213,80,192,12,255,255,70,48, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,224,255,189,39, - 28,0,191,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,144,128,0,33,136,192,0, - 255,255,34,50,41,0,64,16,33,128,160,0, - 4,0,2,142,0,0,0,0,11,0,64,20, - 
255,255,35,50,9,50,192,12,255,255,36,50, - 33,24,64,0,32,0,96,16,1,0,2,36, - 0,0,2,166,4,0,3,174,8,0,3,174, - 242,80,192,8,12,0,17,166,12,0,2,150, - 0,0,0,0,43,16,67,0,23,0,64,20, - 255,255,2,36,64,0,66,142,0,0,0,0, - 19,0,64,20,255,255,2,36,33,32,0,2, - 2,0,69,150,33,48,0,0,104,78,192,12, - 72,0,71,38,88,0,67,150,4,0,2,36, - 5,0,98,16,33,32,64,2,217,78,192,12, - 33,40,0,2,8,81,192,8,33,16,0,0, - 153,78,192,12,33,40,0,2,8,81,192,8, - 33,16,0,0,255,255,2,36,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,0,0,0,0, - 0,0,0,0,168,255,189,39,80,0,191,175, - 76,0,183,175,72,0,182,175,68,0,181,175, - 64,0,180,175,60,0,179,175,56,0,178,175, - 52,0,177,175,48,0,176,175,33,24,128,0, - 33,152,160,0,33,176,192,0,33,184,224,0, - 104,0,162,143,0,0,0,0,2,0,64,20, - 44,0,160,175,40,0,162,39,0,0,64,172, - 24,0,164,39,33,40,96,0,156,88,192,12, - 33,48,96,2,33,136,64,0,82,0,32,18, - 33,16,0,0,209,83,192,12,0,0,0,0, - 33,144,64,0,5,0,64,22,1,0,2,36, - 183,88,192,12,33,32,32,2,124,81,192,8, - 33,16,0,0,148,0,66,162,196,88,192,12, - 33,32,32,2,224,0,85,48,44,0,176,39, - 33,32,32,2,124,89,192,12,33,40,0,2, - 33,160,64,0,33,32,32,2,198,89,192,12, - 33,40,0,2,2,0,66,166,44,0,162,143, - 0,0,0,0,14,0,64,20,0,0,0,0, - 8,0,35,142,4,0,34,142,0,0,0,0, - 35,128,98,0,2,0,66,150,0,0,0,0, - 33,128,2,2,42,16,19,2,16,0,64,20, - 33,32,32,2,42,16,112,2,16,0,64,16, - 0,0,0,0,167,83,192,12,33,32,64,2, - 183,88,192,12,33,32,32,2,3,131,3,60, - 140,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,124,81,192,8, - 33,16,0,0,33,40,0,2,42,89,192,12, - 33,48,0,0,255,0,162,50,255,255,131,50, - 37,16,67,0,48,0,3,36,8,0,67,20, - 33,32,64,2,16,0,176,175,33,40,32,2, - 33,48,192,2,135,81,192,12,33,56,224,2, - 117,81,192,8,33,128,64,0,3,131,3,60, - 140,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,33,128,0,0, - 3,0,0,22,0,0,0,0,167,83,192,12, - 33,32,64,2,183,88,192,12,33,32,32,2, - 33,16,0,2,80,0,191,143,76,0,183,143, - 72,0,182,143,68,0,181,143,64,0,180,143, - 60,0,179,143,56,0,178,143,52,0,177,143, - 48,0,176,143,8,0,224,3,88,0,189,39, - 176,255,189,39,76,0,191,175,72,0,182,175, - 68,0,181,175,64,0,180,175,60,0,179,175, - 56,0,178,175,52,0,177,175,48,0,176,175, - 33,128,128,0,33,136,160,0,33,160,192,0, - 33,168,224,0,96,0,182,143,24,0,160,175, - 33,32,32,2,24,0,165,39,2,0,6,36, - 239,90,192,12,33,56,0,0,64,0,2,174, - 24,0,162,143,0,0,0,0,155,0,64,20, - 0,0,0,0,64,0,2,142,0,0,0,0, - 4,0,64,16,33,32,32,2,3,131,3,60, - 60,82,192,8,148,17,99,36,16,0,160,175, - 72,0,5,38,24,0,166,39,110,90,192,12, - 4,0,7,36,24,0,162,143,0,0,0,0, - 139,0,64,20,0,0,0,0,196,88,192,12, - 33,32,32,2,224,0,66,48,160,0,3,36, - 133,0,67,20,33,32,32,2,124,89,192,12, - 24,0,165,39,33,144,64,0,33,32,32,2, - 198,89,192,12,24,0,165,39,33,152,64,0, - 24,0,162,143,0,0,0,0,122,0,64,20, - 0,0,0,0,255,255,66,50,5,0,66,44, - 118,0,64,16,0,0,0,0,8,0,34,142, - 4,0,35,142,0,0,0,0,35,16,67,0, - 255,255,99,50,33,16,67,0,110,0,194,22, - 33,32,0,2,88,0,18,166,90,0,19,166, - 33,40,128,2,178,50,192,12,33,48,160,2, - 118,0,64,20,33,16,0,0,255,255,67,50, - 4,0,2,36,24,0,98,16,33,32,32,2, - 24,0,165,39,2,0,6,36,239,90,192,12, - 33,56,0,0,92,0,2,174,33,32,32,2, - 24,0,165,39,2,0,6,36,239,90,192,12, - 33,56,0,0,96,0,2,174,33,32,32,2, - 24,0,165,39,2,0,6,36,239,90,192,12, - 33,56,0,0,100,0,2,174,24,0,162,143, - 0,0,0,0,78,0,64,20,33,32,32,2, - 67,82,192,8,104,0,5,38,4,0,2,36, - 88,0,2,166,90,0,19,166,124,0,0,174, - 16,0,160,175,92,0,5,38,24,0,166,39, - 186,91,192,12,6,0,7,36,24,0,162,143, - 0,0,0,0,63,0,64,20,100,0,4,38, - 
- [deleted firmware image: several thousand lines of comma-separated decimal byte values removed with the driver — what appears to be little-endian MIPS machine code followed by embedded ASCII diagnostic strings such as "*** RightSwitch Diagnostics menu ***", "Digi Intl. RightSwitch SE-X" and "Intel 82596" — raw data not reproduced here]
1,0,0,0,192,157,1,131,2,0,0,0, - 40,208,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,112,205,0,131, - 60,210,0,131,2,0,0,0,1,0,2,3, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,56,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 60,210,0,131,0,17,3,131,1,0,2,3, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,116,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,4,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,176,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,8,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,236,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,12,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,40,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,16,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,100,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,20,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,160,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,24,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,220,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,28,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,24,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,32,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,84,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,36,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,144,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,40,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,204,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 156,202,0,131,44,17,3,131,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,8,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,48,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,68,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,52,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,128,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,56,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,188,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,60,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,248,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,64,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,52,166,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,68,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 
20,48,1,131,112,166,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,64,1,44,202,0,131,88,210,0,131, - 164,202,0,131,120,211,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 88,210,0,131,164,202,0,131,120,211,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,64,1, - 44,202,0,131,88,210,0,131,164,202,0,131, - 120,211,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,88,210,0,131, - 164,202,0,131,120,211,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 88,210,0,131,164,202,0,131,120,211,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 172,166,1,131,2,0,0,0,212,166,1,131, - 3,0,0,0,252,166,1,131,4,0,0,0, - 36,167,1,131,5,0,0,0,76,167,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 56,208,1,131,0,0,0,0,0,0,0,0, - 1,0,64,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,64,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,1, - 84,212,0,131,16,212,0,131,164,202,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,64,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,6,1,84,212,0,131,16,212,0,131, - 164,202,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,180,167,1,131, - 2,0,0,0,220,167,1,131,3,0,0,0, - 4,168,1,131,4,0,0,0,44,168,1,131, - 5,0,0,0,84,168,1,131,6,0,0,0, - 124,168,1,131,7,0,0,0,164,168,1,131, - 8,0,0,0,204,168,1,131,9,0,0,0, - 244,168,1,131,10,0,0,0,28,169,1,131, - 11,0,0,0,68,169,1,131,12,0,0,0, - 108,169,1,131,13,0,0,0,148,169,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 72,208,1,131,0,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,4,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,64,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,60,170,1,131, - 2,0,0,0,100,170,1,131,3,0,0,0, - 140,170,1,131,4,0,0,0,180,170,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 88,208,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,72,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,20,171,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,76,162,1,131,2,0,0,0, - 
136,162,1,131,3,0,0,0,196,162,1,131, - 4,0,0,0,0,163,1,131,5,0,0,0, - 60,163,1,131,6,0,0,0,120,163,1,131, - 7,0,0,0,180,163,1,131,8,0,0,0, - 240,163,1,131,9,0,0,0,44,164,1,131, - 10,0,0,0,104,164,1,131,11,0,0,0, - 164,164,1,131,12,0,0,0,224,164,1,131, - 13,0,0,0,28,165,1,131,14,0,0,0, - 88,165,1,131,15,0,0,0,148,165,1,131, - 16,0,0,0,208,165,1,131,17,0,0,0, - 12,166,1,131,18,0,0,0,72,166,1,131, - 19,0,0,0,132,166,1,131,20,0,0,0, - 64,208,1,131,21,0,0,0,80,208,1,131, - 22,0,0,0,96,208,1,131,23,0,0,0, - 40,171,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,144,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,16,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,148,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,76,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,152,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,136,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,156,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,196,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,160,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,0,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,164,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,60,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,168,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,120,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,172,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,180,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,176,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,240,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,180,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,44,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,184,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,104,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,188,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,164,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,192,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,224,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,196,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,28,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,200,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,88,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,204,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,148,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 
8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,208,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,208,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,212,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,12,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,216,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,72,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,220,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,132,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,224,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,192,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,228,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,252,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,232,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,56,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,236,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,116,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,240,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,176,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,244,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,236,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,36,172,1,131,2,0,0,0, - 96,172,1,131,3,0,0,0,156,172,1,131, - 4,0,0,0,216,172,1,131,5,0,0,0, - 20,173,1,131,6,0,0,0,80,173,1,131, - 7,0,0,0,140,173,1,131,8,0,0,0, - 200,173,1,131,9,0,0,0,4,174,1,131, - 10,0,0,0,64,174,1,131,11,0,0,0, - 124,174,1,131,12,0,0,0,184,174,1,131, - 13,0,0,0,244,174,1,131,14,0,0,0, - 48,175,1,131,15,0,0,0,108,175,1,131, - 16,0,0,0,168,175,1,131,17,0,0,0, - 228,175,1,131,18,0,0,0,32,176,1,131, - 19,0,0,0,92,176,1,131,20,0,0,0, - 152,176,1,131,21,0,0,0,212,176,1,131, - 22,0,0,0,16,177,1,131,23,0,0,0, - 76,177,1,131,24,0,0,0,136,177,1,131, - 25,0,0,0,196,177,1,131,26,0,0,0, - 0,178,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,112,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,0,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,116,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,60,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,120,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,120,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,124,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,180,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,200,212,0,131, - 156,202,0,131,220,5,0,163,1,0,64,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,240,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,112,205,0,131, - 156,202,0,131,161,0,0,0,1,0,2,1, - 
32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,44,180,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,4,180,1,131,2,0,0,0, - 64,180,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,120,208,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,20,179,1,131, - 2,0,0,0,80,179,1,131,3,0,0,0, - 140,179,1,131,4,0,0,0,200,179,1,131, - 5,0,0,0,128,208,1,131,0,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,6,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,192,180,1,131,2,0,0,0, - 232,180,1,131,3,0,0,0,16,181,1,131, - 4,0,0,0,56,181,1,131,5,0,0,0, - 96,181,1,131,6,0,0,0,136,181,1,131, - 7,0,0,0,176,181,1,131,8,0,0,0, - 216,181,1,131,9,0,0,0,0,182,1,131, - 10,0,0,0,40,182,1,131,11,0,0,0, - 80,182,1,131,13,0,0,0,120,182,1,131, - 16,0,0,0,160,182,1,131,17,0,0,0, - 200,182,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,144,208,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 160,208,1,131,2,0,0,0,168,208,1,131, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,184,208,1,131,2,0,0,0, - 192,208,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,208,208,1,131,2,0,0,0, - 216,208,1,131,3,0,0,0,224,208,1,131, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,240,208,1,131,2,0,0,0, - 248,208,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 8,209,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,1,0,0,0,24,209,1,131, - 2,0,0,0,32,209,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,48,209,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,64,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,232,208,1,131, - 2,0,0,0,0,209,1,131,3,0,0,0, - 16,209,1,131,4,0,0,0,40,209,1,131, - 5,0,0,0,56,209,1,131,6,0,0,0, - 72,209,1,131,0,0,0,0,0,0,0,0, - 2,0,0,0,152,208,1,131,6,0,0,0, - 176,208,1,131,7,0,0,0,200,208,1,131, - 8,0,0,0,80,209,1,131,0,0,0,0, - 0,0,0,0,7,0,0,0,88,209,1,131, - 0,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 128,17,3,131,1,0,65,1,32,45,1,131, - 
208,48,1,131,160,49,1,131,20,48,1,131, - 8,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 144,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 68,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 148,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 128,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 132,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 188,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 136,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 248,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 140,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 52,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 156,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 112,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 160,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 172,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 164,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 232,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 168,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 36,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 172,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 96,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 176,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 156,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 180,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 216,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 184,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 20,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 188,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 80,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 192,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 140,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 196,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 200,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 200,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 4,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 204,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 64,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 
208,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 124,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 212,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 184,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 220,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 244,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 224,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 48,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 228,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 108,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 232,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 168,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 236,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 228,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 240,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 32,191,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,104,215,0,131,140,215,0,131, - 0,0,0,0,1,0,2,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 92,191,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 28,185,1,131,2,0,0,0,88,185,1,131, - 3,0,0,0,148,185,1,131,4,0,0,0, - 208,185,1,131,5,0,0,0,12,186,1,131, - 6,0,0,0,72,186,1,131,8,0,0,0, - 132,186,1,131,9,0,0,0,192,186,1,131, - 10,0,0,0,252,186,1,131,11,0,0,0, - 56,187,1,131,12,0,0,0,116,187,1,131, - 13,0,0,0,176,187,1,131,14,0,0,0, - 236,187,1,131,15,0,0,0,40,188,1,131, - 16,0,0,0,100,188,1,131,17,0,0,0, - 160,188,1,131,18,0,0,0,220,188,1,131, - 19,0,0,0,24,189,1,131,20,0,0,0, - 84,189,1,131,21,0,0,0,144,189,1,131, - 22,0,0,0,204,189,1,131,24,0,0,0, - 8,190,1,131,25,0,0,0,68,190,1,131, - 26,0,0,0,128,190,1,131,27,0,0,0, - 188,190,1,131,28,0,0,0,248,190,1,131, - 29,0,0,0,52,191,1,131,30,0,0,0, - 112,191,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,180,215,0,131, - 156,202,0,131,218,12,3,131,1,0,4,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,128,192,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 156,202,0,131,40,211,1,131,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,188,192,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,112,205,0,131, - 156,202,0,131,2,0,0,0,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,248,192,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,200,215,0,131, - 164,202,0,131,196,216,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 200,215,0,131,164,202,0,131,196,216,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,6,1, - 44,202,0,131,200,215,0,131,164,202,0,131, - 196,216,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,200,215,0,131, - 164,202,0,131,196,216,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 200,215,0,131,164,202,0,131,196,216,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 52,193,1,131,2,0,0,0,92,193,1,131, - 
3,0,0,0,132,193,1,131,4,0,0,0, - 172,193,1,131,5,0,0,0,212,193,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 112,209,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,148,192,1,131,2,0,0,0, - 208,192,1,131,3,0,0,0,12,193,1,131, - 4,0,0,0,120,209,1,131,0,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 112,205,0,131,156,202,0,131,3,0,0,0, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,100,194,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,216,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,160,194,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 104,217,0,131,156,202,0,131,0,0,0,0, - 1,0,67,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,220,194,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 104,217,0,131,156,202,0,131,0,0,0,0, - 1,0,65,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,24,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 112,217,0,131,156,202,0,131,216,12,3,131, - 1,0,4,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,84,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 120,205,0,131,156,202,0,131,224,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,144,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 120,205,0,131,156,202,0,131,228,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,204,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,232,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,8,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,234,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,68,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,246,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,128,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,236,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,188,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,238,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,248,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,240,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,52,197,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,242,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,112,197,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,180,219,0,131, - 132,217,0,131,164,202,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 180,219,0,131,132,217,0,131,0,221,0,131, - 104,220,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,180,219,0,131,132,217,0,131, - 164,202,0,131,104,220,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,180,219,0,131, - 132,217,0,131,0,221,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 180,219,0,131,132,217,0,131,0,221,0,131, - 104,220,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,4,1,180,219,0,131,132,217,0,131, - 164,202,0,131,104,220,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 
0,0,0,0,1,0,2,1,180,219,0,131, - 132,217,0,131,164,202,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,4,1, - 180,219,0,131,132,217,0,131,164,202,0,131, - 104,220,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,4,1,180,219,0,131,132,217,0,131, - 164,202,0,131,104,220,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,180,219,0,131, - 132,217,0,131,164,202,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 172,197,1,131,2,0,0,0,212,197,1,131, - 3,0,0,0,252,197,1,131,4,0,0,0, - 36,198,1,131,5,0,0,0,76,198,1,131, - 6,0,0,0,116,198,1,131,7,0,0,0, - 156,198,1,131,8,0,0,0,196,198,1,131, - 9,0,0,0,236,198,1,131,10,0,0,0, - 20,199,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,136,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,120,194,1,131, - 2,0,0,0,180,194,1,131,3,0,0,0, - 240,194,1,131,4,0,0,0,44,195,1,131, - 5,0,0,0,104,195,1,131,6,0,0,0, - 164,195,1,131,7,0,0,0,224,195,1,131, - 8,0,0,0,28,196,1,131,9,0,0,0, - 88,196,1,131,10,0,0,0,148,196,1,131, - 11,0,0,0,208,196,1,131,12,0,0,0, - 12,197,1,131,13,0,0,0,72,197,1,131, - 14,0,0,0,132,197,1,131,15,0,0,0, - 144,209,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,76,210,0,131,156,202,0,131, - 160,211,1,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 44,200,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,120,205,0,131,60,210,0,131, - 140,1,0,163,1,0,2,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 104,200,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,4,1, - 44,202,0,131,36,222,0,131,164,202,0,131, - 48,223,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,36,222,0,131, - 164,202,0,131,48,223,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 36,222,0,131,164,202,0,131,48,223,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 164,200,1,131,2,0,0,0,204,200,1,131, - 3,0,0,0,244,200,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,168,209,1,131, - 0,0,0,0,0,0,0,0,1,0,2,1, - 44,202,0,131,212,223,0,131,164,202,0,131, - 252,224,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,212,223,0,131, - 164,202,0,131,252,224,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 212,223,0,131,164,202,0,131,252,224,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,212,223,0,131,164,202,0,131, - 252,224,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,212,223,0,131, - 164,202,0,131,252,224,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,76,201,1,131, - 2,0,0,0,116,201,1,131,3,0,0,0, - 156,201,1,131,4,0,0,0,196,201,1,131, - 5,0,0,0,236,201,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,184,209,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 64,200,1,131,2,0,0,0,124,200,1,131, - 3,0,0,0,176,209,1,131,4,0,0,0, - 192,209,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,128,209,1,131,2,0,0,0, - 152,209,1,131,3,0,0,0,160,209,1,131, - 4,0,0,0,200,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,24,208,1,131, - 2,0,0,0,48,208,1,131,4,0,0,0, - 104,208,1,131,5,0,0,0,112,208,1,131, - 7,0,0,0,136,208,1,131,10,0,0,0, - 96,209,1,131,11,0,0,0,104,209,1,131, - 17,0,0,0,208,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,216,209,1,131, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,240,209,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,1,0,0,0,8,210,1,131, - 2,0,0,0,16,210,1,131,3,0,0,0, - 24,210,1,131,4,0,0,0,32,210,1,131, - 
5,0,0,0,40,210,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 56,210,1,131,2,0,0,0,64,210,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 72,210,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 48,210,1,131,2,0,0,0,80,210,1,131, - 3,0,0,0,88,210,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,16,208,1,131, - 2,0,0,0,224,209,1,131,3,0,0,0, - 232,209,1,131,4,0,0,0,248,209,1,131, - 5,0,0,0,0,210,1,131,6,0,0,0, - 96,210,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,104,210,1,131,0,0,0,0, - 0,0,0,0,6,0,0,0,112,210,1,131, - 0,0,0,0,0,0,0,0,3,0,0,0, - 120,210,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,128,210,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,3,0,0,0,6,0,0,0, - 1,0,0,0,2,0,0,0,1,0,0,0, - 10,0,0,0,7,0,0,0,8,0,0,0, - 2,0,0,0,2,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,112,117,98,108, - 105,99,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,112,114,105,118,97,116,101,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 83,78,77,80,95,116,114,97,112,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 3,0,0,0,6,0,0,0,1,0,0,0, - 4,0,0,0,1,0,0,0,76,1,0,0, - 5,0,0,0,1,0,0,0,1,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 3,0,0,0,6,0,0,0,1,0,0,0, - 2,0,0,0,1,0,0,0,2,0,0,0, - 2,0,0,0,1,0,0,0,1,0,0,0, - 0,0,0,0,1,0,0,0,3,0,0,0, - 6,0,0,0,1,0,0,0,2,0,0,0, - 1,0,0,0,17,0,0,0,0,0,0,0, - 0,0,0,0,48,49,50,51,52,53,54,55, - 56,57,65,66,67,68,69,70,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 255,85,170,0,255,255,255,255,85,85,85,85, - 170,170,170,170,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,64,40,35,41, - 32,67,111,112,121,114,105,103,104,116,32,40, - 99,41,32,49,57,56,54,32,45,32,49,57, - 57,53,32,32,69,112,105,108,111,103,117,101, - 32,84,101,99,104,110,111,108,111,103,121,32, - 67,111,114,112,111,114,97,116,105,111,110,10, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,255,255,255,255, - 72,10,0,0,78,10,0,0,83,10,0,0, - 69,10,0,0,109,97,105,110,46,99,0,0, - 48,0,0,0,55,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 0,0,0,0,116,105,109,101,114,46,99,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 100,0,0,0,100,0,0,0,1,0,0,0, - 0,0,0,0,115,114,99,32,0,0,0,0, - 32,0,0,0,100,115,116,32,0,0,0,0, - 32,37,48,50,88,0,0,0,10,0,0,0, - 255,255,255,255,48,48,48,48,48,48,0,0, - 48,48,48,48,48,49,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,255,255,255,255, - 0,0,0,0,0,0,0,0,0,0,0,0, - 62,32,0,0,37,120,10,0,37,120,58,9, - 37,120,10,0,37,115,10,0,0,0,0,0, - 10,0,0,0,0,0,0,0,0,0,0,0, - 68,85,77,80,10,0,0,0,37,48,50,120, - 32,0,0,0,10,0,0,0,0,0,0,0, - 37,100,32,112,112,115,10,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,0,0,0,0,1,0,0,0, - 119,119,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,128,194,0, - 0,0,0,0,1,128,194,0,0,16,0,0, - 66,76,75,0,70,87,68,0,76,82,78,0, - 76,73,83,0,68,73,83,0,72,69,76,76, - 79,10,0,0,116,99,32,101,120,112,10,0, - 102,114,111,109,32,0,0,0,10,0,0,0, - 87,101,105,114,100,0,0,0,0,0,0,0, - 0,0,0,0,255,255,255,255,255,255,255,255, - 255,255,0,0,255,255,255,255,255,255,0,0, - 0,0,0,0,0,0,0,0,80,65,68,37, - 100,10,0,0,170,170,3,0,0,0,0,0, - 83,69,78,84,33,10,0,0,85,68,80,10, - 0,0,0,0,73,67,77,80,10,0,0,0, - 69,67,72,79,10,0,0,0,73,80,10,0, - 170,170,3,0,0,0,0,0,73,80,88,33, - 10,0,0,0,0,0,0,0,255,255,255,255, - 255,255,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,192,155,1,131,0,0,0,0, - 108,157,1,131,0,0,0,0,88,161,1,131, - 0,0,0,0,16,162,1,131,0,0,0,0, - 32,162,1,131,0,0,0,0,116,167,1,131, - 0,0,0,0,164,167,1,131,0,0,0,0, - 188,169,1,131,0,0,0,0,44,170,1,131, - 0,0,0,0,220,170,1,131,0,0,0,0, - 4,171,1,131,0,0,0,0,80,171,1,131, - 0,0,0,0,40,178,1,131,0,0,0,0, - 
} ; -static const int dgrs_ncode = 119520 ; diff --git a/drivers/net/dgrs_i82596.h b/drivers/net/dgrs_i82596.h deleted file mode 100644 index ac9217ad2138..000000000000 --- a/drivers/net/dgrs_i82596.h +++ /dev/null @@ -1,473 +0,0 @@ -/* - * i82596 ethernet controller bits and structures (little endian) - * - * $Id: i82596.h,v 1.8 1996/09/03 11:19:03 rick Exp $ - */ - -/************************************************************************/ -/* */ -/* PORT commands (p. 4-20). The least significant nibble is one */ -/* of these commands, the rest of the command is a memory address */ -/* aligned on a 16 byte boundary. Note that port commands must */ -/* be written to the PORT address and the PORT address+2 with two */ -/* halfword writes. Write the LSH first to PORT, then the MSH to */ -/* PORT+2. Blame Intel. */ -/* */ -/************************************************************************/ -#define I596_PORT_RESET 0x0 /* Reset. Wait 5 SysClks & 10 TxClks */ -#define I596_PORT_SELFTEST 0x1 /* Do a selftest */ -#define I596_PORT_SCP_ADDR 0x2 /* Set new SCP address */ -#define I596_PORT_DUMP 0x3 /* Dump internal data structures */ - -/* - * I596_ST: Selftest results (p.
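The PORT-command note above (two halfword writes, least significant half first) is the part of this interface that is easiest to get wrong, so a minimal illustrative sketch follows. The helper name i596_port_command, the port_base parameter and the use of outw()/virt_to_bus() are assumptions made for the example, not the removed driver's actual I/O path.

#include <asm/io.h>

/* Illustrative only: combine a 16-byte-aligned address with a command
 * nibble and write it to PORT as two halfwords, LSH first, MSH second. */
static void i596_port_command(unsigned long port_base,
			      unsigned long addr, unsigned int cmd)
{
	unsigned long word = (addr & ~0xfUL) | (cmd & 0xf);

	outw(word & 0xffff, port_base);             /* LSH first to PORT   */
	outw((word >> 16) & 0xffff, port_base + 2); /* then MSH to PORT+2  */
}

/* e.g.: i596_port_command(iobase, virt_to_bus(scp), I596_PORT_SCP_ADDR); */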
4-21) - */ -typedef volatile struct -{ - ulong signature; /* ROM checksum */ - ulong result; /* Selftest results: non-zero is a failure */ -} I596_ST; - -#define I596_ST_SELFTEST_FAIL 0x1000 /* Selftest Failed */ -#define I596_ST_DIAGNOSE_FAIL 0x0020 /* Diagnose Failed */ -#define I596_ST_BUSTIMER_FAIL 0x0010 /* Bus Timer Failed */ -#define I596_ST_REGISTER_FAIL 0x0008 /* Register Failed */ -#define I596_ST_ROM_FAIL 0x0004 /* ROM Failed */ - -/* - * I596_DUMP: Dump results - */ -typedef volatile struct -{ - ulong dump[77]; -} I596_DUMP; - -/************************************************************************/ -/* */ -/* I596_TBD: Transmit Buffer Descriptor (p. 4-59) */ -/* */ -/************************************************************************/ -typedef volatile struct _I596_TBD -{ - ulong count; - vol struct _I596_TBD *next; - uchar *buf; - ushort unused1; - ushort unused2; -} I596_TBD; - -#define I596_TBD_NOLINK ((I596_TBD *) 0xffffffff) -#define I596_TBD_EOF 0x8000 -#define I596_TBD_COUNT_MASK 0x3fff - -/************************************************************************/ -/* */ -/* I596_TFD: Transmit Frame Descriptor (p. 4-56) */ -/* a.k.a. I596_CB_XMIT */ -/* */ -/************************************************************************/ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - I596_TBD *tbdp; - ulong count; /* for speed */ - - /* Application defined data follows structure... */ - -#if 0 /* We don't use these intel defined ones */ - uchar addr[6]; - ushort len; - uchar data[1]; -#else - ulong dstchan;/* Used by multi-NIC mode */ -#endif -} I596_TFD; - -#define I596_TFD_NOCRC 0x0010 /* cmd: No CRC insertion */ -#define I596_TFD_FLEX 0x0008 /* cmd: Flexible mode */ - -/************************************************************************/ -/* */ -/* I596_RBD: Receive Buffer Descriptor (p. 4-84) */ -/* */ -/************************************************************************/ -typedef volatile struct _I596_RBD -{ -#ifdef INTEL_RETENTIVE - ushort count; /* Length of data in buf */ - ushort offset; -#else - ulong count; /* Length of data in buf */ -#endif - vol struct _I596_RBD *next; /* Next buffer descriptor in list */ - uchar *buf; /* Data buffer */ -#ifdef INTEL_RETENTIVE - ushort size; /* Size of buf (constant) */ - ushort zero; -#else - ulong size; /* Size of buf (constant) */ -#endif - - /* Application defined data follows structure... */ - - uchar chan; - uchar refcnt; - ushort len; -} I596_RBD; - -#define I596_RBD_NOLINK ((I596_RBD *) 0xffffffff) -#define I596_RBD_EOF 0x8000 /* This is last buffer in a frame */ -#define I596_RBD_F 0x4000 /* The actual count is valid */ - -#define I596_RBD_EL 0x8000 /* Last buffer in list */ - -/************************************************************************/ -/* */ -/* I596_RFD: Receive Frame Descriptor (p. 4-79) */ -/* */ -/************************************************************************/ -typedef volatile struct _I596_RFD -{ - ushort status; - ushort cmd; - vol struct _I596_RFD *next; - vol struct _I596_RBD *rbdp; - ushort count; /* Len of data in RFD: always 0 */ - ushort size; /* Size of RFD buffer: always 0 */ - - /* Application defined data follows structure... 
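For orientation, here is a hedged sketch of how a transmit frame could be laid out with the TFD/TBD structures just defined: a two-buffer chain with the EOF bit set in the final TBD's count and the chain terminated with NOLINK. The function name build_tx_chain and its parameters are invented for illustration; the driver builds its chains with its own buffer management.

/* Hypothetical two-buffer transmit descriptor setup (header + payload),
 * flexible mode, last TBD marked EOF, chain terminated with NOLINK. */
static void build_tx_chain(I596_TFD *tfd, I596_TBD tbd[2],
			   uchar *hdr, ushort hdr_len,
			   uchar *payload, ushort data_len)
{
	tbd[0].count = hdr_len & I596_TBD_COUNT_MASK;
	tbd[0].buf   = hdr;
	tbd[0].next  = &tbd[1];

	tbd[1].count = (data_len & I596_TBD_COUNT_MASK) | I596_TBD_EOF;
	tbd[1].buf   = payload;
	tbd[1].next  = I596_TBD_NOLINK;   /* no further buffers */

	tfd->tbdp  = tbd;
	tfd->count = 0;                   /* lengths live in the TBDs */
	tfd->cmd   = I596_TFD_FLEX;       /* transmit command bits are or'ed in later */
}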
*/ - -# if 0 /* We don't use these intel defined ones */ - uchar addr[6]; - ushort len; - uchar data[1]; -# else - ulong dstchan;/* Used by multi-nic mode */ -# endif -} I596_RFD; - -#define I596_RFD_C 0x8000 /* status: frame complete */ -#define I596_RFD_B 0x4000 /* status: frame busy or waiting */ -#define I596_RFD_OK 0x2000 /* status: frame OK */ -#define I596_RFD_ERR_LENGTH 0x1000 /* status: length error */ -#define I596_RFD_ERR_CRC 0x0800 /* status: CRC error */ -#define I596_RFD_ERR_ALIGN 0x0400 /* status: alignment error */ -#define I596_RFD_ERR_NOBUFS 0x0200 /* status: resource error */ -#define I596_RFD_ERR_DMA 0x0100 /* status: DMA error */ -#define I596_RFD_ERR_SHORT 0x0080 /* status: too short error */ -#define I596_RFD_NOMATCH 0x0002 /* status: IA was not matched */ -#define I596_RFD_COLLISION 0x0001 /* status: collision during receive */ - -#define I596_RFD_EL 0x8000 /* cmd: end of RFD list */ -#define I596_RFD_FLEX 0x0008 /* cmd: Flexible mode */ -#define I596_RFD_EOF 0x8000 /* count: last buffer in the frame */ -#define I596_RFD_F 0x4000 /* count: The actual count is valid */ - -/************************************************************************/ -/* */ -/* Commands */ -/* */ -/************************************************************************/ - - /* values for cmd halfword in all the structs below */ -#define I596_CB_CMD 0x07 /* CB COMMANDS */ -#define I596_CB_CMD_NOP 0 -#define I596_CB_CMD_IA 1 -#define I596_CB_CMD_CONF 2 -#define I596_CB_CMD_MCAST 3 -#define I596_CB_CMD_XMIT 4 -#define I596_CB_CMD_TDR 5 -#define I596_CB_CMD_DUMP 6 -#define I596_CB_CMD_DIAG 7 - -#define I596_CB_CMD_EL 0x8000 /* CB is last in linked list */ -#define I596_CB_CMD_S 0x4000 /* Suspend after execution */ -#define I596_CB_CMD_I 0x2000 /* cause interrupt */ - - /* values for the status halfword in all the struct below */ -#define I596_CB_STATUS 0xF000 /* All four status bits */ -#define I596_CB_STATUS_C 0x8000 /* Command complete */ -#define I596_CB_STATUS_B 0x4000 /* Command busy executing */ -#define I596_CB_STATUS_C_OR_B 0xC000 /* Command complete or busy */ -#define I596_CB_STATUS_OK 0x2000 /* Command complete, no errors */ -#define I596_CB_STATUS_A 0x1000 /* Command busy executing */ - -#define I596_CB_NOLINK ((I596_CB *) 0xffffffff) - -/* - * I596_CB_NOP: NOP Command (p. 4-34) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; -} I596_CB_NOP; - -/* - * Same as above, but command and status in one ulong for speed - */ -typedef volatile struct -{ - ulong csr; - union _I596_CB *next; -} I596_CB_FAST; -#define FASTs(X) (X) -#define FASTc(X) ((X)<<16) - -/* - * I596_CB_IA: Individual (MAC) Address Command (p. 4-35) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - uchar addr[6]; -} I596_CB_IA; - -/* - * I596_CB_CONF: Configure Command (p. 
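To show the command and status bits in context, here is an assumed sketch of issuing an individual-address (IA) command block and polling it to completion. The helper name issue_ia_command, the bare spin loop and the memcpy() cast are illustrative simplifications; in practice command blocks are handed to the chip through the SCB and completion is normally signalled by interrupt.

#include <linux/string.h>        /* memcpy */

/* Illustrative only: set up one IA command block as the last CB in the
 * list, then spin on the C (complete) bit and report the OK bit. */
static int issue_ia_command(I596_CB_IA *ia, const uchar mac[6])
{
	ia->status = 0;
	ia->cmd    = I596_CB_CMD_IA | I596_CB_CMD_EL;   /* last CB in list */
	ia->next   = I596_CB_NOLINK;
	memcpy((void *)ia->addr, mac, 6);

	/* ... point the SCB at this CB and start the command unit here ... */

	while (!(ia->status & I596_CB_STATUS_C))
		;                                       /* wait for completion */

	return (ia->status & I596_CB_STATUS_OK) ? 0 : -1;
}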
4-37) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - uchar conf[14]; -} I596_CB_CONF; - -#define I596_CONF0_P 0x80 /* Enable RBD Prefetch Bit */ -#define I596_CONF0_COUNT 14 /* Count of configuration bytes */ - -#define I596_CONF1_MON_OFF 0xC0 /* Monitor mode: Monitor off */ -#define I596_CONF1_MON_ON 0x80 /* Monitor mode: Monitor on */ -#define I596_CONF1_TxFIFO(W) (W) /* TxFIFO trigger, in words */ - -#define I596_CONF2_SAVEBF 0x80 /* Save bad frames */ - -#define I596_CONF3_ADDRLEN(B) (B) /* Address length */ -#define I596_CONF3_NOSRCINSERT 0x08 /* Do not insert source address */ -#define I596_CONF3_PREAMBLE8 0x20 /* 8 byte preamble */ -#define I596_CONF3_LOOPOFF 0x00 /* Loopback: Off */ -#define I596_CONF3_LOOPINT 0x40 /* Loopback: internal */ -#define I596_CONF3_LOOPEXT 0xC0 /* Loopback: external */ - -#define I596_CONF4_LINPRI(ST) (ST) /* Linear priority: slot times */ -#define I596_CONF4_EXPPRI(ST) (ST) /* Exponential priority: slot times */ -#define I596_CONF4_IEEE_BOM 0 /* IEEE 802.3 backoff method */ - -#define I596_CONF5_IFS(X) (X) /* Interframe spacing in clocks */ - -#define I596_CONF6_ST_LOW(X) (X&255) /* Slot time, low byte */ - -#define I596_CONF7_ST_HI(X) (X>>8) /* Slot time, high bits */ -#define I596_CONF7_RETRY(X) (X<<4) /* Max retry number */ - -#define I596_CONF8_PROMISC 0x01 /* Rcv all frames */ -#define I596_CONF8_NOBROAD 0x02 -#define I596_CONF8_MANCHESTER 0x04 -#define I596_CONF8_TxNOCRS 0x08 -#define I596_CONF8_NOCRC 0x10 -#define I596_CONF8_CRC_CCITT 0x20 -#define I596_CONF8_BITSTUFFING 0x40 -#define I596_CONF8_PADDING 0x80 - -#define I596_CONF9_CSFILTER(X) (X) -#define I596_CONF9_CSINT(X) 0x08 -#define I596_CONF9_CDFILTER(X) (X<<4) -#define I596_CONF9_CDINT(X) 0x80 - -#define I596_CONF10_MINLEN(X) (X) /* Minimum frame length */ - -#define I596_CONF11_PRECRS_ 0x01 /* Preamble before carrier sense */ -#define I596_CONF11_LNGFLD_ 0x02 /* Padding in End of Carrier */ -#define I596_CONF11_CRCINM_ 0x04 /* CRC in memory */ -#define I596_CONF11_AUTOTX 0x08 /* Auto retransmit */ -#define I596_CONF11_CSBSAC_ 0x10 /* Collision detect by src addr cmp. */ -#define I596_CONF11_MCALL_ 0x20 /* Multicast all */ - -#define I596_CONF13_RESERVED 0x3f /* Reserved: must be ones */ -#define I596_CONF13_MULTIA 0x40 /* Enable multiple addr. reception */ -#define I596_CONF13_DISBOF 0x80 /* Disable backoff algorithm */ -/* - * I596_CB_MCAST: Multicast-Setup Command (p. 4-54) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - ushort count; /* Number of 6-byte addrs that follow */ - uchar addr[6][1]; -} I596_CB_MCAST; - -/* - * I596_CB_XMIT: Transmit Command (p. 4-56) - */ -typedef I596_TFD I596_CB_XMIT; - -#define I596_CB_XMIT_NOCRC 0x0010 /* cmd: No CRC insertion */ -#define I596_CB_XMIT_FLEX 0x0008 /* cmd: Flexible memory mode */ - -#define I596_CB_XMIT_ERR_LATE 0x0800 /* status: error: late collision */ -#define I596_CB_XMIT_ERR_NOCRS 0x0400 /* status: error: no carriers sense */ -#define I596_CB_XMIT_ERR_NOCTS 0x0200 /* status: error: loss of CTS */ -#define I596_CB_XMIT_ERR_UNDER 0x0100 /* status: error: DMA underrun */ -#define I596_CB_XMIT_ERR_MAXCOL 0x0020 /* status: error: maximum collisions */ -#define I596_CB_XMIT_COLLISIONS 0x000f /* status: number of collisions */ - -/* - * I596_CB_TDR: Time Domain Reflectometry Command (p. 4-63) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - ushort time; -} I596_CB_TDR; - -/* - * I596_CB_DUMP: Dump Command (p. 
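The configure-command macros above map one-per-byte onto the 14 conf[] bytes. The sketch below fills a block purely to show how the macros compose; every chosen value (FIFO threshold, slot time, retry count and so on) is an assumption for illustration, not the configuration the dgrs firmware actually programs.

#include <linux/string.h>        /* memset */

/* Assumed example values only; these are not the removed driver's real settings. */
static void fill_conf_example(I596_CB_CONF *cb)
{
	memset((void *)cb->conf, 0, sizeof(cb->conf));

	cb->conf[0]  = I596_CONF0_COUNT;                          /* 14 bytes follow  */
	cb->conf[1]  = I596_CONF1_MON_OFF | I596_CONF1_TxFIFO(8);
	cb->conf[3]  = I596_CONF3_ADDRLEN(6) | I596_CONF3_LOOPOFF;
	cb->conf[4]  = I596_CONF4_IEEE_BOM;                       /* 802.3 backoff    */
	cb->conf[5]  = I596_CONF5_IFS(0x60);                      /* 96-clock IFS     */
	cb->conf[6]  = I596_CONF6_ST_LOW(512);
	cb->conf[7]  = I596_CONF7_ST_HI(512) | I596_CONF7_RETRY(15);
	cb->conf[10] = I596_CONF10_MINLEN(64);
	cb->conf[13] = I596_CONF13_RESERVED;                      /* must-be-one bits */
}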
4-65) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - uchar *buf; -} I596_CB_DUMP; - -/* - * I596_CB_DIAG: Diagnose Command (p. 4-77) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; -} I596_CB_DIAG; - -/* - * I596_CB: Command Block - */ -typedef union _I596_CB -{ - I596_CB_NOP nop; - I596_CB_IA ia; - I596_CB_CONF conf; - I596_CB_MCAST mcast; - I596_CB_XMIT xmit; - I596_CB_TDR tdr; - I596_CB_DUMP dump; - I596_CB_DIAG diag; - - /* command and status in one ulong for speed... */ - I596_CB_FAST fast; -} I596_CB; - -/************************************************************************/ -/* */ -/* I596_SCB: System Configuration Block (p. 4-26) */ -/* */ -/************************************************************************/ -typedef volatile struct -{ - volatile ushort status; /* Status word */ - volatile ushort cmd; /* Command word */ - I596_CB *cbp; - I596_RFD *rfdp; - ulong crc_errs; - ulong align_errs; - ulong resource_errs; - ulong overrun_errs; - ulong rcvcdt_errs; - ulong short_errs; - ushort toff; - ushort ton; -} I596_SCB; - - /* cmd halfword values */ -#define I596_SCB_ACK 0xF000 /* ACKNOWLEDGMENTS */ -#define I596_SCB_ACK_CX 0x8000 /* Ack command completion */ -#define I596_SCB_ACK_FR 0x4000 /* Ack received frame */ -#define I596_SCB_ACK_CNA 0x2000 /* Ack command unit not active */ -#define I596_SCB_ACK_RNR 0x1000 /* Ack rcv unit not ready */ -#define I596_SCB_ACK_ALL 0xF000 /* Ack everything */ - -#define I596_SCB_CUC 0x0700 /* COMMAND UNIT COMMANDS */ -#define I596_SCB_CUC_NOP 0x0000 /* No operation */ -#define I596_SCB_CUC_START 0x0100 /* Start execution of first CB */ -#define I596_SCB_CUC_RESUME 0x0200 /* Resume execution */ -#define I596_SCB_CUC_SUSPEND 0x0300 /* Suspend after current CB */ -#define I596_SCB_CUC_ABORT 0x0400 /* Abort current CB immediately */ -#define I596_SCB_CUC_LOAD 0x0500 /* Load Bus throttle timers */ -#define I596_SCB_CUC_LOADIMM 0x0600 /* Load Bus throttle timers, now */ - -#define I596_SCB_RUC 0x0070 /* RECEIVE UNIT COMMANDS */ -#define I596_SCB_RUC_NOP 0x0000 /* No operation */ -#define I596_SCB_RUC_START 0x0010 /* Start reception */ -#define I596_SCB_RUC_RESUME 0x0020 /* Resume reception */ -#define I596_SCB_RUC_SUSPEND 0x0030 /* Suspend reception */ -#define I596_SCB_RUC_ABORT 0x0040 /* Abort reception */ - -#define I596_SCB_RESET 0x0080 /* Hard reset chip */ - - /* status halfword values */ -#define I596_SCB_STAT 0xF000 /* STATUS */ -#define I596_SCB_CX 0x8000 /* command completion */ -#define I596_SCB_FR 0x4000 /* received frame */ -#define I596_SCB_CNA 0x2000 /* command unit not active */ -#define I596_SCB_RNR 0x1000 /* rcv unit not ready */ - -#define I596_SCB_CUS 0x0700 /* COMMAND UNIT STATUS */ -#define I596_SCB_CUS_IDLE 0x0000 /* Idle */ -#define I596_SCB_CUS_SUSPENDED 0x0100 /* Suspended */ -#define I596_SCB_CUS_ACTIVE 0x0200 /* Active */ - -#define I596_SCB_RUS 0x00F0 /* RECEIVE UNIT STATUS */ -#define I596_SCB_RUS_IDLE 0x0000 /* Idle */ -#define I596_SCB_RUS_SUSPENDED 0x0010 /* Suspended */ -#define I596_SCB_RUS_NORES 0x0020 /* No Resources */ -#define I596_SCB_RUS_READY 0x0040 /* Ready */ -#define I596_SCB_RUS_NORBDS 0x0080 /* No more RBDs modifier */ - -#define I596_SCB_LOADED 0x0008 /* Bus timers loaded */ - -/************************************************************************/ -/* */ -/* I596_ISCP: Intermediate System Configuration Ptr (p 4-26) */ -/* */ -/************************************************************************/ -typedef 
volatile struct -{ - ulong busy; /* Set to 1; I596 clears it when scbp is read */ - I596_SCB *scbp; -} I596_ISCP; - -/************************************************************************/ -/* */ -/* I596_SCP: System Configuration Pointer (p. 4-23) */ -/* */ -/************************************************************************/ -typedef volatile struct -{ - ulong sysbus; - ulong dummy; - I596_ISCP *iscpp; -} I596_SCP; - - /* .sysbus values */ -#define I596_SCP_RESERVED 0x400000 /* Reserved bits must be set */ -#define I596_SCP_INTLOW 0x200000 /* Intr. Polarity active low */ -#define I596_SCP_INTHIGH 0 /* Intr. Polarity active high */ -#define I596_SCP_LOCKDIS 0x100000 /* Lock Function disabled */ -#define I596_SCP_LOCKEN 0 /* Lock Function enabled */ -#define I596_SCP_ETHROTTLE 0x080000 /* External Bus Throttle */ -#define I596_SCP_ITHROTTLE 0 /* Internal Bus Throttle */ -#define I596_SCP_LINEAR 0x040000 /* Linear Mode */ -#define I596_SCP_SEGMENTED 0x020000 /* Segmented Mode */ -#define I596_SCP_82586 0x000000 /* 82586 Mode */ diff --git a/drivers/net/dgrs_plx9060.h b/drivers/net/dgrs_plx9060.h deleted file mode 100644 index 6888ae0d0ce0..000000000000 --- a/drivers/net/dgrs_plx9060.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * PLX 9060 PCI Interface chip - */ - -/* - * PCI configuration registers, same offset on local and PCI sides, - * but on PCI side, must use PCI BIOS calls to read/write. - */ -#define PCI_PLXREGS_BASE_ADDR 0x10 - -#define PCI_PLXREGS_IO_ADDR 0x14 - -#define PCI_SPACE0_BASE_ADDR 0x18 - -#define PCI_ROM_BASE_ADDR 0x30 -# define PCI_ROM_ENABLED 0x00000001 - -#define PCI_INT_LINE 0x3C - -/* - * Registers accessible directly from PCI and local side. - * Offset is from PCI side. Add PLX_LCL_OFFSET for local address. - */ -#define PLX_LCL_OFFSET 0x80 /* Offset of regs from local side */ - -/* - * Local Configuration Registers - */ -#define PLX_SPACE0_RANGE 0x00 /* Range for PCI to Lcl Addr Space 0 */ -#define PLX_SPACE0_BASE_ADDR 0x04 /* Lcl Base address remap */ - -#define PLX_ROM_RANGE 0x10 /* Range for expansion ROM (DMA) */ -#define PLX_ROM_BASE_ADDR 0x14 /* Lcl base address remap for ROM */ - -#define PLX_BUS_REGION 0x18 /* Bus Region Descriptors */ - -/* - * Shared Run Time Registers - */ -#define PLX_MBOX0 0x40 -#define PLX_MBOX1 0x44 -#define PLX_MBOX2 0x48 -#define PLX_MBOX3 0x4C -#define PLX_MBOX4 0x50 -#define PLX_MBOX5 0x54 -#define PLX_MBOX6 0x58 -#define PLX_MBOX7 0x5C - -#define PLX_PCI2LCL_DOORBELL 0x60 - -#define PLX_LCL2PCI_DOORBELL 0x64 - -#define PLX_INT_CSR 0x68 /* Interrupt Control/Status */ -# define PLX_LSERR_ENABLE 0x00000001 -# define PLX_LSERR_PE 0x00000002 -# define PLX_SERR 0x00000004 -# undef PLX_UNUSED /* 0x00000008 */ -# undef PLX_UNUSED /* 0x00000010 */ -# undef PLX_UNUSED /* 0x00000020 */ -# undef PLX_UNUSED /* 0x00000040 */ -# undef PLX_UNUSED /* 0x00000080 */ -# define PLX_PCI_IE 0x00000100 -# define PLX_PCI_DOORBELL_IE 0x00000200 -# define PLX_PCI_ABORT_IE 0x00000400 -# define PLX_PCI_LOCAL_IE 0x00000800 -# define PLX_RETRY_ABORT_ENABLE 0x00001000 -# define PLX_PCI_DOORBELL_INT 0x00002000 -# define PLX_PCI_ABORT_INT 0x00004000 -# define PLX_PCI_LOCAL_INT 0x00008000 -# define PLX_LCL_IE 0x00010000 -# define PLX_LCL_DOORBELL_IE 0x00020000 -# define PLX_LCL_DMA0_IE 0x00040000 -# define PLX_LCL_DMA1_IE 0x00080000 -# define PLX_LCL_DOORBELL_INT 0x00100000 -# define PLX_LCL_DMA0_INT 0x00200000 -# define PLX_LCL_DMA1_INT 0x00400000 -# define PLX_LCL_BIST_INT 0x00800000 -# define PLX_BM_DIRECT_ 0x01000000 -# define PLX_BM_DMA0_ 0x02000000 -# 
define PLX_BM_DMA1_ 0x04000000 -# define PLX_BM_ABORT_ 0x08000000 -# undef PLX_UNUSED /* 0x10000000 */ -# undef PLX_UNUSED /* 0x20000000 */ -# undef PLX_UNUSED /* 0x40000000 */ -# undef PLX_UNUSED /* 0x80000000 */ - -#define PLX_MISC_CSR 0x6c /* EEPROM,PCI,User,Init Control/Status*/ -# define PLX_USEROUT 0x00010000 -# define PLX_USERIN 0x00020000 -# define PLX_EECK 0x01000000 -# define PLX_EECS 0x02000000 -# define PLX_EEWD 0x04000000 -# define PLX_EERD 0x08000000 - -/* - * DMA registers. Offset is from local side - */ -#define PLX_DMA0_MODE 0x100 -# define PLX_DMA_MODE_WIDTH32 0x00000003 -# define PLX_DMA_MODE_WAITSTATES(X) ((X)<<2) -# define PLX_DMA_MODE_NOREADY 0x00000000 -# define PLX_DMA_MODE_READY 0x00000040 -# define PLX_DMA_MODE_NOBTERM 0x00000000 -# define PLX_DMA_MODE_BTERM 0x00000080 -# define PLX_DMA_MODE_NOBURST 0x00000000 -# define PLX_DMA_MODE_BURST 0x00000100 -# define PLX_DMA_MODE_NOCHAIN 0x00000000 -# define PLX_DMA_MODE_CHAIN 0x00000200 -# define PLX_DMA_MODE_DONE_IE 0x00000400 -# define PLX_DMA_MODE_ADDR_HOLD 0x00000800 - -#define PLX_DMA0_PCI_ADDR 0x104 - /* non-chaining mode PCI address */ - -#define PLX_DMA0_LCL_ADDR 0x108 - /* non-chaining mode local address */ - -#define PLX_DMA0_SIZE 0x10C - /* non-chaining mode length */ - -#define PLX_DMA0_DESCRIPTOR 0x110 -# define PLX_DMA_DESC_EOC 0x00000002 -# define PLX_DMA_DESC_TC_IE 0x00000004 -# define PLX_DMA_DESC_TO_HOST 0x00000008 -# define PLX_DMA_DESC_TO_BOARD 0x00000000 -# define PLX_DMA_DESC_NEXTADDR 0xFFFFfff0 - -#define PLX_DMA1_MODE 0x114 -#define PLX_DMA1_PCI_ADDR 0x118 -#define PLX_DMA1_LCL_ADDR 0x11C -#define PLX_DMA1_SIZE 0x110 -#define PLX_DMA1_DESCRIPTOR 0x124 - -#define PLX_DMA_CSR 0x128 -# define PLX_DMA_CSR_0_ENABLE 0x00000001 -# define PLX_DMA_CSR_0_START 0x00000002 -# define PLX_DMA_CSR_0_ABORT 0x00000004 -# define PLX_DMA_CSR_0_CLR_INTR 0x00000008 -# define PLX_DMA_CSR_0_DONE 0x00000010 -# define PLX_DMA_CSR_1_ENABLE 0x00000100 -# define PLX_DMA_CSR_1_START 0x00000200 -# define PLX_DMA_CSR_1_ABORT 0x00000400 -# define PLX_DMA_CSR_1_CLR_INTR 0x00000800 -# define PLX_DMA_CSR_1_DONE 0x00001000 - -#define PLX_DMA_ARB0 0x12C -# define PLX_DMA_ARB0_LATENCY_T 0x000000FF -# define PLX_DMA_ARB0_PAUSE_T 0x0000FF00 -# define PLX_DMA_ARB0_LATENCY_EN 0x00010000 -# define PLX_DMA_ARB0_PAUSE_EN 0x00020000 -# define PLX_DMA_ARB0_BREQ_EN 0x00040000 -# define PLX_DMA_ARB0_PRI 0x00180000 -# define PLX_DMA_ARB0_PRI_ROUND 0x00000000 -# define PLX_DMA_ARB0_PRI_0 0x00080000 -# define PLX_DMA_ARB0_PRI_1 0x00100000 - -#define PLX_DMA_ARB1 0x130 - /* Chan 0: FIFO DEPTH=16 */ -# define PLX_DMA_ARB1_0_P2L_LW_TRIG(X) ( ((X)&15) << 0 ) -# define PLX_DMA_ARB1_0_L2P_LR_TRIG(X) ( ((X)&15) << 4 ) -# define PLX_DMA_ARB1_0_L2P_PW_TRIG(X) ( ((X)&15) << 8 ) -# define PLX_DMA_ARB1_0_P2L_PR_TRIG(X) ( ((X)&15) << 12 ) - /* Chan 1: FIFO DEPTH=8 */ -# define PLX_DMA_ARB1_1_P2L_LW_TRIG(X) ( ((X)& 7) << 16 ) -# define PLX_DMA_ARB1_1_L2P_LR_TRIG(X) ( ((X)& 7) << 20 ) -# define PLX_DMA_ARB1_1_L2P_PW_TRIG(X) ( ((X)& 7) << 24 ) -# define PLX_DMA_ARB1_1_P2L_PR_TRIG(X) ( ((X)& 7) << 28 ) - -typedef struct _dmachain -{ - ulong pciaddr; - ulong lcladdr; - ulong len; - ulong next; -} DMACHAIN; diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 04e3710c9082..5066beb2e7bc 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -10,9 +10,9 @@ (at your option) any later version. 
*/ -#define DRV_NAME "D-Link DL2000-based linux driver" -#define DRV_VERSION "v1.18" -#define DRV_RELDATE "2006/06/27" +#define DRV_NAME "DL2000/TC902x-based linux driver" +#define DRV_VERSION "v1.19" +#define DRV_RELDATE "2007/08/12" #include "dl2k.h" #include <linux/dma-mapping.h> @@ -97,6 +97,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; void *ring_space; dma_addr_t ring_dma; + DECLARE_MAC_BUF(mac); if (!version_printed++) printk ("%s", version); @@ -116,7 +117,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_out_res; } - SET_MODULE_OWNER (dev); SET_NETDEV_DEV(dev, &pdev->dev); #ifdef MEM_MAPPING @@ -257,10 +257,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) card_idx++; - printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n", - dev->name, np->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq); + printk (KERN_INFO "%s: %s, %s, IRQ %d\n", + dev->name, np->name, print_mac(mac, dev->dev_addr), irq); if (tx_coalesce > 1) printk(KERN_INFO "tx_coalesce:\t%d packets\n", tx_coalesce); @@ -292,7 +290,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) return err; } -int +static int find_miiphy (struct net_device *dev) { int i, phy_found = 0; @@ -316,7 +314,7 @@ find_miiphy (struct net_device *dev) return 0; } -int +static int parse_eeprom (struct net_device *dev) { int i, j; @@ -339,17 +337,24 @@ parse_eeprom (struct net_device *dev) #ifdef MEM_MAPPING ioaddr = dev->base_addr; #endif - /* Check CRC */ - crc = ~ether_crc_le (256 - 4, sromdata); - if (psrom->crc != crc) { - printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name); - return -1; + if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ + /* Check CRC */ + crc = ~ether_crc_le (256 - 4, sromdata); + if (psrom->crc != crc) { + printk (KERN_ERR "%s: EEPROM data CRC error.\n", + dev->name); + return -1; + } } /* Set MAC address */ for (i = 0; i < 6; i++) dev->dev_addr[i] = psrom->mac_addr[i]; + if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { + return 0; + } + /* Parse Software Information Block */ i = 0x30; psib = (u8 *) sromdata; @@ -1091,7 +1096,7 @@ clear_stats (struct net_device *dev) } -int +static int change_mtu (struct net_device *dev, int new_mtu) { struct netdev_private *np = netdev_priv(dev); @@ -1326,7 +1331,7 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) #define EEP_BUSY 0x8000 /* Read the EEPROM word */ /* We use I/O instruction to read/write eeprom to avoid fail on some machines */ -int +static int read_eeprom (long ioaddr, int eep_addr) { int i = 1000; diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h index e443065a452e..5b801775f42d 100644 --- a/drivers/net/dl2k.h +++ b/drivers/net/dl2k.h @@ -692,6 +692,7 @@ struct netdev_private { static const struct pci_device_id rio_pci_tbl[] = { {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, + {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE (pci, rio_pci_tbl); diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index c3de81bf090a..27ac010900ab 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -148,7 +148,6 @@ typedef struct board_info { struct resource *irq_res; struct timer_list timer; - struct net_device_stats stats; unsigned char srom[128]; spinlock_t lock; @@ -166,8 +165,6 @@ static int dm9000_stop(struct net_device *); static void dm9000_timer(unsigned long); static void 
dm9000_init_dm9000(struct net_device *); -static struct net_device_stats *dm9000_get_stats(struct net_device *); - static irqreturn_t dm9000_interrupt(int, void *); static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg); @@ -416,7 +413,6 @@ dm9000_probe(struct platform_device *pdev) return -ENOMEM; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); PRINTK2("dm9000_probe()"); @@ -559,7 +555,6 @@ dm9000_probe(struct platform_device *pdev) ndev->tx_timeout = &dm9000_timeout; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->stop = &dm9000_stop; - ndev->get_stats = &dm9000_get_stats; ndev->set_multicast_list = &dm9000_hash_table; #ifdef CONFIG_NET_POLL_CONTROLLER ndev->poll_controller = &dm9000_poll_controller; @@ -600,11 +595,10 @@ dm9000_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret == 0) { - printk("%s: dm9000 at %p,%p IRQ %d MAC: ", - ndev->name, db->io_addr, db->io_data, ndev->irq); - for (i = 0; i < 5; i++) - printk("%02x:", ndev->dev_addr[i]); - printk("%02x\n", ndev->dev_addr[5]); + DECLARE_MAC_BUF(mac); + printk("%s: dm9000 at %p,%p IRQ %d MAC: %s\n", + ndev->name, db->io_addr, db->io_data, ndev->irq, + print_mac(mac, ndev->dev_addr)); } return 0; @@ -700,6 +694,7 @@ dm9000_init_dm9000(struct net_device *dev) static int dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) { + unsigned long flags; board_info_t *db = (board_info_t *) dev->priv; PRINTK3("dm9000_start_xmit\n"); @@ -707,23 +702,17 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) if (db->tx_pkt_cnt > 1) return 1; - netif_stop_queue(dev); - - /* Disable all interrupts */ - iow(db, DM9000_IMR, IMR_PAR); + spin_lock_irqsave(&db->lock, flags); /* Move data to DM9000 TX RAM */ writeb(DM9000_MWCMD, db->io_addr); (db->outblk)(db->io_data, skb->data, skb->len); - db->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; + db->tx_pkt_cnt++; /* TX control: First packet immediately send, second packet queue */ - if (db->tx_pkt_cnt == 0) { - - /* First Packet */ - db->tx_pkt_cnt++; - + if (db->tx_pkt_cnt == 1) { /* Set TX length to DM9000 */ iow(db, DM9000_TXPLL, skb->len & 0xff); iow(db, DM9000_TXPLH, (skb->len >> 8) & 0xff); @@ -732,23 +721,17 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) iow(db, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */ dev->trans_start = jiffies; /* save the time stamp */ - } else { /* Second packet */ - db->tx_pkt_cnt++; db->queue_pkt_len = skb->len; + netif_stop_queue(dev); } + spin_unlock_irqrestore(&db->lock, flags); + /* free this SKB */ dev_kfree_skb(skb); - /* Re-enable resource check */ - if (db->tx_pkt_cnt == 1) - netif_wake_queue(dev); - - /* Re-enable interrupt */ - iow(db, DM9000_IMR, IMR_PAR | IMR_PTM | IMR_PRM); - return 0; } @@ -802,7 +785,7 @@ dm9000_tx_done(struct net_device *dev, board_info_t * db) if (tx_status & (NSR_TX2END | NSR_TX1END)) { /* One packet sent complete */ db->tx_pkt_cnt--; - db->stats.tx_packets++; + dev->stats.tx_packets++; /* Queue packet check & send */ if (db->tx_pkt_cnt > 0) { @@ -864,17 +847,6 @@ dm9000_interrupt(int irq, void *dev_id) } /* - * Get statistics from driver. - */ -static struct net_device_stats * -dm9000_get_stats(struct net_device *dev) -{ - board_info_t *db = (board_info_t *) dev->priv; - return &db->stats; -} - - -/* * A periodic timer routine * Dynamic media sense, allocated Rx buffer... 
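The dm9000_start_xmit rework above replaces the interrupt-mask/unmask dance with the driver spinlock held around the TX FIFO load and the one-or-two-packets-in-flight bookkeeping, and accounts bytes in dev->stats instead of a private counter. A condensed sketch of that shape, with foo_* helper names standing in for the driver's own I/O routines:

	static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);
		unsigned long flags;

		if (priv->tx_pkt_cnt > 1)
			return 1;			/* both hardware TX slots busy */

		spin_lock_irqsave(&priv->lock, flags);

		foo_load_tx_fifo(priv, skb->data, skb->len);	/* placeholder for the FIFO write */
		dev->stats.tx_bytes += skb->len;
		priv->tx_pkt_cnt++;

		if (priv->tx_pkt_cnt == 1) {
			foo_kick_tx(priv, skb->len);	/* first slot: start transmission now */
			dev->trans_start = jiffies;
		} else {
			priv->queue_pkt_len = skb->len;	/* second slot: queued until TX-done */
			netif_stop_queue(dev);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		dev_kfree_skb(skb);
		return 0;
	}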
*/ @@ -951,15 +923,15 @@ dm9000_rx(struct net_device *dev) GoodPacket = false; if (rxhdr.RxStatus & 0x100) { PRINTK1("fifo error\n"); - db->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } if (rxhdr.RxStatus & 0x200) { PRINTK1("crc error\n"); - db->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } if (rxhdr.RxStatus & 0x8000) { PRINTK1("length error\n"); - db->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } } @@ -972,12 +944,12 @@ dm9000_rx(struct net_device *dev) /* Read received packet from RX SRAM */ (db->inblk)(db->io_data, rdptr, RxLen); - db->stats.rx_bytes += RxLen; + dev->stats.rx_bytes += RxLen; /* Pass to upper layer */ skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - db->stats.rx_packets++; + dev->stats.rx_packets++; } else { /* need to dump the packet's data */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 756a6bcb038d..84e14f397d9a 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -71,7 +71,6 @@ static void dummy_setup(struct net_device *dev) dev->change_mtu = NULL; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - SET_MODULE_OWNER(dev); random_ether_addr(dev->dev_addr); } diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 6b6401e9304e..64f35e20fd48 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -539,6 +539,7 @@ struct nic { struct csr __iomem *csr; enum scb_cmd_lo cuc_cmd; unsigned int cbs_avail; + struct napi_struct napi; struct cb *cbs; struct cb *cb_to_use; struct cb *cb_to_send; @@ -557,7 +558,6 @@ struct nic { enum mac mac; enum phy phy; struct params params; - struct net_device_stats net_stats; struct timer_list watchdog; struct timer_list blink_timer; struct mii_if_info mii; @@ -1482,7 +1482,8 @@ static void e100_set_multicast_list(struct net_device *netdev) static void e100_update_stats(struct nic *nic) { - struct net_device_stats *ns = &nic->net_stats; + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; struct stats *s = &nic->mem->stats; u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : (nic->mac < mac_82559_D101M) ? 
(u32 *)&s->xmt_tco_frames : @@ -1604,7 +1605,8 @@ static void e100_watchdog(unsigned long data) else nic->flags &= ~ich_10h_workaround; - mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD); + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); } static void e100_xmit_prepare(struct nic *nic, struct cb *cb, @@ -1659,6 +1661,7 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev) static int e100_tx_clean(struct nic *nic) { + struct net_device *dev = nic->netdev; struct cb *cb; int tx_cleaned = 0; @@ -1673,8 +1676,8 @@ static int e100_tx_clean(struct nic *nic) cb->status); if(likely(cb->skb != NULL)) { - nic->net_stats.tx_packets++; - nic->net_stats.tx_bytes += cb->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; pci_unmap_single(nic->pdev, le32_to_cpu(cb->u.tcb.tbd.buf_addr), @@ -1805,6 +1808,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) static int e100_rx_indicate(struct nic *nic, struct rx *rx, unsigned int *work_done, unsigned int work_to_do) { + struct net_device *dev = nic->netdev; struct sk_buff *skb = rx->skb; struct rfd *rfd = (struct rfd *)skb->data; u16 rfd_status, actual_size; @@ -1849,8 +1853,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, nic->rx_over_length_errors++; dev_kfree_skb_any(skb); } else { - nic->net_stats.rx_packets++; - nic->net_stats.rx_bytes += actual_size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += actual_size; nic->netdev->last_rx = jiffies; netif_receive_skb(skb); if(work_done) @@ -1974,35 +1978,31 @@ static irqreturn_t e100_intr(int irq, void *dev_id) if(stat_ack & stat_ack_rnr) nic->ru_running = RU_SUSPENDED; - if(likely(netif_rx_schedule_prep(netdev))) { + if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) { e100_disable_irq(nic); - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &nic->napi); } return IRQ_HANDLED; } -static int e100_poll(struct net_device *netdev, int *budget) +static int e100_poll(struct napi_struct *napi, int budget) { - struct nic *nic = netdev_priv(netdev); - unsigned int work_to_do = min(netdev->quota, *budget); + struct nic *nic = container_of(napi, struct nic, napi); + struct net_device *netdev = nic->netdev; unsigned int work_done = 0; int tx_cleaned; - e100_rx_clean(nic, &work_done, work_to_do); + e100_rx_clean(nic, &work_done, budget); tx_cleaned = e100_tx_clean(nic); /* If no Rx and Tx cleanup work was done, exit polling mode. 
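The surrounding e100 hunks convert the driver from the old netdev->poll interface to the napi_struct API: a struct napi_struct is embedded in the private data, the interrupt handler schedules it, and the poll routine takes a budget and returns the work actually done instead of adjusting *budget and netdev->quota. A minimal sketch of that pattern, with foo_* names rather than the driver's own symbols:

	struct foo_adapter {
		struct net_device *netdev;
		struct napi_struct napi;
		/* ... device state ... */
	};

	static irqreturn_t foo_intr(int irq, void *dev_id)
	{
		struct net_device *netdev = dev_id;
		struct foo_adapter *adapter = netdev_priv(netdev);

		/* ... acknowledge the device ... */
		if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
			foo_disable_irq(adapter);		/* placeholder helper */
			__netif_rx_schedule(netdev, &adapter->napi);
		}
		return IRQ_HANDLED;
	}

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_adapter *adapter = container_of(napi, struct foo_adapter, napi);
		int work_done = foo_clean_rx(adapter, budget);	/* placeholder helper */

		if (work_done < budget) {
			netif_rx_complete(adapter->netdev, napi);
			foo_enable_irq(adapter);		/* placeholder helper */
		}
		return work_done;
	}

	/* registered once at probe time, replacing netdev->poll/netdev->weight */
	netif_napi_add(netdev, &adapter->napi, foo_poll, 64);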
*/ if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); e100_enable_irq(nic); - return 0; } - *budget -= work_done; - netdev->quota -= work_done; - - return 1; + return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2017,12 +2017,6 @@ static void e100_netpoll(struct net_device *netdev) } #endif -static struct net_device_stats *e100_get_stats(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return &nic->net_stats; -} - static int e100_set_mac_address(struct net_device *netdev, void *p) { struct nic *nic = netdev_priv(netdev); @@ -2071,7 +2065,7 @@ static int e100_up(struct nic *nic) nic->netdev->name, nic->netdev))) goto err_no_irq; netif_wake_queue(nic->netdev); - netif_poll_enable(nic->netdev); + napi_enable(&nic->napi); /* enable ints _after_ enabling poll, preventing a race between * disable ints+schedule */ e100_enable_irq(nic); @@ -2089,7 +2083,7 @@ err_rx_clean_list: static void e100_down(struct nic *nic) { /* wait here for poll to complete */ - netif_poll_disable(nic->netdev); + napi_disable(&nic->napi); netif_stop_queue(nic->netdev); e100_hw_reset(nic); free_irq(nic->pdev->irq, nic->netdev); @@ -2380,11 +2374,6 @@ static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { }; #define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN -static int e100_diag_test_count(struct net_device *netdev) -{ - return E100_TEST_LEN; -} - static void e100_diag_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) { @@ -2447,9 +2436,16 @@ static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { #define E100_NET_STATS_LEN 21 #define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN -static int e100_get_stats_count(struct net_device *netdev) +static int e100_get_sset_count(struct net_device *netdev, int sset) { - return E100_STATS_LEN; + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void e100_get_ethtool_stats(struct net_device *netdev, @@ -2459,7 +2455,7 @@ static void e100_get_ethtool_stats(struct net_device *netdev, int i; for(i = 0; i < E100_NET_STATS_LEN; i++) - data[i] = ((unsigned long *)&nic->net_stats)[i]; + data[i] = ((unsigned long *)&netdev->stats)[i]; data[i++] = nic->tx_deferred; data[i++] = nic->tx_single_collisions; @@ -2500,13 +2496,11 @@ static const struct ethtool_ops e100_ethtool_ops = { .set_eeprom = e100_set_eeprom, .get_ringparam = e100_get_ringparam, .set_ringparam = e100_set_ringparam, - .self_test_count = e100_diag_test_count, .self_test = e100_diag_test, .get_strings = e100_get_strings, .phys_id = e100_phys_id, - .get_stats_count = e100_get_stats_count, .get_ethtool_stats = e100_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_sset_count = e100_get_sset_count, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) @@ -2555,6 +2549,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, struct net_device *netdev; struct nic *nic; int err; + DECLARE_MAC_BUF(mac); if(!(netdev = alloc_etherdev(sizeof(struct nic)))) { if(((1 << debug) - 1) & NETIF_MSG_PROBE) @@ -2565,7 +2560,6 @@ static int __devinit e100_probe(struct pci_dev *pdev, netdev->open = e100_open; netdev->stop = e100_close; netdev->hard_start_xmit = e100_xmit_frame; - netdev->get_stats = e100_get_stats; netdev->set_multicast_list = e100_set_multicast_list; netdev->set_mac_address = e100_set_mac_address; 
netdev->change_mtu = e100_change_mtu; @@ -2573,14 +2567,13 @@ static int __devinit e100_probe(struct pci_dev *pdev, SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); netdev->tx_timeout = e100_tx_timeout; netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; - netdev->poll = e100_poll; - netdev->weight = E100_NAPI_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = e100_netpoll; #endif strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); nic->netdev = netdev; nic->pdev = pdev; nic->msg_enable = (1 << debug) - 1; @@ -2608,7 +2601,6 @@ static int __devinit e100_probe(struct pci_dev *pdev, goto err_out_free_res; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); if (use_io) @@ -2689,11 +2681,9 @@ static int __devinit e100_probe(struct pci_dev *pdev, goto err_out_free; } - DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, " - "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", - (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq, - netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], - netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), + pdev->irq, print_mac(mac, netdev->dev_addr)); return 0; @@ -2734,7 +2724,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state) struct nic *nic = netdev_priv(netdev); if (netif_running(netdev)) - netif_poll_disable(nic->netdev); + napi_disable(&nic->napi); del_timer_sync(&nic->watchdog); netif_carrier_off(nic->netdev); netif_device_detach(netdev); @@ -2780,7 +2770,7 @@ static void e100_shutdown(struct pci_dev *pdev) struct nic *nic = netdev_priv(netdev); if (netif_running(netdev)) - netif_poll_disable(nic->netdev); + napi_disable(&nic->napi); del_timer_sync(&nic->watchdog); netif_carrier_off(nic->netdev); @@ -2805,12 +2795,13 @@ static void e100_shutdown(struct pci_dev *pdev) static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); /* Similar to calling e100_down(), but avoids adpater I/O. */ netdev->stop(netdev); /* Detach; put netif into state similar to hotplug unplug. 
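Several hunks above swap open-coded "%02x:%02x:..." MAC printing for the DECLARE_MAC_BUF()/print_mac() helpers shown in the added lines. A small sketch of how the pair is used together; the function here is only illustrative:

	/* print_mac() formats dev_addr into the stack buffer declared by
	 * DECLARE_MAC_BUF() and returns a pointer to it, so it can be passed
	 * straight to printk as a %s argument. */
	static void foo_announce(struct net_device *netdev)
	{
		DECLARE_MAC_BUF(mac);

		printk(KERN_INFO "%s: MAC %s, IRQ %d\n",
		       netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
	}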
*/ - netif_poll_enable(netdev); + napi_enable(&nic->napi); netif_device_detach(netdev); pci_disable_device(pdev); diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 16a6edfeba41..781ed9968489 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -300,6 +300,7 @@ struct e1000_adapter { int cleaned_count); struct e1000_rx_ring *rx_ring; /* One per active queue */ #ifdef CONFIG_E1000_NAPI + struct napi_struct napi; struct net_device *polling_netdev; /* One per active queue */ #endif int num_tx_queues; diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index bb08375b5f13..6c9a643426f5 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -106,8 +106,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { }; #define E1000_QUEUE_STATS_LEN 0 -#define E1000_GLOBAL_STATS_LEN \ - sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", @@ -619,8 +618,6 @@ e1000_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->fw_version, firmware_version, 32); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_stats = E1000_STATS_LEN; - drvinfo->testinfo_len = E1000_TEST_LEN; drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } @@ -1612,9 +1609,16 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data) } static int -e1000_diag_test_count(struct net_device *netdev) +e1000_get_sset_count(struct net_device *netdev, int sset) { - return E1000_TEST_LEN; + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } } extern void e1000_power_up_phy(struct e1000_adapter *); @@ -1706,6 +1710,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol case E1000_DEV_ID_82545EM_COPPER: case E1000_DEV_ID_82546GB_QUAD_COPPER: case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82571EB_SERDES_QUAD: /* these don't support WoL at all */ wol->supported = 0; break; @@ -1723,7 +1728,9 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol retval = 0; break; case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: + case E1000_DEV_ID_82571PT_QUAD_COPPER: case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: /* quad port adapters only support WoL on port A */ if (!adapter->quad_port_a) { @@ -1896,12 +1903,6 @@ e1000_nway_reset(struct net_device *netdev) return 0; } -static int -e1000_get_stats_count(struct net_device *netdev) -{ - return E1000_STATS_LEN; -} - static void e1000_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) @@ -1963,17 +1964,13 @@ static const struct ethtool_ops e1000_ethtool_ops = { .set_rx_csum = e1000_set_rx_csum, .get_tx_csum = e1000_get_tx_csum, .set_tx_csum = e1000_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = e1000_set_tso, - .self_test_count = e1000_diag_test_count, .self_test = e1000_diag_test, .get_strings = e1000_get_strings, .phys_id = e1000_phys_id, - .get_stats_count = e1000_get_stats_count, .get_ethtool_stats = e1000_get_ethtool_stats, - 
.get_perm_addr = ethtool_op_get_perm_addr, + .get_sset_count = e1000_get_sset_count, }; void e1000_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 9be44699300b..8fa0fe4009d5 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -384,7 +384,11 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82571EB_COPPER: case E1000_DEV_ID_82571EB_FIBER: case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: hw->mac_type = e1000_82571; break; @@ -485,6 +489,8 @@ e1000_set_media_type(struct e1000_hw *hw) case E1000_DEV_ID_82545GM_SERDES: case E1000_DEV_ID_82546GB_SERDES: case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82572EI_SERDES: case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: hw->media_type = e1000_media_type_internal_serdes; @@ -865,10 +871,6 @@ e1000_init_hw(struct e1000_hw *hw) uint32_t ctrl; uint32_t i; int32_t ret_val; - uint16_t pcix_cmd_word; - uint16_t pcix_stat_hi_word; - uint16_t cmd_mmrbc; - uint16_t stat_mmrbc; uint32_t mta_size; uint32_t reg_data; uint32_t ctrl_ext; @@ -958,24 +960,9 @@ e1000_init_hw(struct e1000_hw *hw) break; default: /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ - if (hw->bus_type == e1000_bus_type_pcix) { - e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word); - e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, - &pcix_stat_hi_word); - cmd_mmrbc = (pcix_cmd_word & PCIX_COMMAND_MMRBC_MASK) >> - PCIX_COMMAND_MMRBC_SHIFT; - stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> - PCIX_STATUS_HI_MMRBC_SHIFT; - if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) - stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; - if (cmd_mmrbc > stat_mmrbc) { - pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK; - pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; - e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, - &pcix_cmd_word); - } - } - break; + if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; } /* More time needed for PHY to initialize */ diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index bd000b802ee7..a2a86c54a75c 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -424,6 +424,8 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw); void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); /* Port I/O is only supported on 82544 and newer */ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); int32_t e1000_disable_pciex_master(struct e1000_hw *hw); @@ -475,7 +477,11 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); #define E1000_DEV_ID_82571EB_FIBER 0x105F #define E1000_DEV_ID_82571EB_SERDES 0x1060 #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC +#define 
E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA #define E1000_DEV_ID_82572EI_COPPER 0x107D #define E1000_DEV_ID_82572EI_FIBER 0x107E #define E1000_DEV_ID_82572EI_SERDES 0x107F diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f48b659e0c2b..047263830e6a 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -100,6 +100,7 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x1099), INTEL_E1000_ETHERNET_DEVICE(0x109A), INTEL_E1000_ETHERNET_DEVICE(0x10A4), + INTEL_E1000_ETHERNET_DEVICE(0x10A5), INTEL_E1000_ETHERNET_DEVICE(0x10B5), INTEL_E1000_ETHERNET_DEVICE(0x10B9), INTEL_E1000_ETHERNET_DEVICE(0x10BA), @@ -107,6 +108,9 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x10BC), INTEL_E1000_ETHERNET_DEVICE(0x10C4), INTEL_E1000_ETHERNET_DEVICE(0x10C5), + INTEL_E1000_ETHERNET_DEVICE(0x10D5), + INTEL_E1000_ETHERNET_DEVICE(0x10D9), + INTEL_E1000_ETHERNET_DEVICE(0x10DA), /* required last entry */ {0,} }; @@ -162,7 +166,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data); static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring); #ifdef CONFIG_E1000_NAPI -static int e1000_clean(struct net_device *poll_dev, int *budget); +static int e1000_clean(struct napi_struct *napi, int budget); static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); @@ -541,7 +545,7 @@ int e1000_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->flags); #ifdef CONFIG_E1000_NAPI - netif_poll_enable(adapter->netdev); + napi_enable(&adapter->napi); #endif e1000_irq_enable(adapter); @@ -630,7 +634,7 @@ e1000_down(struct e1000_adapter *adapter) set_bit(__E1000_DOWN, &adapter->flags); #ifdef CONFIG_E1000_NAPI - netif_poll_disable(netdev); + napi_disable(&adapter->napi); #endif e1000_irq_disable(adapter); @@ -868,6 +872,8 @@ e1000_probe(struct pci_dev *pdev, int i, err, pci_using_dac; uint16_t eeprom_data = 0; uint16_t eeprom_apme_mask = E1000_EEPROM_APME; + DECLARE_MAC_BUF(mac); + if ((err = pci_enable_device(pdev))) return err; @@ -893,7 +899,6 @@ e1000_probe(struct pci_dev *pdev, if (!netdev) goto err_alloc_etherdev; - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -932,8 +937,7 @@ e1000_probe(struct pci_dev *pdev, netdev->tx_timeout = &e1000_tx_timeout; netdev->watchdog_timeo = 5 * HZ; #ifdef CONFIG_E1000_NAPI - netdev->poll = &e1000_clean; - netdev->weight = 64; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); #endif netdev->vlan_rx_register = e1000_vlan_rx_register; netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; @@ -1096,7 +1100,9 @@ e1000_probe(struct pci_dev *pdev, break; case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: + case E1000_DEV_ID_82571PT_QUAD_COPPER: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->eeprom_wol = 0; @@ -1128,8 +1134,7 @@ e1000_probe(struct pci_dev *pdev, "32-bit")); } - for (i = 0; i < 6; i++) - printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? 
'\n' : ':'); + printk("%s\n", print_mac(mac, netdev->dev_addr)); /* reset the hardware with the new settings */ e1000_reset(adapter); @@ -1145,9 +1150,6 @@ e1000_probe(struct pci_dev *pdev, /* tell the stack to leave us alone until e1000_open() is called */ netif_carrier_off(netdev); netif_stop_queue(netdev); -#ifdef CONFIG_E1000_NAPI - netif_poll_disable(netdev); -#endif strcpy(netdev->name, "eth%d"); if ((err = register_netdev(netdev))) @@ -1216,12 +1218,13 @@ e1000_remove(struct pci_dev *pdev) * would have already happened in close and is redundant. */ e1000_release_hw_control(adapter); - unregister_netdev(netdev); #ifdef CONFIG_E1000_NAPI for (i = 0; i < adapter->num_rx_queues; i++) dev_put(&adapter->polling_netdev[i]); #endif + unregister_netdev(netdev); + if (!e1000_check_phy_reset_block(&adapter->hw)) e1000_phy_hw_reset(&adapter->hw); @@ -1319,8 +1322,6 @@ e1000_sw_init(struct e1000_adapter *adapter) #ifdef CONFIG_E1000_NAPI for (i = 0; i < adapter->num_rx_queues; i++) { adapter->polling_netdev[i].priv = adapter; - adapter->polling_netdev[i].poll = &e1000_clean; - adapter->polling_netdev[i].weight = 64; dev_hold(&adapter->polling_netdev[i]); set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); } @@ -1437,7 +1438,7 @@ e1000_open(struct net_device *netdev) clear_bit(__E1000_DOWN, &adapter->flags); #ifdef CONFIG_E1000_NAPI - netif_poll_enable(netdev); + napi_enable(&adapter->napi); #endif e1000_irq_enable(adapter); @@ -3260,14 +3261,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; - unsigned int len = skb->len; + unsigned int len = skb->len - skb->data_len; unsigned long flags; - unsigned int nr_frags = 0; - unsigned int mss = 0; + unsigned int nr_frags; + unsigned int mss; int count = 0; int tso; unsigned int f; - len -= skb->data_len; /* This goes back to the question of how to logically map a tx queue * to a flow. Right now, performance is impacted slightly negatively @@ -3301,7 +3301,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * points to just header, pull a few bytes of payload from * frags into skb->data */ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { + if (skb->data_len && hdr_len == len) { switch (adapter->hw.mac_type) { unsigned int pull_size; case e1000_82544: @@ -3780,12 +3780,12 @@ e1000_intr_msi(int irq, void *data) } #ifdef CONFIG_E1000_NAPI - if (likely(netif_rx_schedule_prep(netdev))) { + if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } else e1000_irq_enable(adapter); #else @@ -3865,12 +3865,12 @@ e1000_intr(int irq, void *data) E1000_WRITE_REG(hw, IMC, ~0); E1000_WRITE_FLUSH(hw); } - if (likely(netif_rx_schedule_prep(netdev))) { + if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } else /* this really should not happen! 
if it does it is basically a * bug, but not a hard error, so enable ints and continue */ @@ -3918,10 +3918,10 @@ e1000_intr(int irq, void *data) **/ static int -e1000_clean(struct net_device *poll_dev, int *budget) +e1000_clean(struct napi_struct *napi, int budget) { - struct e1000_adapter *adapter; - int work_to_do = min(*budget, poll_dev->quota); + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct net_device *poll_dev = adapter->netdev; int tx_cleaned = 0, work_done = 0; /* Must NOT use netdev_priv macro here. */ @@ -3942,23 +3942,19 @@ e1000_clean(struct net_device *poll_dev, int *budget) } adapter->clean_rx(adapter, &adapter->rx_ring[0], - &work_done, work_to_do); - - *budget -= work_done; - poll_dev->quota -= work_done; + &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ - if ((!tx_cleaned && (work_done == 0)) || + if ((!tx_cleaned && (work_done < budget)) || !netif_running(poll_dev)) { quit_polling: if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - netif_rx_complete(poll_dev); + netif_rx_complete(poll_dev, napi); e1000_irq_enable(adapter); - return 0; } - return 1; + return work_done; } #endif @@ -4906,6 +4902,20 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) pci_write_config_word(adapter->pdev, reg, *value); } +int +e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void +e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) { diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c new file mode 100644 index 000000000000..cf70522fc851 --- /dev/null +++ b/drivers/net/e1000e/82571.c @@ -0,0 +1,1351 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + */ + +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include "e1000.h" + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static s32 e1000_setup_link_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + return -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + return -E1000_ERR_PHY; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. 
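In e1000_init_nvm_params_82571 above, the SPI EEPROM geometry is either forced by an override or derived from the EECD register's address-size flag. A small sketch of that selection, assuming E1000_EECD_ADDR_BITS comes from the driver's register header as in the code above:

	static void foo_nvm_geometry(u32 eecd, u16 *page_size, u16 *address_bits)
	{
		/* Larger serial EEPROMs use 16-bit addressing and 32-byte pages;
		 * smaller parts use 8-bit addressing and 8-byte pages. */
		if (eecd & E1000_EECD_ADDR_BITS) {
			*page_size = 32;
			*address_bits = 16;
		} else {
			*page_size = 8;
			*address_bits = 8;
		}
	}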
+ */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + nvm->word_size = 1 << size; + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + hw->media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; + + /* check for link */ + switch (hw->media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_82571; + func->check_for_link = e1000e_check_for_copper_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_fiber_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_serdes_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + return 0; +} + +static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + u16 eeprom_data = 0; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == 
E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!adapter->flags & FLAG_IS_QUAD_PORT_A)) + adapter->flags &= ~FLAG_HAS_WOL; + break; + + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, + &eeprom_data); + if (eeprom_data & NVM_WORD1A_ASPM_MASK) + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + hw_dbg(hw, "Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + + swsm &= ~E1000_SWSM_SWESMBI; + + ew32(SWSM, swsm); +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type != e1000_82573) + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
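e1000_get_hw_semaphore_82571/e1000_put_hw_semaphore_82571 above use a write-then-read-back handshake on the SWSM register: set the software semaphore bit, and only treat the semaphore as acquired if the bit reads back set. A generic sketch of that handshake, with foo_* register accessors and constants standing in for the driver's own:

	/* Returns 0 on success, -EBUSY if firmware never released the semaphore. */
	static int foo_get_hw_semaphore(struct foo_hw *hw, int timeout)
	{
		u32 swsm;
		int i;

		for (i = 0; i < timeout; i++) {
			swsm = foo_readl(hw, FOO_SWSM);
			foo_writel(hw, FOO_SWSM, swsm | FOO_SWSM_SWESMBI);

			/* the bit latches only if no other agent held it */
			if (foo_readl(hw, FOO_SWSM) & FOO_SWSM_SWESMBI)
				return 0;

			udelay(50);
		}
		return -EBUSY;
	}

	static void foo_put_hw_semaphore(struct foo_hw *hw)
	{
		u32 swsm = foo_readl(hw, FOO_SWSM);

		foo_writel(hw, FOO_SWSM, swsm & ~FOO_SWSM_SWESMBI);
	}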
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likley contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return ret_val; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msleep(1); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msleep(1); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likley contain an invalid checksum. 
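The flash commit path in e1000_update_nvm_checksum_82571 above waits for the EECD FLUPD bit to clear, bounded by E1000_FLASH_UPDATES iterations, both before triggering the update and after. A stripped-down sketch of that bounded-poll step, with illustrative names:

	/* Poll a busy bit with a 1 ms sleep between reads; give up after max_tries. */
	static int foo_wait_flash_idle(struct foo_hw *hw, int max_tries)
	{
		int i;

		for (i = 0; i < max_tries; i++) {
			if (!(foo_readl(hw, FOO_EECD) & FOO_EECD_FLUPD))
				return 0;		/* previous flash update finished */
			msleep(1);
		}
		return -ETIMEDOUT;
	}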
+ **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i; + u32 eewr = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = (data[i] << E1000_NVM_RW_REG_DATA) | + ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START; + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + msleep(1); + timeout--; + } + if (!timeout) { + hw_dbg(hw, "MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
*/ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl; + u32 extcnf_ctrl; + u32 ctrl_ext; + u32 icr; + s32 ret_val; + u16 i = 0; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + + hw_dbg(hw, "Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + msleep(10); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. */ + if (hw->mac.type == e1000_82573) { + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + do { + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + msleep(2); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + } + + ctrl = er32(CTRL); + + hw_dbg(hw, "Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + if (hw->nvm.type == e1000_nvm_flash_hw) { + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + if (hw->mac.type == e1000_82573) + msleep(25); + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + icr = er32(ICR); + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i; + u16 rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) { + hw_dbg(hw, "Error initializing identification LED\n"); + return ret_val; + } + + /* Disabling VLAN filtering */ + hw_dbg(hw, "Initializing the IEEE VLAN\n"); + e1000e_clear_vfta(hw); + + /* Setup the receive address. */ + /* If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. 
+ */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg(hw, "Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82571(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL, reg_data); + + /* ...for both queues. */ + if (mac->type != e1000_82573) { + reg_data = er32(TXDCTL1); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL1, reg_data); + } else { + e1000e_enable_tx_pkt_filtering(hw); + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. + **/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL); + reg |= (1 << 22); + ew32(TXDCTL, reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL1); + reg |= (1 << 22); + ew32(TXDCTL1, reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC0); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + default: + break; + } + ew32(TARC0, reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC1); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC1, reg); + break; + default: + break; + } + + /* Device Control */ + if (hw->mac.type == e1000_82573) { + reg = er32(CTRL); + reg &= ~(1 << 29); + ew32(CTRL, reg); + } + + /* Extended Device Control */ + if (hw->mac.type == e1000_82573) { + reg = er32(CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + } +} + +/** + * e1000e_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000e_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + if (hw->mac.type == e1000_82573) { + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. 
+ */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + e1e_flush(); + } +} + +/** + * e1000_mc_addr_list_update_82571 - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count, + u32 rar_used_count, + u32 rar_count) +{ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + + e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count, + rar_used_count, rar_count); +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + /* 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->mac.type == e1000_82573) + hw->mac.fc = e1000_fc_full; + + return e1000e_setup_link(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + u32 led_ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + /* Setup activity LED */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. 
+ **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twidling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, + E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (hw->mac.type == e1000_82573 && + *data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + else if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administed address state. + **/ +bool e1000e_get_laa_state_82571(struct e1000_hw *hw) +{ + if (hw->mac.type != e1000_82571) + return 0; + + return hw->dev_spec.e82571.laa_is_present; +} + +/** + * e1000e_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administed address state. + **/ +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec.e82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. + */ + e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + if (nvm->type != e1000_nvm_flash_hw) + return 0; + + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = e1000_read_nvm(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. 
Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = e1000_read_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = e1000_write_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + } + } + + return 0; +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + u32 temp; + + e1000e_clear_hw_cntrs_base(hw); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); + + temp = er32(IAC); + temp = er32(ICRXOC); + + temp = er32(ICRXPTC); + temp = er32(ICRXATC); + temp = er32(ICTXPTC); + temp = er32(ICTXATC); + temp = er32(ICTXQEC); + temp = er32(ICTXQMTC); + temp = er32(ICRXDMTC); +} + +static struct e1000_mac_operations e82571_mac_ops = { + .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, + /* .check_for_link: media type dependent */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + /* .get_link_up_info: media type dependent */ + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .mc_addr_list_update = e1000_mc_addr_list_update_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ +}; + +static struct e1000_phy_operations e82_phy_ops_igp = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_phy_info = e1000e_get_phy_info_igp, + .read_phy_reg = e1000e_read_phy_reg_igp, + .release_phy = e1000_put_hw_semaphore_82571, + .reset_phy = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_phy_reg = e1000e_write_phy_reg_igp, +}; + +static struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_m88, + .get_phy_info = e1000e_get_phy_info_m88, + .read_phy_reg = e1000e_read_phy_reg_m88, + .release_phy = e1000_put_hw_semaphore_82571, + .reset_phy = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_phy_reg = e1000e_write_phy_reg_m88, +}; + +static struct e1000_nvm_operations e82571_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_82571, + .read_nvm = e1000e_read_nvm_spi, + .release_nvm = e1000_release_nvm_82571, + 
.update_nvm = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate_nvm = e1000_validate_nvm_checksum_82571, + .write_nvm = e1000_write_nvm_82571, +}; + +static struct e1000_nvm_operations e82573_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_82571, + .read_nvm = e1000e_read_nvm_eerd, + .release_nvm = e1000_release_nvm_82571, + .update_nvm = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate_nvm = e1000_validate_nvm_checksum_82571, + .write_nvm = e1000_write_nvm_82571, +}; + +struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_STATS_ICR_ICT + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .pba = 38, + .get_invariants = e1000_get_invariants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_STATS_ICR_ICT + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .pba = 38, + .get_invariants = e1000_get_invariants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_STATS_ICR_ICT + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_ASPM + | FLAG_HAS_ERT + | FLAG_HAS_SWSM_ON_LOAD, + .pba = 20, + .get_invariants = e1000_get_invariants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82573_nvm_ops, +}; + diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile new file mode 100644 index 000000000000..650f866e7ac2 --- /dev/null +++ b/drivers/net/e1000e/Makefile @@ -0,0 +1,37 @@ +################################################################################ +# +# Intel PRO/1000 Linux driver +# Copyright(c) 1999 - 2007 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS <linux.nics@intel.com> +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/1000 ethernet driver +# + +obj-$(CONFIG_E1000E) += e1000e.o + +e1000e-objs := 82571.o ich8lan.o es2lan.o \ + lib.o phy.o param.o ethtool.o netdev.o + diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h new file mode 100644 index 000000000000..b32ed45b4b34 --- /dev/null +++ b/drivers/net/e1000e/defines.h @@ -0,0 +1,739 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 
0x00000010 /* Broadcast Wakeup Enable */ + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ + +/* Receive Decriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer 
sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ + +/* Constants used to intrepret the masked PCI-X bus speed. */ + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + 
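Editor's illustration: the Transmit Control (TCTL) bits just above are single-bit enables plus two multi-bit fields, the collision threshold (mask 0x00000ff0, bits 11:4) and the collision distance (mask 0x003ff000, bits 21:12). A driver composes the register by shifting the desired counts into those fields and OR-ing in the enable/pad bits. The standalone sketch below only demonstrates that composition; the constants are copied from this header (E1000_COLLISION_THRESHOLD, E1000_CT_SHIFT, E1000_COLLISION_DISTANCE and E1000_COLD_SHIFT appear a little further down in the same file), and the main() wrapper is purely for illustration, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the Transmit Control definitions from defines.h above. */
#define E1000_TCTL_EN        0x00000002 /* enable tx */
#define E1000_TCTL_PSP       0x00000008 /* pad short packets */
#define E1000_TCTL_CT        0x00000ff0 /* collision threshold field, bits 11:4 */
#define E1000_TCTL_COLD      0x003ff000 /* collision distance field, bits 21:12 */

#define E1000_COLLISION_THRESHOLD 15
#define E1000_CT_SHIFT            4
#define E1000_COLLISION_DISTANCE  63
#define E1000_COLD_SHIFT          12

/* Compose a TCTL value: shift each count into its field, mask to the
 * field width, and OR in the single-bit controls. */
static uint32_t tctl_compose(uint32_t threshold, uint32_t distance)
{
	uint32_t tctl = E1000_TCTL_EN | E1000_TCTL_PSP;

	tctl |= (threshold << E1000_CT_SHIFT) & E1000_TCTL_CT;
	tctl |= (distance << E1000_COLD_SHIFT) & E1000_TCTL_COLD;
	return tctl;
}

int main(void)
{
	/* Using the header's default threshold (15) and distance (63)
	 * yields TCTL = 0x0003f0fa. */
	printf("TCTL = 0x%08x\n",
	       tctl_compose(E1000_COLLISION_THRESHOLD,
			    E1000_COLLISION_DISTANCE));
	return 0;
}
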
+/* Transmit Arbitration Count */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ + +/* Header split receive */ +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Regiser */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 
1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 + +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h new file mode 100644 index 000000000000..d2499bb07c13 --- /dev/null +++ b/drivers/net/e1000e/e1000.h @@ -0,0 +1,514 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <linux/netdevice.h> + +#include "hw.h" + +struct e1000_info; + +#define ndev_printk(level, netdev, format, arg...) \ + printk(level "%s: %s: " format, (netdev)->dev.parent->bus_id, \ + (netdev)->name, ## arg) + +#ifdef DEBUG +#define ndev_dbg(netdev, format, arg...) \ + ndev_printk(KERN_DEBUG , netdev, format, ## arg) +#else +#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) +#endif + +#define ndev_err(netdev, format, arg...) \ + ndev_printk(KERN_ERR , netdev, format, ## arg) +#define ndev_info(netdev, format, arg...) \ + ndev_printk(KERN_INFO , netdev, format, ## arg) +#define ndev_warn(netdev, format, arg...) \ + ndev_printk(KERN_WARNING , netdev, format, ## arg) +#define ndev_notice(netdev, format, arg...) \ + ndev_printk(KERN_NOTICE , netdev, format, ## arg) + + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 80 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 80 + +/* Early Receive defines */ +#define E1000_ERT_2048 0x100 + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_80003es2lan, + board_ich8lan, + board_ich9lan, +}; + +struct e1000_queue_stats { + u64 packets; + u64 bytes; +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* TX */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + }; + /* RX */ + struct page *page; + }; + +}; + +struct e1000_ring { + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct sk_buff *rx_skb_top; + + struct e1000_queue_stats stats; +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + struct vlan_group *vlgrp; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + + spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ + + /* this is still needed for 82571 and above */ + atomic_t irq_sem; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * TX + */ + struct e1000_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + struct napi_struct napi; + + unsigned long tx_queue_len; + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* TX stats */ + u64 tpt_old; + u64 colc_old; + u64 gotcl_old; + u32 gotcl; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * RX + */ + bool (*clean_rx) (struct e1000_adapter *adapter, + int *work_done, int work_to_do) + ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + int cleaned_count); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* RX stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u64 gorcl_old; + u32 gorcl; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + spinlock_t stats_lock; /* prevent concurrent stats updates */ + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + + u32 eeprom_wol; + u32 wol; + u32 pba; + + u8 fc_autoneg; + + unsigned long led_status; + + unsigned int flags; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + s32 (*get_invariants)(struct e1000_adapter *); + struct e1000_mac_operations *mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +#define FLAG_HAS_ERT (1 << 4) +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_HAS_ASPM (1 << 8) +#define FLAG_HAS_STATS_ICR_ICT (1 << 9) +#define FLAG_HAS_STATS_PTC_PRC (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 
13) +#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +#define FLAG_RX_CSUM_ENABLED (1 << 28) +#define FLAG_TSO_FORCE (1 << 29) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +extern void e1000e_check_options(struct e1000_adapter *adapter); +extern void e1000e_set_ethtool_ops(struct net_device *netdev); + +extern int e1000e_up(struct e1000_adapter *adapter); +extern void e1000e_down(struct e1000_adapter *adapter); +extern void e1000e_reinit_locked(struct e1000_adapter *adapter); +extern void e1000e_reset(struct e1000_adapter *adapter); +extern void e1000e_power_up_phy(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); +extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_update_stats(struct e1000_adapter *adapter); + +extern unsigned int copybreak; + +extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); + +extern struct e1000_info e1000_82571_info; +extern struct e1000_info e1000_82572_info; +extern struct e1000_info e1000_82573_info; +extern struct e1000_info e1000_ich8_info; +extern struct e1000_info e1000_ich9_info; +extern struct e1000_info e1000_es2_info; + +extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num); + +extern s32 e1000e_commit_phy(struct e1000_hw *hw); + +extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); + +extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_led_on_generic(struct e1000_hw *hw); +extern s32 e1000e_led_off_generic(struct e1000_hw *hw); +extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 
e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +extern s32 e1000e_id_led_init(struct e1000_hw *hw); +extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +extern s32 e1000e_setup_link(struct e1000_hw *hw); +extern void e1000e_clear_vfta(struct e1000_hw *hw); +extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +extern void e1000e_config_collision_dist(struct e1000_hw *hw); +extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); +extern s32 e1000e_blink_led(struct e1000_hw *hw); +extern void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +extern void e1000e_reset_adaptive(struct e1000_hw *hw); +extern void e1000e_update_adaptive(struct e1000_hw *hw); + +extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); +extern s32 e1000e_get_phy_id(struct e1000_hw *hw); +extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +extern s32 e1000e_check_downshift(struct e1000_hw *hw); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset_phy(hw); +} + +static inline s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + return hw->phy.ops.check_reset_block(hw); +} + +static inline s32 
e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_phy_reg(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_phy_reg(hw, offset, data); +} + +static inline s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + return hw->phy.ops.get_cable_length(hw); +} + +extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); +extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +extern void e1000e_release_nvm(struct e1000_hw *hw); +extern void e1000e_reload_nvm(struct e1000_hw *hw); +extern s32 e1000e_read_mac_addr(struct e1000_hw *hw); + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate_nvm(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update_nvm(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.read_nvm(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.write_nvm(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_phy_info(hw); +} + +extern bool e1000e_check_mng_mode(struct e1000_hw *hw); +extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +#endif /* _E1000_H_ */ diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c new file mode 100644 index 000000000000..88657adf965f --- /dev/null +++ b/drivers/net/e1000e/es2lan.c @@ -0,0 +1,1232 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include "e1000.h" + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 + /* 1=Reverse Auto-Negotiation */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M + 1 = 50-80M + 2 = 80-110M + 3 = 110-140M + 4 = >140M */ + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Power Management Control Register (Page 193, Register 20) */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + /* 1=Enable SERDES Electrical Idle */ + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". + */ +static const u16 e1000_gg82563_cable_length_table[] = + { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
+ **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + hw->media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 
1 : 0; + + /* check for link */ + switch (hw->media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; + func->check_for_link = e1000e_check_for_copper_link; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_fiber_link; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_serdes_link; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + return 0; +} + +static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. This is a function + * pointer entry point called by the api module. + **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. This is a + * function pointer entry point called by the api module. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 200; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg(hw, + "Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0); + /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. This is a function pointer entry + * point called by the api module. + **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) + page_select = GG82563_PHY_PAGE_SELECT; + else + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp); + if (ret_val) + return ret_val; + + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + return ret_val; + } + + udelay(200); + + ret_val = e1000e_read_phy_reg_m88(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. This is a function pointer entry + * point called by the api module. 
+ **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) + page_select = GG82563_PHY_PAGE_SELECT; + else + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp); + if (ret_val) + return ret_val; + + + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) + return -E1000_ERR_PHY; + + udelay(200); + + ret_val = e1000e_write_phy_reg_m88(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. This is a function + * pointer entry point called by the api module. + **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + msleep(1); + timeout--; + } + if (!timeout) { + hw_dbg(hw, "MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. 
*/ + phy_data |= MII_CR_RESET; + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link " + "on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + u16 index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index+5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, + speed, + duplex); + if (ret_val) + return ret_val; + if (*speed == SPEED_1000) + ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); + else + ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, + *duplex); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + u32 icr; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
+ */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + + hw_dbg(hw, "Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + msleep(10); + + ctrl = er32(CTRL); + + hw_dbg(hw, "Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + icr = er32(ICR); + + return 0; +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) { + hw_dbg(hw, "Error initializing identification LED\n"); + return ret_val; + } + + /* Disabling VLAN filtering */ + hw_dbg(hw, "Initializing the IEEE VLAN\n"); + e1000e_clear_vfta(hw); + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + hw_dbg(hw, "Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000e_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL, reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL1); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL1, reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL); + reg |= (1 << 22); + ew32(TXDCTL, reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL1); + reg |= (1 << 22); + ew32(TXDCTL1, reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC0); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC0, reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC1); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC1, reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl_ext; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, + &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, + data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + hw_dbg(hw, "Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass RX and TX FIFO's */ + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, + E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); + ew32(CTRL_EXT, ctrl_ext); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. 
+ */ + if (!e1000e_check_mng_mode(hw)) { + /* Enable Electrical Idle on the PHY */ + data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; + ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); + if (ret_val) + return ret_val; + } + + /* Workaround: Disable padding in Kumeran interface in the MAC + * and in the PHY to avoid CRC errors. + */ + ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_ICR_DIS_PADDING; + ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 + * @hw: pointer to the HW structure + * + * Essentially a wrapper for setting up all things "copper" related. + * This is a function pointer entry point called by the mac module. + **/ +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each + * iteration and increase the max iterations when + * polling the phy; this fixes erroneous timeouts at 10Mbps. */ + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + &reg_data); + if (ret_val) + return ret_val; + reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); + if (ret_val) + return ret_val; + + ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return 0; +} + +/** + * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation + * @hw: pointer to the HW structure + * @duplex: current duplex setting + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation. + **/ +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) +{ + s32 ret_val; + u32 tipg; + u16 reg_data; + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = er32(TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; + ew32(TIPG, tipg); + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); + if (ret_val) + return ret_val; + + if (duplex == HALF_DUPLEX) + reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; + else + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return 0; +} + +/** + * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * gigabit operation.
+ **/ +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + u32 tipg; + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = er32(TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, tipg); + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); + if (ret_val) + return ret_val; + + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return ret_val; +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + u32 temp; + + e1000e_clear_hw_cntrs_base(hw); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); + + temp = er32(IAC); + temp = er32(ICRXOC); + + temp = er32(ICRXPTC); + temp = er32(ICRXATC); + temp = er32(ICTXPTC); + temp = er32(ICTXATC); + temp = er32(ICTXQEC); + temp = er32(ICTXQMTC); + temp = er32(ICRXDMTC); +} + +static struct e1000_mac_operations es2_mac_ops = { + .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .mc_addr_list_update = e1000e_mc_addr_list_update_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link, + /* setup_physical_interface dependent on media type */ +}; + +static struct e1000_phy_operations es2_phy_ops = { + .acquire_phy = e1000_acquire_phy_80003es2lan, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, + .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_phy_info = e1000e_get_phy_info_m88, + .read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release_phy = e1000_release_phy_80003es2lan, + .reset_phy = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan, +}; + +static struct e1000_nvm_operations es2_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_80003es2lan, + .read_nvm = e1000e_read_nvm_eerd, + .release_nvm = e1000_release_nvm_80003es2lan, + .update_nvm = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate_nvm = e1000e_validate_nvm_checksum_generic, + .write_nvm = e1000_write_nvm_80003es2lan, +}; + +struct e1000_info e1000_es2_info = { + .mac =
e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_STATS_ICR_ICT + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, + .pba = 38, + .get_invariants = e1000_get_invariants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; + diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c new file mode 100644 index 000000000000..b7a7e2ae5e13 --- /dev/null +++ b/drivers/net/e1000e/ethtool.c @@ -0,0 +1,1780 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/delay.h> + +#include "e1000.h" + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(net_stats.rx_errors) }, + { "tx_errors", E1000_STAT(net_stats.tx_errors) }, + { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, + { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "rx_header_split", E1000_STAT(rx_hdr_split) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, + { "rx_dma_failed", E1000_STAT(rx_dma_failed) }, + { "tx_dma_failed", E1000_STAT(tx_dma_failed) }, +}; + +#define E1000_GLOBAL_STATS_LEN \ + sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN + +static int 
e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + + adapter->hw.mac.ops.get_link_up_info(hw, &adapter->link_speed, + &adapter->link_duplex); + ecmd->speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF */ + + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((adapter->hw.media_type == e1000_media_type_fiber) && + spddplx != (SPEED_1000 + DUPLEX_FULL)) { + ndev_err(adapter->netdev, "Unsupported Speed/Duplex " + "configuration\n"); + return -EINVAL; + } + + switch (spddplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + ndev_err(adapter->netdev, "Unsupported Speed/Duplex " + "configuration\n"); + return -EINVAL; + } + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed */ + if (e1000_check_reset_block(hw)) { + ndev_err(netdev, "Cannot change link " + "characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + 
ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + } else { + if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->mac.fc == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->mac.fc == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->mac.fc == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->mac.fc = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->mac.fc = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->mac.fc = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->mac.fc = e1000_fc_none; + + hw->mac.original_fc = hw->mac.fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->mac.fc = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + retval = ((hw->media_type == e1000_media_type_fiber) ? + hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_rx_csum(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return (adapter->flags & FLAG_RX_CSUM_ENABLED); +} + +static int e1000_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->flags |= FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + return 0; +} + +static u32 e1000_get_tx_csum(struct net_device *netdev) +{ + return ((netdev->features & NETIF_F_HW_CSUM) != 0); +} + +static int e1000_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int e1000_set_tso(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + + ndev_info(netdev, "TSO is %s\n", + data ? 
"Enabled" : "Disabled"); + adapter->flags |= FLAG_TSO_FORCE; + return 0; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + u8 revision_id; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); + + regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, 
word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for 82573 controllers */ + if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82573))) + e1000e_update_nvm_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + u16 eeprom_data; + + strncpy(drvinfo->driver, e1000e_driver_name, 32); + strncpy(drvinfo->version, e1000e_driver_version, 32); + + /* EEPROM image version # is reported as firmware version # for + * PCI-E controllers */ + e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); + sprintf(firmware_version, "%d.%d-%d", + (eeprom_data & 0xF000) >> 12, + (eeprom_data & 0x0FF0) >> 4, + eeprom_data & 0x000F); + + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct 
e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring, *tx_old; + struct e1000_ring *rx_ring, *rx_old; + int err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000e_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!tx_ring) + goto err_alloc_tx; + + rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!rx_ring) + goto err_alloc_rx; + + adapter->tx_ring = tx_ring; + adapter->rx_ring = rx_ring; + + rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); + rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); + tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again */ + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000e_free_rx_resources(adapter); + e1000e_free_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rx_ring; + adapter->tx_ring = tx_ring; + err = e1000e_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +err_setup_tx: + e1000e_free_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rx_ring); +err_alloc_rx: + kfree(tx_ring); +err_alloc_tx: + e1000e_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +#define REG_PATTERN_TEST(R, M, W) REG_PATTERN_TEST_ARRAY(R, 0, M, W) +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, writeable) \ +{ \ + u32 _pat; \ + u32 _value; \ + u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ + for (_pat = 0; _pat < ARRAY_SIZE(_test); _pat++) { \ + E1000_WRITE_REG_ARRAY(hw, reg, offset, \ + (_test[_pat] & writeable)); \ + _value = E1000_READ_REG_ARRAY(hw, reg, offset); \ + if (_value != (_test[_pat] & writeable & mask)) { \ + ndev_err(netdev, "pattern test reg %04X " \ + "failed: got 0x%08X expected 0x%08X\n", \ + reg + offset, \ + value, (_test[_pat] & writeable & mask)); \ + *data = reg; \ + return 1; \ + } \ + } \ +} + +#define REG_SET_AND_CHECK(R, M, W) \ +{ \ + u32 _value; \ + __ew32(hw, R, W & M); \ + _value = __er32(hw, R); \ + if ((W & M) != (_value & M)) { \ + ndev_err(netdev, "set/check reg %04X test failed: " \ + "got 0x%08X expected 0x%08X\n", R, (_value & M), \ + (W & M)); \ + *data = R; \ + return 1; \ + } \ +} + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct net_device *netdev = adapter->netdev; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + switch (mac->type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + case e1000_82573: + case e1000_ich8lan: + case e1000_ich9lan: + toggle = 0x7FFFF033; + break; + default: + toggle = 0xFFFFF833; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + ndev_err(netdev, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = (((mac->type == e1000_ich8lan) || + (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + for (i = 0; i < mac->rar_entry_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), + 0x8003FFFF, 0xFFFFFFFF); + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16) NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet */ + /* Hook up test interrupt handler just for this test */ + if 
(!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + ndev_info(netdev, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + msleep(10); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + + if (((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) + continue; + + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + if (tx_ring->buffer_info[i].dma) + pci_unmap_single(pdev, + tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].length, + PCI_DMA_TODEVICE); + if (tx_ring->buffer_info[i].skb) + dev_kfree_skb(tx_ring->buffer_info[i].skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + if (rx_ring->buffer_info[i].dma) + pci_unmap_single(pdev, + rx_ring->buffer_info[i].dma, + 2048, PCI_DMA_FROMDEVICE); + if (rx_ring->buffer_info[i].skb) + dev_kfree_skb(rx_ring->buffer_info[i].skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int size; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + 
+ size = tx_ring->count * sizeof(struct e1000_buffer); + tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); + if (!tx_ring->buffer_info) { + ret_val = 1; + goto err_nomem; + } + memset(tx_ring->buffer_info, 0, size); + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL, + ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64) tx_ring->dma >> 32)); + ew32(TDLEN, + tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, + E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + pci_map_single(pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64( + tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + size = rx_ring->count * sizeof(struct e1000_buffer); + rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); + if (!rx_ring->buffer_info) { + ret_val = 5; + goto err_nomem; + } + memset(rx_ring->buffer_info, 0, size); + + rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64) rx_ring->dma >> 32)); + ew32(RDLEN, rx_ring->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + pci_map_single(pdev, skb->data, 2048, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter 
*adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + adapter->hw.mac.autoneg = 0; + + if (adapter->hw.phy.type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1e_wphy(hw, PHY_CONTROL, 0x8140); + } else if (adapter->hw.phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + + ctrl_reg = er32(CTRL); + + if (adapter->hw.phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + } else { + /* force 1000, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + } + + if (adapter->hw.media_type == e1000_media_type_copper && + adapter->hw.phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* Set the ILOS bit on the fiber Nic if half duplex link is + * detected. */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+ */ + if (adapter->hw.phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link = 0; + + /* special requirements for 82571/82572 fiber adapters */ + + /* jump through hoops to make sure link is up because serdes + * link is hardwired up */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* special write to serdes control register to enable SerDes analog + * loopback */ +#define E1000_SERDES_LB_ON 0x410 + ew32(SCTL, E1000_SERDES_LB_ON); + msleep(10); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* save CTRL_EXT to restore later, reuse an empty variable (unused + on mac_type 80003es2lan) */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + break; + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, + adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { +#define E1000_SERDES_LB_OFF 0x400 + ew32(SCTL, E1000_SERDES_LB_OFF); + msleep(10); + break; + } + /* Fall Through */ + default: + hw->mac.autoneg = 1; + if (hw->phy.type 
== e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1e_wphy(hw, PHY_CONTROL, phy_reg); + e1000e_commit_phy(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT, rx_ring->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame( + tx_ring->buffer_info[i].skb, 1024); + pci_dma_sync_single_for_device(pdev, + tx_ring->buffer_info[k].dma, + tx_ring->buffer_info[k].length, + PCI_DMA_TODEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT, k); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + pci_dma_sync_single_for_cpu(pdev, + rx_ring->buffer_info[l].dma, 2048, + PCI_DMA_FROMDEVICE); + + ret_val = e1000_check_lbtest_frame( + rx_ring->buffer_info[l].skb, 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active */ + if (e1000_check_reset_block(&adapter->hw)) { + ndev_err(adapter->netdev, "Cannot do PHY loopback test " + "when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->media_type ==
e1000_media_type_internal_serdes) { + int i = 0; + hw->mac.serdes_has_link = 0; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + msleep(4000); + + if (!(er32(STATUS) & + E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + ndev_info(netdev, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000e_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + /* make sure the phy is powered up */ + e1000e_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.wait_for_link = 1; + e1000e_reset(adapter); + adapter->hw.phy.wait_for_link = 0; + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + ndev_info(netdev, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + ndev_err(netdev, "Interface does not support " + "directed (unicast) frame wake-up packets\n"); + } + + 
if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (!(adapter->flags & FLAG_HAS_WOL)) + return wol->wolopts ? -EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + return 0; +} + +/* toggle LED 4 times per second = 2 "blinks" per second */ +#define E1000_ID_INTERVAL (HZ/4) + +/* bit defines for adapter->led_status */ +#define E1000_LED_ON 0 + +static void e1000_led_blink_callback(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) + adapter->hw.mac.ops.led_off(&adapter->hw); + else + adapter->hw.mac.ops.led_on(&adapter->hw); + + mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); +} + +static int e1000_phys_id(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) + data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); + + if (adapter->hw.phy.type == e1000_phy_ife) { + if (!adapter->blink_timer.function) { + init_timer(&adapter->blink_timer); + adapter->blink_timer.function = + e1000_led_blink_callback; + adapter->blink_timer.data = (unsigned long) adapter; + } + mod_timer(&adapter->blink_timer, jiffies); + msleep_interruptible(data * 1000); + del_timer_sync(&adapter->blink_timer); + e1e_wphy(&adapter->hw, + IFE_PHY_SPECIAL_CONTROL_LED, 0); + } else { + e1000e_blink_led(&adapter->hw); + msleep_interruptible(data * 1000); + } + + adapter->hw.mac.ops.led_off(&adapter->hw); + clear_bit(E1000_LED_ON, &adapter->led_status); + adapter->hw.mac.ops.cleanup_led(&adapter->hw); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + + e1000e_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e1000_gstrings_test, + E1000_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .get_rx_csum = e1000_get_rx_csum, + .set_rx_csum = e1000_set_rx_csum, + .get_tx_csum = e1000_get_tx_csum, + .set_tx_csum = e1000_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = e1000_set_tso, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .phys_id = e1000_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h new file mode 100644 index 000000000000..aa82f1afb7fb --- /dev/null +++ b/drivers/net/e1000e/hw.h @@ -0,0 +1,864 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include <linux/types.h> + +struct e1000_hw; +struct e1000_adapter; + +#include "defines.h" + +#define er32(reg) __er32(hw, E1000_##reg) +#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +enum e1e_registers { + E1000_CTRL = 0x00000, /* Device Control - RW */ + E1000_STATUS = 0x00008, /* Device Status - RO */ + E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ + E1000_EERD = 0x00014, /* EEPROM Read - RW */ + E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ + E1000_FLA = 0x0001C, /* Flash Access - RW */ + E1000_MDIC = 0x00020, /* MDI Control - RW */ + E1000_SCTL = 0x00024, /* SerDes Control - RW */ + E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ + E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ + E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ + E1000_FCT = 0x00030, /* Flow Control Type - RW */ + E1000_VET = 0x00038, /* VLAN Ether Type - RW */ + E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ + E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ + E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ + E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ + E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ + E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ + E1000_RCTL = 0x00100, /* RX Control - RW */ + E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ + E1000_TXCW = 0x00178, /* TX Configuration Word - RW */ + E1000_RXCW = 0x00180, /* RX Configuration Word - RO */ + E1000_TCTL = 0x00400, /* TX Control - RW */ + E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */ + E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */ + E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */ + E1000_LEDCTL = 0x00E00, /* LED Control - RW */ + E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ + E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ + E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ + E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ + E1000_PBS = 0x01008, /* Packet Buffer Size */ + E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ + E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ + E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ + E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ + E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ + E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ + E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ + E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */ + E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */ + E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */ + E1000_RDH = 0x02810, /* RX Descriptor Head - RW */ + E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */ + E1000_RDTR = 0x02820, /* RX Delay Timer - RW */ + E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. 
+ * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + * + */ +#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) + E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ + E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */ + E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */ + E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */ + E1000_TDH = 0x03810, /* TX Descriptor Head - RW */ + E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */ + E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */ + E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */ + E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */ + E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */ + E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */ + E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */ + E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ + E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ + E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ + E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ + E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ + E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ + E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ + E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ + E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ + E1000_COLC = 0x04028, /* Collision Count - R/clr */ + E1000_DC = 0x04030, /* Defer Count - R/clr */ + E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */ + E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ + E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ + E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ + E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */ + E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */ + E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */ + E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */ + E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */ + E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */ + E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */ + E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */ + E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */ + E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */ + E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */ + E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */ + E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */ + E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */ + E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */ + E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */ + E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */ + E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */ + E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */ + E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */ + E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */ + E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */ + E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */ + E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */ + E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */ + E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ + E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */ + E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */ + E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */ + E1000_TOTL = 
0x040C8, /* Total Octets TX Low - R/clr */ + E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */ + E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */ + E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */ + E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */ + E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */ + E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */ + E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */ + E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */ + E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */ + E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */ + E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */ + E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */ + E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */ + E1000_IAC = 0x04100, /* Interrupt Assertion Count */ + E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ + E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ + E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ + E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ + E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ + E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ + E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ + E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ + E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ + E1000_RFCTL = 0x05008, /* Receive Filter Control*/ + E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ + E1000_RA = 0x05400, /* Receive Address - RW Array */ + E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ + E1000_WUC = 0x05800, /* Wakeup Control - RW */ + E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ + E1000_WUS = 0x05810, /* Wakeup Status - RO */ + E1000_MANC = 0x05820, /* Management Control - RW */ + E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ + E1000_HOST_IF = 0x08800, /* Host Interface */ + + E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ + E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ + E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ + E1000_GCR = 0x05B00, /* PCI-Ex Control */ + E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ + E1000_SWSM = 0x05B50, /* SW Semaphore */ + E1000_FWSM = 0x05B54, /* FW Semaphore */ + E1000_HICR = 0x08F00, /* Host Inteface Control */ +}; + +/* RSS registers */ + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0008 +#define 
IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +/* manage.c */ +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +#define E1000_HICR_C 0x02 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +/* nvm.c */ +#define E1000_STM_OPCODE 0xDB00 + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A + +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_IGP_C 
0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 + +#define E1000_FUNC_1 1 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity{ + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +/* Receive Descriptor */ +struct e1000_rx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + u16 length; /* Length of data DMAed into data buffer */ + u16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + u16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + u64 buffer_addr; + u64 reserved; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + u64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length0; /* length of buffer 0 */ + u16 vlan; /* VLAN tag */ + } middle; + struct { + u16 header_status; + u16 length[3]; /* length of buffers 1-3 */ + } upper; + u64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + u32 data; + struct { + u8 
status; /* Descriptor status */ + u8 css; /* Checksum start */ + u16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + u32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + u16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + u32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + u16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + u32 cmd_and_length; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + u16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + u64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + u16 special; /* */ + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +/* Function pointers and static data for the MAC. 
*/ +struct e1000_mac_operations { + u32 mng_mode_enab; + + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, + u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); +}; + +/* Function pointers for the PHY. */ +struct e1000_phy_operations { + s32 (*acquire_phy)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit_phy)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); + void (*release_phy)(struct e1000_hw *); + s32 (*reset_phy)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); +}; + +/* Function pointers for the NVM. */ +struct e1000_nvm_operations { + s32 (*acquire_nvm)(struct e1000_hw *); + s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); + void (*release_nvm)(struct e1000_hw *); + s32 (*update_nvm)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate_nvm)(struct e1000_hw *); + s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + enum e1000_fc_mode fc; + enum e1000_fc_mode original_fc; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 max_frame_size; + u32 mc_filter_type; + u32 min_frame_size; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 rar_entry_count; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + + u8 forced_speed_duplex; + + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool wait_for_link; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; +}; + +struct 
e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; + + enum e1000_media_type media_type; +}; + +#ifdef DEBUG +#define hw_dbg(hw, format, arg...) \ + printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) +#else +static inline int __attribute__ ((format (printf, 2, 3))) +hw_dbg(struct e1000_hw *hw, const char *format, ...) +{ + return 0; +} +#endif + +#endif diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c new file mode 100644 index 000000000000..8f8139de1f48 --- /dev/null +++ b/drivers/net/e1000e/ich8lan.c @@ -0,0 +1,2225 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include "e1000.h" + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 + +#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + (ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone :1; /* bit 0 Flash Cycle Done */ + u16 flcerr :1; /* bit 1 Flash Cycle Error */ + u16 dael :1; /* bit 2 Direct Access error Log */ + u16 berasesz :2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog :1; /* bit 5 flash cycle in Progress */ + u16 reserved1 :2; /* bit 13:6 Reserved */ + u16 reserved2 :6; /* bit 13:6 Reserved */ + u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo :1; /* 0 Flash Cycle Go */ + u16 flcycle :2; /* 2:1 Flash Cycle */ + u16 reserved :5; /* 7:3 Reserved */ + u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ + u16 flockdn :6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra :8; /* 0:7 GbE 
region Read Access */ + u32 grwa :8; /* 8:15 GbE region Write Access */ + u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + msleep(1); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg; + u32 sector_base_addr; + u32 sector_end_addr; + u16 i; + + /* Can't read flash registers if the register set isn't mapped. + */ + if (!hw->flash_address) { + hw_dbg(hw, "ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. 
*/ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. */ + nvm->flash_bank_size = (sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = 0; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. + **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = 1; + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); + + return 0; +} + +static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_ich8lan(hw); + if (rc) + return rc; + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type == e1000_phy_igp_3)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + return 0; +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing NVM and PHY + * operations. This is a function pointer entry point only called by + * read/write routines for the PHY and NVM parts. + **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + u32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + mdelay(1); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); + return -E1000_ERR_CONFIG; + } + + return 0; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing NVM and PHY operations. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. 
+ **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; +} + +/** + * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + if (phy->type != e1000_phy_ife) { + ret_val = e1000e_phy_force_speed_duplex_igp(hw); + return ret_val; + } + + ret_val = e1e_rphy(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "IFE PMC: %X\n", data); + + udelay(1); + + if (phy->wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + + if (!link) + hw_dbg(hw, "Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i; + u32 data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val; + u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. 
+ */ + if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) { + struct e1000_adapter *adapter = hw->adapter; + + /* Check if SW needs to configure the PHY */ + if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || + (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M)) + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + else + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + return 0; + + /* Wait for basic configuration to complete before proceeding */ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + udelay(100); + } while ((!data) && --loop); + + /* If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. + */ + if (loop == 0) { + hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); + } + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration */ + data = er32(EXTCNF_CTRL); + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + return 0; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + return 0; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + /* Configure LCD from extended configuration + * region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, + (word_addr + i * 2), + 1, + &reg_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_nvm(hw, + (word_addr + i * 2 + 1), + 1, + &reg_addr); + if (ret_val) + return ret_val; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr |= phy_page; + + ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + * This function is only called by other family-specific + * routines. + **/ +static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + hw_dbg(hw, "Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE)); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife_ich8lan(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + } + + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = (data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation.
*/ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return 0; +} + +/** + * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info + * @hw: pointer to the HW structure + * + * Wrapper for calling the get_phy_info routines for the appropriate phy type. + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) +{ + switch (hw->phy.type) { + case e1000_phy_ife: + return e1000_get_phy_info_ife_ich8lan(hw); + break; + case e1000_phy_igp_3: + return e1000e_get_phy_info_igp(hw); + break; + default: + break; + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reveral feature being enabled. + * This function is only called by other family-specific + * routines. + **/ +static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + /* Polarity is determined based on the reversal feature + * being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = (phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type != e1000_phy_igp_3) + return ret_val; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3)) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
*/ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3)) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return 0; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. 
+ **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Start with the bank offset, then add the relative offset. */ + act_offset = (er32(EECD) & E1000_EECD_SEC1VAL) + ? nvm->flash_bank_size + : 0; + act_offset += offset; + + for (i = 0; i < words; i++) { + if ((dev_spec->shadow_ram) && + (dev_spec->shadow_ram[offset+i].modified)) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + e1000_release_swflag_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + s32 i = 0; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (hsfsts.hsf_status.fldesvalid == 0) { + hw_dbg(hw, "Flash descriptor invalid. " + "SW Sequencing must be used."); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after harware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (hsfsts.hsf_status.flcinprog == 0) { + /* There is no cycle running at present, + * so we can start a cycle */ + /* Begin by setting Flash Cycle Done. */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + /* otherwise poll for sometime so the current + * cycle has a chance to end before giving up. */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcinprog == 0) { + ret_val = 0; + break; + } + udelay(1); + } + if (ret_val == 0) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + hw_dbg(hw, "Flash controller busy, cannot get access"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. 
+ **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + return 0; + + return ret_val; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != 0) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb */ + if (ret_val == 0) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) { + *data = (u8)(flash_data & 0x000000FF); + } else if (size == 2) { + *data = (u16)(flash_data & 0x0000FFFF); + } + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. 
*/ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + hw_dbg(hw, "Timeout error - flash cycle " + "did not complete."); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return ret_val; + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = 1; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + e1000_release_swflag_ich8lan(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset; + s32 ret_val; + u16 data; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + if (nvm->type != e1000_nvm_flash_sw) + return ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return ret_val; + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written */ + if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + e1000_erase_flash_bank_ich8lan(hw, 1); + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + e1000_erase_flash_bank_ich8lan(hw, 0); + } + + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM */ + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + e1000_read_flash_word_ich8lan(hw, + i + old_bank_offset, + &data); + } + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + udelay(100); + /* Write the bytes to the new bank.
*/ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + udelay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. */ + if (ret_val) { + hw_dbg(hw, "Flash commit failed.\n"); + e1000_release_swflag_ich8lan(hw); + return ret_val; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + e1000_read_flash_word_ich8lan(hw, act_offset, &data); + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) { + e1000_release_swflag_ich8lan(hw); + return ret_val; + } + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) { + e1000_release_swflag_ich8lan(hw); + return ret_val; + } + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = 0; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + e1000_release_swflag_ich8lan(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + e1000e_reload_nvm(hw); + msleep(10); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + /* Read 0x19 and check bit 6. If this bit is 0, the checksum + * needs to be fixed. This bit is an indication that the NVM + * was prepared by OEM software and did not calculate the + * checksum...a likely scenario. + */ + ret_val = e1000_read_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + + if ((data & 0x40) == 0) { + data |= 0x40; + ret_val = e1000_write_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. 
+ **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (size < 1 || size > 2 || data > size * 0xff || + offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done */ + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* Repeat for some time before giving up. */ + continue; + if (hsfsts.hsf_status.flcdone == 0) { + hw_dbg(hw, "Timeout error - flash cycle " + "did not complete."); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); + udelay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. 
+ **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 iteration; + s32 sector_size; + s32 j; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register */ + /* 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K; + break; + case 2: + if (hw->mac.type == e1000_ich9lan) { + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K; + } else { + return -E1000_ERR_NVM; + } + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? (sector_size * iteration) : 0; + + for (j = 0; j < iteration ; j++) { + do { + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. + */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_ERASE_COMMAND_TIMEOUT); + if (ret_val == 0) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* repeat for some time before + * giving up */ + continue; + else if (hsfsts.hsf_status.flcdone == 0) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. 
+ **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_get_bus_info_ich8lan - Get/Set the bus type and width + * @hw: pointer to the HW structure + * + * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability + * register, so the bus width is hardcoded. + **/ +static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + + ret_val = e1000e_get_bus_info_pcie(hw); + + /* ICH devices are "PCI Express"-ish. They have + * a configuration space, but do not contain + * PCI Express Capability registers, so bus width + * must be hardcoded. + */ + if (bus->width == e1000_bus_width_unknown) + bus->width = e1000_bus_width_pcie_x1; + + return ret_val; +} + +/** + * e1000_reset_hw_ich8lan - Reset the hardware + * @hw: pointer to the HW structure + * + * Does a full reset of the hardware which includes a reset of the PHY and + * MAC. + **/ +static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl, icr, kab; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) { + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + } + + hw_dbg(hw, "Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + msleep(10); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. */ + ew32(PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. */ + ew32(PBS, E1000_PBS_16K); + } + + ctrl = er32(CTRL); + + if (!e1000_check_reset_block(hw)) { + /* PHY HW reset requires MAC CORE reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + hw_dbg(hw, "Issuing a global reset to ich8lan"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + msleep(20); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link.
+ */ + hw_dbg(hw, "Auto Read Done did not complete\n"); + } + + ew32(IMC, 0xffffffff); + icr = er32(ICR); + + kab = er32(KABGTXD); + kab |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, kab); + + return ret_val; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) { + hw_dbg(hw, "Error initializing identification LED\n"); + return ret_val; + } + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + hw_dbg(hw, "Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_ich8lan(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL, txdctl); + txdctl = er32(TXDCTL1); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL1, txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return 0; +} +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive.
+ **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL); + reg |= (1 << 22); + ew32(TXDCTL, reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL1); + reg |= (1 << 22); + ew32(TXDCTL1, reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC0); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC0, reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC1); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC1, reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + if (e1000_check_reset_block(hw)) + return 0; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (mac->fc == e1000_fc_default) + mac->fc = e1000_fc_full; + + mac->original_fc = mac->fc; + + hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); + + /* Continue to configure the copper link. */ + ret_val = e1000_setup_copper_link_ich8lan(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, mac->fc_pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
*/ + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); + if (ret_val) + return ret_val; + + if (hw->phy.type == e1000_phy_igp_3) { + ret_val = e1000e_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + } + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return 0; + + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link + * stability */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return 0; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return 0; + + /* Issue PHY reset */ + e1000_phy_hw_reset(hw); + mdelay(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on Giga disable before accessing + * any PHY registers */ + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - TRUE + * /disabled - FALSE). 
+ **/ +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + if (hw->mac.type != e1000_ich8lan) { + hw_dbg(hw, "Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; +} + +/** + * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = er32(PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, reg); + + /* Call gig speed drop workaround on Giga disable before + * accessing any PHY registers */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = er32(CTRL); + ew32(CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), + * LPLU, Giga disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with IGP_3 Phy. + **/ +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + if ((hw->mac.type != e1000_ich8lan) || + (hw->phy.type != e1000_phy_igp_3)) + return; + + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + &reg_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LED's on + * @hw: pointer to the HW structure + * + * Turn on the LED's. + **/ +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + ew32(LEDCTL, hw->mac.ledctl_mode2); + return 0; +} + +/** + * e1000_led_off_ich8lan - Turn LED's off + * @hw: pointer to the HW structure + * + * Turn off the LED's. 
+ **/ +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); + + ew32(LEDCTL, hw->mac.ledctl_mode1); + return 0; +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. + **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u32 temp; + + e1000e_clear_hw_cntrs_base(hw); + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); + + temp = er32(IAC); + temp = er32(ICRXOC); + +} + +static struct e1000_mac_operations ich8_mac_ops = { + .mng_mode_enab = E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, + .check_for_link = e1000e_check_for_copper_link, + .cleanup_led = e1000_cleanup_led_ich8lan, + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + .led_on = e1000_led_on_ich8lan, + .led_off = e1000_led_off_ich8lan, + .mc_addr_list_update = e1000e_mc_addr_list_update_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface= e1000_setup_copper_link_ich8lan, +}; + +static struct e1000_phy_operations ich8_phy_ops = { + .acquire_phy = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit_phy = NULL, + .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_phy_info = e1000_get_phy_info_ich8lan, + .read_phy_reg = e1000e_read_phy_reg_igp, + .release_phy = e1000_release_swflag_ich8lan, + .reset_phy = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_phy_reg = e1000e_write_phy_reg_igp, +}; + +static struct e1000_nvm_operations ich8_nvm_ops = { + .acquire_nvm = e1000_acquire_swflag_ich8lan, + .read_nvm = e1000_read_nvm_ich8lan, + .release_nvm = e1000_release_swflag_ich8lan, + .update_nvm = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate_nvm = e1000_validate_nvm_checksum_ich8lan, + .write_nvm = e1000_write_nvm_ich8lan, +}; + +struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .get_invariants = e1000_get_invariants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .get_invariants = e1000_get_invariants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c new file mode 100644 index 000000000000..0bdeca30c75f --- /dev/null +++ b/drivers/net/e1000e/lib.c @@ -0,0 
+1,2493 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include "e1000.h" + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management + * Technology signature */ + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u32 status; + u16 pcie_link_status, pci_header_type, cap_offset; + + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER, + &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = er32(STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } + + return 0; +} + +/** + * e1000e_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + e1e_flush(); +} + +/** + * e1000e_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + + /* Setup the receive address */ + hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); + + e1000e_rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); + e1e_flush(); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); + e1e_flush(); + } +} + +/** + * e1000e_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + rar_high |= E1000_RAH_AV; + + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); +} + +/** + * e1000_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + e1e_flush(); +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. 
See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + */ + /* For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_mc_addr_list_update_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + u32 hash_value; + u32 i; + + /* Load the first set of multicast addresses into the exact + * filters (RAR). If there are not enough to fill the RAR + * array, clear the filters. 
+ */ + for (i = rar_used_count; i < rar_count; i++) { + if (mc_addr_count) { + e1000e_rar_set(hw, mc_addr_list, i); + mc_addr_count--; + mc_addr_list += ETH_ALEN; + } else { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0); + e1e_flush(); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0); + e1e_flush(); + } + } + + /* Clear the old settings from the MTA */ + hw_dbg(hw, "Clearing MTA\n"); + for (i = 0; i < hw->mac.mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + e1e_flush(); + } + + /* Load any remaining multicast addresses into the hash table. */ + for (; mc_addr_count > 0; mc_addr_count--) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); + e1000_mta_set(hw, hash_value); + mc_addr_list += ETH_ALEN; + } +} + +/** + * e1000e_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + u32 temp; + + temp = er32(CRCERRS); + temp = er32(SYMERRS); + temp = er32(MPC); + temp = er32(SCC); + temp = er32(ECOL); + temp = er32(MCC); + temp = er32(LATECOL); + temp = er32(COLC); + temp = er32(DC); + temp = er32(SEC); + temp = er32(RLEC); + temp = er32(XONRXC); + temp = er32(XONTXC); + temp = er32(XOFFRXC); + temp = er32(XOFFTXC); + temp = er32(FCRUC); + temp = er32(GPRC); + temp = er32(BPRC); + temp = er32(MPRC); + temp = er32(GPTC); + temp = er32(GORCL); + temp = er32(GORCH); + temp = er32(GOTCL); + temp = er32(GOTCH); + temp = er32(RNBC); + temp = er32(RUC); + temp = er32(RFC); + temp = er32(ROC); + temp = er32(RJC); + temp = er32(TORL); + temp = er32(TORH); + temp = er32(TOTL); + temp = er32(TOTH); + temp = er32(TPR); + temp = er32(TPT); + temp = er32(MPTC); + temp = er32(BPTC); +} + +/** + * e1000e_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000e_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return ret_val; /* No link detected */ + + mac->get_link_status = 0; + + /* Check if there was DownShift, must be checked + * immediately after link-up */ + e1000e_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. 
+ * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + hw_dbg(hw, "Error configuring flow control\n"); + } + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + hw_dbg(hw, "Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = 1; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. 
*/ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + hw_dbg(hw, "Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = 1; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + if (E1000_RXCW_SYNCH & er32(RXCW)) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = 1; + hw_dbg(hw, "SERDES: Link is up.\n"); + } + } else { + mac->serdes_has_link = 0; + hw_dbg(hw, "SERDES: Link is down.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + mac->serdes_has_link = (status & E1000_STATUS_LU); + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 nvm_data; + + if (mac->fc != e1000_fc_default) + return 0; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + mac->fc = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + mac->fc = e1000_fc_tx_pause; + else + mac->fc = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000e_setup_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + return 0; + + /* + * If flow control is set to default, set flow control based on + * the EEPROM flow control settings. 
+ */ + if (mac->fc == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + mac->original_fc = mac->fc; + + hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, mac->fc_pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (mac->fc) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled and TX Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of RX Pause ONLY, we will + * advertise that we support both symmetric and asymmetric RX + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + ew32(TXCW, txcw); + mac->txcw = txcw; + + return 0; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + hw_dbg(hw, "Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = 0; + } else { + mac->autoneg_failed = 0; + hw_dbg(hw, "Valid Link Found\n"); + } + + return 0; +} + +/** + * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000e_config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + hw_dbg(hw, "Auto-negotiation enabled\n"); + + ew32(CTRL, ctrl); + e1e_flush(); + msleep(1); + + /* For these adapters, the SW defineable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + hw_dbg(hw, "No signal detected\n"); + } + + return 0; +} + +/** + * e1000e_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. 
+ **/ +void e1000e_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + e1e_flush(); +} + +/** + * e1000e_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (mac->fc & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = mac->fc_low_water; + fcrtl |= E1000_FCRTL_XONE; + fcrth = mac->fc_high_water; + } + ew32(FCRTL, fcrtl); + ew32(FCRTH, fcrth); + + return 0; +} + +/** + * e1000e_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000e_force_mac_fc(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "mac->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg(hw, "mac->fc = %u\n", mac->fc); + + switch (mac->fc) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ew32(CTRL, ctrl); + + return 0; +} + +/** + * e1000e_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. 
If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg(hw, "Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg(hw, "Copper PHY and Auto Neg " + "has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (mac->original_fc == e1000_fc_full) { + mac->fc = e1000_fc_full; + hw_dbg(hw, "Flow Control = FULL.\r\n"); + } else { + mac->fc = e1000_fc_rx_pause; + hw_dbg(hw, "Flow Control = " + "RX PAUSE frames only.\r\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + mac->fc = e1000_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + mac->fc = e1000_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((mac->original_fc == e1000_fc_none) || + (mac->original_fc == e1000_fc_tx_pause)) { + mac->fc = e1000_fc_none; + hw_dbg(hw, "Flow Control = NONE.\r\n"); + } else { + mac->fc = e1000_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. 
+ */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + hw_dbg(hw, "Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + mac->fc = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + hw_dbg(hw, "Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg(hw, "1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg(hw, "100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg(hw, "10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg(hw, "Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg(hw, "Half Duplex\n"); + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + hw_dbg(hw, "Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
+ **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + msleep(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. 
+ **/ +s32 e1000e_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = 0; + ew32(AIT, 0); +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. 
+ **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = 1; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = 0; + ew32(AIT, 0); + } + } +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. 
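e1000_shift_out_eec_bits() and e1000_shift_in_eec_bits() above clock data MSB-first over the EEPROM's DI/DO lines, pulsing SK between bits. A pure-software model of that bit ordering, with a fake bus structure standing in for the EECD register and its clocking helpers:

	#include <stdint.h>
	#include <stdio.h>

	struct fake_bus {            /* software stand-in for the EECD lines   */
		uint64_t din_bits;   /* bits latched from DI on each SK pulse  */
		uint64_t dout_bits;  /* bits the "EEPROM" will drive on DO     */
		int dout_len;
	};

	static void shift_out(struct fake_bus *b, uint16_t data, int count)
	{
		for (uint16_t mask = 1u << (count - 1); mask; mask >>= 1) {
			int di = !!(data & mask);               /* MSB first */
			b->din_bits = (b->din_bits << 1) | di;  /* sampled on SK rise */
		}
	}

	static uint16_t shift_in(struct fake_bus *b, int count)
	{
		uint16_t data = 0;
		for (int i = 0; i < count; i++) {
			int bit = (b->dout_bits >> (b->dout_len - 1 - i)) & 1;
			data = (uint16_t)((data << 1) | bit);   /* MSB arrives first */
		}
		return data;
	}

	int main(void)
	{
		struct fake_bus b = { .dout_bits = 0xBEEF, .dout_len = 16 };
		shift_out(&b, 0x03, 8);                 /* e.g. a READ opcode */
		printf("sent=%02llx got=%04x\n",
		       (unsigned long long)b.din_bits, shift_in(&b, 16));
		return 0;
	}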
+ **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + hw_dbg(hw, "Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 timeout = 0; + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. 
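e1000e_acquire_nvm() and e1000e_release_nvm() above implement a request/grant handshake: set the REQ bit, poll for GNT, and always clear REQ again on failure or when the caller finishes. A self-contained sketch of that pattern; the bit positions and the simulated register are assumptions:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define EECD_REQ (1u << 6)          /* placeholder bit positions */
	#define EECD_GNT (1u << 7)
	#define NVM_GRANT_ATTEMPTS 1000

	/* Simulated EECD register: hardware grants access once REQ is set. */
	static uint32_t eecd_reg;
	static uint32_t reg_read(void)
	{
		return eecd_reg | ((eecd_reg & EECD_REQ) ? EECD_GNT : 0);
	}
	static void reg_write(uint32_t v) { eecd_reg = v & ~EECD_GNT; }

	static bool nvm_acquire(void)
	{
		uint32_t eecd = reg_read();

		reg_write(eecd | EECD_REQ);             /* ask for the EEPROM */
		for (int i = 0; i < NVM_GRANT_ATTEMPTS; i++) {
			eecd = reg_read();
			if (eecd & EECD_GNT)
				return true;            /* access granted */
		}
		reg_write(eecd & ~EECD_REQ);            /* give up: drop the request */
		return false;
	}

	static void nvm_release(void)
	{
		reg_write(reg_read() & ~EECD_REQ);      /* clear REQ when done */
	}

	int main(void)
	{
		printf("granted=%d\n", nvm_acquire());
		nvm_release();
		return 0;
	}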
*/ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire_nvm(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release_nvm(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + nvm->ops.release_nvm(hw); + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (er32(EERD) >> + E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. 
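e1000e_read_nvm_spi() above addresses the part in bytes (offset * 2) and byte-swaps each 16-bit word it clocks in: the byte that arrives first off the wire lands in the upper half of the assembled word, while the NVM image stores words little-endian. A small sketch of that fix-up together with the bounds check the NVM paths share:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Undo the on-the-wire byte order of a word shifted in MSB-first. */
	static inline uint16_t spi_word_fixup(uint16_t word_in)
	{
		return (uint16_t)((word_in >> 8) | (word_in << 8));
	}

	/* Bounds check used before any SPI/EERD access: offset and length are
	 * in 16-bit words; the byte address sent to the part is offset * 2. */
	static bool nvm_range_ok(uint16_t offset, uint16_t words, uint16_t word_size)
	{
		return words != 0 && offset < word_size && words <= word_size - offset;
	}

	int main(void)
	{
		printf("%04x\n", spi_word_fixup(0x3412));   /* prints 1234 */
		printf("%d %d\n",
		       nvm_range_ok(0x3F, 1, 64),           /* last valid word  */
		       nvm_range_ok(0x40, 1, 64));          /* one past the end */
		return 0;
	}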
+ **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire_nvm(hw); + if (ret_val) + return ret_val; + + msleep(10); + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release_nvm(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + msleep(10); + return 0; +} + +/** + * e1000e_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + s32 ret_val; + u16 offset, nvm_data, i; + + for (i = 0; i < ETH_ALEN; i += 2) { + offset = i >> 1; + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + /* Flip last bit of mac address if we're on second port */ + if (hw->bus.func == E1000_FUNC_1) + hw->mac.perm_addr[5] ^= 1; + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg(hw, "NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
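e1000e_read_mac_addr() above unpacks the station address two bytes per NVM word, low byte first, and flips the least-significant bit of the last octet on the second LAN function because both ports of a dual-port device share one EEPROM. A standalone sketch of that unpacking, with the NVM words passed in directly instead of read from hardware:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	/* Unpack a MAC address from three 16-bit NVM words (low byte first),
	 * flipping the last bit for the second port of a dual-port device. */
	static void mac_from_nvm(const uint16_t nvm[3], int is_second_port,
				 uint8_t mac[ETH_ALEN])
	{
		for (int i = 0; i < ETH_ALEN; i += 2) {
			mac[i]     = (uint8_t)(nvm[i >> 1] & 0xFF);
			mac[i + 1] = (uint8_t)(nvm[i >> 1] >> 8);
		}
		if (is_second_port)
			mac[5] ^= 1;
	}

	int main(void)
	{
		const uint16_t nvm[3] = { 0x1b00, 0x3321, 0x5247 };  /* example data */
		uint8_t mac[ETH_ALEN];

		mac_from_nvm(nvm, 1, mac);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}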
+ **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg(hw, "NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000e_reload_nvm(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operaton + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if ((hicr & E1000_HICR_EN) == 0) { + hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + hw_dbg(hw, "Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode - check managament mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab; +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + /* No manageability, no filtering */ + if (!e1000e_check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = 0; + return 0; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. 
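The validate/update pair above relies on a fixed scheme: the 16-bit sum of every NVM word up to and including the checksum word must equal NVM_SUM (0xBABA), so the checksum word is written as 0xBABA minus the sum of the preceding words. A self-contained sketch over an in-memory image; the conventional checksum word index 0x3F is stated here as an assumption:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NVM_CHECKSUM_REG 0x3F     /* last word covered by the checksum */
	#define NVM_SUM          0xBABA

	/* Recompute the checksum word so the covered region sums to NVM_SUM. */
	static void nvm_update_checksum(uint16_t *image)
	{
		uint16_t sum = 0;
		for (int i = 0; i < NVM_CHECKSUM_REG; i++)
			sum = (uint16_t)(sum + image[i]);
		image[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);
	}

	/* Valid iff words 0..NVM_CHECKSUM_REG (inclusive) sum to NVM_SUM. */
	static bool nvm_checksum_ok(const uint16_t *image)
	{
		uint16_t sum = 0;
		for (int i = 0; i <= NVM_CHECKSUM_REG; i++)
			sum = (uint16_t)(sum + image[i]);
		return sum == NVM_SUM;
	}

	int main(void)
	{
		uint16_t image[NVM_CHECKSUM_REG + 1] = { 0x8086, 0x10d3, 0x1234 };

		nvm_update_checksum(image);
		printf("checksum=%04x valid=%d\n",
		       image[NVM_CHECKSUM_REG], nvm_checksum_ok(image));
		return 0;
	}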
+ */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val != 0) { + hw->mac.tx_pkt_filtering = 0; + return ret_val; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = 1; + return 1; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { + hw->mac.tx_pkt_filtering = 0; + return 0; + } + + hw->mac.tx_pkt_filtering = 1; + return 1; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. 
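e1000_calculate_checksum() returns the two's complement of the byte sum, so a buffer that carries its own checksum byte sums to zero. That is exactly the property the DHCP cookie check above uses: the stored checksum is zeroed, the sum is recomputed, and the two are compared. A sketch of the property; the cookie layout below is illustrative, not the real structure:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Two's-complement byte checksum: buffer plus its checksum sums to 0. */
	static uint8_t calc_checksum(const uint8_t *buf, size_t len)
	{
		uint8_t sum = 0;
		for (size_t i = 0; i < len; i++)
			sum = (uint8_t)(sum + buf[i]);
		return (uint8_t)(0 - sum);
	}

	struct cookie {                 /* illustrative layout, not the real one */
		uint32_t signature;
		uint8_t  status;
		uint8_t  checksum;
	};

	int main(void)
	{
		struct cookie c;

		memset(&c, 0, sizeof(c));   /* clear padding and the old checksum */
		c.signature = 0x544d4149;   /* illustrative signature value */
		c.status = 1;
		c.checksum = calc_checksum((uint8_t *)&c, sizeof(c));

		/* Verification: summing every byte, checksum included, yields 0. */
		uint8_t sum = 0;
		for (size_t i = 0; i < sizeof(c); i++)
			sum = (uint8_t)(sum + ((uint8_t *)&c)[i]);
		printf("total=%u\n", sum);
		return 0;
	}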
*/ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to allow ARPs to be processed by the host. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = 0; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return ret_val; + + if (hw->mac.arc_subsystem_valid) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = 1; + return ret_val; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = 1; + return ret_val; + } + } + + return ret_val; +} + +s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num) +{ + s32 ret_val; + u16 nvm_data; + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num = (u32)(nvm_data << 16); + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num |= nvm_data; + + return 0; +} diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c new file mode 100644 index 000000000000..033e124d1c1f --- /dev/null +++ b/drivers/net/e1000e/netdev.c @@ -0,0 +1,4438 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/cpu.h> +#include <linux/smp.h> + +#include "e1000.h" + +#define DRV_VERSION "0.2.0" +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, +}; + +#ifdef DEBUG +/** + * e1000_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *e1000e_get_hw_dev_name(struct e1000_hw *hw) +{ + return hw->adapter->netdev->name; +} +#endif + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, + struct sk_buff *skb, + u8 status, u16 vlan) +{ + skb->protocol = eth_type_trans(skb, netdev); + + if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(vlan) & + E1000_RXD_SPC_VLAN_MASK); + else + netif_receive_skb(skb); + + netdev->last_rx = jiffies; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + /* TCP/UDP checksum error bit is set */ + if (errors & E1000_RXD_ERR_TCPE) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not 
been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (status & E1000_RXD_STAT_TCPCS) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + /* IP fragment with UDP payload */ + /* Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. + */ + csum = ntohl(csum ^ 0xFFFF); + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + adapter->hw_csum_good++; +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = netdev_alloc_skb(netdev, bufsz); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = pci_map_single(pdev, skb->data, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&pdev->dev, "RX DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
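The skb_reserve(skb, NET_IP_ALIGN) call in the buffer-allocation paths above shifts the receive buffer by two bytes so that, after the 14-byte Ethernet header, the IP header starts on a 16-byte boundary. The arithmetic, as a trivial standalone check (assuming the skb data area itself starts 16-byte aligned):

	#include <stdio.h>

	#define NET_IP_ALIGN 2      /* pad so the IP header ends up aligned */
	#define ETH_HLEN     14     /* destination + source MAC + ethertype */

	int main(void)
	{
		unsigned int l2_start = NET_IP_ALIGN;           /* 2  */
		unsigned int l3_start = l2_start + ETH_HLEN;    /* 16 */

		printf("IP header offset %u, 16-byte aligned: %s\n",
		       l3_start, (l3_start % 16 == 0) ? "yes" : "no");
		return 0;
	}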
*/ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + + j]; + if (j < adapter->rx_ps_pages) { + if (!ps_page->page) { + ps_page->page = alloc_page(GFP_ATOMIC); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = pci_map_page(pdev, + ps_page->page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error( + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "RX DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* + * Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j+1] = + cpu_to_le64(ps_page->dma); + } else { + rx_desc->read.buffer_addr[j+1] = ~0; + } + } + + skb = netdev_alloc_skb(netdev, + adapter->rx_ps_bsize0 + NET_IP_ALIGN); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; + buffer_info->dma = pci_map_single(pdev, skb->data, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&pdev->dev, "RX DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + if (!(i--)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + /* Hardware increments by 16 bytes, but packet split + * descriptors are 32 bytes...so we increment tail + * twice as much. 
+ */ + writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers + * + * @adapter: address of board private structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - + 16 /*for skb_reserve */ - + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = netdev_alloc_skb(netdev, bufsz); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = pci_map_page(pdev, + buffer_info->page, 0, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&adapter->pdev->dev, "RX DMA page map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
*/ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + pci_unmap_single(pdev, + buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* !EOP means multiple descriptors were used to store a single + * packet, also make sure the frame isn't just CRC only */ + if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { + /* All receives must fit into a single buffer */ + ndev_dbg(netdev, "%s: Receive packet consumed " + "multiple buffers\n", netdev->name); + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + length -= 4; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += length; + total_rx_packets++; + + /* code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack */ + if (length < copybreak) { + struct sk_buff *new_skb = + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); + if (new_skb) { + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(new_skb->data - NET_IP_ALIGN, + skb->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += 
total_rx_bytes; + return cleaned; +} + +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +static void e1000_put_txbuf(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } +} + +static void e1000_print_tx_hang(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct net_device *netdev = adapter->netdev; + + /* detected Tx unit hang */ + ndev_err(netdev, + "Detected Tx Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + bool cleaned = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { + for (cleaned = 0; !cleaned; ) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + struct sk_buff *skb = buffer_info->skb; + unsigned int segs, bytecount; + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_tx_packets += segs; + total_tx_bytes += bytecount; + } + + e1000_put_txbuf(adapter, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); +#define E1000_TX_WEIGHT 64 + /* weight of a sort for tx, to avoid endless transmit cleanup */ + if (count++ == E1000_TX_WEIGHT) + break; + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (cleaned && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
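The transmit-clean path above wakes the queue only once e1000_desc_unused() reports at least TX_WAKE_THRESHOLD free slots. The ring keeps one slot permanently empty so that equal head and tail indices unambiguously mean "empty"; a sketch of that occupancy arithmetic for both the wrapped and unwrapped cases:

	#include <stdio.h>

	/* One slot is always kept empty so next_to_use == next_to_clean
	 * means "ring empty" rather than "ring full". */
	static int desc_unused(int count, int next_to_clean, int next_to_use)
	{
		if (next_to_clean > next_to_use)
			return next_to_clean - next_to_use - 1;
		return count + next_to_clean - next_to_use - 1;
	}

	int main(void)
	{
		int count = 256;

		int free = desc_unused(count, 100, 10);     /* clean ahead of use */
		printf("free=%d wake=%d\n", free, free >= 32 /* TX_WAKE_THRESHOLD */);

		free = desc_unused(count, 10, 250);         /* use wrapped past end */
		printf("free=%d wake=%d\n", free, free >= 32);
		return 0;
	}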
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = 0; + if (tx_ring->buffer_info[eop].dma && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) + && !(er32(STATUS) & + E1000_STATUS_TXOFF)) { + e1000_print_tx_hang(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + pci_unmap_page(pdev, + buffer_info->dma, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if ((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + +#define rxtop rx_ring->rx_skb_top + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, + length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 
*vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), + vaddr, length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + pskb_trim(skb, skb->len - 4); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + ndev_err(netdev, "__pskb_pull_tail failed.\n"); + dev_kfree_skb(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + return cleaned; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + if (!(staterr & E1000_RXD_STAT_EOP)) { + ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " + "up the full packet\n", netdev->name); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + ndev_dbg(netdev, "%s: Last part of the packet spanning" + " multiple descriptors\n", netdev->name); + 
dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* this looks ugly, but it seems compiler issues make it + more efficient than reusing j */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* page alloc/put takes too long and effects small packet + * throughput, so unsplit small packets and save the alloc/put*/ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS]; + + /* there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long */ + pci_dma_sync_single_for_cpu(pdev, ps_page->dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); + pci_dma_sync_single_for_device(pdev, ps_page->dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + /* remove the CRC */ + l1 -= 4; + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j]; + pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive */ + pskb_trim(skb, skb->len - 4); + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, le16_to_cpu( + rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, + staterr, rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo) + pci_unmap_page(pdev, buffer_info->dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + 
pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + + j]; + if (!ps_page->page) + break; + pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + size = sizeof(struct e1000_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + size = sizeof(struct e1000_ps_page) + * (rx_ring->count * PS_PAGE_BUFFERS); + memset(rx_ring->ps_pages, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* read ICR disables interrupts using IAM, so keep up with our + * enable/disable accounting */ + atomic_inc(&adapter->irq_sem); + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->napi); + } else { + atomic_dec(&adapter->irq_sem); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + u32 rctl, icr = er32(ICR); + if (!icr) + return IRQ_NONE; /* Not our interrupt */ + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. 
No need for the + * IMC write, but it does mean we should + * account for it ASAP. */ + atomic_inc(&adapter->irq_sem); + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->napi); + } else { + atomic_dec(&adapter->irq_sem); + } + + return IRQ_HANDLED; +} + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + void (*handler) = &e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = pci_enable_msi(adapter->pdev); + if (err) { + ndev_warn(netdev, + "Unable to allocate MSI interrupt Error: %d\n", err); + } else { + adapter->flags |= FLAG_MSI_ENABLED; + handler = &e1000_intr_msi; + irq_flags = 0; + } + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + if (adapter->flags & FLAG_MSI_ENABLED) + pci_disable_msi(adapter->pdev); + ndev_err(netdev, + "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + atomic_inc(&adapter->irq_sem); + ew32(IMC, ~0); + e1e_flush(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (atomic_dec_and_test(&adapter->irq_sem)) { + ew32(IMS, IMS_ENABLE_MASK); + e1e_flush(); + } +} + +/** + * e1000_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. 
+ **/ +static void e1000_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) i + * of the f/w this means that the network i/f is closed. + * + **/ +static void e1000_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware taken over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + if (adapter->flags & FLAG_MNG_PT_ENABLED) { + struct e1000_hw *hw = &adapter->hw; + + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + /* don't explicitly have to mess with MANC2H since + * MANC has an enable disable that gates MANC2H */ + ew32(MANC, manc); + } +} + +/** + * @e1000_alloc_ring - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vmalloc(size); + if (!tx_ring->buffer_info) + goto err; + memset(tx_ring->buffer_info, 0, size); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + spin_lock_init(&adapter->tx_queue_lock); + + return 0; +err: + vfree(tx_ring->buffer_info); + ndev_err(adapter->netdev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + int size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; + rx_ring->buffer_info = vmalloc(size); + if (!rx_ring->buffer_info) + goto err; + memset(rx_ring->buffer_info, 0, size); + + 
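	/* one e1000_ps_page per packet-split page buffer, i.e.
	 * PS_PAGE_BUFFERS entries for every receive descriptor */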
rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS, + sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!rx_ring->ps_pages) + goto err; + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; +err: + vfree(rx_ring->buffer_info); + kfree(rx_ring->ps_pages); + ndev_err(adapter->netdev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + + e1000_clean_tx_ring(adapter); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void e1000e_free_rx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + + e1000_clean_rx_ring(adapter); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + kfree(rx_ring->ps_pages); + rx_ring->ps_pages = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
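 * As a rough illustration: an interval dominated by large transfers
 * (more than ~8000 bytes per packet, i.e. TSO or jumbo traffic) is pushed
 * toward bulk_latency, while a busy stream of small packets is pushed
 * toward lowest_latency. The interrupts/sec value chosen from the result
 * is later converted to ITR register units (256 ns) as
 * 1000000000 / (itr * 256); e.g. 20000 ints/s programs roughly 195,
 * i.e. about 50 usec between interrupts.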
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) { + retval = low_latency; + } + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) { + retval = bulk_latency; + } else if ((packets < 10) || ((bytes/packets) > 1200)) { + retval = bulk_latency; + } else if ((packets > 35)) { + retval = lowest_latency; + } + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) { + retval = low_latency; + } + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, + adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, + adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing */ + new_itr = new_itr > adapter->itr ? + min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +/** + * e1000_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 0, work_done = 0; + + /* Must NOT use netdev_priv macro here. 
*/ + adapter = poll_dev->priv; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(poll_dev)) + goto quit_polling; + + /* e1000_clean is called per-cpu. This lock protects + * tx_ring from being cleaned by multiple cpus + * simultaneously. A failure obtaining the lock means + * tx_ring is currently being cleaned anyway. */ + if (spin_trylock(&adapter->tx_queue_lock)) { + tx_cleaned = e1000_clean_tx_irq(adapter); + spin_unlock(&adapter->tx_queue_lock); + } + + adapter->clean_rx(adapter, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((!tx_cleaned && (work_done < budget)) || + !netif_running(poll_dev)) { +quit_polling: + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + netif_rx_complete(poll_dev, napi); + e1000_irq_enable(adapter); + } + + return work_done; +} + +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return; + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000e_write_vfta(hw, index, vfta); +} + +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + e1000_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + e1000_irq_enable(adapter); + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000_release_hw_control(adapter); + return; + } + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000e_write_vfta(hw, index, vfta); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!adapter->vlgrp) + return; + + if (!vlan_group_get_device(adapter->vlgrp, vid)) { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !vlan_group_get_device(adapter->vlgrp, old_vid)) + e1000_vlan_rx_kill_vid(netdev, old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + + +static void e1000_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + + e1000_irq_disable(adapter); + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } + } else { + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); + + if (adapter->flags & 
FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + if (adapter->mng_vlan_id != + (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, + adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } + } + + e1000_irq_enable(adapter); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); + + if (!adapter->vlgrp) + return; + + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + e1000_vlan_rx_add_vid(adapter->netdev, vid); + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); +#define E1000_MNG2HOST_PORT_623 (1 << 5) +#define E1000_MNG2HOST_PORT_664 (1 << 6) + manc2h |= E1000_MNG2HOST_PORT_623; + manc2h |= E1000_MNG2HOST_PORT_664; + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tipg, tarc; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL, (tdba & DMA_32BIT_MASK)); + ew32(TDBAH, (tdba >> 32)); + ew32(TDLEN, tdlen); + ew32(TDH, 0); + ew32(TDT, 0); + tx_ring->head = E1000_TDH; + tx_ring->tail = E1000_TDT; + + /* Set the default values for the Tx Inter Packet Gap timer */ + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ + ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ + ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ + + if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ + + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC0); + /* set the speed mode bit, we'll clear it if we're not at + * gigabit link later */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC0, tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC0); + tarc |= 1; + ew32(TARC0, tarc); + tarc = er32(TARC1); + tarc |= 1; + ew32(TARC1, tarc); + } + + e1000e_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we 
are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + adapter->tx_queue_len = adapter->netdev->tx_queue_len; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 psrctl = 0; + u32 pages = 0; + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 256: + rctl |= E1000_RCTL_SZ_256; + rctl &= ~E1000_RCTL_BSEX; + break; + case 512: + rctl |= E1000_RCTL_SZ_512; + rctl &= ~E1000_RCTL_BSEX; + break; + case 1024: + rctl |= E1000_RCTL_SZ_1024; + rctl &= ~E1000_RCTL_BSEX; + break; + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* + * 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. + */ + adapter->rx_ps_pages = 0; + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + + if (adapter->rx_ps_pages) { + /* Configure extra packet-split registers */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + /* disable packet split support for IPv6 extension headers, + * because some malformed IPv6 headers can hang the RX */ + rfctl |= (E1000_RFCTL_IPV6_EX_DIS | + E1000_RFCTL_NEW_IPV6_EXT_DIS); + + ew32(RFCTL, rfctl); + + /* disable the stripping of CRC because it breaks + * BMC firmware connected over SMBUS */ + rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */; + + psrctl |= adapter->rx_ps_bsize0 >> + E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + case 2: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + case 1: + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) { + rdlen = rx_ring->count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq_jumbo; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo; + } else { + rdlen = rx_ring->count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + msleep(10); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, + 1000000000 / (adapter->itr * 256)); + + ctrl_ext = er32(CTRL_EXT); + /* Reset delay timers after every interrupt */ + ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + rdba = rx_ring->dma; + ew32(RDBAL, (rdba & DMA_32BIT_MASK)); + ew32(RDBAH, (rdba >> 32)); + ew32(RDLEN, rdlen); + ew32(RDH, 0); + ew32(RDT, 0); + rx_ring->head = E1000_RDH; + rx_ring->tail = E1000_RDT; + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->flags & FLAG_RX_CSUM_ENABLED) { + rxcsum |= E1000_RXCSUM_TUOFL; + + /* IPv4 payload checksum for UDP fragments must be + * used in conjunction with packet-split. */ + if (adapter->rx_ps_pages) + rxcsum |= E1000_RXCSUM_IPPCSE; + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* no need to clear IPPCSE as it defaults to 0 */ + } + ew32(RXCSUM, rxcsum); + + /* Enable early receives on supported devices, only takes effect when + * packet size is equal or larger than the specified value (in 8 byte + * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ + if ((adapter->flags & FLAG_HAS_ERT) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + ew32(ERT, E1000_ERT_2048); + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000_mc_addr_list_update - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. Currently no func pointer + * exists and all implementations are handled in the generic version of this + * function. 
+ **/ +static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 rar_used_count, + u32 rar_count) +{ + hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count, + rar_used_count, rar_count); +} + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_multi(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct dev_mc_list *mc_ptr; + u8 *mta_list; + u32 rctl; + int i; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + } else if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + + ew32(RCTL, rctl); + + if (netdev->mc_count) { + mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); + if (!mta_list) + return; + + /* prepare a packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, + ETH_ALEN); + mc_ptr = mc_ptr->next; + } + + e1000_mc_addr_list_update(hw, mta_list, i, 1, + mac->rar_entry_count); + kfree(mta_list); + } else { + /* + * if we're called from probe, we might not have + * anything to do here, so clear out the list + */ + e1000_mc_addr_list_update(hw, NULL, 0, 1, + mac->rar_entry_count); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + e1000_set_multi(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, + e1000_desc_unused(adapter->rx_ring)); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (adapter->hw.media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle */ + e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); + } + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down + * The PHY cannot be powered down is management or WoL is active + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg; + + /* WoL is enabled */ + if (!adapter->wol) + return; + + /* non-copper PHY? 
*/ + if (adapter->hw.media_type != e1000_media_type_copper) + return; + + /* reset is blocked because of a SoL/IDER session */ + if (e1000e_check_mng_mode(hw) || + e1000_check_reset_block(hw)) + return; + + /* managebility (AMT) is enabled */ + if (er32(MANC) & E1000_MANC_SMBUS_EN) + return; + + /* power down the PHY */ + e1e_rphy(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1e_wphy(hw, PHY_CONTROL, mii_reg); + mdelay(1); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for rx, tx etc. + */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u16 hwm; + + if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. */ + adapter->pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = adapter->pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + adapter->pba &= 0xffff; + /* the tx fifo also stores 16 bytes of information about the tx + * but don't include ethernet FCS because hardware appends it */ + min_tx_space = (mac->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = mac->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < adapter->pba)) { + adapter->pba -= - (min_tx_space - tx_space); + + /* if short on rx space, rx wins and must trump tx + * adjustment or use Early Receive if available */ + if ((adapter->pba < min_rx_space) && + (!(adapter->flags & FLAG_HAS_ERT))) + /* ERT enabled in e1000_configure_rx */ + adapter->pba = min_rx_space; + } + } + + ew32(PBA, adapter->pba); + + /* flow control settings */ + /* The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
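 * (adapter->pba is kept in KB, so pba << 10 below converts it to bytes
 * before the watermark arithmetic, and the result is rounded down to
 * 8-byte granularity.)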
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame */ + if (adapter->flags & FLAG_HAS_ERT) + hwm = min(((adapter->pba << 10) * 9 / 10), + ((adapter->pba << 10) - (E1000_ERT_2048 << 3))); + else + hwm = min(((adapter->pba << 10) * 9 / 10), + ((adapter->pba << 10) - mac->max_frame_size)); + + mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + mac->fc_low_water = mac->fc_high_water - 8; + + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + mac->fc_pause_time = 0xFFFF; + else + mac->fc_pause_time = E1000_FC_PAUSE_TIME; + mac->fc = mac->original_fc; + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + ndev_err(adapter->netdev, "Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + e1000_get_phy_info(hw); + + if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } + + e1000_release_manageability(adapter); +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + e1000_irq_enable(adapter); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +void e1000e_down(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__E1000_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + e1e_flush(); + msleep(10); + + napi_disable(&adapter->napi); + e1000_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + netdev->tx_queue_len = adapter->tx_queue_len; + netif_carrier_off(netdev); + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000e_reset(adapter); + e1000_clean_tx_ring(adapter); + e1000_clean_rx_ring(adapter); + + /* + * TODO: for power management, we could drop the link and + * pci_disable_device here. + */ +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + e1000e_down(adapter); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. 
+ * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit e1000_sw_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + + adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + + spin_lock_init(&adapter->tx_queue_lock); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + atomic_set(&adapter->irq_sem, 0); + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; + +err: + ndev_err(netdev, "Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* If AMT is enabled, let the firmware know that the network + * interface is now open */ + if ((adapter->flags & FLAG_HAS_AMT) && + e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
*/ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter); +err_setup_rx: + e1000e_free_tx_resources(adapter); +err_setup_tx: + e1000e_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000e_free_tx_resources(adapter); + e1000e_free_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + !(adapter->vlgrp && + vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed */ + if ((adapter->flags & FLAG_HAS_AMT) && + e1000e_check_mng_mode(&adapter->hw)) + e1000_release_hw_control(adapter); + + return 0; +} +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. 
Eventually the LAA will be in RAR[0] and + * RAR[14] */ + e1000e_rar_set(&adapter->hw, + adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy */ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + e1000_get_phy_info(&adapter->hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long irq_flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + + /* these counters are modified from e1000_adjust_tbi_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) { + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.sec += er32(SEC); + } + + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) { + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + } + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + hw->mac.collision_delta = er32(COLC); + adapter->stats.colc += hw->mac.collision_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + 
adapter->stats.tsctfc += er32(TSCTFC); + + adapter->stats.iac += er32(IAC); + + if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) { + adapter->stats.icrxoc += er32(ICRXOC); + adapter->stats.icrxptc += er32(ICRXPTC); + adapter->stats.icrxatc += er32(ICRXATC); + adapter->stats.ictxptc += er32(ICTXPTC); + adapter->stats.ictxatc += er32(ICTXATC); + adapter->stats.ictxqec += er32(ICTXQEC); + adapter->stats.ictxqmtc += er32(ICTXQMTC); + adapter->stats.icrxdmtc += er32(ICRXDMTC); + } + + /* Fill out the OS statistics structure */ + adapter->net_stats.rx_packets = adapter->stats.gprc; + adapter->net_stats.tx_packets = adapter->stats.gptc; + adapter->net_stats.rx_bytes = adapter->stats.gorcl; + adapter->net_stats.tx_bytes = adapter->stats.gotcl; + adapter->net_stats.multicast = adapter->stats.mprc; + adapter->net_stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC */ + adapter->net_stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->net_stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; + adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; + adapter->net_stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->net_stats.tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; + adapter->net_stats.tx_window_errors = adapter->stats.latecol; + adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + } + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + ndev_info(netdev, + "Link is Up %d Mbps %s, Flow Control: %s\n", + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "RX/TX" : + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" ))); +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, watchdog_task); + + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + s32 ret_val; + int tx_pending = 0; + + if ((netif_carrier_ok(netdev)) && + (er32(STATUS) & E1000_STATUS_LU)) + goto link_up; + + ret_val = mac->ops.check_for_link(hw); + if ((ret_val == E1000_ERR_PHY) && + (adapter->hw.phy.type == e1000_phy_igp_3) && + (er32(CTRL) & + E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + ndev_info(netdev, + "Gigabit has been disabled, downgrading speed\n"); + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && + !(er32(TXCW) & E1000_TXCW_ANE)) + link = adapter->hw.mac.serdes_has_link; + else + link = er32(STATUS) & E1000_STATUS_LU; + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = 1; + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + /* tweak tx_queue_len according to speed/duplex + * and adjust the timeout factor */ + netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + txb2b = 0; + netdev->tx_queue_len = 100; + /* maybe add some timeout factor ? 
*/ + break; + } + + /* workaround: re-program speed mode bit after + * link-up event */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + tarc0 = er32(TARC0); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC0, tarc0); + } + + /* disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + ndev_info(netdev, + "10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* enable transmits in the hardware, need to do this + * after setting TARC0 */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } else { + /* make sure the receive unit is started */ + if (adapter->flags & FLAG_RX_NEEDS_RESTART) { + u32 rctl = er32(RCTL); + ew32(RCTL, rctl | + E1000_RCTL_EN); + } + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + ndev_info(netdev, "Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + schedule_work(&adapter->reset_task); + } + } + +link_up: + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000e_update_adaptive(&adapter->hw); + + if (!netif_carrier_ok(netdev)) { + tx_pending = (e1000_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
Set the appropriate LAA in RAR[0] */ + if (e1000e_get_laa_state_82571(hw)) + e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; + } + + return 0; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + css = skb_transport_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); + + buffer_info->time_stamp = jiffies; + 
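		/* next_to_watch marks the descriptor whose write-back (DD bit)
		 * tells the Tx cleanup path this buffer can be reclaimed */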
buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; + } + + return 0; +} + +#define E1000_MAX_PER_TXD 8192 +#define E1000_MAX_TXD_PWR 12 + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned int len = skb->len - skb->data_len; + unsigned int offset = 0, size, count = 0, i; + unsigned int f; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (mss && !nr_frags && size == len && size > 8) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->dma = + pci_map_single(adapter->pdev, + skb->data + offset, + size, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); + adapter->tx_dma_failed++; + return -1; + } + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (mss && f == (nr_frags-1) && size == len && size > 8) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->dma = + pci_map_page(adapter->pdev, + frag->page, + offset, + size, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&adapter->pdev->dev, + "TX DMA page map failed\n"); + adapter->tx_dma_failed++; + return -1; + } + + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + + i++; + if (i == tx_ring->count) + i = 0; + } + } + + if (i == 0) + i = tx_ring->count - 1; + else + i--; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + int tx_flags, int count) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i 
== tx_ring->count) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems */ + mmiowb(); +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (vlan_tx_tag_present(skb)) { + if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) + && (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + } + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (e1000_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000_desc_unused(adapter->tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int max_per_txd = E1000_MAX_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb->len - skb->data_len; + unsigned long irq_flags; + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
*/ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + ndev_err(netdev, + "__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb->len - skb->data_len; + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, + max_txd_pwr); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags)) + /* Collision - tell upper layer to requeue */ + return NETDEV_TX_LOCKED; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time */ + if (e1000_maybe_stop_tx(netdev, count + 2)) { + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_BUSY; + } + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(adapter, skb)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. */ + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); + if (count < 0) { + /* handle pci_map_single() error in e1000_tx_map */ + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_OK; + } + + e1000_tx_queue(adapter, tx_flags, count); + + netdev->trans_start = jiffies; + + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); + + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + e1000e_reinit_locked(adapter); +} + +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
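As a worked illustration of the descriptor budgeting in e1000_xmit_frame above (the MSS-based clamp on max_per_txd plus the TXD_USE_COUNT() upper bound that feeds the count + 2 ring-space check), here is a small standalone C sketch. It only mirrors the macros shown in the patch; the MSS and skb length values are illustrative and not taken from the driver.

#include <stdio.h>

#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12
/* same upper-bound estimate the driver uses when reserving ring space */
#define TXD_USE_COUNT(S, X)	(((S) >> (X)) + 1)

/* stand-in for the kernel's fls(): 1-based index of the highest set bit */
static unsigned int fls_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int mss = 1460;		/* illustrative TCP MSS */
	unsigned int len = 60000;		/* illustrative linear length of a TSO skb */
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;

	if (mss) {				/* the clamp performed in e1000_xmit_frame */
		if ((mss << 2) < max_per_txd)
			max_per_txd = mss << 2;
		max_txd_pwr = fls_u32(max_per_txd) - 1;
	}

	/* mss = 1460 gives max_per_txd = 5840 and max_txd_pwr = 12, so a
	 * 60000-byte linear region is budgeted (60000 >> 12) + 1 = 15 descriptors */
	printf("max_per_txd=%u max_txd_pwr=%u descriptors=%u\n",
	       max_per_txd, max_txd_pwr, TXD_USE_COUNT(len, max_txd_pwr));
	return 0;
}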
+ **/ +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + ndev_err(netdev, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame size limits */ + if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { + if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + ndev_err(netdev, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + if (adapter->hw.phy.type == e1000_phy_ife) { + ndev_err(netdev, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + } + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + ndev_err(netdev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + /* e1000e_down has a dependency on max_frame_size */ + adapter->hw.mac.max_frame_size = max_frame; + if (netif_running(netdev)) + e1000e_down(adapter); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo* routines, jumbo receives will use + * fragmented skbs */ + + if (max_frame <= 256) + adapter->rx_buffer_len = 256; + else if (max_frame <= 512) + adapter->rx_buffer_len = 512; + else if (max_frame <= 1024) + adapter->rx_buffer_len = 1024; + else if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN ; + + ndev_info(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long irq_flags; + + if (adapter->hw.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return 
e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + int retval = 0; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_free_irq(adapter); + } + + retval = pci_save_state(pdev); + if (retval) + return retval; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_multi(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.media_type == e1000_media_type_fiber || + adapter->hw.media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + } + + e1000_release_manageability(adapter); + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->flags & FLAG_MNG_PT_ENABLED) { + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } + + if (adapter->hw.phy.type == e1000_phy_igp_3) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. */ + e1000_release_hw_control(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000e_power_up_phy(adapter); + e1000e_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000e_up(adapter); + + netif_device_attach(netdev); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
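A quick sanity check of the receive-buffer bucketing in e1000_change_mtu above, as standalone C with illustrative MTUs: a 1500-byte MTU gives max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518, which first lands in the 2048-byte bucket and is then overridden to 1522 bytes by the LPE special case, while the slab-size comment in that function is pointing out that a 2048-byte buffer plus the 16-byte netdev_alloc_skb reserve and 2-byte NET_IP_ALIGN (2048 + 16 + 2 = 2066) already spills into the size-4096 slab. The snippet only restates the thresholds visible in the patch and is not driver code.

#include <stdio.h>

/* restates the bucket thresholds from e1000_change_mtu; not driver code */
static unsigned int rx_buffer_len_for(unsigned int mtu)
{
	unsigned int max_frame = mtu + 14 /* ETH_HLEN */ + 4 /* ETH_FCS_LEN */;

	if (max_frame <= 256)
		return 256;
	if (max_frame <= 512)
		return 512;
	if (max_frame <= 1024)
		return 1024;
	if (max_frame <= 2048)
		return 2048;
	return 4096;		/* jumbo frames use fragmented skbs */
}

int main(void)
{
	/* 1500 -> 2048 bucket (then 1522 via the LPE case), 9000 -> 4096 */
	printf("mtu 1500 -> %u, mtu 9000 -> %u\n",
	       rx_buffer_len_for(1500), rx_buffer_len_for(9000));
	return 0;
}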
*/ + if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + return 0; +} +#endif + +static void e1000_shutdown(struct pci_dev *pdev) +{ + e1000_suspend(pdev, PMSG_SUSPEND); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + + e1000_clean_tx_irq(adapter); + + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e1000e_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000e_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000e_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. */ + if (!(adapter->flags & FLAG_HAS_AMT) || + !e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 part_num; + + /* print bus type/speed/width info */ + ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? 
"Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) + ? "10/100" : "1000"); + e1000e_read_part_num(hw, &part_num); + ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, + (part_num >> 8), (part_num & 0xff)); +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + unsigned long mmio_start, mmio_len; + unsigned long flash_start, flash_len; + + static int cards_found; + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + err = pci_set_consistent_dma_mask(pdev, + DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_regions(pdev, e1000e_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* construct the net_device struct */ + netdev->open = &e1000_open; + netdev->stop = &e1000_close; + netdev->hard_start_xmit = &e1000_xmit_frame; + netdev->get_stats = &e1000_get_stats; + netdev->set_multicast_list = &e1000_set_multi; + netdev->set_mac_address = &e1000_set_mac; + netdev->change_mtu = &e1000_change_mtu; + netdev->do_ioctl = &e1000_ioctl; + e1000e_set_ethtool_ops(netdev); + netdev->tx_timeout = &e1000_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + netdev->vlan_rx_register = e1000_vlan_rx_register; + netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; + 
netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = e1000_netpoll; +#endif + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_invariants(adapter); + if (err) + goto err_hw_init; + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.wait_for_link = 0; + + /* Copper options */ + if (adapter->hw.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (e1000_check_reset_block(&adapter->hw)) + ndev_info(netdev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + /* We should not be using LLTX anymore, but we are still TX faster with + * it. */ + netdev->features |= NETIF_F_LLTX; + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* before reading the NVM, reset the controller to + * put the device in a known good starting state */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* + * systems with ASPM and others may see the checksum fail on the first + * attempt. Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + /* copy the MAC address out of the NVM */ + if (e1000e_read_mac_addr(&adapter->hw)) + ndev_err(netdev, "NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + ndev_err(netdev, "Invalid MAC Address: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->perm_addr[0], netdev->perm_addr[1], + netdev->perm_addr[2], netdev->perm_addr[3], + netdev->perm_addr[4], netdev->perm_addr[5]); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = &e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long) adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + + e1000e_check_options(adapter); + + /* Initialize link parameters. 
User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = 1; + adapter->hw.mac.original_fc = e1000_fc_default; + adapter->hw.mac.fc = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* ring size defaults */ + adapter->rx_ring->count = 256; + adapter->tx_ring->count = 256; + + /* + * Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + else + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* + * now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. */ + if (!(adapter->flags & FLAG_HAS_AMT) || + !e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + /* tell the stack to leave us alone until e1000_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_print_device_info(adapter); + + return 0; + +err_register: +err_hw_init: + e1000_release_hw_control(adapter); +err_eeprom: + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); + + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + +err_flashmap: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled */ + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + flush_scheduled_work(); + + e1000_release_manageability(adapter); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
*/ + e1000_release_hw_control(adapter); + + unregister_netdev(netdev); + + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static struct pci_device_id e1000_pci_tbl[] = { + /* + * Support for 82571/2/3, es2lan and ich8 will be phased in + * stepwise. + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + */ + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = __devexit_p(e1000_remove), +#ifdef CONFIG_PM + /* Power Managment Hooks */ + .suspend = e1000_suspend, + .resume = e1000_resume, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ **/ +static int __init e1000_init_module(void) +{ + int ret; + printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_name, e1000e_driver_version); + printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n", + e1000e_driver_name); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* e1000_main.c */ diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c new file mode 100644 index 000000000000..e4e655efb23c --- /dev/null +++ b/drivers/net/e1000e/param.c @@ -0,0 +1,382 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/netdevice.h> + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define COPYBREAK_DEFAULT 256 +unsigned int copybreak = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int __devinit e1000_validate_option(int *value, + struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + ndev_info(adapter->netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + ndev_info(adapter->netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + ndev_info(adapter->netdev, + "%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + ndev_info(adapter->netdev, "%s\n", + ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * e1000e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. 
If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void __devinit e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + ndev_notice(netdev, + "Warning: no configuration for board #%i\n", bd); + ndev_notice(netdev, "Using defaults for all values\n"); + } + + { /* Transmit Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + /* modify min and default if 82573 for slow ping w/a, + * a value greater than 8 needs to be set for RDTR */ + if (adapter->flags & FLAG_HAS_ASPM) { + opt.def = 32; + opt.arg.r.min = 8; + } + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + ndev_info(netdev, "%s turned off\n", + opt.name); + break; + case 1: + ndev_info(netdev, + "%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + ndev_info(netdev, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* + * save the 
setting, because the dynamic bits + * change itr. clear the lower two bits + * because they are used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) + && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + { /* Kumeran Lock Loss Workaround */ + struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_KumeranLockLoss > bd) { + int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + kmrn_lock_loss); + } else { + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + opt.def); + } + } +} diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c new file mode 100644 index 000000000000..793231810ae0 --- /dev/null +++ b/drivers/net/e1000e/phy.c @@ -0,0 +1,1773 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/delay.h> + +#include "e1000.h" + +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = + { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + +static const u16 e1000_igp_2_cable_length_table[] = + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id; + + ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + return 0; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control regsiter in the PHY at offset and stores the + * information read to data. + **/ +static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg(hw, "PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg(hw, "MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg(hw, "MDI Error\n"); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + return 0; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg(hw, "PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg(hw, "MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000e_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
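One practical consequence of the MDIC polling above is a bounded worst-case stall when the PHY never answers: the read path in e1000_read_phy_reg_mdic polls at most 64 times with udelay(50) before returning -E1000_ERR_PHY, i.e. roughly 3.2 ms per failed read attempt. A trivial standalone check of that budget follows; the iteration count and delay are the ones visible above, nothing else is assumed.

#include <stdio.h>

int main(void)
{
	unsigned int polls = 64;	/* loop bound in e1000_read_phy_reg_mdic */
	unsigned int delay_us = 50;	/* udelay(50) per poll */

	/* 64 * 50 us = 3200 us, ~3.2 ms worst case before -E1000_ERR_PHY */
	printf("worst-case MDIC read wait: %u us\n", polls * delay_us);
	return 0;
}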
+ **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release_phy(hw); + return ret_val; + } + } + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release_phy(hw); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + + udelay(2); + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (phy->revision < 4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) + hw_dbg(hw, "Error committing the PHY changes\n"); + + return ret_val; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + hw_dbg(hw, "Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from NVM settings. */ + msleep(15); + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, 0); + if (ret_val) { + hw_dbg(hw, "Error Disabling LPLU D0\n"); + return ret_val; + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
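For reference, the phy->mdix settings handled by the switch in e1000e_copper_link_setup_m88 above (and, in reduced form, by e1000e_copper_link_setup_igp) map as listed in this standalone summary; it simply restates the options documented in the comment and is not driver code.

#include <stdio.h>

/* restates the MDI/MDI-X option values from the comment above; not driver code */
static const char *mdix_mode(int mdix)
{
	switch (mdix) {
	case 1:
		return "forced MDI";
	case 2:
		return "forced MDI-X";
	case 3:
		return "auto crossover for 1000Base-T only (MDI-X for 10/100Base-T)";
	case 0:
	default:
		return "auto crossover for all speeds (default)";
	}
}

int main(void)
{
	int i;

	for (i = 0; i <= 3; i++)
		printf("mdix=%d: %s\n", i, mdix_mode(i));
	return 0;
}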
*/ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg(hw, "Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg(hw, "Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg(hw, "Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg(hw, "Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg(hw, "Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->mac.fc) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); + } + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * wait_for_link, then wait for autoneg to complete before exiting. 
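Editorial aside: the flow-control switch above reduces to two advertisement bits, symmetric pause and asymmetric direction. A small stand-alone model of that mapping; the bit values are assumed from the standard MII advertisement layout rather than quoted from this patch:

    #include <stdio.h>
    #include <stdint.h>

    #define NWAY_AR_PAUSE   0x0400 /* symmetric pause capability (assumed value) */
    #define NWAY_AR_ASM_DIR 0x0800 /* asymmetric pause direction (assumed value) */

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static uint16_t fc_adv_bits(enum fc_mode fc)
    {
        switch (fc) {
        case FC_RX_PAUSE: /* cannot advertise Rx-only pause, so advertise both */
        case FC_FULL:
            return NWAY_AR_PAUSE | NWAY_AR_ASM_DIR;
        case FC_TX_PAUSE:
            return NWAY_AR_ASM_DIR;
        case FC_NONE:
        default:
            return 0;
        }
    }

    int main(void)
    {
        printf("rx_pause -> 0x%04x\n", fc_adv_bits(FC_RX_PAUSE));
        printf("tx_pause -> 0x%04x\n", fc_adv_bits(FC_TX_PAUSE));
        return 0;
    }

For the Rx-only case the hardware's ability to send pause frames is taken away later, after link-up, exactly as the comment in the patch describes.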
+ **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + hw_dbg(hw, "Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->wait_for_link) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + hw_dbg(hw, "Error while waiting for " + "autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = 1; + + return ret_val; +} + +/** + * e1000e_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000e_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. */ + hw_dbg(hw, "Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + hw_dbg(hw, "Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + hw_dbg(hw, "Valid link established!!!\n"); + e1000e_config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + hw_dbg(hw, "Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). 
+ **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + + if (!link) + hw_dbg(hw, "Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (phy->wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. 
+ */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take effect. + **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + mac->fc = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg(hw, "Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg(hw, "Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg(hw, "Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg(hw, "Forcing 10mb\n"); + } + + e1000e_config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained.
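Editorial aside: e1000e_phy_force_speed_duplex_setup() above mirrors the same decision into the MAC CTRL register and the PHY control word. The PHY-side half reduces to the sketch below; the MII control bit values are assumed from the standard BMCR layout, not taken from this patch:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed standard MII (BMCR) bit values. */
    #define MII_CR_SPEED_100   0x2000
    #define MII_CR_SPEED_1000  0x0040
    #define MII_CR_FULL_DUPLEX 0x0100
    #define MII_CR_AUTO_NEG_EN 0x1000

    static uint16_t force_phy_ctrl(uint16_t ctrl, int want_100, int want_full)
    {
        ctrl &= ~MII_CR_AUTO_NEG_EN;                     /* forcing, so no autoneg */
        ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); /* 10 Mb/s is the all-zero encoding */

        if (want_100)
            ctrl |= MII_CR_SPEED_100;
        if (want_full)
            ctrl |= MII_CR_FULL_DUPLEX;
        else
            ctrl &= ~MII_CR_FULL_DUPLEX;
        return ctrl;
    }

    int main(void)
    {
        /* 0x1140 is a typical autoneg-enabled 1000/full control value. */
        printf("forced 100/full: 0x%04x\n", force_phy_ctrl(0x1140, 1, 1));
        return 0;
    }

The driver helper likewise only edits the value in place, which is why its kernel-doc insists the caller still writes PHY_CONTROL afterwards.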
*/ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = 0; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0).
+ */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1e_rphy(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, whichever happens first. + **/ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000e_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index+1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return ret_val; +} + +/** + * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable.
By reading the AGC registers, which represent the + combination of coarse and fine gain values, the value can be put + into a lookup table to obtain the approximate cable length + for each channel. + **/ +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return ret_val; +} + +/** + * e1000e_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver.
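Editorial aside: the igp_2 routine above takes one table lookup per channel, discards the smallest and largest of those lookups, averages the remaining two, and then reports a window around the result. A worked stand-alone example with made-up table values (the real lookup table and range constant live in the driver headers):

    #include <stdio.h>

    #define CHANNELS  4
    #define AGC_RANGE 10 /* +/- window in meters, per the comment above; value illustrative */

    int main(void)
    {
        /* Hypothetical per-channel table lookups, in meters. */
        int len[CHANNELS] = { 40, 50, 45, 80 };
        int sum = 0, min = len[0], max = len[0];

        for (int i = 0; i < CHANNELS; i++) {
            sum += len[i];
            if (len[i] < min)
                min = len[i];
            if (len[i] > max)
                max = len[i];
        }

        int agc = (sum - min - max) / (CHANNELS - 2); /* drop extremes, average the rest */
        int lo = (agc - AGC_RANGE > 0) ? agc - AGC_RANGE : 0;
        int hi = agc + AGC_RANGE;

        printf("estimate %d m, reported range %d..%d m\n", agc, lo, hi);
        return 0;
    }

With these numbers the two middle channels give (50 + 45) / 2 = 47, so the driver-style result would be a 37..57 meter range with 47 as the midpoint.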
+ **/ +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (hw->media_type != e1000_media_type_copper) { + hw_dbg(hw, "Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + hw_dbg(hw, "Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = (phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + hw_dbg(hw, "Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = 1; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. 
+ **/ +s32 e1000e_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + udelay(1); + + return ret_val; +} + +/** + * e1000e_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = e1000_check_reset_block(hw); + if (ret_val) + return 0; + + ret_val = phy->ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + e1e_flush(); + + udelay(phy->reset_delay_us); + + ew32(CTRL, ctrl); + e1e_flush(); + + udelay(150); + + phy->ops.release_phy(hw); + + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000e_get_cfg_done - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000e_get_cfg_done(struct e1000_hw *hw) +{ + mdelay(10); + return 0; +} + +/* Internal function pointers */ + +/** + * e1000_get_phy_cfg_done - Generic PHY configuration done + * @hw: pointer to the HW structure + * + * Return success if silicon family did not implement a family specific + * get_cfg_done function. + **/ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cfg_done) + return hw->phy.ops.get_cfg_done(hw); + + return 0; +} + +/** + * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex + * @hw: pointer to the HW structure + * + * When the silicon family has not implemented a forced speed/duplex + * function for the PHY, simply return 0. + **/ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + if (hw->phy.ops.force_speed_duplex) + return hw->phy.ops.force_speed_duplex(hw); + + return 0; +} + +/** + * e1000e_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. + **/ +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000e_commit_phy - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers.
+ **/ +s32 e1000e_commit_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit_phy) + return hw->phy.ops.commit_phy(hw); + + return 0; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return 0; +} diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index b2b0a96218ca..6390f51ea6fb 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c @@ -124,8 +124,6 @@ static int __init do_e2100_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return e21_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 47680237f783..83bda6ccde98 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -192,7 +192,6 @@ static unsigned int net_debug = NET_DEBUG; /* Information that need to be kept for each board. */ struct eepro_local { - struct net_device_stats stats; unsigned rx_start; unsigned tx_start; /* start of the transmit chain */ int tx_last; /* pointer to last packet in the transmit chain */ @@ -315,7 +314,6 @@ static irqreturn_t eepro_interrupt(int irq, void *dev_id); static void eepro_rx(struct net_device *dev); static void eepro_transmit_interrupt(struct net_device *dev); static int eepro_close(struct net_device *dev); -static struct net_device_stats *eepro_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void eepro_tx_timeout (struct net_device *dev); @@ -514,7 +512,7 @@ buffer (transmit-buffer = 32K - receive-buffer). /* a complete sel reset */ #define eepro_complete_selreset(ioaddr) { \ - lp->stats.tx_errors++;\ + dev->stats.tx_errors++;\ eepro_sel_reset(ioaddr);\ lp->tx_end = \ lp->xmt_lower_limit;\ @@ -537,8 +535,6 @@ static int __init do_eepro_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - #ifdef PnPWakeup /* XXXX for multiple cards should this only be run once? */ @@ -594,8 +590,6 @@ struct net_device * __init eepro_probe(int unit) if (!dev) return ERR_PTR(-ENODEV); - SET_MODULE_OWNER(dev); - sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); @@ -696,6 +690,7 @@ static void __init eepro_print_info (struct net_device *dev) struct eepro_local * lp = netdev_priv(dev); int i; const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; + DECLARE_MAC_BUF(mac); i = inb(dev->base_addr + ID_REG); printk(KERN_DEBUG " id: %#x ",i); @@ -717,10 +712,10 @@ static void __init eepro_print_info (struct net_device *dev) case LAN595: printk("%s: Intel 82595-based lan card at %#x,", dev->name, (unsigned)dev->base_addr); + break; } - for (i=0; i < 6; i++) - printk("%c%02x", i ? 
':' : ' ', dev->dev_addr[i]); + printk(" %s", print_mac(mac, dev->dev_addr)); if (net_debug > 3) printk(KERN_DEBUG ", %dK RCV buffer", @@ -860,7 +855,6 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe) dev->open = eepro_open; dev->stop = eepro_close; dev->hard_start_xmit = eepro_send_packet; - dev->get_stats = eepro_get_stats; dev->set_multicast_list = &set_multicast_list; dev->tx_timeout = eepro_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1158,9 +1152,9 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) if (hardware_send_packet(dev, buf, length)) /* we won't wake queue here because we're out of space */ - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; else { - lp->stats.tx_bytes+=skb->len; + dev->stats.tx_bytes+=skb->len; dev->trans_start = jiffies; netif_wake_queue(dev); } @@ -1170,7 +1164,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb (skb); /* You might need to clean up and record Tx statistics here. */ - /* lp->stats.tx_aborted_errors++; */ + /* dev->stats.tx_aborted_errors++; */ if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); @@ -1277,16 +1271,6 @@ static int eepro_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats * -eepro_get_stats(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /* Set or clear the multicast filter for this adaptor. */ static void @@ -1579,12 +1563,12 @@ eepro_rx(struct net_device *dev) /* Malloc up new buffer. */ struct sk_buff *skb; - lp->stats.rx_bytes+=rcv_size; + dev->stats.rx_bytes+=rcv_size; rcv_size &= 0x3fff; skb = dev_alloc_skb(rcv_size+5); if (skb == NULL) { printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); @@ -1606,28 +1590,28 @@ eepro_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } else { /* Not sure will ever reach here, I set the 595 to discard bad received frames */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rcv_status & 0x0100) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; else if (rcv_status & 0x0400) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; else if (rcv_status & 0x0800) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); } if (rcv_status & 0x1000) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; @@ -1670,11 +1654,11 @@ eepro_transmit_interrupt(struct net_device *dev) netif_wake_queue (dev); if (xmt_status & TX_OK) - lp->stats.tx_packets++; + dev->stats.tx_packets++; else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (xmt_status & 0x0400) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; printk(KERN_DEBUG "%s: carrier error\n", dev->name); printk(KERN_DEBUG "%s: XMT status = %#x\n", @@ -1688,11 +1672,11 @@ eepro_transmit_interrupt(struct net_device *dev) } } if (xmt_status & 0x000f) { - lp->stats.collisions += 
(xmt_status & 0x000f); + dev->stats.collisions += (xmt_status & 0x000f); } if ((xmt_status & 0x0040) == 0x0) { - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; } } } diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 3c54014acece..1548a80f917d 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c @@ -622,6 +622,7 @@ static int __devinit speedo_found1(struct pci_dev *pdev, int size; void *tx_ring_space; dma_addr_t tx_ring_dma; + DECLARE_MAC_BUF(mac); size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats); tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma); @@ -635,7 +636,6 @@ static int __devinit speedo_found1(struct pci_dev *pdev, return -1; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (dev->mem_start > 0) @@ -706,12 +706,8 @@ static int __devinit speedo_found1(struct pci_dev *pdev, else product = pci_name(pdev); - printk(KERN_INFO "%s: %s, ", dev->name, product); - - for (i = 0; i < 5; i++) - printk("%2.2X:", dev->dev_addr[i]); - printk("%2.2X, ", dev->dev_addr[i]); - printk("IRQ %d.\n", pdev->irq); + printk(KERN_INFO "%s: %s, %s, IRQ %d.\n", dev->name, product, + print_mac(mac, dev->dev_addr), pdev->irq); sp = netdev_priv(dev); diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 7934ea37f944..9c85e50014b4 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -135,7 +135,6 @@ struct net_local { - struct net_device_stats stats; unsigned long last_tx; /* jiffies when last transmit started */ unsigned long init_time; /* jiffies when eexp_hw_init586 called */ unsigned short rx_first; /* first rx buf, same as RX_BUF_START */ @@ -247,7 +246,6 @@ static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 }; static int eexp_open(struct net_device *dev); static int eexp_close(struct net_device *dev); static void eexp_timeout(struct net_device *dev); -static struct net_device_stats *eexp_stats(struct net_device *dev); static int eexp_xmit(struct sk_buff *buf, struct net_device *dev); static irqreturn_t eexp_irq(int irq, void *dev_addr); @@ -341,8 +339,6 @@ static int __init do_express_probe(struct net_device *dev) int dev_irq = dev->irq; int err; - SET_MODULE_OWNER(dev); - dev->if_port = 0xff; /* not set */ #ifdef CONFIG_MCA_LEGACY @@ -535,17 +531,6 @@ static int eexp_close(struct net_device *dev) } /* - * Return interface stats - */ - -static struct net_device_stats *eexp_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - return &lp->stats; -} - -/* * This gets called when a higher level thinks we are broken. Check that * nothing has become jammed in the CU. 
*/ @@ -648,7 +633,7 @@ static void eexp_timeout(struct net_device *dev) printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, (SCB_complete(status)?"lost interrupt": "board on fire")); - lp->stats.tx_errors++; + dev->stats.tx_errors++; lp->last_tx = jiffies; if (!SCB_complete(status)) { scb_command(dev, SCB_CUabort); @@ -696,7 +681,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) { unsigned short *data = (unsigned short *)buf->data; - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; eexp_hw_tx_pio(dev,data,length); } @@ -845,7 +830,7 @@ static irqreturn_t eexp_irq(int irq, void *dev_info) outw(rbd+8, ioaddr+READ_PTR); printk("[%04x]\n", inw(ioaddr+DATAPORT)); #endif - lp->stats.rx_errors++; + dev->stats.rx_errors++; #if 1 eexp_hw_rxinit(dev); #else @@ -954,17 +939,17 @@ static void eexp_hw_rx_pio(struct net_device *dev) } else if (!FD_OK(status)) { - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (FD_CRC(status)) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (FD_Align(status)) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (FD_Resrc(status)) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (FD_DMA(status)) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (FD_Short(status)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else { @@ -974,7 +959,7 @@ static void eexp_hw_rx_pio(struct net_device *dev) if (skb == NULL) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); @@ -983,8 +968,8 @@ static void eexp_hw_rx_pio(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } outw(rx_block, ioaddr+WRITE_PTR); outw(0, ioaddr+DATAPORT); @@ -1055,7 +1040,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, outw(0xFFFF, ioaddr+SIGNAL_CA); } - lp->stats.tx_packets++; + dev->stats.tx_packets++; lp->last_tx = jiffies; } @@ -1182,7 +1167,6 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) dev->open = eexp_open; dev->stop = eexp_close; dev->hard_start_xmit = eexp_xmit; - dev->get_stats = eexp_stats; dev->set_multicast_list = &eexp_set_multicast; dev->tx_timeout = eexp_timeout; dev->watchdog_timeo = 2*HZ; @@ -1265,35 +1249,35 @@ static unsigned short eexp_hw_lasttxstat(struct net_device *dev) else { lp->last_tx_restart = 0; - lp->stats.collisions += Stat_NoColl(status); + dev->stats.collisions += Stat_NoColl(status); if (!Stat_OK(status)) { char *whatsup = NULL; - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (Stat_Abort(status)) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (Stat_TNoCar(status)) { whatsup = "aborted, no carrier"; - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } if (Stat_TNoCTS(status)) { whatsup = "aborted, lost CTS"; - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } if (Stat_TNoDMA(status)) { whatsup = "FIFO underran"; - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } if (Stat_TXColl(status)) { whatsup = "aborted, too many collisions"; - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } if (whatsup) printk(KERN_INFO "%s: transmit %s\n", dev->name, whatsup); } else - lp->stats.tx_packets++; + dev->stats.tx_packets++; } if (tx_block == 
TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) lp->tx_reap = tx_block = TX_BUF_START; diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 6628fa622e2c..ac21526b6de8 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -33,19 +33,20 @@ #include <linux/ethtool.h> #include <linux/vmalloc.h> #include <linux/if_vlan.h> +#include <linux/inet_lro.h> #include <asm/ibmebus.h> #include <asm/abs_addr.h> #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0070" +#define DRV_VERSION "EHEA_0078" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 #define DLPAR_MEM_ADD 2 #define DLPAR_MEM_REM 4 -#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM) +#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD) #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) @@ -58,6 +59,7 @@ #define EHEA_SMALL_QUEUES #define EHEA_NUM_TX_QP 1 +#define EHEA_LRO_MAX_AGGR 64 #ifdef EHEA_SMALL_QUEUES #define EHEA_MAX_CQE_COUNT 1023 @@ -84,6 +86,8 @@ #define EHEA_RQ2_PKT_SIZE 1522 #define EHEA_L_PKT_SIZE 256 /* low latency */ +#define MAX_LRO_DESCRIPTORS 8 + /* Send completion signaling */ /* Protection Domain Identifier */ @@ -351,6 +355,7 @@ struct ehea_q_skb_arr { * Port resources */ struct ehea_port_res { + struct napi_struct napi; struct port_stats p_stats; struct ehea_mr send_mr; /* send memory region */ struct ehea_mr recv_mr; /* receive memory region */ @@ -362,7 +367,6 @@ struct ehea_port_res { struct ehea_cq *send_cq; struct ehea_cq *recv_cq; struct ehea_eq *eq; - struct net_device *d_netdev; struct ehea_q_skb_arr rq1_skba; struct ehea_q_skb_arr rq2_skba; struct ehea_q_skb_arr rq3_skba; @@ -376,6 +380,8 @@ struct ehea_port_res { u64 tx_packets; u64 rx_packets; u32 poll_counter; + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; }; @@ -385,7 +391,6 @@ struct ehea_adapter { struct ibmebus_dev *ebus_dev; struct ehea_port *port[EHEA_MAX_PORTS]; struct ehea_eq *neq; /* notification event queue */ - struct workqueue_struct *ehea_wq; struct tasklet_struct neq_tasklet; struct ehea_mr mr; u32 pd; /* protection domain */ @@ -402,6 +407,8 @@ struct ehea_mc_list { #define EHEA_PORT_UP 1 #define EHEA_PORT_DOWN 0 +#define EHEA_PHY_LINK_UP 1 +#define EHEA_PHY_LINK_DOWN 0 #define EHEA_MAX_PORT_RES 16 struct ehea_port { struct ehea_adapter *adapter; /* adapter that owns this port */ @@ -427,6 +434,8 @@ struct ehea_port { u32 msg_enable; u32 sig_comp_iv; u32 state; + u32 lro_max_aggr; + u8 phy_link; u8 full_duplex; u8 autoneg; u8 num_def_qps; diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index decec8cfe96b..679f40ee9572 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -183,6 +183,9 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = { {"PR5 free_swqes"}, {"PR6 free_swqes"}, {"PR7 free_swqes"}, + {"LRO aggregated"}, + {"LRO flushed"}, + {"LRO no_desc"}, }; static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -193,9 +196,14 @@ static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int ehea_get_stats_count(struct net_device *dev) +static int ehea_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(ehea_ethtool_stats_keys); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ehea_ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } } static void ehea_get_ethtool_stats(struct net_device *dev, @@ -204,7 +212,7 @@ static void 
ehea_get_ethtool_stats(struct net_device *dev, int i, k, tmp; struct ehea_port *port = netdev_priv(dev); - for (i = 0; i < ehea_get_stats_count(dev); i++) + for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++) data[i] = 0; i = 0; @@ -239,6 +247,18 @@ static void ehea_get_ethtool_stats(struct net_device *dev, for (k = 0; k < 8; k++) data[i++] = atomic_read(&port->port_res[k].swqe_avail); + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].lro_mgr.stats.aggregated; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].lro_mgr.stats.flushed; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].lro_mgr.stats.no_desc; + data[i++] = tmp; + } const struct ethtool_ops ehea_ethtool_ops = { @@ -247,12 +267,9 @@ const struct ethtool_ops ehea_ethtool_ops = { .get_msglevel = ehea_get_msglevel, .set_msglevel = ehea_set_msglevel, .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_strings = ehea_get_strings, - .get_stats_count = ehea_get_stats_count, + .get_sset_count = ehea_get_sset_count, .get_ethtool_stats = ehea_get_ethtool_stats, .get_rx_csum = ehea_get_rx_csum, .set_settings = ehea_set_settings, diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 1d1571cf322e..2ba57e6ace4d 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -52,18 +52,26 @@ static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; static int sq_entries = EHEA_DEF_ENTRIES_SQ; static int use_mcs = 0; +static int use_lro = 0; +static int lro_max_aggr = EHEA_LRO_MAX_AGGR; static int num_tx_qps = EHEA_NUM_TX_QP; +static int prop_carrier_state = 0; module_param(msg_level, int, 0); module_param(rq1_entries, int, 0); module_param(rq2_entries, int, 0); module_param(rq3_entries, int, 0); module_param(sq_entries, int, 0); +module_param(prop_carrier_state, int, 0); module_param(use_mcs, int, 0); +module_param(use_lro, int, 0); +module_param(lro_max_aggr, int, 0); module_param(num_tx_qps, int, 0); MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS"); MODULE_PARM_DESC(msg_level, "msg_level"); +MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical " + "port to stack. 1:yes, 0:no. Default = 0 "); MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " "[2^x - 1], x = [6..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")"); @@ -76,14 +84,19 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " "[2^x - 1], x = [6..14]. Default = " __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); -MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 "); +MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 "); + +MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. 
Default = " + __MODULE_STRING(EHEA_LRO_MAX_AGGR)); +MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " + "Default = 0"); static int port_name_cnt = 0; static LIST_HEAD(adapter_list); u64 ehea_driver_flags = 0; -struct workqueue_struct *ehea_driver_wq; struct work_struct ehea_rereg_mr_task; +struct semaphore dlpar_mem_lock; static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, const struct of_device_id *id); @@ -164,16 +177,24 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; struct net_device *dev = pr->port->netdev; int max_index_mask = pr->rq1_skba.len - 1; + int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; + int adder = 0; int i; - if (!nr_of_wqes) + pr->rq1_skba.os_skbs = 0; + + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + pr->rq1_skba.index = index; + pr->rq1_skba.os_skbs = fill_wqes; return; + } - for (i = 0; i < nr_of_wqes; i++) { + for (i = 0; i < fill_wqes; i++) { if (!skb_arr_rq1[index]) { skb_arr_rq1[index] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { + pr->rq1_skba.os_skbs = fill_wqes - i; ehea_error("%s: no mem for skb/%d wqes filled", dev->name, i); break; @@ -181,9 +202,14 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) } index--; index &= max_index_mask; + adder++; } + + if (adder == 0) + return; + /* Ring doorbell */ - ehea_update_rq1a(pr->qp, i); + ehea_update_rq1a(pr->qp, adder); } static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) @@ -217,16 +243,21 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, struct sk_buff **skb_arr = q_skba->arr; struct ehea_rwqe *rwqe; int i, index, max_index_mask, fill_wqes; + int adder = 0; int ret = 0; fill_wqes = q_skba->os_skbs + num_wqes; + q_skba->os_skbs = 0; - if (!fill_wqes) + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + q_skba->os_skbs = fill_wqes; return ret; + } index = q_skba->index; max_index_mask = q_skba->len - 1; for (i = 0; i < fill_wqes; i++) { + u64 tmp_addr; struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); if (!skb) { ehea_error("%s: no mem for skb/%d wqes filled", @@ -238,30 +269,37 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, skb_reserve(skb, NET_IP_ALIGN); skb_arr[index] = skb; + tmp_addr = ehea_map_vaddr(skb->data); + if (tmp_addr == -1) { + dev_kfree_skb(skb); + q_skba->os_skbs = fill_wqes - i; + ret = 0; + break; + } rwqe = ehea_get_next_rwqe(qp, rq_nr); rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); rwqe->sg_list[0].l_key = pr->recv_mr.lkey; - rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + rwqe->sg_list[0].vaddr = tmp_addr; rwqe->sg_list[0].len = packet_size; rwqe->data_segments = 1; index++; index &= max_index_mask; - - if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) - goto out; + adder++; } q_skba->index = index; + if (adder == 0) + goto out; /* Ring doorbell */ iosync(); if (rq_nr == 2) - ehea_update_rq2a(pr->qp, i); + ehea_update_rq2a(pr->qp, adder); else - ehea_update_rq3a(pr->qp, i); + ehea_update_rq3a(pr->qp, adder); out: return ret; } @@ -382,16 +420,70 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { ehea_error("Critical receive error. 
Resetting port."); - queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task); + schedule_work(&pr->port->reset_task); return 1; } return 0; } -static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, - struct ehea_port_res *pr, - int *budget) +static int get_skb_hdr(struct sk_buff *skb, void **iphdr, + void **tcph, u64 *hdr_flags, void *priv) +{ + struct ehea_cqe *cqe = priv; + unsigned int ip_len; + struct iphdr *iph; + + /* non tcp/udp packets */ + if (!cqe->header_length) + return -1; + + /* non tcp packet */ + skb_reset_network_header(skb); + iph = ip_hdr(skb); + if (iph->protocol != IPPROTO_TCP) + return -1; + + ip_len = ip_hdrlen(skb); + skb_set_transport_header(skb, ip_len); + *tcph = tcp_hdr(skb); + + /* check if ip header and tcp header are complete */ + if (iph->tot_len < ip_len + tcp_hdrlen(skb)) + return -1; + + *hdr_flags = LRO_IPV4 | LRO_TCP; + *iphdr = iph; + + return 0; +} + +static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe, + struct sk_buff *skb) +{ + int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) + && pr->port->vgrp; + + if (use_lro) { + if (vlan_extracted) + lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, + pr->port->vgrp, + cqe->vlan_tag, + cqe); + else + lro_receive_skb(&pr->lro_mgr, skb, cqe); + } else { + if (vlan_extracted) + vlan_hwaccel_receive_skb(skb, pr->port->vgrp, + cqe->vlan_tag); + else + netif_receive_skb(skb); + } +} + +static int ehea_proc_rwqes(struct net_device *dev, + struct ehea_port_res *pr, + int budget) { struct ehea_port *port = pr->port; struct ehea_qp *qp = pr->qp; @@ -404,18 +496,16 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, int skb_arr_rq2_len = pr->rq2_skba.len; int skb_arr_rq3_len = pr->rq3_skba.len; int processed, processed_rq1, processed_rq2, processed_rq3; - int wqe_index, last_wqe_index, rq, my_quota, port_reset; + int wqe_index, last_wqe_index, rq, port_reset; processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; last_wqe_index = 0; - my_quota = min(*budget, dev->quota); cqe = ehea_poll_rq1(qp, &wqe_index); - while ((my_quota > 0) && cqe) { + while ((processed < budget) && cqe) { ehea_inc_rq1(qp); processed_rq1++; processed++; - my_quota--; if (netif_msg_rx_status(port)) ehea_dump(cqe, sizeof(*cqe), "CQE"); @@ -430,14 +520,14 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, if (netif_msg_rx_err(port)) ehea_error("LL rq1: skb=NULL"); - skb = netdev_alloc_skb(port->netdev, + skb = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb) break; } skb_copy_to_linear_data(skb, ((char*)cqe) + 64, cqe->num_bytes_transfered - 4); - ehea_fill_skb(port->netdev, skb, cqe); + ehea_fill_skb(dev, skb, cqe); } else if (rq == 2) { /* RQ2 */ skb = get_skb_by_index(skb_arr_rq2, skb_arr_rq2_len, cqe); @@ -446,7 +536,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, ehea_error("rq2: skb=NULL"); break; } - ehea_fill_skb(port->netdev, skb, cqe); + ehea_fill_skb(dev, skb, cqe); processed_rq2++; } else { /* RQ3 */ skb = get_skb_by_index(skb_arr_rq3, @@ -456,16 +546,12 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, ehea_error("rq3: skb=NULL"); break; } - ehea_fill_skb(port->netdev, skb, cqe); + ehea_fill_skb(dev, skb, cqe); processed_rq3++; } - if ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) - && port->vgrp) - vlan_hwaccel_receive_skb(skb, port->vgrp, - cqe->vlan_tag); - else - netif_receive_skb(skb); + ehea_proc_skb(pr, cqe, skb); + dev->last_rx = jiffies; } else { pr->p_stats.poll_receive_errors++; port_reset = 
ehea_treat_poll_error(pr, rq, cqe, @@ -476,16 +562,16 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, } cqe = ehea_poll_rq1(qp, &wqe_index); } + if (use_lro) + lro_flush_all(&pr->lro_mgr); pr->rx_packets += processed; - *budget -= processed; ehea_refill_rq1(pr, last_wqe_index, processed_rq1); ehea_refill_rq2(pr, processed_rq2); ehea_refill_rq3(pr, processed_rq3); - cqe = ehea_poll_rq1(qp, &wqe_index); - return cqe; + return processed; } static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) @@ -509,8 +595,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) ehea_error("Send Completion Error: Resetting port"); if (netif_msg_tx_err(pr->port)) ehea_dump(cqe, sizeof(*cqe), "Send CQE"); - queue_work(pr->port->adapter->ehea_wq, - &pr->port->reset_task); + schedule_work(&pr->port->reset_task); break; } @@ -548,22 +633,27 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) } #define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 +#define EHEA_POLL_MAX_CQES 65535 -static int ehea_poll(struct net_device *dev, int *budget) +static int ehea_poll(struct napi_struct *napi, int budget) { - struct ehea_port_res *pr = dev->priv; + struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi); + struct net_device *dev = pr->port->netdev; struct ehea_cqe *cqe; struct ehea_cqe *cqe_skb = NULL; int force_irq, wqe_index; - - cqe = ehea_poll_rq1(pr->qp, &wqe_index); - cqe_skb = ehea_poll_cq(pr->send_cq); + int rx = 0; force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); + cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); + + if (!force_irq) + rx += ehea_proc_rwqes(dev, pr, budget - rx); - if ((!cqe && !cqe_skb) || force_irq) { + while ((rx != budget) || force_irq) { pr->poll_counter = 0; - netif_rx_complete(dev); + force_irq = 0; + netif_rx_complete(dev, napi); ehea_reset_cq_ep(pr->recv_cq); ehea_reset_cq_ep(pr->send_cq); ehea_reset_cq_n1(pr->recv_cq); @@ -572,26 +662,35 @@ static int ehea_poll(struct net_device *dev, int *budget) cqe_skb = ehea_poll_cq(pr->send_cq); if (!cqe && !cqe_skb) - return 0; + return rx; - if (!netif_rx_reschedule(dev, dev->quota)) - return 0; + if (!netif_rx_reschedule(dev, napi)) + return rx; + + cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); + rx += ehea_proc_rwqes(dev, pr, budget - rx); } - cqe = ehea_proc_rwqes(dev, pr, budget); - cqe_skb = ehea_proc_cqes(pr, 300); + pr->poll_counter++; + return rx; +} - if (cqe || cqe_skb) - pr->poll_counter++; +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ehea_netpoll(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + int i; - return 1; + for (i = 0; i < port->num_def_qps; i++) + netif_rx_schedule(dev, &port->port_res[i].napi); } +#endif static irqreturn_t ehea_recv_irq_handler(int irq, void *param) { struct ehea_port_res *pr = param; - netif_rx_schedule(pr->d_netdev); + netif_rx_schedule(pr->port->netdev, &pr->napi); return IRQ_HANDLED; } @@ -615,7 +714,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) eqe = ehea_poll_eq(port->qp_eq); } - queue_work(port->adapter->ehea_wq, &port->reset_task); + schedule_work(&port->reset_task); return IRQ_HANDLED; } @@ -795,7 +894,9 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) ehea_error("Failed setting port speed"); } } - netif_carrier_on(port->netdev); + if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) + netif_carrier_on(port->netdev); + kfree(cb4); out: return ret; @@ -850,13 +951,19 @@ static void 
ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) } if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { + port->phy_link = EHEA_PHY_LINK_UP; if (netif_msg_link(port)) ehea_info("%s: Physical port up", port->netdev->name); + if (prop_carrier_state) + netif_carrier_on(port->netdev); } else { + port->phy_link = EHEA_PHY_LINK_DOWN; if (netif_msg_link(port)) ehea_info("%s: Physical port down", port->netdev->name); + if (prop_carrier_state) + netif_carrier_off(port->netdev); } if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) @@ -1205,14 +1312,16 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, kfree(init_attr); - pr->d_netdev = alloc_netdev(0, "", ether_setup); - if (!pr->d_netdev) - goto out_free; - pr->d_netdev->priv = pr; - pr->d_netdev->weight = 64; - pr->d_netdev->poll = ehea_poll; - set_bit(__LINK_STATE_START, &pr->d_netdev->state); - strcpy(pr->d_netdev->name, port->netdev->name); + netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); + + pr->lro_mgr.max_aggr = pr->port->lro_max_aggr; + pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; + pr->lro_mgr.lro_arr = pr->lro_desc; + pr->lro_mgr.get_skb_header = get_skb_hdr; + pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; + pr->lro_mgr.dev = port->netdev; + pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; + pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; ret = 0; goto out; @@ -1235,8 +1344,6 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) { int ret, i; - free_netdev(pr->d_netdev); - ret = ehea_destroy_qp(pr->qp); if (!ret) { @@ -1307,7 +1414,6 @@ static void write_swqe2_TSO(struct sk_buff *skb, u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; int skb_data_size = skb->len - skb->data_len; int headersize; - u64 tmp_addr; /* Packet is TCP with TSO enabled */ swqe->tx_control |= EHEA_SWQE_TSO; @@ -1328,9 +1434,8 @@ static void write_swqe2_TSO(struct sk_buff *skb, /* set sg1entry data */ sg1entry->l_key = lkey; sg1entry->len = skb_data_size - headersize; - - tmp_addr = (u64)(skb->data + headersize); - sg1entry->vaddr = ehea_map_vaddr(tmp_addr); + sg1entry->vaddr = + ehea_map_vaddr(skb->data + headersize); swqe->descriptors++; } } else @@ -1343,7 +1448,6 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, int skb_data_size = skb->len - skb->data_len; u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; - u64 tmp_addr; /* Packet is any nonTSO type * @@ -1360,8 +1464,8 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, /* copy sg1entry data */ sg1entry->l_key = lkey; sg1entry->len = skb_data_size - SWQE2_MAX_IMM; - tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM); - sg1entry->vaddr = ehea_map_vaddr(tmp_addr); + sg1entry->vaddr = + ehea_map_vaddr(skb->data + SWQE2_MAX_IMM); swqe->descriptors++; } } else { @@ -1376,7 +1480,6 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; skb_frag_t *frag; int nfrags, sg1entry_contains_frag_data, i; - u64 tmp_addr; nfrags = skb_shinfo(skb)->nr_frags; sg1entry = &swqe->u.immdata_desc.sg_entry; @@ -1398,9 +1501,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, /* copy sg1entry data */ sg1entry->l_key = lkey; sg1entry->len = frag->size; - tmp_addr = (u64)(page_address(frag->page) - + frag->page_offset); - sg1entry->vaddr = ehea_map_vaddr(tmp_addr); + sg1entry->vaddr = + ehea_map_vaddr(page_address(frag->page) + + frag->page_offset); 
swqe->descriptors++; sg1entry_contains_frag_data = 1; } @@ -1412,10 +1515,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, sgentry->l_key = lkey; sgentry->len = frag->size; - - tmp_addr = (u64)(page_address(frag->page) - + frag->page_offset); - sgentry->vaddr = ehea_map_vaddr(tmp_addr); + sgentry->vaddr = + ehea_map_vaddr(page_address(frag->page) + + frag->page_offset); swqe->descriptors++; } } @@ -1433,7 +1535,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) port->logical_port_id, reg_type, port->mac_addr, 0, hcallid); if (hret != H_SUCCESS) { - ehea_error("reg_dereg_bcmc failed (tagged)"); + ehea_error("%sregistering bc address failed (tagged)", + hcallid == H_REG_BCMC ? "" : "de"); ret = -EIO; goto out_herr; } @@ -1444,7 +1547,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) port->logical_port_id, reg_type, port->mac_addr, 0, hcallid); if (hret != H_SUCCESS) { - ehea_error("reg_dereg_bcmc failed (vlan)"); + ehea_error("%sregistering bc address failed (vlan)", + hcallid == H_REG_BCMC ? "" : "de"); ret = -EIO; } out_herr: @@ -1887,11 +1991,12 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) ehea_dump(swqe, 512, "swqe"); } - if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) - goto out; + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + netif_stop_queue(dev); + swqe->tx_control |= EHEA_SWQE_PURGE; + } ehea_post_swqe(pr->qp, swqe); - pr->tx_packets++; if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { spin_lock_irqsave(&pr->netif_queue, flags); @@ -1904,7 +2009,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) } dev->trans_start = jiffies; spin_unlock(&pr->xmit_lock); -out: + return NETDEV_TX_OK; } @@ -2144,24 +2249,18 @@ static int ehea_clean_all_portres(struct ehea_port *port) return ret; } -static void ehea_remove_adapter_mr (struct ehea_adapter *adapter) +static void ehea_remove_adapter_mr(struct ehea_adapter *adapter) { - int i; - - for (i=0; i < EHEA_MAX_PORTS; i++) - if (adapter->port[i]) - return; + if (adapter->active_ports) + return; ehea_rem_mr(&adapter->mr); } -static int ehea_add_adapter_mr (struct ehea_adapter *adapter) +static int ehea_add_adapter_mr(struct ehea_adapter *adapter) { - int i; - - for (i=0; i < EHEA_MAX_PORTS; i++) - if (adapter->port[i]) - return 0; + if (adapter->active_ports) + return 0; return ehea_reg_kernel_mr(adapter, &adapter->mr); } @@ -2170,7 +2269,6 @@ static int ehea_up(struct net_device *dev) { int ret, i; struct ehea_port *port = netdev_priv(dev); - u64 mac_addr = 0; if (port->state == EHEA_PORT_UP) return 0; @@ -2189,18 +2287,10 @@ static int ehea_up(struct net_device *dev) goto out_clean_pr; } - ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); - if (ret) { - ret = -EIO; - ehea_error("out_clean_pr"); - goto out_clean_pr; - } - mac_addr = (*(u64*)dev->dev_addr) >> 16; - ret = ehea_reg_interrupts(dev); if (ret) { - ehea_error("out_dereg_bc"); - goto out_dereg_bc; + ehea_error("reg_interrupts failed. 
ret:%d", ret); + goto out_clean_pr; } for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { @@ -2226,9 +2316,6 @@ static int ehea_up(struct net_device *dev) out_free_irqs: ehea_free_interrupts(dev); -out_dereg_bc: - ehea_broadcast_reg_helper(port, H_DEREG_BCMC); - out_clean_pr: ehea_clean_all_portres(port); out: @@ -2238,6 +2325,22 @@ out: return ret; } +static void port_napi_disable(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_disable(&port->port_res[i].napi); +} + +static void port_napi_enable(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_enable(&port->port_res[i].napi); +} + static int ehea_open(struct net_device *dev) { int ret; @@ -2249,8 +2352,10 @@ static int ehea_open(struct net_device *dev) ehea_info("enabling port %s", dev->name); ret = ehea_up(dev); - if (!ret) + if (!ret) { + port_napi_enable(port); netif_start_queue(dev); + } up(&port->port_lock); @@ -2259,7 +2364,7 @@ static int ehea_open(struct net_device *dev) static int ehea_down(struct net_device *dev) { - int ret, i; + int ret; struct ehea_port *port = netdev_priv(dev); if (port->state == EHEA_PORT_DOWN) @@ -2268,12 +2373,8 @@ static int ehea_down(struct net_device *dev) ehea_drop_multicast_list(dev); ehea_free_interrupts(dev); - for (i = 0; i < port->num_def_qps; i++) - while (test_bit(__LINK_STATE_RX_SCHED, - &port->port_res[i].d_netdev->state)) - msleep(1); + port_napi_disable(port); - ehea_broadcast_reg_helper(port, H_DEREG_BCMC); port->state = EHEA_PORT_DOWN; ret = ehea_clean_all_portres(port); @@ -2292,7 +2393,7 @@ static int ehea_stop(struct net_device *dev) if (netif_msg_ifdown(port)) ehea_info("disabling port %s", dev->name); - flush_workqueue(port->adapter->ehea_wq); + flush_scheduled_work(); down(&port->port_lock); netif_stop_queue(dev); ret = ehea_down(dev); @@ -2300,6 +2401,192 @@ static int ehea_stop(struct net_device *dev) return ret; } +void ehea_purge_sq(struct ehea_qp *orig_qp) +{ + struct ehea_qp qp = *orig_qp; + struct ehea_qp_init_attr *init_attr = &qp.init_attr; + struct ehea_swqe *swqe; + int wqe_index; + int i; + + for (i = 0; i < init_attr->act_nr_send_wqes; i++) { + swqe = ehea_get_swqe(&qp, &wqe_index); + swqe->tx_control |= EHEA_SWQE_PURGE; + } +} + +int ehea_stop_qps(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + struct hcp_modify_qp_cb0* cb0; + int ret = -EIO; + int dret; + int i; + u64 hret; + u64 dummy64 = 0; + u16 dummy16 = 0; + + cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { + struct ehea_port_res *pr = &port->port_res[i]; + struct ehea_qp *qp = pr->qp; + + /* Purge send queue */ + ehea_purge_sq(qp); + + /* Disable queue pair */ + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (1)"); + goto out; + } + + cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; + cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; + + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, + 1), cb0, &dummy64, + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + ehea_error("modify_ehea_qp failed (1)"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + 
ehea_error("query_ehea_qp failed (2)"); + goto out; + } + + /* deregister shared memory regions */ + dret = ehea_rem_smrs(pr); + if (dret) { + ehea_error("unreg shared memory region failed"); + goto out; + } + } + + ret = 0; +out: + kfree(cb0); + + return ret; +} + +void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr) +{ + struct ehea_qp qp = *orig_qp; + struct ehea_qp_init_attr *init_attr = &qp.init_attr; + struct ehea_rwqe *rwqe; + struct sk_buff **skba_rq2 = pr->rq2_skba.arr; + struct sk_buff **skba_rq3 = pr->rq3_skba.arr; + struct sk_buff *skb; + u32 lkey = pr->recv_mr.lkey; + + + int i; + int index; + + for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { + rwqe = ehea_get_next_rwqe(&qp, 2); + rwqe->sg_list[0].l_key = lkey; + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); + skb = skba_rq2[index]; + if (skb) + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + } + + for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { + rwqe = ehea_get_next_rwqe(&qp, 3); + rwqe->sg_list[0].l_key = lkey; + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); + skb = skba_rq3[index]; + if (skb) + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + } +} + +int ehea_restart_qps(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + int ret = 0; + int i; + + struct hcp_modify_qp_cb0* cb0; + u64 hret; + u64 dummy64 = 0; + u16 dummy16 = 0; + + cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { + struct ehea_port_res *pr = &port->port_res[i]; + struct ehea_qp *qp = pr->qp; + + ret = ehea_gen_smrs(pr); + if (ret) { + ehea_error("creation of shared memory regions failed"); + goto out; + } + + ehea_update_rqs(qp, pr); + + /* Enable queue pair */ + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (1)"); + goto out; + } + + cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; + cb0->qp_ctl_reg |= H_QP_CR_ENABLED; + + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, + 1), cb0, &dummy64, + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + ehea_error("modify_ehea_qp failed (1)"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (2)"); + goto out; + } + + /* refill entire queue */ + ehea_refill_rq1(pr, pr->rq1_skba.index, 0); + ehea_refill_rq2(pr, 0); + ehea_refill_rq3(pr, 0); + } +out: + kfree(cb0); + + return ret; +} + static void ehea_reset_port(struct work_struct *work) { int ret; @@ -2310,7 +2597,8 @@ static void ehea_reset_port(struct work_struct *work) port->resets++; down(&port->port_lock); netif_stop_queue(dev); - netif_poll_disable(dev); + + port_napi_disable(port); ehea_down(dev); @@ -2318,10 +2606,13 @@ static void ehea_reset_port(struct work_struct *work) if (ret) goto out; + ehea_set_multicast_list(dev); + if (netif_msg_timer(port)) ehea_info("Device %s resetted successfully", dev->name); - netif_poll_enable(dev); + port_napi_enable(port); + netif_wake_queue(dev); out: up(&port->port_lock); @@ -2333,6 +2624,7 @@ static void ehea_rereg_mrs(struct work_struct *work) int ret, i; struct ehea_adapter *adapter; + down(&dlpar_mem_lock); ehea_info("LPAR memory enlarged - 
re-initializing driver"); list_for_each_entry(adapter, &adapter_list, list) @@ -2345,12 +2637,14 @@ static void ehea_rereg_mrs(struct work_struct *work) struct net_device *dev = port->netdev; if (dev->flags & IFF_UP) { - ehea_info("stopping %s", - dev->name); down(&port->port_lock); netif_stop_queue(dev); - netif_poll_disable(dev); - ehea_down(dev); + ret = ehea_stop_qps(dev); + if (ret) { + up(&port->port_lock); + goto out; + } + port_napi_disable(port); up(&port->port_lock); } } @@ -2366,10 +2660,11 @@ static void ehea_rereg_mrs(struct work_struct *work) } ehea_destroy_busmap(); - ret = ehea_create_busmap(); - if (ret) + if (ret) { + ehea_error("creating ehea busmap failed"); goto out; + } clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); @@ -2391,21 +2686,18 @@ static void ehea_rereg_mrs(struct work_struct *work) struct net_device *dev = port->netdev; if (dev->flags & IFF_UP) { - ehea_info("restarting %s", - dev->name); down(&port->port_lock); - - ret = ehea_up(dev); - if (!ret) { - netif_poll_enable(dev); + port_napi_enable(port); + ret = ehea_restart_qps(dev); + if (!ret) netif_wake_queue(dev); - } - up(&port->port_lock); } } } } + up(&dlpar_mem_lock); + ehea_info("re-initializing driver complete"); out: return; } @@ -2414,8 +2706,9 @@ static void ehea_tx_watchdog(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); - if (netif_carrier_ok(dev)) - queue_work(port->adapter->ehea_wq, &port->reset_task); + if (netif_carrier_ok(dev) && + !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) + schedule_work(&port->reset_task); } int ehea_sense_adapter_attr(struct ehea_adapter *adapter) @@ -2493,7 +2786,7 @@ static ssize_t ehea_show_port_id(struct device *dev, struct device_attribute *attr, char *buf) { struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); - return sprintf(buf, "0x%X", port->logical_port_id); + return sprintf(buf, "%d", port->logical_port_id); } static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, @@ -2630,13 +2923,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, SET_NETDEV_DEV(dev, port_dev); /* initialize net_device structure */ - SET_MODULE_OWNER(dev); - memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); dev->open = ehea_open; - dev->poll = ehea_poll; - dev->weight = 64; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = ehea_netpoll; +#endif dev->stop = ehea_stop; dev->hard_start_xmit = ehea_start_xmit; dev->get_stats = ehea_get_stats; @@ -2655,14 +2947,22 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, INIT_WORK(&port->reset_task, ehea_reset_port); + ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); + if (ret) { + ret = -EIO; + goto out_unreg_port; + } + ehea_set_ethtool_ops(dev); ret = register_netdev(dev); if (ret) { ehea_error("register_netdev failed. 
ret=%d", ret); - goto out_unreg_port; + goto out_dereg_bc; } + port->lro_max_aggr = lro_max_aggr; + ret = ehea_get_jumboframe_status(port, &jumbo); if (ret) ehea_error("failed determining jumbo frame status for %s", @@ -2675,6 +2975,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, return port; +out_dereg_bc: + ehea_broadcast_reg_helper(port, H_DEREG_BCMC); + out_unreg_port: ehea_unregister_port(port); @@ -2694,6 +2997,7 @@ static void ehea_shutdown_single_port(struct ehea_port *port) { unregister_netdev(port->netdev); ehea_unregister_port(port); + ehea_broadcast_reg_helper(port, H_DEREG_BCMC); kfree(port->mc_list); free_netdev(port->netdev); port->adapter->active_ports--; @@ -2771,7 +3075,7 @@ static ssize_t ehea_probe_port(struct device *dev, u32 logical_port_id; - sscanf(buf, "%X", &logical_port_id); + sscanf(buf, "%d", &logical_port_id); port = ehea_get_port(adapter, logical_port_id); @@ -2824,7 +3128,7 @@ static ssize_t ehea_remove_port(struct device *dev, int i; u32 logical_port_id; - sscanf(buf, "%X", &logical_port_id); + sscanf(buf, "%d", &logical_port_id); port = ehea_get_port(adapter, logical_port_id); @@ -2937,15 +3241,9 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, goto out_kill_eq; } - adapter->ehea_wq = create_workqueue("ehea_wq"); - if (!adapter->ehea_wq) { - ret = -EIO; - goto out_free_irq; - } - ret = ehea_create_device_sysfs(dev); if (ret) - goto out_kill_wq; + goto out_free_irq; ret = ehea_setup_ports(adapter); if (ret) { @@ -2959,9 +3257,6 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, out_rem_dev_sysfs: ehea_remove_device_sysfs(dev); -out_kill_wq: - destroy_workqueue(adapter->ehea_wq); - out_free_irq: ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); @@ -2987,7 +3282,7 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev) ehea_remove_device_sysfs(dev); - destroy_workqueue(adapter->ehea_wq); + flush_scheduled_work(); ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); tasklet_kill(&adapter->neq_tasklet); @@ -3045,9 +3340,9 @@ int __init ehea_module_init(void) printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); - ehea_driver_wq = create_workqueue("ehea_driver_wq"); INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); + sema_init(&dlpar_mem_lock, 1); ret = check_module_parm(); if (ret) @@ -3078,6 +3373,7 @@ out: static void __exit ehea_module_exit(void) { + flush_scheduled_work(); driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); ehea_destroy_busmap(); diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index 89b63531ff26..faa191d23b86 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h @@ -126,6 +126,7 @@ struct hcp_modify_qp_cb0 { #define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */ #define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */ #define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */ +#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */ struct hcp_modify_qp_cb1 { u32 qpn; /* 00 */ diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index a36fa6c23fdf..83b76432b41a 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -34,7 +34,6 @@ struct ehea_busmap ehea_bmap = { 0, 0, NULL }; extern u64 ehea_driver_flags; -extern struct workqueue_struct *ehea_driver_wq; extern struct work_struct ehea_rereg_mr_task; @@ -235,6 +234,8 @@ int ehea_destroy_cq(struct 
ehea_cq *cq) if (!cq) return 0; + hcp_epas_dtor(&cq->epas); + if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) { ehea_error_data(cq->adapter, cq->fw_handle); hret = ehea_destroy_cq_res(cq, FORCE_FREE); @@ -361,6 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq) if (!eq) return 0; + hcp_epas_dtor(&eq->epas); + if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) { ehea_error_data(eq->adapter, eq->fw_handle); hret = ehea_destroy_eq_res(eq, FORCE_FREE); @@ -541,6 +544,8 @@ int ehea_destroy_qp(struct ehea_qp *qp) if (!qp) return 0; + hcp_epas_dtor(&qp->epas); + if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { ehea_error_data(qp->adapter, qp->fw_handle); hret = ehea_destroy_qp_res(qp, FORCE_FREE); @@ -557,8 +562,7 @@ int ehea_destroy_qp(struct ehea_qp *qp) int ehea_create_busmap( void ) { u64 vaddr = EHEA_BUSMAP_START; - unsigned long abs_max_pfn = 0; - unsigned long sec_max_pfn; + unsigned long high_section_index = 0; int i; /* @@ -568,14 +572,10 @@ int ehea_create_busmap( void ) ehea_bmap.valid_sections = 0; for (i = 0; i < NR_MEM_SECTIONS; i++) - if (valid_section_nr(i)) { - sec_max_pfn = section_nr_to_pfn(i); - if (sec_max_pfn > abs_max_pfn) - abs_max_pfn = sec_max_pfn; - ehea_bmap.valid_sections++; - } + if (valid_section_nr(i)) + high_section_index = i; - ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1; + ehea_bmap.entries = high_section_index + 1; ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); if (!ehea_bmap.vaddr) @@ -587,6 +587,7 @@ int ehea_create_busmap( void ) if (pfn_valid(pfn)) { ehea_bmap.vaddr[i] = vaddr; vaddr += EHEA_SECTSIZE; + ehea_bmap.valid_sections++; } else ehea_bmap.vaddr[i] = 0; } @@ -616,7 +617,7 @@ u64 ehea_map_vaddr(void *caddr) if (unlikely(mapped_addr == -1)) if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) - queue_work(ehea_driver_wq, &ehea_rereg_mr_task); + schedule_work(&ehea_rereg_mr_task); return mapped_addr; } @@ -631,7 +632,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; - pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL); + pt = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!pt) { ehea_error("no mem"); ret = -ENOMEM; @@ -654,8 +655,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) void *sectbase = __va(i << SECTION_SIZE_BITS); unsigned long k = 0; - for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE); - j++) { + for (j = 0; j < (EHEA_PAGES_PER_SECTION / + EHEA_MAX_RPAGE); j++) { for (m = 0; m < EHEA_MAX_RPAGE; m++) { pg = sectbase + ((k++) * EHEA_PAGESIZE); diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index b71f8452a5e3..562de0ebdd85 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -39,7 +39,7 @@ #define EHEA_PAGESHIFT 12 #define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) #define EHEA_SECTSIZE (1UL << 24) -#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT) +#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE #error eHEA module can't work if kernel sectionsize < ehea sectionsize @@ -145,7 +145,7 @@ struct ehea_rwqe { #define EHEA_CQE_VLAN_TAG_XTRACT 0x0400 #define EHEA_CQE_TYPE_RQ 0x60 -#define EHEA_CQE_STAT_ERR_MASK 0x721F +#define EHEA_CQE_STAT_ERR_MASK 0x720F #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F #define EHEA_CQE_STAT_ERR_TCP 0x4000 #define EHEA_CQE_STAT_ERR_IP 0x2000 diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 
119778401e48..ecdd3fc8d70c 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -262,6 +262,7 @@ struct epic_private { /* Ring pointers. */ spinlock_t lock; /* Group with Tx control cache line. */ spinlock_t napi_lock; + struct napi_struct napi; unsigned int reschedule_in_poll; unsigned int cur_tx, dirty_tx; @@ -294,7 +295,7 @@ static void epic_tx_timeout(struct net_device *dev); static void epic_init_ring(struct net_device *dev); static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev); static int epic_rx(struct net_device *dev, int budget); -static int epic_poll(struct net_device *dev, int *budget); +static int epic_poll(struct napi_struct *napi, int budget); static irqreturn_t epic_interrupt(int irq, void *dev_instance); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; @@ -316,6 +317,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, int i, ret, option = 0, duplex = 0; void *ring_space; dma_addr_t ring_dma; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -351,7 +353,6 @@ static int __devinit epic_init_one (struct pci_dev *pdev, dev_err(&pdev->dev, "no memory for eth device\n"); goto err_out_free_res; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #ifdef USE_IO_OPS @@ -487,18 +488,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev, dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->tx_timeout = &epic_tx_timeout; - dev->poll = epic_poll; - dev->weight = 64; + netif_napi_add(dev, &ep->napi, epic_poll, 64); ret = register_netdev(dev); if (ret < 0) goto err_out_unmap_rx; - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", - dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x.\n", dev->dev_addr[i]); + printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %s\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, + print_mac(mac, dev->dev_addr)); out: return ret; @@ -660,8 +658,11 @@ static int epic_open(struct net_device *dev) /* Soft reset the chip. */ outl(0x4001, ioaddr + GENCTL); - if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) + napi_enable(&ep->napi); + if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) { + napi_disable(&ep->napi); return retval; + } epic_init_ring(dev); @@ -1103,9 +1104,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { spin_lock(&ep->napi_lock); - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &ep->napi)) { epic_napi_irq_off(dev, ep); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &ep->napi); } else ep->reschedule_in_poll++; spin_unlock(&ep->napi_lock); @@ -1257,26 +1258,22 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep) outw(RxQueued, ioaddr + COMMAND); } -static int epic_poll(struct net_device *dev, int *budget) +static int epic_poll(struct napi_struct *napi, int budget) { - struct epic_private *ep = dev->priv; - int work_done = 0, orig_budget; + struct epic_private *ep = container_of(napi, struct epic_private, napi); + struct net_device *dev = ep->mii.dev; + int work_done = 0; long ioaddr = dev->base_addr; - orig_budget = (*budget > dev->quota) ? 
dev->quota : *budget; - rx_action: epic_tx(dev, ep); - work_done += epic_rx(dev, *budget); + work_done += epic_rx(dev, budget); epic_rx_err(dev, ep); - *budget -= work_done; - dev->quota -= work_done; - - if (netif_running(dev) && (work_done < orig_budget)) { + if (netif_running(dev) && (work_done < budget)) { unsigned long flags; int more; @@ -1286,7 +1283,7 @@ rx_action: more = ep->reschedule_in_poll; if (!more) { - __netif_rx_complete(dev); + __netif_rx_complete(dev, napi); outl(EpicNapiEvent, ioaddr + INTSTAT); epic_napi_irq_on(dev, ep); } else @@ -1298,7 +1295,7 @@ rx_action: goto rx_action; } - return (work_done >= orig_budget); + return work_done; } static int epic_close(struct net_device *dev) @@ -1309,6 +1306,7 @@ static int epic_close(struct net_device *dev) int i; netif_stop_queue(dev); + napi_disable(&ep->napi); if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", @@ -1495,8 +1493,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, .begin = ethtool_begin, .complete = ethtool_complete }; diff --git a/drivers/net/eql.c b/drivers/net/eql.c index a93700e5661a..18f1364d3d5b 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -116,6 +116,7 @@ #include <linux/init.h> #include <linux/timer.h> #include <linux/netdevice.h> +#include <net/net_namespace.h> #include <linux/if.h> #include <linux/if_arp.h> @@ -127,7 +128,6 @@ static int eql_open(struct net_device *dev); static int eql_close(struct net_device *dev); static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *eql_get_stats(struct net_device *dev); #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) @@ -166,8 +166,6 @@ static void __init eql_setup(struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); - SET_MODULE_OWNER(dev); - init_timer(&eql->timer); eql->timer.data = (unsigned long) eql; eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; @@ -181,7 +179,6 @@ static void __init eql_setup(struct net_device *dev) dev->stop = eql_close; dev->do_ioctl = eql_ioctl; dev->hard_start_xmit = eql_slave_xmit; - dev->get_stats = eql_get_stats; /* * Now we undo some of the things that eth_setup does @@ -338,9 +335,9 @@ static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) skb->priority = 1; slave->bytes_queued += skb->len; dev_queue_xmit(skb); - eql->stats.tx_packets++; + dev->stats.tx_packets++; } else { - eql->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } @@ -349,12 +346,6 @@ static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats * eql_get_stats(struct net_device *dev) -{ - equalizer_t *eql = netdev_priv(dev); - return &eql->stats; -} - /* * Private ioctl functions */ @@ -391,7 +382,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) slave_t *duplicate_slave = NULL; duplicate_slave = __eql_find_slave_dev(queue, slave->dev); - if (duplicate_slave != 0) + if (duplicate_slave) eql_kill_one_slave(queue, duplicate_slave); list_add(&slave->list, &queue->all_slaves); @@ -412,7 +403,7 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user * if (copy_from_user(&srq, srqp, 
sizeof (slaving_request_t))) return -EFAULT; - slave_dev = dev_get_by_name(srq.slave_name); + slave_dev = dev_get_by_name(&init_net, srq.slave_name); if (slave_dev) { if ((master_dev->flags & IFF_UP) == IFF_UP) { /* slave is not a master & not already a slave: */ @@ -460,7 +451,7 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) return -EFAULT; - slave_dev = dev_get_by_name(srq.slave_name); + slave_dev = dev_get_by_name(&init_net, srq.slave_name); ret = -EINVAL; if (slave_dev) { spin_lock_bh(&eql->queue.lock); @@ -493,7 +484,7 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp) if (copy_from_user(&sc, scp, sizeof (slave_config_t))) return -EFAULT; - slave_dev = dev_get_by_name(sc.slave_name); + slave_dev = dev_get_by_name(&init_net, sc.slave_name); if (!slave_dev) return -ENODEV; @@ -528,7 +519,7 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) if (copy_from_user(&sc, scp, sizeof (slave_config_t))) return -EFAULT; - slave_dev = dev_get_by_name(sc.slave_name); + slave_dev = dev_get_by_name(&init_net, sc.slave_name); if (!slave_dev) return -ENODEV; diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 822e5bfd1a71..deefa51b8c31 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c @@ -130,8 +130,6 @@ static int __init do_es_probe(struct net_device *dev) int irq = dev->irq; int mem_start = dev->mem_start; - SET_MODULE_OWNER(dev); - if (ioaddr > 0x1ff) /* Check a single specified location. */ return es_probe1(dev, ioaddr); else if (ioaddr > 0) /* Don't probe at all. */ @@ -181,6 +179,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) { int i, retval; unsigned long eisa_id; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210")) return -ENODEV; @@ -192,7 +191,6 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6)); #endif - /* Check the EISA ID of the card. */ eisa_id = inl(ioaddr + ES_ID_PORT); if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) { @@ -200,21 +198,21 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) goto out; } + for (i = 0; i < ETHER_ADDR_LEN ; i++) + dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); + /* Check the Racal vendor ID as well. */ - if (inb(ioaddr + ES_SA_PROM + 0) != ES_ADDR0 - || inb(ioaddr + ES_SA_PROM + 1) != ES_ADDR1 - || inb(ioaddr + ES_SA_PROM + 2) != ES_ADDR2 ) { - printk("es3210.c: card not found"); - for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", inb(ioaddr + ES_SA_PROM + i)); - printk(" (invalid prefix).\n"); + if (dev->dev_addr[0] != ES_ADDR0 || + dev->dev_addr[1] != ES_ADDR1 || + dev->dev_addr[2] != ES_ADDR2) { + printk("es3210.c: card not found %s (invalid_prefix).\n", + print_mac(mac, dev->dev_addr)); retval = -ENODEV; goto out; } - printk("es3210.c: ES3210 rev. %ld at %#x, node", eisa_id>>24, ioaddr); - for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i))); + printk("es3210.c: ES3210 rev. %ld at %#x, node %s", + eisa_id>>24, ioaddr, print_mac(mac, dev->dev_addr)); /* Snarf the interrupt now. 
*/ if (dev->irq == 0) { diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 04abf59e5007..243fc6b354b5 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -380,7 +380,6 @@ static unsigned int eth16i_debug = ETH16I_DEBUG; /* Information for each board */ struct eth16i_local { - struct net_device_stats stats; unsigned char tx_started; unsigned char tx_buf_busy; unsigned short tx_queue; /* Number of packets in transmit buffer */ @@ -426,8 +425,6 @@ static int eth16i_set_irq(struct net_device *dev); static ushort eth16i_parse_mediatype(const char* s); #endif -static struct net_device_stats *eth16i_get_stats(struct net_device *dev); - static char cardname[] __initdata = "ICL EtherTeam 16i/32"; static int __init do_eth16i_probe(struct net_device *dev) @@ -436,8 +433,6 @@ static int __init do_eth16i_probe(struct net_device *dev) int ioaddr; int base_addr = dev->base_addr; - SET_MODULE_OWNER(dev); - if(eth16i_debug > 4) printk(KERN_DEBUG "Probing started for %s\n", cardname); @@ -559,7 +554,6 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr) dev->open = eth16i_open; dev->stop = eth16i_close; dev->hard_start_xmit = eth16i_tx; - dev->get_stats = eth16i_get_stats; dev->set_multicast_list = eth16i_multicast; dev->tx_timeout = eth16i_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1047,7 +1041,7 @@ static void eth16i_timeout(struct net_device *dev) printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len); printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started); } - lp->stats.tx_errors++; + dev->stats.tx_errors++; eth16i_reset(dev); dev->trans_start = jiffies; outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); @@ -1132,7 +1126,6 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev) static void eth16i_rx(struct net_device *dev) { - struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int boguscount = MAX_RX_LOOP; @@ -1151,16 +1144,16 @@ static void eth16i_rx(struct net_device *dev) inb(ioaddr + RECEIVE_MODE_REG), status); if( !(status & PKT_GOOD) ) { - lp->stats.rx_errors++; + dev->stats.rx_errors++; if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) { - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; eth16i_reset(dev); return; } else { eth16i_skip_packet(dev); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else { /* Ok so now we should have a good packet */ @@ -1171,7 +1164,7 @@ static void eth16i_rx(struct net_device *dev) printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n", dev->name, pkt_len); eth16i_skip_packet(dev); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -1214,8 +1207,8 @@ static void eth16i_rx(struct net_device *dev) } netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } /* else */ @@ -1252,32 +1245,32 @@ static irqreturn_t eth16i_interrupt(int irq, void *dev_id) if( status & 0x7f00 ) { - lp->stats.rx_errors++; + dev->stats.rx_errors++; if(status & (BUS_RD_ERR << 8) ) printk(KERN_WARNING "%s: Bus read error.\n",dev->name); - if(status & (SHORT_PKT_ERR << 8) ) lp->stats.rx_length_errors++; - if(status & (ALIGN_ERR << 8) ) lp->stats.rx_frame_errors++; - if(status & (CRC_ERR << 8) ) lp->stats.rx_crc_errors++; - if(status & (RX_BUF_OVERFLOW << 8) ) lp->stats.rx_over_errors++; + if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++; + if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++; + if(status & (CRC_ERR << 8) ) 
dev->stats.rx_crc_errors++; + if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++; } if( status & 0x001a) { - lp->stats.tx_errors++; + dev->stats.tx_errors++; - if(status & CR_LOST) lp->stats.tx_carrier_errors++; - if(status & TX_JABBER_ERR) lp->stats.tx_window_errors++; + if(status & CR_LOST) dev->stats.tx_carrier_errors++; + if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++; #if 0 if(status & COLLISION) { - lp->stats.collisions += + dev->stats.collisions += ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4); } #endif if(status & COLLISIONS_16) { if(lp->col_16 < MAX_COL_16) { lp->col_16++; - lp->stats.collisions++; + dev->stats.collisions++; /* Resume transmitting, skip failed packet */ outb(0x02, ioaddr + COL_16_REG); } @@ -1290,8 +1283,8 @@ static irqreturn_t eth16i_interrupt(int irq, void *dev_id) if( status & 0x00ff ) { /* Let's check the transmit status reg */ if(status & TX_DONE) { /* The transmit has been done */ - lp->stats.tx_packets = lp->tx_buffered_packets; - lp->stats.tx_bytes += lp->tx_buffered_bytes; + dev->stats.tx_packets = lp->tx_buffered_packets; + dev->stats.tx_bytes += lp->tx_buffered_bytes; lp->col_16 = 0; if(lp->tx_queue) { /* Is there still packets ? */ @@ -1371,12 +1364,6 @@ static void eth16i_multicast(struct net_device *dev) } } -static struct net_device_stats *eth16i_get_stats(struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - return &lp->stats; -} - static void eth16i_select_regbank(unsigned char banknbr, int ioaddr) { unsigned char data; diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index cb0792c187ba..593a120e31b2 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c @@ -275,7 +275,6 @@ struct ewrk3_private { u_long shmem_base; /* Shared memory start address */ void __iomem *shmem; u_long shmem_length; /* Shared memory window length */ - struct net_device_stats stats; /* Public stats */ struct ewrk3_stats pktStats; /* Private stats counters */ u_char irq_mask; /* Adapter IRQ mask bits */ u_char mPage; /* Maximum 2kB Page number */ @@ -302,7 +301,6 @@ static int ewrk3_open(struct net_device *dev); static int ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); static int ewrk3_close(struct net_device *dev); -static struct net_device_stats *ewrk3_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops ethtool_ops_203; @@ -356,7 +354,6 @@ struct net_device * __init ewrk3_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); err = ewrk3_probe1(dev, dev->base_addr, dev->irq); if (err) @@ -399,6 +396,7 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) u_long mem_start, shmem_length; u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0; u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0; + DECLARE_MAC_BUF(mac); /* ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot. @@ -463,10 +461,7 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) if (lemac != LeMAC2) DevicePresent(iobase); /* need after EWRK3_INIT */ status = get_hw_addr(dev, eeprom_image, lemac); - for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. 
*/ - printk("%2.2x:", dev->dev_addr[i]); - } - printk("%2.2x,\n", dev->dev_addr[i]); + printk("%s\n", print_mac(mac, dev->dev_addr)); if (status) { printk(" which has an EEPROM CRC error.\n"); @@ -612,7 +607,6 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) dev->open = ewrk3_open; dev->hard_start_xmit = ewrk3_queue_pkt; dev->stop = ewrk3_close; - dev->get_stats = ewrk3_get_stats; dev->set_multicast_list = set_multicast_list; dev->do_ioctl = ewrk3_ioctl; if (lp->adapter_name[4] == '3') @@ -632,7 +626,7 @@ static int ewrk3_open(struct net_device *dev) { struct ewrk3_private *lp = netdev_priv(dev); u_long iobase = dev->base_addr; - int i, status = 0; + int status = 0; u_char icr, csr; /* @@ -652,12 +646,10 @@ static int ewrk3_open(struct net_device *dev) ewrk3_init(dev); if (ewrk3_debug > 1) { + DECLARE_MAC_BUF(mac); printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq); - printk(" physical address: "); - for (i = 0; i < 5; i++) { - printk("%2.2x:", (u_char) dev->dev_addr[i]); - } - printk("%2.2x\n", (u_char) dev->dev_addr[i]); + printk(" physical address: %s\n", + print_mac(mac, dev->dev_addr)); if (lp->shmem_length == 0) { printk(" no shared memory, I/O only mode\n"); } else { @@ -864,7 +856,7 @@ static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev) ENABLE_IRQs; spin_unlock_irq (&lp->hw_lock); - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; dev_kfree_skb (skb); @@ -981,13 +973,13 @@ static int ewrk3_rx(struct net_device *dev) } if (!(rx_status & R_ROK)) { /* There was an error. */ - lp->stats.rx_errors++; /* Update the error stats. */ + dev->stats.rx_errors++; /* Update the error stats. */ if (rx_status & R_DBE) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & R_CRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rx_status & R_PLL) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } else { struct sk_buff *skb; @@ -1038,11 +1030,11 @@ static int ewrk3_rx(struct net_device *dev) ** Update stats */ dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } else { printk("%s: Insufficient memory; nuking packet.\n", dev->name); - lp->stats.rx_dropped++; /* Really, deferred. */ + dev->stats.rx_dropped++; /* Really, deferred. 
*/ break; } } @@ -1072,11 +1064,11 @@ static int ewrk3_tx(struct net_device *dev) while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */ if (tx_status & T_VSTS) { /* The status is valid */ if (tx_status & T_TXE) { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & T_NCL) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & T_LCL) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (tx_status & T_CTU) { if ((tx_status & T_COLL) ^ T_XUR) { lp->pktStats.tx_underruns++; @@ -1085,13 +1077,13 @@ static int ewrk3_tx(struct net_device *dev) } } else if (tx_status & T_COLL) { if ((tx_status & T_COLL) ^ T_XCOLL) { - lp->stats.collisions++; + dev->stats.collisions++; } else { lp->pktStats.excessive_collisions++; } } } else { - lp->stats.tx_packets++; + dev->stats.tx_packets++; } } } @@ -1134,14 +1126,6 @@ static int ewrk3_close(struct net_device *dev) return 0; } -static struct net_device_stats *ewrk3_get_stats(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - - /* Null body since there is no framing error counter */ - return &lp->stats; -} - /* ** Set or clear the multicast filter for this adapter. */ diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index ff9f177d7157..43f7647ff246 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -486,6 +486,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, #else int bar = 1; #endif + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -527,7 +528,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, err = -ENOMEM; goto err_out_unmap; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* read ethernet id */ @@ -665,11 +665,9 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, if (err) goto err_out_free_tx; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr, + print_mac(mac, dev->dev_addr), irq); return 0; @@ -1892,8 +1890,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, }; static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 03023dd17829..2b5782056dda 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -47,6 +47,7 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/pgtable.h> +#include <asm/cacheflush.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \ defined(CONFIG_M5272) || defined(CONFIG_M528x) || \ @@ -98,8 +99,6 @@ static unsigned char fec_mac_default[] = { #define FEC_FLASHMAC 0xf0006006 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) #define FEC_FLASHMAC 0xf0006000 -#elif defined (CONFIG_MTD_KeyTechnology) -#define FEC_FLASHMAC 0xffe04000 #elif defined(CONFIG_CANCam) #define FEC_FLASHMAC 0xf0020000 #elif defined (CONFIG_M5272C3) @@ -191,6 +190,8 @@ struct fec_enet_private { /* Hardware registers of the FEC device */ volatile fec_t *hwp; + struct net_device *netdev; + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; @@ -203,7 +204,6 @@ struct fec_enet_private { cbd_t *tx_bd_base; cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ cbd_t *dirty_tx; /* The ring entries to be free()ed. */ - struct net_device_stats stats; uint tx_full; spinlock_t lock; @@ -233,7 +233,6 @@ static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); static void fec_enet_tx(struct net_device *dev); static void fec_enet_rx(struct net_device *dev); static int fec_enet_close(struct net_device *dev); -static struct net_device_stats *fec_enet_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void fec_restart(struct net_device *dev, int duplex); static void fec_stop(struct net_device *dev); @@ -358,7 +357,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) */ fep->tx_skbuff[fep->skb_cur] = skb; - fep->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; /* Push the data cache so the CPM does not get stale memory @@ -408,7 +407,7 @@ fec_timeout(struct net_device *dev) struct fec_enet_private *fep = netdev_priv(dev); printk("%s: transmit timed out.\n", dev->name); - fep->stats.tx_errors++; + dev->stats.tx_errors++; #ifndef final_version { int i; @@ -510,19 +509,19 @@ fec_enet_tx(struct net_device *dev) if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { - fep->stats.tx_errors++; + dev->stats.tx_errors++; if (status & BD_ENET_TX_HB) /* No heartbeat */ - fep->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (status & BD_ENET_TX_LC) /* Late collision */ - fep->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (status & BD_ENET_TX_RL) /* Retrans limit */ - fep->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & BD_ENET_TX_UN) /* Underrun */ - fep->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (status & BD_ENET_TX_CSL) /* Carrier lost */ - fep->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } else { - fep->stats.tx_packets++; + dev->stats.tx_packets++; } #ifndef final_version @@ -533,7 +532,7 @@ fec_enet_tx(struct net_device *dev) * but we eventually sent the packet OK. */ if (status & BD_ENET_TX_DEF) - fep->stats.collisions++; + dev->stats.collisions++; /* Free the sk buffer associated with this last transmit. */ @@ -606,17 +605,17 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { /* Check for errors. */ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { - fep->stats.rx_errors++; + dev->stats.rx_errors++; if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { /* Frame too long or too short. */ - fep->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } if (status & BD_ENET_RX_NO) /* Frame alignment */ - fep->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & BD_ENET_RX_CR) /* CRC Error */ - fep->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (status & BD_ENET_RX_OV) /* FIFO overrun */ - fep->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } /* Report late collisions as a frame error. @@ -624,16 +623,16 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { * have in the buffer. So, just drop this frame on the floor. 
*/ if (status & BD_ENET_RX_CL) { - fep->stats.rx_errors++; - fep->stats.rx_frame_errors++; + dev->stats.rx_errors++; + dev->stats.rx_frame_errors++; goto rx_processing_done; } /* Process the incoming frame. */ - fep->stats.rx_packets++; + dev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; - fep->stats.rx_bytes += pkt_len; + dev->stats.rx_bytes += pkt_len; data = (__u8*)__va(bdp->cbd_bufaddr); /* This does 16 byte alignment, exactly what we need. @@ -645,7 +644,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - fep->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { skb_put(skb,pkt_len-4); /* Make room */ skb_copy_to_linear_data(skb, data, pkt_len-4); @@ -1269,7 +1268,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3); *icrp = 0x00000ddd; icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); - *icrp = (*icrp & 0x70777777) | 0x0d000000; + *icrp = 0x0d000000; } static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) @@ -1331,7 +1330,7 @@ static void __inline__ fec_disable_phy_intr(void) { volatile unsigned long *icrp; icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); - *icrp = (*icrp & 0x70777777) | 0x08000000; + *icrp = 0x08000000; } static void __inline__ fec_phy_ack_intr(void) @@ -1339,7 +1338,7 @@ static void __inline__ fec_phy_ack_intr(void) volatile unsigned long *icrp; /* Acknowledge the interrupt */ icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); - *icrp = (*icrp & 0x77777777) | 0x08000000; + *icrp = 0x0d000000; } static void __inline__ fec_localhw_setup(void) @@ -1426,6 +1425,29 @@ static void __inline__ fec_request_intrs(struct net_device *dev) *gpio_pehlpar = 0xc0; } #endif + +#if defined(CONFIG_M527x) + /* Set up gpio outputs for MII lines */ + { + volatile u8 *gpio_par_fec; + volatile u16 *gpio_par_feci2c; + + gpio_par_feci2c = (volatile u16 *)(MCF_IPSBAR + 0x100082); + /* Set up gpio outputs for FEC0 MII lines */ + gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100078); + + *gpio_par_feci2c |= 0x0f00; + *gpio_par_fec |= 0xc0; + +#if defined(CONFIG_FEC2) + /* Set up gpio outputs for FEC1 MII lines */ + gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100079); + + *gpio_par_feci2c |= 0x00a0; + *gpio_par_fec |= 0xc0; +#endif /* CONFIG_FEC2 */ + } +#endif /* CONFIG_M527x */ } static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) @@ -1940,9 +1962,10 @@ static void mii_display_status(struct net_device *dev) printk(".\n"); } -static void mii_display_config(struct net_device *dev) +static void mii_display_config(struct work_struct *work) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); + struct net_device *dev = fep->netdev; uint status = fep->phy_status; /* @@ -1976,9 +1999,10 @@ static void mii_display_config(struct net_device *dev) fep->sequence_done = 1; } -static void mii_relink(struct net_device *dev) +static void mii_relink(struct work_struct *work) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); + struct net_device *dev = fep->netdev; int duplex; /* @@ -2022,7 +2046,7 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev) return; fep->mii_phy_task_queued = 1; - INIT_WORK(&fep->phy_task, (void*)mii_relink, dev); + 
INIT_WORK(&fep->phy_task, mii_relink); schedule_work(&fep->phy_task); } @@ -2035,7 +2059,7 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev) return; fep->mii_phy_task_queued = 1; - INIT_WORK(&fep->phy_task, (void*)mii_display_config, dev); + INIT_WORK(&fep->phy_task, mii_display_config); schedule_work(&fep->phy_task); } @@ -2194,13 +2218,6 @@ fec_enet_close(struct net_device *dev) return 0; } -static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - return &fep->stats; -} - /* Set or clear the multicast filter for this adaptor. * Skeleton taken from sunlance driver. * The CPM Ethernet implementation allows Multicast as well as individual @@ -2330,6 +2347,7 @@ int __init fec_enet_init(struct net_device *dev) fep->index = index; fep->hwp = fecp; + fep->netdev = dev; /* Whack a reset. We should wait for this. */ @@ -2435,7 +2453,6 @@ int __init fec_enet_init(struct net_device *dev) dev->tx_timeout = fec_timeout; dev->watchdog_timeo = TX_TIMEOUT; dev->stop = fec_enet_close; - dev->get_stats = fec_enet_get_stats; dev->set_multicast_list = set_multicast_list; for (i=0; i<NMII-1; i++) @@ -2618,6 +2635,7 @@ static int __init fec_enet_module_init(void) { struct net_device *dev; int i, j, err; + DECLARE_MAC_BUF(mac); printk("FEC ENET Version 0.2\n"); @@ -2636,10 +2654,8 @@ static int __init fec_enet_module_init(void) return -EIO; } - printk("%s: ethernet ", dev->name); - for (j = 0; (j < 5); j++) - printk("%02x:", dev->dev_addr[j]); - printk("%02x\n", dev->dev_addr[5]); + printk("%s: ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } return 0; } diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h index 5af60b0f9208..f3b1c6fbba8b 100644 --- a/drivers/net/fec_8xx/fec_8xx.h +++ b/drivers/net/fec_8xx/fec_8xx.h @@ -105,6 +105,8 @@ struct fec; struct fec_enet_private { spinlock_t lock; /* during all ops except TX pckt processing */ spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ + struct net_device *dev; + struct napi_struct napi; int fecno; struct fec *fecp; const struct fec_platform_info *fpi; diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c index e5502af5b8e2..8d2904fa5789 100644 --- a/drivers/net/fec_8xx/fec_main.c +++ b/drivers/net/fec_8xx/fec_main.c @@ -465,9 +465,9 @@ void fec_stop(struct net_device *dev) } /* common receive function */ -static int fec_enet_rx_common(struct net_device *dev, int *budget) +static int fec_enet_rx_common(struct fec_enet_private *ep, + struct net_device *dev, int budget) { - struct fec_enet_private *fep = netdev_priv(dev); fec_t *fecp = fep->fecp; const struct fec_platform_info *fpi = fep->fpi; cbd_t *bdp; @@ -475,11 +475,8 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) int received = 0; __u16 pkt_len, sc; int curidx; - int rx_work_limit; if (fpi->use_napi) { - rx_work_limit = min(dev->quota, *budget); - if (!netif_running(dev)) return 0; } @@ -530,11 +527,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) BUG_ON(skbn == NULL); } else { - - /* napi, got packet but no quota */ - if (fpi->use_napi && --rx_work_limit < 0) - break; - skb = fep->rx_skbuff[curidx]; BUG_ON(skb == NULL); @@ -599,25 +591,24 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) * able to keep up at the expense of system resources. 
*/ FW(fecp, r_des_active, 0x01000000); + + if (received >= budget) + break; + } fep->cur_rx = bdp; if (fpi->use_napi) { - dev->quota -= received; - *budget -= received; - - if (rx_work_limit < 0) - return 1; /* not done */ + if (received < budget) { + netif_rx_complete(dev, &fep->napi); - /* done */ - netif_rx_complete(dev); - - /* enable RX interrupt bits */ - FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + /* enable RX interrupt bits */ + FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + } } - return 0; + return received; } static void fec_enet_tx(struct net_device *dev) @@ -743,12 +734,12 @@ fec_enet_interrupt(int irq, void *dev_id) if ((int_events & FEC_ENET_RXF) != 0) { if (!fpi->use_napi) - fec_enet_rx_common(dev, NULL); + fec_enet_rx_common(fep, dev, ~0); else { - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &fep->napi)) { /* disable rx interrupts */ FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &fep->napi); } else { printk(KERN_ERR DRV_MODULE_NAME ": %s driver bug! interrupt while in poll!\n", @@ -893,10 +884,13 @@ static int fec_enet_open(struct net_device *dev) const struct fec_platform_info *fpi = fep->fpi; unsigned long flags; + napi_enable(&fep->napi); + /* Install our interrupt handler. */ if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { printk(KERN_ERR DRV_MODULE_NAME ": %s Could not allocate FEC IRQ!", dev->name); + napi_disable(&fep->napi); return -EINVAL; } @@ -907,6 +901,7 @@ static int fec_enet_open(struct net_device *dev) printk(KERN_ERR DRV_MODULE_NAME ": %s Could not allocate PHY IRQ!", dev->name); free_irq(fpi->fec_irq, dev); + napi_disable(&fep->napi); return -EINVAL; } @@ -932,6 +927,7 @@ static int fec_enet_close(struct net_device *dev) unsigned long flags; netif_stop_queue(dev); + napi_disable(&fep->napi); netif_carrier_off(dev); if (fpi->use_mdio) @@ -955,9 +951,12 @@ static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) return &fep->stats; } -static int fec_enet_poll(struct net_device *dev, int *budget) +static int fec_enet_poll(struct napi_struct *napi, int budget) { - return fec_enet_rx_common(dev, budget); + struct fec_enet_private *fep = container_of(napi, struct fec_enet_private, napi); + struct net_device *dev = fep->dev; + + return fec_enet_rx_common(fep, dev, budget); } /*************************************************************************/ @@ -1042,9 +1041,7 @@ static const struct ethtool_ops fec_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = fec_get_msglevel, .set_msglevel = fec_set_msglevel, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, /* local! 
*/ - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_regs = fec_get_regs, }; @@ -1104,9 +1101,9 @@ int fec_8xx_init_one(const struct fec_platform_info *fpi, err = -ENOMEM; goto err; } - SET_MODULE_OWNER(dev); fep = netdev_priv(dev); + fep->dev = dev; /* partial reset of FEC */ fec_whack_reset(fecp); @@ -1172,10 +1169,9 @@ int fec_8xx_init_one(const struct fec_platform_info *fpi, dev->get_stats = fec_enet_get_stats; dev->set_multicast_list = fec_set_multicast_list; dev->set_mac_address = fec_set_mac_address; - if (fpi->use_napi) { - dev->poll = fec_enet_poll; - dev->weight = fpi->napi_weight; - } + netif_napi_add(dev, &fec->napi, + fec_enet_poll, fpi->napi_weight); + dev->ethtool_ops = &fec_ethtool_ops; dev->do_ioctl = fec_ioctl; diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c index b3fa0d6a159c..e8e10a02d202 100644 --- a/drivers/net/fec_8xx/fec_mii.c +++ b/drivers/net/fec_8xx/fec_mii.c @@ -308,12 +308,11 @@ int fec_mii_phy_id_detect(struct net_device *dev) return -1; } - for (i = 0, phy = phy_info; i < sizeof(phy_info) / sizeof(phy_info[0]); - i++, phy++) + for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) if (phy->id == (phy_hwid >> 4) || phy->id == 0) break; - if (i >= sizeof(phy_info) / sizeof(phy_info[0])) { + if (i >= ARRAY_SIZE(phy_info)) { printk(KERN_ERR DRV_MODULE_NAME ": %s PHY id 0x%08x is not supported!\n", dev->name, phy_hwid); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 136827f8dc2e..dae30b731342 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -159,6 +159,8 @@ #define dprintk(x...) do { } while (0) #endif +#define TX_WORK_PER_LOOP 64 +#define RX_WORK_PER_LOOP 64 /* * Hardware access: @@ -178,6 +180,7 @@ #define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ #define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ #define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ +#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */ enum { NvRegIrqStatus = 0x000, @@ -551,7 +554,7 @@ union ring_type { #define PHY_OUI_MARVELL 0x5043 #define PHY_OUI_CICADA 0x03f1 #define PHY_OUI_VITESSE 0x01c1 -#define PHY_OUI_REALTEK 0x01c1 +#define PHY_OUI_REALTEK 0x0732 #define PHYID1_OUI_MASK 0x03ff #define PHYID1_OUI_SHFT 6 #define PHYID2_OUI_MASK 0xfc00 @@ -744,6 +747,9 @@ struct nv_skb_map { struct fe_priv { spinlock_t lock; + struct net_device *dev; + struct napi_struct napi; + /* General data: * Locking: spin_lock(&np->lock); */ struct net_device_stats stats; @@ -1585,9 +1591,10 @@ static int nv_alloc_rx_optimized(struct net_device *dev) static void nv_do_rx_refill(unsigned long data) { struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); /* Just reschedule NAPI rx processing */ - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); } #else static void nv_do_rx_refill(unsigned long data) @@ -2996,7 +3003,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); /* Disable furthur receive irq's */ spin_lock(&np->lock); @@ -3009,7 +3016,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) spin_unlock(&np->lock); } #else - if (nv_rx_process(dev, dev->weight)) { + if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx(dev))) { spin_lock(&np->lock); if (!np->in_shutdown) @@ -3067,8 +3074,8 @@ static irqreturn_t 
nv_nic_irq(int foo, void *data) np->nic_poll_irq = np->irqmask; mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); spin_unlock(&np->lock); + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); break; } @@ -3078,8 +3085,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data) return IRQ_RETVAL(i); } -#define TX_WORK_PER_LOOP 64 -#define RX_WORK_PER_LOOP 64 /** * All _optimized functions are used to help increase performance * (reduce CPU and increase throughput). They use descripter version 3, @@ -3113,7 +3118,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); /* Disable furthur receive irq's */ spin_lock(&np->lock); @@ -3126,7 +3131,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) spin_unlock(&np->lock); } #else - if (nv_rx_process_optimized(dev, dev->weight)) { + if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx_optimized(dev))) { spin_lock(&np->lock); if (!np->in_shutdown) @@ -3185,8 +3190,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) np->nic_poll_irq = np->irqmask; mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); spin_unlock(&np->lock); + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); break; } @@ -3232,8 +3237,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) np->nic_poll_irq |= NVREG_IRQ_TX_ALL; mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); spin_unlock_irqrestore(&np->lock, flags); + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); break; } @@ -3244,19 +3249,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) } #ifdef CONFIG_FORCEDETH_NAPI -static int nv_napi_poll(struct net_device *dev, int *budget) +static int nv_napi_poll(struct napi_struct *napi, int budget) { - int pkts, limit = min(*budget, dev->quota); - struct fe_priv *np = netdev_priv(dev); + struct fe_priv *np = container_of(napi, struct fe_priv, napi); + struct net_device *dev = np->dev; u8 __iomem *base = get_hwbase(dev); unsigned long flags; - int retcode; + int pkts, retcode; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - pkts = nv_rx_process(dev, limit); + pkts = nv_rx_process(dev, budget); retcode = nv_alloc_rx(dev); } else { - pkts = nv_rx_process_optimized(dev, limit); + pkts = nv_rx_process_optimized(dev, budget); retcode = nv_alloc_rx_optimized(dev); } @@ -3267,13 +3272,12 @@ static int nv_napi_poll(struct net_device *dev, int *budget) spin_unlock_irqrestore(&np->lock, flags); } - if (pkts < limit) { - /* all done, no more packets present */ - netif_rx_complete(dev); - + if (pkts < budget) { /* re-enable receive interrupts */ spin_lock_irqsave(&np->lock, flags); + __netif_rx_complete(dev, napi); + np->irqmask |= NVREG_IRQ_RX_ALL; if (np->msi_flags & NV_MSI_X_ENABLED) writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); @@ -3281,13 +3285,8 @@ static int nv_napi_poll(struct net_device *dev, int *budget) writel(np->irqmask, base + NvRegIrqMask); spin_unlock_irqrestore(&np->lock, flags); - return 0; - } else { - /* used up our quantum, so reschedule */ - dev->quota -= pkts; - *budget -= pkts; - return 1; } + return pkts; } #endif @@ 
-3295,6 +3294,7 @@ static int nv_napi_poll(struct net_device *dev, int *budget) static irqreturn_t nv_nic_irq_rx(int foo, void *data) { struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 events; @@ -3302,7 +3302,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); if (events) { - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); /* disable receive interrupts on the nic */ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); pci_push(base); @@ -3328,7 +3328,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) if (!(events & np->irqmask)) break; - if (nv_rx_process_optimized(dev, dev->weight)) { + if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx_optimized(dev))) { spin_lock_irqsave(&np->lock, flags); if (!np->in_shutdown) @@ -3347,8 +3347,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) np->nic_poll_irq |= NVREG_IRQ_RX_ALL; mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); spin_unlock_irqrestore(&np->lock, flags); + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); break; } } @@ -3420,8 +3420,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data) np->nic_poll_irq |= NVREG_IRQ_OTHER; mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); spin_unlock_irqrestore(&np->lock, flags); + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); break; } @@ -4333,16 +4333,26 @@ static int nv_set_sg(struct net_device *dev, u32 data) return -EOPNOTSUPP; } -static int nv_get_stats_count(struct net_device *dev) +static int nv_get_sset_count(struct net_device *dev, int sset) { struct fe_priv *np = netdev_priv(dev); - if (np->driver_data & DEV_HAS_STATISTICS_V1) - return NV_DEV_STATISTICS_V1_COUNT; - else if (np->driver_data & DEV_HAS_STATISTICS_V2) - return NV_DEV_STATISTICS_V2_COUNT; - else - return 0; + switch (sset) { + case ETH_SS_TEST: + if (np->driver_data & DEV_HAS_TEST_EXTENDED) + return NV_TEST_COUNT_EXTENDED; + else + return NV_TEST_COUNT_BASE; + case ETH_SS_STATS: + if (np->driver_data & DEV_HAS_STATISTICS_V1) + return NV_DEV_STATISTICS_V1_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V2) + return NV_DEV_STATISTICS_V2_COUNT; + else + return 0; + default: + return -EOPNOTSUPP; + } } static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) @@ -4352,17 +4362,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e /* update stats */ nv_do_stats_poll((unsigned long)dev); - memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); -} - -static int nv_self_test_count(struct net_device *dev) -{ - struct fe_priv *np = netdev_priv(dev); - - if (np->driver_data & DEV_HAS_TEST_EXTENDED) - return NV_TEST_COUNT_EXTENDED; - else - return NV_TEST_COUNT_BASE; + memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); } static int nv_link_test(struct net_device *dev) @@ -4609,7 +4609,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int result; - memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); + memset(buffer, 0, nv_get_sset_count(dev, 
ETH_SS_TEST)*sizeof(u64)); if (!nv_link_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; @@ -4619,7 +4619,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 if (test->flags & ETH_TEST_FL_OFFLINE) { if (netif_running(dev)) { netif_stop_queue(dev); - netif_poll_disable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_disable(&np->napi); +#endif netif_tx_lock_bh(dev); spin_lock_irq(&np->lock); nv_disable_hw_interrupts(dev, np->irqmask); @@ -4678,7 +4680,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 nv_start_rx(dev); nv_start_tx(dev); netif_start_queue(dev); - netif_poll_enable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_enable(&np->napi); +#endif nv_enable_hw_interrupts(dev, np->irqmask); } } @@ -4688,10 +4692,10 @@ static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) { switch (stringset) { case ETH_SS_STATS: - memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); + memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); break; case ETH_SS_TEST: - memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); + memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); break; } } @@ -4706,8 +4710,6 @@ static const struct ethtool_ops ops = { .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, - .get_perm_addr = ethtool_op_get_perm_addr, - .get_tso = ethtool_op_get_tso, .set_tso = nv_set_tso, .get_ringparam = nv_get_ringparam, .set_ringparam = nv_set_ringparam, @@ -4715,14 +4717,11 @@ static const struct ethtool_ops ops = { .set_pauseparam = nv_set_pauseparam, .get_rx_csum = nv_get_rx_csum, .set_rx_csum = nv_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = nv_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = nv_set_sg, .get_strings = nv_get_strings, - .get_stats_count = nv_get_stats_count, .get_ethtool_stats = nv_get_ethtool_stats, - .self_test_count = nv_self_test_count, + .get_sset_count = nv_get_sset_count, .self_test = nv_self_test, }; @@ -4911,12 +4910,14 @@ static int nv_open(struct net_device *dev) nv_start_rx(dev); nv_start_tx(dev); netif_start_queue(dev); - netif_poll_enable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_enable(&np->napi); +#endif if (ret) { netif_carrier_on(dev); } else { - printk("%s: no link during initialization.\n", dev->name); + printk(KERN_INFO "%s: no link during initialization.\n", dev->name); netif_carrier_off(dev); } if (oom) @@ -4942,7 +4943,9 @@ static int nv_close(struct net_device *dev) spin_lock_irq(&np->lock); np->in_shutdown = 1; spin_unlock_irq(&np->lock); - netif_poll_disable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_disable(&np->napi); +#endif synchronize_irq(dev->irq); del_timer_sync(&np->oom_kick); @@ -4987,6 +4990,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i u32 powerstate, txreg; u32 phystate_orig = 0, phystate; int phyinitialized = 0; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct fe_priv)); err = -ENOMEM; @@ -4994,9 +4998,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out; np = netdev_priv(dev); + np->dev = dev; np->pci_dev = pci_dev; spin_lock_init(&np->lock); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pci_dev->dev); init_timer(&np->oom_kick); @@ -5137,12 +5141,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_unmap; 
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; } - np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL); - np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL); + np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); + np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); if (!np->rx_skb || !np->tx_skb) goto out_freering; - memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); - memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); dev->open = nv_open; dev->stop = nv_close; @@ -5157,9 +5159,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = nv_poll_controller; #endif - dev->weight = RX_WORK_PER_LOOP; #ifdef CONFIG_FORCEDETH_NAPI - dev->poll = nv_napi_poll; + netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); #endif SET_ETHTOOL_OPS(dev, &ops); dev->tx_timeout = nv_tx_timeout; @@ -5174,7 +5175,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i /* check the workaround bit for correct mac address order */ txreg = readl(base + NvRegTransmitPoll); - if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { + if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) || + (id->driver_data & DEV_HAS_CORRECT_MACADDR)) { /* mac address is already in correct order */ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; @@ -5203,10 +5205,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i * Bad mac address. At least one bios sets the mac address * to 01:23:45:67:89:ab */ - printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", - pci_name(pci_dev), - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_ERR "%s: Invalid Mac address detected: %s\n", + pci_name(pci_dev), print_mac(mac, dev->dev_addr)); printk(KERN_ERR "Please complain to your hardware vendor. 
Switching to a random MAC.\n"); dev->dev_addr[0] = 0x00; dev->dev_addr[1] = 0x00; @@ -5214,9 +5214,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i get_random_bytes(&dev->dev_addr[3], 3); } - dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + dprintk(KERN_DEBUG "%s: MAC Address %s\n", + pci_name(pci_dev), print_mac(mac, dev->dev_addr)); /* set mac address */ nv_copy_mac_to_hw(dev); @@ -5502,51 +5501,67 @@ static struct pci_device_id pci_tbl[] = { }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), + .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, {0,}, }; diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig index e27ee210b605..2765e49e07df 100644 --- a/drivers/net/fs_enet/Kconfig +++ b/drivers/net/fs_enet/Kconfig @@ -11,6 +11,7 @@ config FS_ENET_HAS_SCC config FS_ENET_HAS_FCC bool "Chip has an FCC usable for ethernet" depends on FS_ENET && CPM2 + select MDIO_BITBANG default y config FS_ENET_HAS_FEC diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index a4a2a0ea43d3..04c6faec88d2 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1,17 +1,17 @@ /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. + * + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ @@ -39,28 +39,35 @@ #include <linux/vmalloc.h> #include <asm/pgtable.h> - -#include <asm/pgtable.h> #include <asm/irq.h> #include <asm/uaccess.h> +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include <asm/of_platform.h> +#endif + #include "fs_enet.h" /*************************************************/ +#ifndef CONFIG_PPC_CPM_NEW_BINDING static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n"; +#endif MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); MODULE_DESCRIPTION("Freescale Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ +static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ module_param(fs_enet_debug, int, 0); MODULE_PARM_DESC(fs_enet_debug, "Freescale bitmapped debugging message enable value"); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev); +#endif static void fs_set_multicast_list(struct net_device *dev) { @@ -69,19 +76,25 @@ static void fs_set_multicast_list(struct net_device *dev) (*fep->ops->set_multicast_list)(dev); } +static void skb_align(struct sk_buff *skb, int align) +{ + int off = ((unsigned long)skb->data) & (align - 1); + + if (off) + skb_reserve(skb, align - off); +} + /* NAPI receive function */ -static int fs_enet_rx_napi(struct net_device *dev, int *budget) +static int fs_enet_rx_napi(struct napi_struct *napi, int budget) { - struct fs_enet_private *fep = netdev_priv(dev); + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); + struct net_device *dev = to_net_dev(fep->dev); const struct fs_platform_info *fpi = fep->fpi; - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb, *skbn, *skbt; int received = 0; u16 pkt_len, sc; int curidx; - int rx_work_limit = 0; /* pacify gcc */ - - rx_work_limit = min(dev->quota, *budget); if (!netif_running(dev)) return 0; @@ -96,7 +109,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) (*fep->ops->napi_clear_rx_event)(dev); while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { - curidx = bdp - fep->rx_bd_base; /* @@ -109,7 +121,7 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) dev->name); /* - * Check for errors. + * Check for errors. */ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { @@ -136,11 +148,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) skbn = skb; } else { - - /* napi, got packet but no quota */ - if (--rx_work_limit < 0) - break; - skb = fep->rx_skbuff[curidx]; dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), @@ -166,9 +173,13 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) skb = skbn; skbn = skbt; } - } else + } else { skbn = dev_alloc_skb(ENET_RX_FRSIZE); + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + if (skbn != NULL) { skb_put(skb, pkt_len); /* Make room */ skb->protocol = eth_type_trans(skb, dev); @@ -191,7 +202,7 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); /* - * Update BD pointer to next entry. + * Update BD pointer to next entry. 
*/ if ((sc & BD_ENET_RX_WRAP) == 0) bdp++; @@ -199,22 +210,19 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) bdp = fep->rx_bd_base; (*fep->ops->rx_bd_done)(dev); + + if (received >= budget) + break; } fep->cur_rx = bdp; - dev->quota -= received; - *budget -= received; - - if (rx_work_limit < 0) - return 1; /* not done */ - - /* done */ - netif_rx_complete(dev); - - (*fep->ops->napi_enable_rx)(dev); - - return 0; + if (received >= budget) { + /* done */ + netif_rx_complete(dev, napi); + (*fep->ops->napi_enable_rx)(dev); + } + return received; } /* non NAPI receive function */ @@ -222,7 +230,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb, *skbn, *skbt; int received = 0; u16 pkt_len, sc; @@ -247,7 +255,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) dev->name); /* - * Check for errors. + * Check for errors. */ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { @@ -300,9 +308,13 @@ static int fs_enet_rx_non_napi(struct net_device *dev) skb = skbn; skbn = skbt; } - } else + } else { skbn = dev_alloc_skb(ENET_RX_FRSIZE); + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + if (skbn != NULL) { skb_put(skb, pkt_len); /* Make room */ skb->protocol = eth_type_trans(skb, dev); @@ -325,7 +337,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); /* - * Update BD pointer to next entry. + * Update BD pointer to next entry. */ if ((sc & BD_ENET_RX_WRAP) == 0) bdp++; @@ -343,17 +355,16 @@ static int fs_enet_rx_non_napi(struct net_device *dev) static void fs_enet_tx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb; int dirtyidx, do_wake, do_restart; u16 sc; - spin_lock(&fep->lock); + spin_lock(&fep->tx_lock); bdp = fep->dirty_tx; do_wake = do_restart = 0; while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { - dirtyidx = bdp - fep->tx_bd_base; if (fep->tx_free == fep->tx_ring) @@ -362,7 +373,7 @@ static void fs_enet_tx(struct net_device *dev) skb = fep->tx_skbuff[dirtyidx]; /* - * Check for errors. + * Check for errors. */ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { @@ -402,13 +413,13 @@ static void fs_enet_tx(struct net_device *dev) skb->len, DMA_TO_DEVICE); /* - * Free the sk buffer associated with this last transmit. + * Free the sk buffer associated with this last transmit. */ dev_kfree_skb_irq(skb); fep->tx_skbuff[dirtyidx] = NULL; /* - * Update pointer to next buffer descriptor to be transmitted. + * Update pointer to next buffer descriptor to be transmitted. 
*/ if ((sc & BD_ENET_TX_WRAP) == 0) bdp++; @@ -428,7 +439,7 @@ static void fs_enet_tx(struct net_device *dev) if (do_restart) (*fep->ops->tx_restart)(dev); - spin_unlock(&fep->lock); + spin_unlock(&fep->tx_lock); if (do_wake) netif_wake_queue(dev); @@ -454,7 +465,6 @@ fs_enet_interrupt(int irq, void *dev_id) nr = 0; while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { - nr++; int_clr_events = int_events; @@ -470,7 +480,7 @@ fs_enet_interrupt(int irq, void *dev_id) if (!fpi->use_napi) fs_enet_rx_non_napi(dev); else { - napi_ok = netif_rx_schedule_prep(dev); + napi_ok = napi_schedule_prep(&fep->napi); (*fep->ops->napi_disable_rx)(dev); (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); @@ -478,7 +488,7 @@ fs_enet_interrupt(int irq, void *dev_id) /* NOTE: it is possible for FCCs in NAPI mode */ /* to submit a spurious interrupt while in poll */ if (napi_ok) - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &fep->napi); } } @@ -493,7 +503,7 @@ fs_enet_interrupt(int irq, void *dev_id) void fs_init_bds(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb; int i; @@ -504,7 +514,7 @@ void fs_init_bds(struct net_device *dev) fep->cur_rx = fep->rx_bd_base; /* - * Initialize the receive buffer descriptors. + * Initialize the receive buffer descriptors. */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { skb = dev_alloc_skb(ENET_RX_FRSIZE); @@ -514,6 +524,7 @@ void fs_init_bds(struct net_device *dev) dev->name); break; } + skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data, @@ -524,7 +535,7 @@ void fs_init_bds(struct net_device *dev) ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); } /* - * if we failed, fillup remainder + * if we failed, fillup remainder */ for (; i < fep->rx_ring; i++, bdp++) { fep->rx_skbuff[i] = NULL; @@ -532,7 +543,7 @@ void fs_init_bds(struct net_device *dev) } /* - * ...and the same for transmit. + * ...and the same for transmit. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { fep->tx_skbuff[i] = NULL; @@ -546,11 +557,11 @@ void fs_cleanup_bds(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct sk_buff *skb; - cbd_t *bdp; + cbd_t __iomem *bdp; int i; /* - * Reset SKB transmit buffers. + * Reset SKB transmit buffers. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { if ((skb = fep->tx_skbuff[i]) == NULL) @@ -565,7 +576,7 @@ void fs_cleanup_bds(struct net_device *dev) } /* - * Reset SKB receive buffers + * Reset SKB receive buffers */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { if ((skb = fep->rx_skbuff[i]) == NULL) @@ -587,7 +598,7 @@ void fs_cleanup_bds(struct net_device *dev) static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; + cbd_t __iomem *bdp; int curidx; u16 sc; unsigned long flags; @@ -595,7 +606,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&fep->tx_lock, flags); /* - * Fill in a Tx ring entry + * Fill in a Tx ring entry */ bdp = fep->cur_tx; @@ -614,19 +625,19 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) curidx = bdp - fep->tx_bd_base; /* - * Clear all of the status flags. + * Clear all of the status flags. */ CBDC_SC(bdp, BD_ENET_TX_STATS); /* - * Save skb pointer. + * Save skb pointer. 
*/ fep->tx_skbuff[curidx] = skb; fep->stats.tx_bytes += skb->len; /* - * Push the data cache so the CPM does not get stale memory data. + * Push the data cache so the CPM does not get stale memory data. */ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE)); @@ -635,7 +646,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; /* - * If this was the last BD in the ring, start at the beginning again. + * If this was the last BD in the ring, start at the beginning again. */ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) fep->cur_tx++; @@ -710,45 +721,43 @@ static void fs_timeout(struct net_device *dev) *-----------------------------------------------------------------------------*/ static void generic_adjust_link(struct net_device *dev) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phydev; - int new_state = 0; - - if (phydev->link) { - - /* adjust to duplex mode */ - if (phydev->duplex != fep->oldduplex){ - new_state = 1; - fep->oldduplex = phydev->duplex; - } - - if (phydev->speed != fep->oldspeed) { - new_state = 1; - fep->oldspeed = phydev->speed; - } - - if (!fep->oldlink) { - new_state = 1; - fep->oldlink = 1; - netif_schedule(dev); - netif_carrier_on(dev); - netif_start_queue(dev); - } - - if (new_state) - fep->ops->restart(dev); - - } else if (fep->oldlink) { - new_state = 1; - fep->oldlink = 0; - fep->oldspeed = 0; - fep->oldduplex = -1; - netif_carrier_off(dev); - netif_stop_queue(dev); - } - - if (new_state && netif_msg_link(fep)) - phy_print_status(phydev); + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex) { + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + netif_schedule(dev); + netif_carrier_on(dev); + netif_start_queue(dev); + } + + if (new_state) + fep->ops->restart(dev); + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + netif_carrier_off(dev); + netif_stop_queue(dev); + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); } @@ -792,25 +801,28 @@ static int fs_init_phy(struct net_device *dev) return 0; } - static int fs_enet_open(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); int r; int err; + napi_enable(&fep->napi); + /* Install our interrupt handler. 
*/ r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); if (r != 0) { printk(KERN_ERR DRV_MODULE_NAME ": %s Could not allocate FS_ENET IRQ!", dev->name); + napi_disable(&fep->napi); return -EINVAL; } err = fs_init_phy(dev); - if(err) + if(err) { + napi_disable(&fep->napi); return err; - + } phy_start(fep->phydev); return 0; @@ -823,10 +835,13 @@ static int fs_enet_close(struct net_device *dev) netif_stop_queue(dev); netif_carrier_off(dev); + napi_disable(&fep->napi); phy_stop(fep->phydev); spin_lock_irqsave(&fep->lock, flags); + spin_lock(&fep->tx_lock); (*fep->ops->stop)(dev); + spin_unlock(&fep->tx_lock); spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ @@ -915,9 +930,7 @@ static const struct ethtool_ops fs_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = fs_get_msglevel, .set_msglevel = fs_set_msglevel, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_regs = fs_get_regs, }; @@ -941,6 +954,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) extern int fs_mii_connect(struct net_device *dev); extern void fs_mii_disconnect(struct net_device *dev); +#ifndef CONFIG_PPC_CPM_NEW_BINDING static struct net_device *fs_init_instance(struct device *dev, struct fs_platform_info *fpi) { @@ -961,10 +975,8 @@ static struct net_device *fs_init_instance(struct device *dev, err = -ENOMEM; goto err; } - SET_MODULE_OWNER(ndev); fep = netdev_priv(ndev); - memset(fep, 0, privsize); /* clear everything */ fep->dev = dev; dev_set_drvdata(dev, ndev); @@ -978,7 +990,7 @@ static struct net_device *fs_init_instance(struct device *dev, #endif #ifdef CONFIG_FS_ENET_HAS_SCC - if (fs_get_scc_index(fpi->fs_no) >=0 ) + if (fs_get_scc_index(fpi->fs_no) >=0) fep->ops = &fs_scc_ops; #endif @@ -1013,13 +1025,13 @@ static struct net_device *fs_init_instance(struct device *dev, spin_lock_init(&fep->tx_lock); /* - * Set the Ethernet address. + * Set the Ethernet address. */ for (i = 0; i < 6; i++) ndev->dev_addr[i] = fpi->macaddr[i]; - + r = (*fep->ops->allocate_bd)(ndev); - + if (fep->ring_base == NULL) { printk(KERN_ERR DRV_MODULE_NAME ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r); @@ -1038,7 +1050,7 @@ static struct net_device *fs_init_instance(struct device *dev, fep->rx_ring = fpi->rx_ring; /* - * The FEC Ethernet specific entries in the device structure. + * The FEC Ethernet specific entries in the device structure. 
*/ ndev->open = fs_enet_open; ndev->hard_start_xmit = fs_enet_start_xmit; @@ -1047,10 +1059,14 @@ static struct net_device *fs_init_instance(struct device *dev, ndev->stop = fs_enet_close; ndev->get_stats = fs_enet_get_stats; ndev->set_multicast_list = fs_set_multicast_list; - if (fpi->use_napi) { - ndev->poll = fs_enet_rx_napi; - ndev->weight = fpi->napi_weight; - } + +#ifdef CONFIG_NET_POLL_CONTROLLER + ndev->poll_controller = fs_enet_netpoll; +#endif + + netif_napi_add(ndev, &fep->napi, + fs_enet_rx_napi, fpi->napi_weight); + ndev->ethtool_ops = &fs_ethtool_ops; ndev->do_ioctl = fs_ioctl; @@ -1069,9 +1085,8 @@ static struct net_device *fs_init_instance(struct device *dev, return ndev; - err: +err: if (ndev != NULL) { - if (registered) unregister_netdev(ndev); @@ -1106,7 +1121,7 @@ static int fs_cleanup_instance(struct net_device *ndev) unregister_netdev(ndev); dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, fep->ring_mem_addr); + (void __force *)fep->ring_base, fep->ring_mem_addr); /* reset it */ (*fep->ops->cleanup_data)(ndev); @@ -1121,43 +1136,259 @@ static int fs_cleanup_instance(struct net_device *ndev) return 0; } +#endif /**************************************************************************************/ /* handy pointer to the immap */ -void *fs_enet_immap = NULL; +void __iomem *fs_enet_immap = NULL; static int setup_immap(void) { - phys_addr_t paddr = 0; - unsigned long size = 0; - #ifdef CONFIG_CPM1 - paddr = IMAP_ADDR; - size = 0x10000; /* map 64K */ + fs_enet_immap = ioremap(IMAP_ADDR, 0x4000); + WARN_ON(!fs_enet_immap); +#elif defined(CONFIG_CPM2) + fs_enet_immap = cpm2_immr; #endif -#ifdef CONFIG_CPM2 - paddr = CPM_MAP_ADDR; - size = 0x40000; /* map 256 K */ -#endif - fs_enet_immap = ioremap(paddr, size); - if (fs_enet_immap == NULL) - return -EBADF; /* XXX ahem; maybe just BUG_ON? 
*/ - return 0; } static void cleanup_immap(void) { - if (fs_enet_immap != NULL) { - iounmap(fs_enet_immap); - fs_enet_immap = NULL; - } +#if defined(CONFIG_CPM1) + iounmap(fs_enet_immap); +#endif } /**************************************************************************************/ +#ifdef CONFIG_PPC_CPM_NEW_BINDING +static int __devinit find_phy(struct device_node *np, + struct fs_platform_info *fpi) +{ + struct device_node *phynode, *mdionode; + struct resource res; + int ret = 0, len; + + const u32 *data = of_get_property(np, "phy-handle", &len); + if (!data || len != 4) + return -EINVAL; + + phynode = of_find_node_by_phandle(*data); + if (!phynode) + return -EINVAL; + + mdionode = of_get_parent(phynode); + if (!mdionode) + goto out_put_phy; + + ret = of_address_to_resource(mdionode, 0, &res); + if (ret) + goto out_put_mdio; + + data = of_get_property(phynode, "reg", &len); + if (!data || len != 4) + goto out_put_mdio; + + snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data); + +out_put_mdio: + of_node_put(mdionode); +out_put_phy: + of_node_put(phynode); + return ret; +} + +#ifdef CONFIG_FS_ENET_HAS_FEC +#define IS_FEC(match) ((match)->data == &fs_fec_ops) +#else +#define IS_FEC(match) 0 +#endif + +static int __devinit fs_enet_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct net_device *ndev; + struct fs_enet_private *fep; + struct fs_platform_info *fpi; + const u32 *data; + const u8 *mac_addr; + int privsize, len, ret = -ENODEV; + + fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); + if (!fpi) + return -ENOMEM; + + if (!IS_FEC(match)) { + data = of_get_property(ofdev->node, "fsl,cpm-command", &len); + if (!data || len != 4) + goto out_free_fpi; + + fpi->cp_command = *data; + } + + fpi->rx_ring = 32; + fpi->tx_ring = 32; + fpi->rx_copybreak = 240; + fpi->use_napi = 0; + fpi->napi_weight = 17; + + ret = find_phy(ofdev->node, fpi); + if (ret) + goto out_free_fpi; + + privsize = sizeof(*fep) + + sizeof(struct sk_buff **) * + (fpi->rx_ring + fpi->tx_ring); + + ndev = alloc_etherdev(privsize); + if (!ndev) { + ret = -ENOMEM; + goto out_free_fpi; + } + + SET_MODULE_OWNER(ndev); + dev_set_drvdata(&ofdev->dev, ndev); + + fep = netdev_priv(ndev); + fep->dev = &ofdev->dev; + fep->fpi = fpi; + fep->ops = match->data; + + ret = fep->ops->setup_data(ndev); + if (ret) + goto out_free_dev; + + fep->rx_skbuff = (struct sk_buff **)&fep[1]; + fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; + + spin_lock_init(&fep->lock); + spin_lock_init(&fep->tx_lock); + + mac_addr = of_get_mac_address(ofdev->node); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, 6); + + ret = fep->ops->allocate_bd(ndev); + if (ret) + goto out_cleanup_data; + + fep->rx_bd_base = fep->ring_base; + fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; + + fep->tx_ring = fpi->tx_ring; + fep->rx_ring = fpi->rx_ring; + + ndev->open = fs_enet_open; + ndev->hard_start_xmit = fs_enet_start_xmit; + ndev->tx_timeout = fs_timeout; + ndev->watchdog_timeo = 2 * HZ; + ndev->stop = fs_enet_close; + ndev->get_stats = fs_enet_get_stats; + ndev->set_multicast_list = fs_set_multicast_list; + if (fpi->use_napi) { + ndev->poll = fs_enet_rx_napi; + ndev->weight = fpi->napi_weight; + } + ndev->ethtool_ops = &fs_ethtool_ops; + ndev->do_ioctl = fs_ioctl; + + init_timer(&fep->phy_timer_list); + + netif_carrier_off(ndev); + + ret = register_netdev(ndev); + if (ret) + goto out_free_bd; + + printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n", + ndev->name, + ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], + 
ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + + return 0; + +out_free_bd: + fep->ops->free_bd(ndev); +out_cleanup_data: + fep->ops->cleanup_data(ndev); +out_free_dev: + free_netdev(ndev); + dev_set_drvdata(&ofdev->dev, NULL); +out_free_fpi: + kfree(fpi); + return ret; +} + +static int fs_enet_remove(struct of_device *ofdev) +{ + struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct fs_enet_private *fep = netdev_priv(ndev); + + unregister_netdev(ndev); + + fep->ops->free_bd(ndev); + fep->ops->cleanup_data(ndev); + dev_set_drvdata(fep->dev, NULL); + + free_netdev(ndev); + return 0; +} + +static struct of_device_id fs_enet_match[] = { +#ifdef CONFIG_FS_ENET_HAS_SCC + { + .compatible = "fsl,cpm1-scc-enet", + .data = (void *)&fs_scc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FCC + { + .compatible = "fsl,cpm2-fcc-enet", + .data = (void *)&fs_fcc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FEC + { + .compatible = "fsl,pq1-fec-enet", + .data = (void *)&fs_fec_ops, + }, +#endif + {} +}; + +static struct of_platform_driver fs_enet_driver = { + .name = "fs_enet", + .match_table = fs_enet_match, + .probe = fs_enet_probe, + .remove = fs_enet_remove, +}; + +static int __init fs_init(void) +{ + int r = setup_immap(); + if (r != 0) + return r; + + r = of_register_platform_driver(&fs_enet_driver); + if (r != 0) + goto out; + + return 0; + +out: + cleanup_immap(); + return r; +} + +static void __exit fs_cleanup(void) +{ + of_unregister_platform_driver(&fs_enet_driver); + cleanup_immap(); +} +#else static int __devinit fs_enet_probe(struct device *dev) { struct net_device *ndev; @@ -1262,7 +1493,6 @@ static int __init fs_init(void) err: cleanup_immap(); return r; - } static void __exit fs_cleanup(void) @@ -1272,6 +1502,16 @@ static void __exit fs_cleanup(void) driver_unregister(&fs_enet_scc_driver); cleanup_immap(); } +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev) +{ + disable_irq(dev->irq); + fs_enet_interrupt(dev->irq, dev, NULL); + enable_irq(dev->irq); +} +#endif /**************************************************************************************/ diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index 569be225cd05..baf6477165af 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h @@ -15,8 +15,8 @@ #include <asm/commproc.h> struct fec_info { - fec_t* fecp; - u32 mii_speed; + fec_t __iomem *fecp; + u32 mii_speed; }; #endif @@ -24,19 +24,6 @@ struct fec_info { #include <asm/cpm2.h> #endif -/* This is used to operate with pins. 
- Note that the actual port size may - be different; cpm(s) handle it OK */ -struct bb_info { - u8 mdio_dat_msk; - u8 mdio_dir_msk; - u8 *mdio_dir; - u8 *mdio_dat; - u8 mdc_msk; - u8 *mdc_dat; - int delay; -}; - /* hw driver ops */ struct fs_ops { int (*setup_data)(struct net_device *dev); @@ -82,60 +69,26 @@ struct phy_info { /* Must be a multiple of 32 (to cover both FEC & FCC) */ #define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) /* This is needed so that invalidate_xxx wont invalidate too much */ -#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE) - -struct fs_enet_mii_bus { - struct list_head list; - spinlock_t mii_lock; - const struct fs_mii_bus_info *bus_info; - int refs; - u32 usage_map; - - int (*mii_read)(struct fs_enet_mii_bus *bus, - int phy_id, int location); - - void (*mii_write)(struct fs_enet_mii_bus *bus, - int phy_id, int location, int value); - - union { - struct { - unsigned int mii_speed; - void *fecp; - } fec; - - struct { - /* note that the actual port size may */ - /* be different; cpm(s) handle it OK */ - u8 mdio_msk; - u8 *mdio_dir; - u8 *mdio_dat; - u8 mdc_msk; - u8 *mdc_dir; - u8 *mdc_dat; - } bitbang; - - struct { - u16 lpa; - } fixed; - }; -}; +#define ENET_RX_ALIGN 16 +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) struct fs_enet_private { + struct napi_struct napi; struct device *dev; /* pointer back to the device (must be initialized first) */ spinlock_t lock; /* during all ops except TX pckt processing */ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ - const struct fs_platform_info *fpi; + struct fs_platform_info *fpi; const struct fs_ops *ops; int rx_ring, tx_ring; dma_addr_t ring_mem_addr; - void *ring_base; + void __iomem *ring_base; struct sk_buff **rx_skbuff; struct sk_buff **tx_skbuff; - cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ - cbd_t *tx_bd_base; - cbd_t *dirty_tx; /* ring entries to be free()ed. */ - cbd_t *cur_rx; - cbd_t *cur_tx; + cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t __iomem *tx_bd_base; + cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ + cbd_t __iomem *cur_rx; + cbd_t __iomem *cur_tx; int tx_free; struct net_device_stats stats; struct timer_list phy_timer_list; @@ -143,7 +96,6 @@ struct fs_enet_private { u32 msg_enable; struct mii_if_info mii_if; unsigned int last_mii_status; - struct fs_enet_mii_bus *mii_bus; int interrupt; struct phy_device *phydev; @@ -161,23 +113,23 @@ struct fs_enet_private { union { struct { int idx; /* FEC1 = 0, FEC2 = 1 */ - void *fecp; /* hw registers */ + void __iomem *fecp; /* hw registers */ u32 hthi, htlo; /* state for multicast */ } fec; struct { int idx; /* FCC1-3 = 0-2 */ - void *fccp; /* hw registers */ - void *ep; /* parameter ram */ - void *fcccp; /* hw registers cont. */ - void *mem; /* FCC DPRAM */ + void __iomem *fccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ + void __iomem *fcccp; /* hw registers cont. 
*/ + void __iomem *mem; /* FCC DPRAM */ u32 gaddrh, gaddrl; /* group address */ } fcc; struct { int idx; /* FEC1 = 0, FEC2 = 1 */ - void *sccp; /* hw registers */ - void *ep; /* parameter ram */ + void __iomem *sccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ u32 hthi, htlo; /* state for multicast */ } scc; @@ -185,9 +137,10 @@ struct fs_enet_private { }; /***************************************************************************/ +#ifndef CONFIG_PPC_CPM_NEW_BINDING int fs_enet_mdio_bb_init(void); -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); int fs_enet_mdio_fec_init(void); +#endif void fs_init_bds(struct net_device *dev); void fs_cleanup_bds(struct net_device *dev); @@ -247,7 +200,7 @@ extern const struct fs_ops fs_scc_ops; /*******************************************************************/ /* handy pointer to the immap */ -extern void *fs_enet_immap; +extern void __iomem *fs_enet_immap; /*******************************************************************/ diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 5603121132cd..da4efbca646e 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -1,14 +1,14 @@ /* * FCC driver for Motorola MPC82xx (PQ2). * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * - * 2005 (c) MontaVista Software, Inc. + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ @@ -42,34 +42,29 @@ #include <asm/irq.h> #include <asm/uaccess.h> +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include <asm/of_device.h> +#endif + #include "fs_enet.h" /*************************************************/ /* FCC access macros */ -#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x) -#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x) -#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x) -#define __fcc_in32(addr) in_be32((unsigned *)addr) -#define __fcc_in16(addr) in_be16((unsigned short *)addr) -#define __fcc_in8(addr) in_8((unsigned char *)addr) - -/* parameter space */ - /* write, read, set bits, clear bits */ -#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v)) -#define R32(_p, _m) __fcc_in32(&(_p)->_m) +#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v)) +#define R32(_p, _m) in_be32(&(_p)->_m) #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) -#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v)) -#define R16(_p, _m) __fcc_in16(&(_p)->_m) +#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v)) +#define R16(_p, _m) in_be16(&(_p)->_m) #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) -#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v)) -#define R8(_p, _m) __fcc_in8(&(_p)->_m) +#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v)) +#define R8(_p, _m) in_8(&(_p)->_m) #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) @@ -83,34 +78,62 @@ #define MAX_CR_CMD_LOOPS 10000 -static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op) +static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op) { const struct fs_platform_info *fpi = fep->fpi; - - cpm2_map_t *immap = fs_enet_immap; - cpm_cpm2_t *cpmp = &immap->im_cpm; - u32 v; int i; - /* Currently I don't know what feature call will look like. 
But - I guess there'd be something like do_cpm_cmd() which will require page & sblock */ - v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op); - W32(cpmp, cp_cpcr, v | CPM_CR_FLG); + W32(cpmp, cp_cpcr, fpi->cp_command | op | CPM_CR_FLG); for (i = 0; i < MAX_CR_CMD_LOOPS; i++) if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0) - break; + return 0; - if (i >= MAX_CR_CMD_LOOPS) { - printk(KERN_ERR "%s(): Not able to issue CPM command\n", - __FUNCTION__); - return 1; - } - - return 0; + printk(KERN_ERR "%s(): Not able to issue CPM command\n", + __FUNCTION__); + return 1; } static int do_pd_setup(struct fs_enet_private *fep) { +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct of_device *ofdev = to_of_device(fep->dev); + struct fs_platform_info *fpi = fep->fpi; + int ret = -EINVAL; + + fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + if (fep->interrupt == NO_IRQ) + goto out; + + fep->fcc.fccp = of_iomap(ofdev->node, 0); + if (!fep->fcc.fccp) + goto out; + + fep->fcc.ep = of_iomap(ofdev->node, 1); + if (!fep->fcc.ep) + goto out_fccp; + + fep->fcc.fcccp = of_iomap(ofdev->node, 2); + if (!fep->fcc.fcccp) + goto out_ep; + + fep->fcc.mem = (void __iomem *)cpm2_immr; + fpi->dpram_offset = cpm_dpalloc(128, 8); + if (IS_ERR_VALUE(fpi->dpram_offset)) { + ret = fpi->dpram_offset; + goto out_fcccp; + } + + return 0; + +out_fcccp: + iounmap(fep->fcc.fcccp); +out_ep: + iounmap(fep->fcc.ep); +out_fccp: + iounmap(fep->fcc.fccp); +out: + return ret; +#else struct platform_device *pdev = to_platform_device(fep->dev); struct resource *r; @@ -121,33 +144,33 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Attach the memory for the FCC Parameter RAM */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); - fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); + fep->fcc.ep = ioremap(r->start, r->end - r->start + 1); if (fep->fcc.ep == NULL) return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); - fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); + fep->fcc.fccp = ioremap(r->start, r->end - r->start + 1); if (fep->fcc.fccp == NULL) return -EINVAL; if (fep->fpi->fcc_regs_c) { - - fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + fep->fcc.fcccp = (void __iomem *)fep->fpi->fcc_regs_c; } else { r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs_c"); - fep->fcc.fcccp = (void *)ioremap(r->start, + fep->fcc.fcccp = ioremap(r->start, r->end - r->start + 1); } if (fep->fcc.fcccp == NULL) return -EINVAL; - fep->fcc.mem = (void *)fep->fpi->mem_offset; + fep->fcc.mem = (void __iomem *)fep->fpi->mem_offset; if (fep->fcc.mem == NULL) return -EINVAL; return 0; +#endif } #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) @@ -158,11 +181,17 @@ static int do_pd_setup(struct fs_enet_private *fep) static int setup_data(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; +#ifndef CONFIG_PPC_CPM_NEW_BINDING + struct fs_platform_info *fpi = fep->fpi; + + fpi->cp_command = (fpi->cp_page << 26) | + (fpi->cp_block << 21) | + (12 << 6); fep->fcc.idx = fs_get_fcc_index(fpi->fs_no); if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ return -EINVAL; +#endif if (do_pd_setup(fep) != 0) return -EINVAL; @@ -180,7 +209,7 @@ static int allocate_bd(struct net_device *dev) struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fep->ring_base = dma_alloc_coherent(fep->dev, + fep->ring_base = (void __iomem __force 
*)dma_alloc_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), &fep->ring_mem_addr, GFP_KERNEL); @@ -198,7 +227,7 @@ static void free_bd(struct net_device *dev) if (fep->ring_base) dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, fep->ring_mem_addr); + (void __force *)fep->ring_base, fep->ring_mem_addr); } static void cleanup_data(struct net_device *dev) @@ -209,7 +238,7 @@ static void cleanup_data(struct net_device *dev) static void set_promiscuous_mode(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; S32(fccp, fcc_fpsmr, FCC_PSMR_PRO); } @@ -217,7 +246,7 @@ static void set_promiscuous_mode(struct net_device *dev) static void set_multicast_start(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_enet_t *ep = fep->fcc.ep; + fcc_enet_t __iomem *ep = fep->fcc.ep; W32(ep, fen_gaddrh, 0); W32(ep, fen_gaddrl, 0); @@ -226,7 +255,7 @@ static void set_multicast_start(struct net_device *dev) static void set_multicast_one(struct net_device *dev, const u8 *mac) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_enet_t *ep = fep->fcc.ep; + fcc_enet_t __iomem *ep = fep->fcc.ep; u16 taddrh, taddrm, taddrl; taddrh = ((u16)mac[5] << 8) | mac[4]; @@ -236,14 +265,14 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac) W16(ep, fen_taddrh, taddrh); W16(ep, fen_taddrm, taddrm); W16(ep, fen_taddrl, taddrl); - fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR); + fcc_cr_cmd(fep, CPM_CR_SET_GADDR); } static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; - fcc_enet_t *ep = fep->fcc.ep; + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; /* clear promiscuous always */ C32(fccp, fcc_fpsmr, FCC_PSMR_PRO); @@ -278,12 +307,14 @@ static void restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fcc_t *fccp = fep->fcc.fccp; - fcc_c_t *fcccp = fep->fcc.fcccp; - fcc_enet_t *ep = fep->fcc.ep; + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_c_t __iomem *fcccp = fep->fcc.fcccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; dma_addr_t rx_bd_base_phys, tx_bd_base_phys; u16 paddrh, paddrm, paddrl; +#ifndef CONFIG_PPC_CPM_NEW_BINDING u16 mem_addr; +#endif const unsigned char *mac; int i; @@ -291,7 +322,7 @@ static void restart(struct net_device *dev) /* clear everything (slow & steady does it) */ for (i = 0; i < sizeof(*ep); i++) - __fcc_out8((char *)ep + i, 0); + out_8((u8 __iomem *)ep + i, 0); /* get physical address */ rx_bd_base_phys = fep->ring_mem_addr; @@ -315,14 +346,22 @@ static void restart(struct net_device *dev) * this area. */ +#ifdef CONFIG_PPC_CPM_NEW_BINDING + W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset); + W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32); + + W16(ep, fen_padptr, fpi->dpram_offset + 64); +#else mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */ W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff)); W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff)); + W16(ep, fen_padptr, mem_addr + 64); +#endif /* fill with special symbol... 
*/ - memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); + memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); W32(ep, fen_genfcc.fcc_rbptr, 0); W32(ep, fen_genfcc.fcc_tbptr, 0); @@ -407,7 +446,7 @@ static void restart(struct net_device *dev) S8(fcccp, fcc_gfemr, 0x20); } - fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX); + fcc_cr_cmd(fep, CPM_CR_INIT_TRX); /* clear events */ W16(fccp, fcc_fcce, 0xffff); @@ -438,7 +477,7 @@ static void restart(struct net_device *dev) static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; /* stop ethernet */ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); @@ -465,7 +504,7 @@ static void post_free_irq(struct net_device *dev, int irq) static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK); } @@ -473,7 +512,7 @@ static void napi_clear_rx_event(struct net_device *dev) static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); } @@ -481,7 +520,7 @@ static void napi_enable_rx(struct net_device *dev) static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); } @@ -494,15 +533,15 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; - S32(fccp, fcc_ftodr, 0x80); + S16(fccp, fcc_ftodr, 0x8000); } static u32 get_int_events(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; return (u32)R16(fccp, fcc_fcce); } @@ -510,7 +549,7 @@ static u32 get_int_events(struct net_device *dev) static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; W16(fccp, fcc_fcce, int_events & 0xffff); } @@ -521,47 +560,46 @@ static void ev_error(struct net_device *dev, u32 int_events) ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events); } -int get_regs(struct net_device *dev, void *p, int *sizep) +static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); - if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t)) + if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1) return -EINVAL; memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); p = (char *)p + sizeof(fcc_t); - memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t)); - p = (char *)p + sizeof(fcc_c_t); - memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); + p = (char *)p + sizeof(fcc_enet_t); + memcpy_fromio(p, fep->fcc.fcccp, 1); return 0; } -int get_regs_len(struct net_device *dev) +static int get_regs_len(struct net_device *dev) { - return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t); + return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1; } /* Some transmit errors cause the transmitter to shut * down. We now issue a restart transmit. 
Since the * errors close the BD and update the pointers, the restart * _should_ pick up without having to reset any of our - * pointers either. Also, To workaround 8260 device erratum + * pointers either. Also, To workaround 8260 device erratum * CPM37, we must disable and then re-enable the transmitter * following a Late Collision, Underrun, or Retry Limit error. */ -void tx_restart(struct net_device *dev) +static void tx_restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; C32(fccp, fcc_gfmr, FCC_GFMR_ENT); udelay(10); S32(fccp, fcc_gfmr, FCC_GFMR_ENT); - fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX); + fcc_cr_cmd(fep, CPM_CR_RESTART_TX); } /*************************************************************************/ diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index 04b4f80a1cde..c1fee48517e3 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -1,14 +1,14 @@ /* * Freescale Ethernet controllers * - * Copyright (c) 2005 Intracom S.A. + * Copyright (c) 2005 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * - * 2005 (c) MontaVista Software, Inc. + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ @@ -43,6 +43,10 @@ #include <asm/commproc.h> #endif +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include <asm/of_device.h> +#endif + #include "fs_enet.h" #include "fec.h" @@ -79,7 +83,7 @@ */ #define FEC_RESET_DELAY 50 -static int whack_reset(fec_t * fecp) +static int whack_reset(fec_t __iomem *fecp) { int i; @@ -95,9 +99,22 @@ static int whack_reset(fec_t * fecp) static int do_pd_setup(struct fs_enet_private *fep) { - struct platform_device *pdev = to_platform_device(fep->dev); +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct of_device *ofdev = to_of_device(fep->dev); + + fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->fec.fecp = of_iomap(ofdev->node, 0); + if (!fep->fcc.fccp) + return -EINVAL; + + return 0; +#else + struct platform_device *pdev = to_platform_device(fep->dev); struct resource *r; - + /* Fill out IRQ field */ fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); if (fep->interrupt < 0) @@ -110,7 +127,7 @@ static int do_pd_setup(struct fs_enet_private *fep) return -EINVAL; return 0; - +#endif } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) @@ -141,8 +158,8 @@ static int allocate_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - - fep->ring_base = dma_alloc_coherent(fep->dev, + + fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), &fep->ring_mem_addr, GFP_KERNEL); @@ -160,7 +177,7 @@ static void free_bd(struct net_device *dev) if(fep->ring_base) dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, + (void __force *)fep->ring_base, fep->ring_mem_addr); } @@ -172,7 +189,7 @@ static void cleanup_data(struct net_device *dev) static void set_promiscuous_mode(struct net_device *dev) { struct fs_enet_private *fep = 
netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FS(fecp, r_cntrl, FEC_RCNTRL_PROM); } @@ -220,7 +237,7 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac) static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; /* if all multi or too many multicasts; just enable all */ if ((dev->flags & IFF_ALLMULTI) != 0 || @@ -254,7 +271,7 @@ static void restart(struct net_device *dev) u32 cptr; #endif struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; const struct fs_platform_info *fpi = fep->fpi; dma_addr_t rx_bd_base_phys, tx_bd_base_phys; int r; @@ -280,13 +297,13 @@ static void restart(struct net_device *dev) FW(fecp, addr_high, addrlo); /* - * Reset all multicast. + * Reset all multicast. */ FW(fecp, hash_table_high, fep->fec.hthi); FW(fecp, hash_table_low, fep->fec.htlo); /* - * Set maximum receive buffer size. + * Set maximum receive buffer size. */ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); FW(fecp, r_hash, PKT_MAXBUF_SIZE); @@ -296,7 +313,7 @@ static void restart(struct net_device *dev) tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; /* - * Set receive and transmit descriptor base. + * Set receive and transmit descriptor base. */ FW(fecp, r_des_start, rx_bd_base_phys); FW(fecp, x_des_start, tx_bd_base_phys); @@ -304,7 +321,7 @@ static void restart(struct net_device *dev) fs_init_bds(dev); /* - * Enable big endian and don't care about SDMA FC. + * Enable big endian and don't care about SDMA FC. */ FW(fecp, fun_code, 0x78000000); @@ -366,13 +383,13 @@ static void restart(struct net_device *dev) } /* - * Enable interrupts we wish to service. + * Enable interrupts we wish to service. */ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB); /* - * And last, enable the transmit and receive processing. + * And last, enable the transmit and receive processing. */ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, r_des_active, 0x01000000); @@ -382,7 +399,7 @@ static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; struct fec_info* feci= fep->phydev->bus->priv; @@ -401,7 +418,7 @@ static void stop(struct net_device *dev) ": %s FEC timeout on graceful transmit stop\n", dev->name); /* - * Disable FEC. Let only MII interrupts. + * Disable FEC. Let only MII interrupts. 
*/ FW(fecp, imask, 0); FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); @@ -444,7 +461,7 @@ static void post_free_irq(struct net_device *dev, int irq) static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); } @@ -452,7 +469,7 @@ static void napi_clear_rx_event(struct net_device *dev) static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } @@ -460,7 +477,7 @@ static void napi_enable_rx(struct net_device *dev) static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } @@ -468,7 +485,7 @@ static void napi_disable_rx(struct net_device *dev) static void rx_bd_done(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, r_des_active, 0x01000000); } @@ -476,7 +493,7 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, x_des_active, 0x01000000); } @@ -484,7 +501,7 @@ static void tx_kickstart(struct net_device *dev) static u32 get_int_events(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; return FR(fecp, ievent) & FR(fecp, imask); } @@ -492,7 +509,7 @@ static u32 get_int_events(struct net_device *dev) static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, ievent, int_events); } @@ -503,7 +520,7 @@ static void ev_error(struct net_device *dev, u32 int_events) ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events); } -int get_regs(struct net_device *dev, void *p, int *sizep) +static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); @@ -515,12 +532,12 @@ int get_regs(struct net_device *dev, void *p, int *sizep) return 0; } -int get_regs_len(struct net_device *dev) +static int get_regs_len(struct net_device *dev) { return sizeof(fec_t); } -void tx_restart(struct net_device *dev) +static void tx_restart(struct net_device *dev) { /* nothing */ } diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index 7540966687ec..03134f47a4eb 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -1,14 +1,14 @@ /* * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. + * + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. 
This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ @@ -43,6 +43,10 @@ #include <asm/commproc.h> #endif +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include <asm/of_platform.h> +#endif + #include "fs_enet.h" /*************************************************/ @@ -82,34 +86,45 @@ #define SCC_MAX_MULTICAST_ADDRS 64 /* - * Delay to wait for SCC reset command to complete (in us) + * Delay to wait for SCC reset command to complete (in us) */ #define SCC_RESET_DELAY 50 #define MAX_CR_CMD_LOOPS 10000 static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) { - cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm; - u32 v, ch; - int i = 0; + const struct fs_platform_info *fpi = fep->fpi; + int i; - ch = fep->scc.idx << 2; - v = mk_cr_cmd(ch, op); - W16(cpmp, cp_cpcr, v | CPM_CR_FLG); + W16(cpmp, cp_cpcr, fpi->cp_command | CPM_CR_FLG | (op << 8)); for (i = 0; i < MAX_CR_CMD_LOOPS; i++) if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0) - break; + return 0; - if (i >= MAX_CR_CMD_LOOPS) { - printk(KERN_ERR "%s(): Not able to issue CPM command\n", - __FUNCTION__); - return 1; - } - return 0; + printk(KERN_ERR "%s(): Not able to issue CPM command\n", + __FUNCTION__); + return 1; } static int do_pd_setup(struct fs_enet_private *fep) { +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct of_device *ofdev = to_of_device(fep->dev); + + fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->scc.sccp = of_iomap(ofdev->node, 0); + if (!fep->scc.sccp) + return -EINVAL; + + fep->scc.ep = of_iomap(ofdev->node, 1); + if (!fep->scc.ep) { + iounmap(fep->scc.sccp); + return -EINVAL; + } +#else struct platform_device *pdev = to_platform_device(fep->dev); struct resource *r; @@ -129,6 +144,7 @@ static int do_pd_setup(struct fs_enet_private *fep) if (fep->scc.ep == NULL) return -EINVAL; +#endif return 0; } @@ -141,12 +157,17 @@ static int do_pd_setup(struct fs_enet_private *fep) static int setup_data(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; + +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct fs_platform_info *fpi = fep->fpi; fep->scc.idx = fs_get_scc_index(fpi->fs_no); - if ((unsigned int)fep->fcc.idx > 4) /* max 4 SCCs */ + if ((unsigned int)fep->fcc.idx >= 4) /* max 4 SCCs */ return -EINVAL; + fpi->cp_command = fep->fcc.idx << 6; +#endif + do_pd_setup(fep); fep->scc.hthi = 0; @@ -154,7 +175,7 @@ static int setup_data(struct net_device *dev) fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; fep->ev_rx = SCC_RX_EVENT; - fep->ev_tx = SCC_TX_EVENT; + fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; fep->ev_err = SCC_ERR_EVENT_MSK; return 0; @@ -170,7 +191,8 @@ static int allocate_bd(struct net_device *dev) if (IS_ERR_VALUE(fep->ring_mem_addr)) return -ENOMEM; - fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr); + fep->ring_base = (void __iomem __force*) + cpm_dpram_addr(fep->ring_mem_addr); return 0; } @@ -189,9 +211,9 @@ static void cleanup_data(struct net_device *dev) } static void set_promiscuous_mode(struct net_device *dev) -{ +{ struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; S16(sccp, scc_psmr, SCC_PSMR_PRO); } @@ -199,7 +221,7 @@ static void set_promiscuous_mode(struct net_device *dev) static void set_multicast_start(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_enet_t *ep = fep->scc.ep; + scc_enet_t __iomem *ep = fep->scc.ep; W16(ep, sen_gaddr1, 0); 
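Both the FCC and SCC set_multicast_one() helpers in this patch load a 48-bit group address into three 16-bit parameter-RAM registers (taddrh/taddrm/taddrl): two octets per register, low octet in the low byte, with the last two octets of the address going into taddrh. A small standalone sketch of that packing, assuming a made-up helper name and test address (neither is part of this patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Pack a 48-bit address into the three 16-bit halves written to the
 * {fen,sen}_taddr{h,m,l} registers: two octets per register, low octet
 * in the low byte, last two octets of the address in taddrh.
 */
static void enet_pack_addr(const uint8_t mac[6],
                           uint16_t *taddrh, uint16_t *taddrm, uint16_t *taddrl)
{
        *taddrh = ((uint16_t)mac[5] << 8) | mac[4];
        *taddrm = ((uint16_t)mac[3] << 8) | mac[2];
        *taddrl = ((uint16_t)mac[1] << 8) | mac[0];
}

int main(void)
{
        /* Illustrative address only. */
        static const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
        uint16_t h, m, l;

        enet_pack_addr(mac, &h, &m, &l);
        printf("taddrh=%04x taddrm=%04x taddrl=%04x\n", h, m, l);
        return 0;
}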
W16(ep, sen_gaddr2, 0); @@ -210,7 +232,7 @@ static void set_multicast_start(struct net_device *dev) static void set_multicast_one(struct net_device *dev, const u8 * mac) { struct fs_enet_private *fep = netdev_priv(dev); - scc_enet_t *ep = fep->scc.ep; + scc_enet_t __iomem *ep = fep->scc.ep; u16 taddrh, taddrm, taddrl; taddrh = ((u16) mac[5] << 8) | mac[4]; @@ -226,8 +248,8 @@ static void set_multicast_one(struct net_device *dev, const u8 * mac) static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; - scc_enet_t *ep = fep->scc.ep; + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; /* clear promiscuous always */ C16(sccp, scc_psmr, SCC_PSMR_PRO); @@ -264,8 +286,8 @@ static void set_multicast_list(struct net_device *dev) static void restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; - scc_enet_t *ep = fep->scc.ep; + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; const struct fs_platform_info *fpi = fep->fpi; u16 paddrh, paddrm, paddrl; const unsigned char *mac; @@ -275,7 +297,7 @@ static void restart(struct net_device *dev) /* clear everything (slow & steady does it) */ for (i = 0; i < sizeof(*ep); i++) - __fs_out8((char *)ep + i, 0); + __fs_out8((u8 __iomem *)ep + i, 0); /* point to bds */ W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); @@ -323,7 +345,7 @@ static void restart(struct net_device *dev) W16(ep, sen_iaddr3, 0); W16(ep, sen_iaddr4, 0); - /* set address + /* set address */ mac = dev->dev_addr; paddrh = ((u16) mac[5] << 8) | mac[4]; @@ -345,7 +367,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_scce, 0xffff); - /* Enable interrupts we wish to service. + /* Enable interrupts we wish to service. 
*/ W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); @@ -373,10 +395,10 @@ static void restart(struct net_device *dev) S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } -static void stop(struct net_device *dev) +static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; int i; for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++) @@ -420,7 +442,7 @@ static void post_free_irq(struct net_device *dev, int irq) static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK); } @@ -428,7 +450,7 @@ static void napi_clear_rx_event(struct net_device *dev) static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); } @@ -436,7 +458,7 @@ static void napi_enable_rx(struct net_device *dev) static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); } @@ -454,7 +476,7 @@ static void tx_kickstart(struct net_device *dev) static u32 get_int_events(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; return (u32) R16(sccp, scc_scce); } @@ -462,7 +484,7 @@ static u32 get_int_events(struct net_device *dev) static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; W16(sccp, scc_scce, int_events & 0xffff); } @@ -477,20 +499,20 @@ static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); - if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t)) + if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *)) return -EINVAL; memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); p = (char *)p + sizeof(scc_t); - memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t)); + memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *)); return 0; } static int get_regs_len(struct net_device *dev) { - return sizeof(scc_t) + sizeof(scc_enet_t); + return sizeof(scc_t) + sizeof(scc_enet_t __iomem *); } static void tx_restart(struct net_device *dev) diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index d3840108ffbd..b8e4a736a130 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -1,314 +1,288 @@ /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> - * - * 2005 (c) MontaVista Software, Inc. + * + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ - #include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/ptrace.h> -#include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> -#include <linux/interrupt.h> #include <linux/init.h> -#include <linux/delay.h> +#include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> #include <linux/mii.h> -#include <linux/ethtool.h> -#include <linux/bitops.h> #include <linux/platform_device.h> +#include <linux/mdio-bitbang.h> -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/uaccess.h> +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include <linux/of_platform.h> +#endif #include "fs_enet.h" -static int bitbang_prep_bit(u8 **datp, u8 *mskp, - struct fs_mii_bit *mii_bit) -{ - void *dat; - int adv; - u8 msk; - - dat = (void*) mii_bit->offset; - - adv = mii_bit->bit >> 3; - dat = (char *)dat + adv; - - msk = 1 << (7 - (mii_bit->bit & 7)); - - *datp = dat; - *mskp = msk; - - return 0; -} +struct bb_info { + struct mdiobb_ctrl ctrl; + __be32 __iomem *dir; + __be32 __iomem *dat; + u32 mdio_msk; + u32 mdc_msk; +}; -static inline void bb_set(u8 *p, u8 m) +/* FIXME: If any other users of GPIO crop up, then these will have to + * have some sort of global synchronization to avoid races with other + * pins on the same port. The ideal solution would probably be to + * bind the ports to a GPIO driver, and have this be a client of it. + */ +static inline void bb_set(u32 __iomem *p, u32 m) { - out_8(p, in_8(p) | m); + out_be32(p, in_be32(p) | m); } -static inline void bb_clr(u8 *p, u8 m) +static inline void bb_clr(u32 __iomem *p, u32 m) { - out_8(p, in_8(p) & ~m); + out_be32(p, in_be32(p) & ~m); } -static inline int bb_read(u8 *p, u8 m) +static inline int bb_read(u32 __iomem *p, u32 m) { - return (in_8(p) & m) != 0; + return (in_be32(p) & m) != 0; } -static inline void mdio_active(struct bb_info *bitbang) +static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { - bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); -} + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); -static inline void mdio_tristate(struct bb_info *bitbang ) -{ - bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); + if (dir) + bb_set(bitbang->dir, bitbang->mdio_msk); + else + bb_clr(bitbang->dir, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dir); } -static inline int mdio_read(struct bb_info *bitbang ) +static inline int mdio_read(struct mdiobb_ctrl *ctrl) { - return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + return bb_read(bitbang->dat, bitbang->mdio_msk); } -static inline void mdio(struct bb_info *bitbang , int what) +static inline void mdio(struct mdiobb_ctrl *ctrl, int what) { + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + if (what) - bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); + bb_set(bitbang->dat, bitbang->mdio_msk); else - bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); + bb_clr(bitbang->dat, bitbang->mdio_msk); + + /* Read back to flush the write. 
*/ + in_be32(bitbang->dat); } -static inline void mdc(struct bb_info *bitbang , int what) +static inline void mdc(struct mdiobb_ctrl *ctrl, int what) { + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + if (what) - bb_set(bitbang->mdc_dat, bitbang->mdc_msk); + bb_set(bitbang->dat, bitbang->mdc_msk); else - bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); + bb_clr(bitbang->dat, bitbang->mdc_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dat); } -static inline void mii_delay(struct bb_info *bitbang ) +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = mdc, + .set_mdio_dir = mdio_dir, + .set_mdio_data = mdio, + .get_mdio_data = mdio_read, +}; + +#ifdef CONFIG_PPC_CPM_NEW_BINDING +static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, + struct device_node *np) { - udelay(bitbang->delay); + struct resource res; + const u32 *data; + int mdio_pin, mdc_pin, len; + struct bb_info *bitbang = bus->priv; + + int ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + + if (res.end - res.start < 13) + return -ENODEV; + + /* This should really encode the pin number as well, but all + * we get is an int, and the odds of multiple bitbang mdio buses + * is low enough that it's not worth going too crazy. + */ + bus->id = res.start; + + data = of_get_property(np, "fsl,mdio-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdio_pin = *data; + + data = of_get_property(np, "fsl,mdc-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdc_pin = *data; + + bitbang->dir = ioremap(res.start, res.end - res.start + 1); + if (!bitbang->dir) + return -ENOMEM; + + bitbang->dat = bitbang->dir + 4; + bitbang->mdio_msk = 1 << (31 - mdio_pin); + bitbang->mdc_msk = 1 << (31 - mdc_pin); + + return 0; } -/* Utility to send the preamble, address, and register (common to read and write). */ -static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) +static void __devinit add_phy(struct mii_bus *bus, struct device_node *np) { - int j; - - /* - * Send a 32 bit preamble ('1's) with an extra '1' bit for good measure. - * The IEEE spec says this is a PHY optional requirement. The AMD - * 79C874 requires one after power up and one after a MII communications - * error. This means that we are doing more preambles than we need, - * but it is safer and will be much more robust. 
- */ + const u32 *data; + int len, id, irq; - mdio_active(bitbang); - mdio(bitbang, 1); - for (j = 0; j < 32; j++) { - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - } + data = of_get_property(np, "reg", &len); + if (!data || len != 4) + return; - /* send the start bit (01) and the read opcode (10) or write (10) */ - mdc(bitbang, 0); - mdio(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, read); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, !read); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - - /* send the PHY address */ - for (j = 0; j < 5; j++) { - mdc(bitbang, 0); - mdio(bitbang, (addr & 0x10) != 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - addr <<= 1; - } + id = *data; + bus->phy_mask &= ~(1 << id); - /* send the register address */ - for (j = 0; j < 5; j++) { - mdc(bitbang, 0); - mdio(bitbang, (reg & 0x10) != 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - reg <<= 1; - } + irq = of_irq_to_resource(np, 0, NULL); + if (irq != NO_IRQ) + bus->irq[id] = irq; } -static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) +static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, + const struct of_device_id *match) { - u16 rdreg; - int ret, j; - u8 addr = phy_id & 0xff; - u8 reg = location & 0xff; - struct bb_info* bitbang = bus->priv; - - bitbang_pre(bitbang, 1, addr, reg); - - /* tri-state our MDIO I/O pin so we can read */ - mdc(bitbang, 0); - mdio_tristate(bitbang); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - - /* check the turnaround bit: the PHY should be driving it to zero */ - if (mdio_read(bitbang) != 0) { - /* PHY didn't drive TA low */ - for (j = 0; j < 32; j++) { - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - } - ret = -1; + struct device_node *np = NULL; + struct mii_bus *new_bus; + struct bb_info *bitbang; + int ret = -ENOMEM; + int i; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) goto out; - } - mdc(bitbang, 0); - mii_delay(bitbang); - - /* read 16 bits of register data, MSB first */ - rdreg = 0; - for (j = 0; j < 16; j++) { - mdc(bitbang, 1); - mii_delay(bitbang); - rdreg <<= 1; - rdreg |= mdio_read(bitbang); - mdc(bitbang, 0); - mii_delay(bitbang); - } + bitbang->ctrl.ops = &bb_ops; + + new_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!new_bus) + goto out_free_priv; + + new_bus->name = "CPM2 Bitbanged MII", + + ret = fs_mii_bitbang_init(new_bus, ofdev->node); + if (ret) + goto out_free_bus; + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) + goto out_unmap_regs; + + for (i = 0; i < PHY_MAX_ADDR; i++) + new_bus->irq[i] = -1; + + while ((np = of_get_next_child(ofdev->node, np))) + if (!strcmp(np->type, "ethernet-phy")) + add_phy(new_bus, np); + + new_bus->dev = &ofdev->dev; + dev_set_drvdata(&ofdev->dev, new_bus); + + ret = mdiobus_register(new_bus); + if (ret) + goto out_free_irqs; - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); + return 0; - ret = rdreg; +out_free_irqs: + dev_set_drvdata(&ofdev->dev, NULL); + kfree(new_bus->irq); +out_unmap_regs: + iounmap(bitbang->dir); +out_free_bus: + kfree(new_bus); 
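The mii-bitbang rewrite above drops the driver's hand-rolled preamble, turnaround and data-shifting loops and delegates the MII framing to the generic mdio-bitbang layer: the driver only supplies pin accessors through struct mdiobb_ops, and alloc_mdio_bitbang() hands back a struct mii_bus whose read/write methods are provided by the core. A minimal sketch of that pattern, where the my_* names and empty pin-accessor bodies are placeholders, not part of this patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/mdio-bitbang.h>

struct my_bb {
        struct mdiobb_ctrl ctrl;        /* handed to alloc_mdio_bitbang() */
        /* platform pin handles would live here */
};

/* Pin accessors; real bodies would drive the board's MDC/MDIO pins. */
static void my_set_mdc(struct mdiobb_ctrl *ctrl, int level) { }
static void my_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output) { }
static void my_set_mdio_data(struct mdiobb_ctrl *ctrl, int value) { }
static int my_get_mdio_data(struct mdiobb_ctrl *ctrl) { return 1; }

static struct mdiobb_ops my_bb_ops = {
        .owner          = THIS_MODULE,
        .set_mdc        = my_set_mdc,
        .set_mdio_dir   = my_set_mdio_dir,
        .set_mdio_data  = my_set_mdio_data,
        .get_mdio_data  = my_get_mdio_data,
};

static struct mii_bus *my_mdio_alloc(void)
{
        struct my_bb *bb = kzalloc(sizeof(*bb), GFP_KERNEL);
        struct mii_bus *bus;

        if (!bb)
                return NULL;
        bb->ctrl.ops = &my_bb_ops;

        /* The mdio-bitbang core supplies the read/write methods. */
        bus = alloc_mdio_bitbang(&bb->ctrl);
        if (!bus)
                kfree(bb);
        return bus;
}

The caller then fills in the bus name, id and irq table and registers it with mdiobus_register(), as fs_enet_mdio_probe() does above; free_mdio_bitbang() releases the bus again on the remove and error paths.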
+out_free_priv: + free_mdio_bitbang(new_bus); out: return ret; } -static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) +static int fs_enet_mdio_remove(struct of_device *ofdev) { - int j; - struct bb_info* bitbang = bus->priv; - - u8 addr = phy_id & 0xff; - u8 reg = location & 0xff; - u16 value = val & 0xffff; - - bitbang_pre(bitbang, 0, addr, reg); - - /* send the turnaround (10) */ - mdc(bitbang, 0); - mdio(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - - /* write 16 bits of register data, MSB first */ - for (j = 0; j < 16; j++) { - mdc(bitbang, 0); - mdio(bitbang, (value & 0x8000) != 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - value <<= 1; - } + struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct bb_info *bitbang = bus->priv; - /* - * Tri-state the MDIO line. - */ - mdio_tristate(bitbang); - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - return 0; -} + mdiobus_unregister(bus); + free_mdio_bitbang(bus); + dev_set_drvdata(&ofdev->dev, NULL); + kfree(bus->irq); + iounmap(bitbang->dir); + kfree(bitbang); + kfree(bus); -static int fs_enet_mii_bb_reset(struct mii_bus *bus) -{ - /*nothing here - dunno how to reset it*/ return 0; } -static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) -{ - int r; +static struct of_device_id fs_enet_mdio_bb_match[] = { + { + .compatible = "fsl,cpm2-mdio-bitbang", + }, + {}, +}; - bitbang->delay = fmpi->delay; +static struct of_platform_driver fs_enet_bb_mdio_driver = { + .name = "fsl-bb-mdio", + .match_table = fs_enet_mdio_bb_match, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; - r = bitbang_prep_bit(&bitbang->mdio_dir, - &bitbang->mdio_dir_msk, - &fmpi->mdio_dir); - if (r != 0) - return r; +static int fs_enet_mdio_bb_init(void) +{ + return of_register_platform_driver(&fs_enet_bb_mdio_driver); +} - r = bitbang_prep_bit(&bitbang->mdio_dat, - &bitbang->mdio_dat_msk, - &fmpi->mdio_dat); - if (r != 0) - return r; +static void fs_enet_mdio_bb_exit(void) +{ + of_unregister_platform_driver(&fs_enet_bb_mdio_driver); +} - r = bitbang_prep_bit(&bitbang->mdc_dat, - &bitbang->mdc_msk, - &fmpi->mdc_dat); - if (r != 0) - return r; +module_init(fs_enet_mdio_bb_init); +module_exit(fs_enet_mdio_bb_exit); +#else +static int __devinit fs_mii_bitbang_init(struct bb_info *bitbang, + struct fs_mii_bb_platform_info *fmpi) +{ + bitbang->dir = (u32 __iomem *)fmpi->mdio_dir.offset; + bitbang->dat = (u32 __iomem *)fmpi->mdio_dat.offset; + bitbang->mdio_msk = 1U << (31 - fmpi->mdio_dat.bit); + bitbang->mdc_msk = 1U << (31 - fmpi->mdc_dat.bit); return 0; } - static int __devinit fs_enet_mdio_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -320,20 +294,19 @@ static int __devinit fs_enet_mdio_probe(struct device *dev) if (NULL == dev) return -EINVAL; - new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); - if (NULL == new_bus) + if (NULL == bitbang) return -ENOMEM; - bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + bitbang->ctrl.ops = &bb_ops; - if (NULL == bitbang) + new_bus = alloc_mdio_bitbang(&bitbang->ctrl); + + if (NULL == new_bus) return -ENOMEM; new_bus->name = "BB MII Bus", - new_bus->read = &fs_enet_mii_bb_read, - new_bus->write = &fs_enet_mii_bb_write, - new_bus->reset = 
&fs_enet_mii_bb_reset, new_bus->id = pdev->id; new_bus->phy_mask = ~0x9; @@ -365,13 +338,12 @@ static int __devinit fs_enet_mdio_probe(struct device *dev) return 0; bus_register_fail: + free_mdio_bitbang(new_bus); kfree(bitbang); - kfree(new_bus); return err; } - static int fs_enet_mdio_remove(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); @@ -380,9 +352,7 @@ static int fs_enet_mdio_remove(struct device *dev) dev_set_drvdata(dev, NULL); - iounmap((void *) (&bus->priv)); - bus->priv = NULL; - kfree(bus); + free_mdio_bitbang(bus); return 0; } @@ -403,4 +373,4 @@ void fs_enet_mdio_bb_exit(void) { driver_unregister(&fs_enet_bb_mdio_driver); } - +#endif diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 0a563a83016f..a89cf15090b8 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -36,6 +36,10 @@ #include <asm/irq.h> #include <asm/uaccess.h> +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include <asm/of_platform.h> +#endif + #include "fs_enet.h" #include "fec.h" @@ -47,6 +51,7 @@ #define FEC_MII_LOOPS 10000 +#ifndef CONFIG_PPC_CPM_NEW_BINDING static int match_has_phy (struct device *dev, void* data) { struct platform_device* pdev = container_of(dev, struct platform_device, dev); @@ -65,7 +70,7 @@ static int match_has_phy (struct device *dev, void* data) static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) { struct resource *r; - fec_t *fecp; + fec_t __iomem *fecp; char* name = "fsl-cpm-fec"; /* we need fec in order to be useful */ @@ -80,7 +85,7 @@ static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); - fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); + fec->fecp = fecp = ioremap(r->start,sizeof(fec_t)); fec->mii_speed = fmpi->mii_speed; setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ @@ -90,11 +95,12 @@ static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info return 0; } +#endif static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) { struct fec_info* fec = bus->priv; - fec_t *fecp = fec->fecp; + fec_t __iomem *fecp = fec->fecp; int i, ret = -1; if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) @@ -113,13 +119,12 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) } return ret; - } static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) { struct fec_info* fec = bus->priv; - fec_t *fecp = fec->fecp; + fec_t __iomem *fecp = fec->fecp; int i; /* this must never happen */ @@ -146,6 +151,141 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) return 0; } +#ifdef CONFIG_PPC_CPM_NEW_BINDING +static void __devinit add_phy(struct mii_bus *bus, struct device_node *np) +{ + const u32 *data; + int len, id, irq; + + data = of_get_property(np, "reg", &len); + if (!data || len != 4) + return; + + id = *data; + bus->phy_mask &= ~(1 << id); + + irq = of_irq_to_resource(np, 0, NULL); + if (irq != NO_IRQ) + bus->irq[id] = irq; +} + +static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = NULL; + struct resource res; + struct mii_bus *new_bus; + struct fec_info *fec; + int ret = -ENOMEM, i; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + if (!new_bus) + goto out; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + if (!fec) + goto out_mii; + + new_bus->priv = fec; + 
new_bus->name = "FEC MII Bus"; + new_bus->read = &fs_enet_fec_mii_read; + new_bus->write = &fs_enet_fec_mii_write; + new_bus->reset = &fs_enet_fec_mii_reset; + + ret = of_address_to_resource(ofdev->node, 0, &res); + if (ret) + return ret; + + new_bus->id = res.start; + + fec->fecp = ioremap(res.start, res.end - res.start + 1); + if (!fec->fecp) + goto out_fec; + + fec->mii_speed = ((ppc_proc_freq + 4999999) / 5000000) << 1; + + setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); + setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | + FEC_ECNTRL_ETHER_EN); + out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII); + out_be32(&fec->fecp->fec_mii_speed, fec->mii_speed); + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) + goto out_unmap_regs; + + for (i = 0; i < PHY_MAX_ADDR; i++) + new_bus->irq[i] = -1; + + while ((np = of_get_next_child(ofdev->node, np))) + if (!strcmp(np->type, "ethernet-phy")) + add_phy(new_bus, np); + + new_bus->dev = &ofdev->dev; + dev_set_drvdata(&ofdev->dev, new_bus); + + ret = mdiobus_register(new_bus); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + dev_set_drvdata(&ofdev->dev, NULL); + kfree(new_bus->irq); +out_unmap_regs: + iounmap(fec->fecp); +out_fec: + kfree(fec); +out_mii: + kfree(new_bus); +out: + return ret; +} + +static int fs_enet_mdio_remove(struct of_device *ofdev) +{ + struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct fec_info *fec = bus->priv; + + mdiobus_unregister(bus); + dev_set_drvdata(&ofdev->dev, NULL); + kfree(bus->irq); + iounmap(fec->fecp); + kfree(fec); + kfree(bus); + + return 0; +} + +static struct of_device_id fs_enet_mdio_fec_match[] = { + { + .compatible = "fsl,pq1-fec-mdio", + }, + {}, +}; + +static struct of_platform_driver fs_enet_fec_mdio_driver = { + .name = "fsl-fec-mdio", + .match_table = fs_enet_mdio_fec_match, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +static int fs_enet_mdio_fec_init(void) +{ + return of_register_platform_driver(&fs_enet_fec_mdio_driver); +} + +static void fs_enet_mdio_fec_exit(void) +{ + of_unregister_platform_driver(&fs_enet_fec_mdio_driver); +} + +module_init(fs_enet_mdio_fec_init); +module_exit(fs_enet_mdio_fec_exit); +#else static int __devinit fs_enet_fec_mdio_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -236,4 +376,4 @@ void fs_enet_mdio_fec_exit(void) { driver_unregister(&fs_enet_fec_mdio_driver); } - +#endif diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index d7a1a58de766..0db5e6fabe73 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -116,7 +116,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); -static struct net_device_stats *gfar_get_stats(struct net_device *dev); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -134,7 +133,7 @@ static void gfar_configure_serdes(struct net_device *dev); extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value); extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum); #ifdef CONFIG_GFAR_NAPI -static int gfar_poll(struct net_device *dev, int *budget); +static int gfar_poll(struct napi_struct *napi, int 
budget); #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); @@ -171,6 +170,7 @@ static int gfar_probe(struct platform_device *pdev) struct resource *r; int idx; int err = 0; + DECLARE_MAC_BUF(mac); einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; @@ -188,6 +188,7 @@ static int gfar_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(dev); + priv->dev = dev; /* Set the info in the priv to the current info */ priv->einfo = einfo; @@ -253,7 +254,6 @@ static int gfar_probe(struct platform_device *pdev) /* Set the dev->base_addr to the gfar reg region */ dev->base_addr = (unsigned long) (priv->regs); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Fill in the dev structure */ @@ -261,15 +261,11 @@ static int gfar_probe(struct platform_device *pdev) dev->hard_start_xmit = gfar_start_xmit; dev->tx_timeout = gfar_timeout; dev->watchdog_timeo = TX_TIMEOUT; -#ifdef CONFIG_GFAR_NAPI - dev->poll = gfar_poll; - dev->weight = GFAR_DEV_WEIGHT; -#endif + netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = gfar_netpoll; #endif dev->stop = gfar_close; - dev->get_stats = gfar_get_stats; dev->change_mtu = gfar_change_mtu; dev->mtu = 1500; dev->set_multicast_list = gfar_set_multi; @@ -361,10 +357,8 @@ static int gfar_probe(struct platform_device *pdev) gfar_init_sysfs(dev); /* Print out the device info */ - printk(KERN_INFO DEVICE_NAME, dev->name); - for (idx = 0; idx < 6; idx++) - printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':'); - printk("\n"); + printk(KERN_INFO DEVICE_NAME "%s\n", + dev->name, print_mac(mac, dev->dev_addr)); /* Even more device info helps when determining which kernel */ /* provided which set of benchmarks. */ @@ -420,8 +414,18 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) if (ecntrl & ECNTRL_REDUCED_MODE) { if (ecntrl & ECNTRL_REDUCED_MII_MODE) return PHY_INTERFACE_MODE_RMII; - else + else { + phy_interface_t interface = priv->einfo->interface; + + /* + * This isn't autodetected right now, so it must + * be set by the device tree or platform code. 
+ */ + if (interface == PHY_INTERFACE_MODE_RGMII_ID) + return PHY_INTERFACE_MODE_RGMII_ID; + return PHY_INTERFACE_MODE_RGMII; + } } if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) @@ -929,6 +933,8 @@ static int gfar_enet_open(struct net_device *dev) { int err; + napi_enable(&priv->napi); + /* Initialize a bunch of registers */ init_registers(dev); @@ -936,10 +942,14 @@ static int gfar_enet_open(struct net_device *dev) err = init_phy(dev); - if(err) + if(err) { + napi_disable(&priv->napi); return err; + } err = startup_gfar(dev); + if (err) + napi_disable(&priv->napi); netif_start_queue(dev); @@ -1000,7 +1010,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; /* Update transmit stats */ - priv->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* Lock priv now */ spin_lock_irqsave(&priv->txlock, flags); @@ -1073,7 +1083,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (txbdp == priv->dirty_tx) { netif_stop_queue(dev); - priv->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } /* Update the current txbd to the next one */ @@ -1092,6 +1102,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) static int gfar_close(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); + + napi_disable(&priv->napi); + stop_gfar(dev); /* Disconnect from the PHY */ @@ -1103,14 +1116,6 @@ static int gfar_close(struct net_device *dev) return 0; } -/* returns a net_device_stats structure pointer */ -static struct net_device_stats * gfar_get_stats(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - - return &(priv->stats); -} - /* Changes the mac address if the controller is not running. */ int gfar_set_mac_address(struct net_device *dev) { @@ -1222,7 +1227,7 @@ static void gfar_timeout(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - priv->stats.tx_errors++; + dev->stats.tx_errors++; if (dev->flags & IFF_UP) { stop_gfar(dev); @@ -1252,12 +1257,12 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) break; - priv->stats.tx_packets++; + dev->stats.tx_packets++; /* Deferred means some collisions occurred during transmit, */ /* but we eventually sent the packet. */ if (bdp->status & TXBD_DEF) - priv->stats.collisions++; + dev->stats.collisions++; /* Free the sk buffer associated with this TxBD */ dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); @@ -1308,7 +1313,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) return NULL; alignamount = RXBUF_ALIGNMENT - - (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)); + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); /* We need the data buffer to be aligned properly. 
We will reserve * as many bytes as needed to align the data properly @@ -1329,7 +1334,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) static inline void count_errors(unsigned short status, struct gfar_private *priv) { - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; /* If the packet was truncated, none of the other errors @@ -1380,12 +1385,12 @@ irqreturn_t gfar_receive(int irq, void *dev_id) /* support NAPI */ #ifdef CONFIG_GFAR_NAPI - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &priv->napi)) { tempval = gfar_read(&priv->regs->imask); tempval &= IMASK_RX_DISABLED; gfar_write(&priv->regs->imask, tempval); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &priv->napi); } else { if (netif_msg_rx_err(priv)) printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", @@ -1454,7 +1459,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (NULL == skb) { if (netif_msg_rx_err(priv)) printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; priv->extra_stats.rx_skbmissing++; } else { int ret; @@ -1512,7 +1517,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { /* Increment the number of packets */ - priv->stats.rx_packets++; + dev->stats.rx_packets++; howmany++; /* Remove the FCS from the packet length */ @@ -1520,7 +1525,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) gfar_process_frame(dev, skb, pkt_len); - priv->stats.rx_bytes += pkt_len; + dev->stats.rx_bytes += pkt_len; } else { count_errors(bdp->status, priv); @@ -1559,23 +1564,16 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) } #ifdef CONFIG_GFAR_NAPI -static int gfar_poll(struct net_device *dev, int *budget) +static int gfar_poll(struct napi_struct *napi, int budget) { + struct gfar_private *priv = container_of(napi, struct gfar_private, napi); + struct net_device *dev = priv->dev; int howmany; - struct gfar_private *priv = netdev_priv(dev); - int rx_work_limit = *budget; - - if (rx_work_limit > dev->quota) - rx_work_limit = dev->quota; - howmany = gfar_clean_rx_ring(dev, rx_work_limit); + howmany = gfar_clean_rx_ring(dev, budget); - dev->quota -= howmany; - rx_work_limit -= howmany; - *budget -= howmany; - - if (rx_work_limit > 0) { - netif_rx_complete(dev); + if (howmany < budget) { + netif_rx_complete(dev, napi); /* Clear the halt bit in RSTAT */ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); @@ -1591,8 +1589,7 @@ static int gfar_poll(struct net_device *dev, int *budget) gfar_write(&priv->regs->rxic, 0); } - /* Return 1 if there's more work to do */ - return (rx_work_limit > 0) ? 
0 : 1; + return howmany; } #endif @@ -1908,17 +1905,17 @@ static irqreturn_t gfar_error(int irq, void *dev_id) /* Update the error counters */ if (events & IEVENT_TXE) { - priv->stats.tx_errors++; + dev->stats.tx_errors++; if (events & IEVENT_LC) - priv->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (events & IEVENT_CRL) - priv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (events & IEVENT_XFUN) { if (netif_msg_tx_err(priv)) printk(KERN_DEBUG "%s: TX FIFO underrun, " "packet dropped.\n", dev->name); - priv->stats.tx_dropped++; + dev->stats.tx_dropped++; priv->extra_stats.tx_underrun++; /* Reactivate the Tx Queues */ @@ -1928,7 +1925,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); } if (events & IEVENT_BSY) { - priv->stats.rx_errors++; + dev->stats.rx_errors++; priv->extra_stats.rx_bsy++; gfar_receive(irq, dev_id); @@ -1943,7 +1940,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) dev->name, gfar_read(&priv->regs->rstat)); } if (events & IEVENT_BABR) { - priv->stats.rx_errors++; + dev->stats.rx_errors++; priv->extra_stats.rx_babr++; if (netif_msg_rx_err(priv)) diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index d8e779c102fa..c16cc8b946a9 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -45,7 +45,6 @@ #include <linux/crc32.h> #include <linux/workqueue.h> #include <linux/ethtool.h> -#include <linux/netdevice.h> #include <linux/fsl_devices.h> #include "gianfar_mii.h" @@ -691,6 +690,9 @@ struct gfar_private { /* RX Locked fields */ spinlock_t rxlock; + struct net_device *dev; + struct napi_struct napi; + /* skb array and index */ struct sk_buff ** rx_skbuff; u16 skb_currx; diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 7b411c1514db..6007147cc1e9 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -34,7 +34,6 @@ #include <linux/module.h> #include <linux/crc32.h> #include <asm/types.h> -#include <asm/uaccess.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> @@ -153,15 +152,19 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, buf[i] = extra[i]; } -/* Returns the number of stats (and their corresponding strings) */ -static int gfar_stats_count(struct net_device *dev) +static int gfar_sset_count(struct net_device *dev, int sset) { struct gfar_private *priv = netdev_priv(dev); - if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) - return GFAR_STATS_LEN; - else - return GFAR_EXTRA_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) + return GFAR_STATS_LEN; + else + return GFAR_EXTRA_STATS_LEN; + default: + return -EOPNOTSUPP; + } } /* Fills in the drvinfo structure with some basic info */ @@ -172,8 +175,6 @@ static void gfar_gdrvinfo(struct net_device *dev, struct strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); - drvinfo->n_stats = GFAR_STATS_LEN; - drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } @@ -576,7 +577,7 @@ const struct ethtool_ops gfar_ethtool_ops = { .get_ringparam = gfar_gringparam, .set_ringparam = gfar_sringparam, .get_strings = gfar_gstrings, - .get_stats_count = gfar_stats_count, + .get_sset_count = gfar_sset_count, .get_ethtool_stats = gfar_fill_stats, .get_rx_csum = gfar_get_rx_csum, .get_tx_csum = 
gfar_get_tx_csum, diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index ac3596f45dd8..100bf410bf5f 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c @@ -245,7 +245,7 @@ int __init gfar_mdio_init(void) return driver_register(&gianfar_mdio_driver); } -void __exit gfar_mdio_exit(void) +void gfar_mdio_exit(void) { driver_unregister(&gianfar_mdio_driver); } diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h index 5d3400469514..b373091c7031 100644 --- a/drivers/net/gianfar_mii.h +++ b/drivers/net/gianfar_mii.h @@ -42,5 +42,5 @@ struct gfar_mii { int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); int __init gfar_mdio_init(void); -void __exit gfar_mdio_exit(void); +void gfar_mdio_exit(void); #endif /* GIANFAR_PHY_H */ diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 15254dc7876a..ed407c85708f 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -580,6 +580,7 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, void *ring_space; dma_addr_t ring_dma; int ret = -ENOMEM; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -613,7 +614,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, if (!dev) goto err_out_iounmap; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #ifdef TX_CHECKSUM @@ -742,12 +742,9 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, goto err_out_unmap_rx; } - printk(KERN_INFO "%s: %s type %x at %p, ", + printk(KERN_INFO "%s: %s type %x at %p, %s, IRQ %d.\n", dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev), - ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + ioaddr, print_mac(mac, dev->dev_addr), irq); i = readb(ioaddr + PCIClkMeas); printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers " "%2.2x, LPA %4.4x.\n", @@ -1020,7 +1017,7 @@ static inline int hamachi_tx(struct net_device *dev) break; /* Free the original skb. */ skb = hmp->tx_skbuff[entry]; - if (skb != 0) { + if (skb) { pci_unmap_single(hmp->pci_dev, hmp->tx_ring[entry].addr, skb->len, PCI_DMA_TODEVICE); @@ -1072,7 +1069,6 @@ static void hamachi_tx_timeout(struct net_device *dev) " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus)); { - int i; printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) printk(" %8.8x", (unsigned int)hmp->rx_ring[i].status_n_length); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 760d04a671f9..ecd156def039 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -288,7 +288,8 @@ static int sp_close(struct net_device *dev) /* Return the frame type ID */ static int sp_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { #ifdef CONFIG_INET if (type != htons(ETH_P_AX25)) @@ -323,6 +324,11 @@ static int sp_rebuild_header(struct sk_buff *skb) #endif } +static const struct header_ops sp_header_ops = { + .create = sp_header, + .rebuild = sp_rebuild_header, +}; + static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ @@ -331,22 +337,21 @@ static void sp_setup(struct net_device *dev) dev->open = sp_open_dev; dev->destructor = free_netdev; dev->stop = sp_close; - dev->hard_header = sp_header; + dev->get_stats = sp_get_stats; dev->set_mac_address = sp_set_mac_address; dev->hard_header_len = AX25_MAX_HEADER_LEN; + dev->header_ops = &sp_header_ops; + dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->rebuild_header = sp_rebuild_header; dev->tx_timeout = NULL; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); - SET_MODULE_OWNER(dev); - dev->flags = 0; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 84aa2117c0ee..1a5a75acf73e 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc) sprintf(portarg, "%ld", bc->pdev->port->base); printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg); - return call_usermodehelper(eppconfig_path, argv, envp, 1); + return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC); } /* ---------------------------------------------------------------------- */ @@ -1159,8 +1159,7 @@ static void baycom_probe(struct net_device *dev) /* Fill in the fields of the device structure */ bc->skb = NULL; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = baycom_set_mac_address; dev->type = ARPHRD_AX25; /* AF_AX25 device */ diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 656f2789c9ba..5ddf8b0c34f9 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -64,7 +64,7 @@ #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> -#include <linux/if_ether.h> +#include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> @@ -83,6 +83,7 @@ #include <net/ip.h> #include <net/arp.h> +#include <net/net_namespace.h> #include <linux/bpqether.h> @@ -94,7 +95,6 @@ static char bpq_eth_addr[6]; static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); -static const char *bpq_print_ethaddr(const unsigned char *); static struct packet_type bpq_packet_type = { .type = __constant_htons(ETH_P_BPQ), @@ -172,6 +172,9 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty struct ethhdr *eth; struct bpqdev *bpq; + if (dev->nd_net != &init_net) + goto drop; + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; @@ -283,7 +286,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) skb->protocol = ax25_type_trans(skb, dev); skb_reset_network_header(skb); - dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); + dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); bpq->stats.tx_packets++; bpq->stats.tx_bytes+=skb->len; @@ -379,16 +382,6 @@ static int bpq_close(struct net_device *dev) /* * Proc filesystem */ -static const char * bpq_print_ethaddr(const unsigned char *e) -{ - static char buf[18]; - - sprintf(buf, "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X", - e[0], e[1], e[2], e[3], e[4], e[5]); - - return buf; -} - static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) { int i = 1; @@ -413,12 +406,12 @@ 
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; if (v == SEQ_START_TOKEN) - p = bpq_devices.next; + p = rcu_dereference(bpq_devices.next); else - p = ((struct bpqdev *)v)->bpq_list.next; + p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next); return (p == &bpq_devices) ? NULL - : rcu_dereference(list_entry(p, struct bpqdev, bpq_list)); + : list_entry(p, struct bpqdev, bpq_list); } static void bpq_seq_stop(struct seq_file *seq, void *v) @@ -434,14 +427,16 @@ static int bpq_seq_show(struct seq_file *seq, void *v) "dev ether destination accept from\n"); else { const struct bpqdev *bpqdev = v; + DECLARE_MAC_BUF(mac); seq_printf(seq, "%-5s %-10s %s ", bpqdev->axdev->name, bpqdev->ethdev->name, - bpq_print_ethaddr(bpqdev->dest_addr)); + print_mac(mac, bpqdev->dest_addr)); - seq_printf(seq, "%s\n", - (bpqdev->acpt_addr[0] & 0x01) ? "*" - : bpq_print_ethaddr(bpqdev->acpt_addr)); + if (is_multicast_ether_addr(bpqdev->acpt_addr)) + seq_printf(seq, "*\n"); + else + seq_printf(seq, "%s\n", print_mac(mac, bpqdev->acpt_addr)); } return 0; @@ -488,8 +483,7 @@ static void bpq_setup(struct net_device *dev) dev->flags = 0; #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; #endif dev->type = ARPHRD_AX25; @@ -559,6 +553,9 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi { struct net_device *dev = (struct net_device *)ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (!dev_is_ethdev(dev)) return NOTIFY_DONE; @@ -594,7 +591,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi static int __init bpq_init_driver(void) { #ifdef CONFIG_PROC_FS - if (!proc_net_fops_create("bpqether", S_IRUGO, &bpq_info_fops)) { + if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) { printk(KERN_ERR "bpq: cannot create /proc/net/bpqether entry.\n"); return -ENOENT; @@ -618,7 +615,7 @@ static void __exit bpq_cleanup_driver(void) unregister_netdevice_notifier(&bpq_dev_notifier); - proc_net_remove("bpqether"); + proc_net_remove(&init_net, "bpqether"); rtnl_lock(); while (!list_empty(&bpq_devices)) { diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 3be8c5047599..bc02e4694804 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -453,8 +453,8 @@ static int __init setup_adapter(int card_base, int type, int n) int scc_base = card_base + hw[type].scc_offset; char *chipnames[] = CHIPNAMES; - /* Allocate memory */ - info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); + /* Initialize what is necessary for write_scc and write_scc_data */ + info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); if (!info) { printk(KERN_ERR "dmascc: " "could not allocate memory for %s at %#3x\n", @@ -462,8 +462,6 @@ static int __init setup_adapter(int card_base, int type, int n) goto out; } - /* Initialize what is necessary for write_scc and write_scc_data */ - memset(info, 0, sizeof(struct scc_info)); info->dev[0] = alloc_netdev(0, "", dev_setup); if (!info->dev[0]) { @@ -583,8 +581,7 @@ static int __init setup_adapter(int card_base, int type, int n) dev->do_ioctl = scc_ioctl; dev->hard_start_xmit = scc_send_packet; dev->get_stats = scc_get_stats; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = scc_set_mac_address; } if 
(register_netdev(info->dev[0])) { diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index b33adc6a340b..ae9629fa6882 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -682,8 +682,7 @@ static void hdlcdrv_setup(struct net_device *dev) s->skb = NULL; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = hdlcdrv_set_mac_address; dev->type = ARPHRD_AX25; /* AF_AX25 device */ diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index d08fbc396648..9e43c47691ca 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -578,8 +578,9 @@ static int ax_open_dev(struct net_device *dev) #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) /* Return the frame type ID */ -static int ax_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int ax_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { #ifdef CONFIG_INET if (type != htons(ETH_P_AX25)) @@ -670,6 +671,11 @@ static struct net_device_stats *ax_get_stats(struct net_device *dev) return &ax->stats; } +static const struct header_ops ax_header_ops = { + .create = ax_header, + .rebuild = ax_rebuild_header, +}; + static void ax_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ @@ -683,8 +689,8 @@ static void ax_setup(struct net_device *dev) dev->addr_len = 0; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->hard_header = ax_header; - dev->rebuild_header = ax_rebuild_header; + dev->header_ops = &ax_header_ops; + memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 6fdaad5a4577..353d13e543ce 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -174,6 +174,7 @@ #include <linux/seq_file.h> #include <linux/bitops.h> +#include <net/net_namespace.h> #include <net/ax25.h> #include <asm/irq.h> @@ -1550,8 +1551,8 @@ static void scc_net_setup(struct net_device *dev) dev->stop = scc_net_close; dev->hard_start_xmit = scc_net_tx; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; + dev->set_mac_address = scc_net_set_mac_address; dev->get_stats = scc_net_get_stats; dev->do_ioctl = scc_net_ioctl; @@ -2114,7 +2115,7 @@ static int __init scc_init_driver (void) } rtnl_unlock(); - proc_net_fops_create("z8530drv", 0, &scc_net_seq_fops); + proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops); return 0; } @@ -2169,7 +2170,7 @@ static void __exit scc_cleanup_driver(void) if (Vector_Latch) release_region(Vector_Latch, 1); - proc_net_remove("z8530drv"); + proc_net_remove(&init_net, "z8530drv"); } MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>"); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 467559debfd6..1c942862a3f4 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -65,6 +65,7 @@ #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <net/net_namespace.h> #include <asm/uaccess.h> #include <linux/init.h> @@ -1096,8 +1097,7 @@ static void yam_setup(struct net_device *dev) skb_queue_head_init(&yp->send_queue); - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + 
dev->header_ops = &ax25_header_ops; dev->set_mac_address = yam_set_mac_address; @@ -1142,7 +1142,7 @@ static int __init yam_init_driver(void) yam_timer.expires = jiffies + HZ / 100; add_timer(&yam_timer); - proc_net_fops_create("yam", S_IRUGO, &yam_info_fops); + proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops); return 0; error: while (--i >= 0) { @@ -1174,7 +1174,7 @@ static void __exit yam_cleanup_driver(void) kfree(p); } - proc_net_remove("yam"); + proc_net_remove(&init_net, "yam"); } /* --------------------------------------------------------------------- */ diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c index 99a36cc3f8df..c2c4f49d7578 100644 --- a/drivers/net/hp-plus.c +++ b/drivers/net/hp-plus.c @@ -122,8 +122,6 @@ static int __init do_hpp_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return hpp_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -168,6 +166,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) const char name[] = "HP-PC-LAN+"; int mem_start; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -182,7 +181,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) if (ei_debug && version_printed++ == 0) printk(version); - printk("%s: %s at %#3x,", dev->name, name, ioaddr); + printk("%s: %s at %#3x, ", dev->name, name, ioaddr); /* Retrieve and checksum the station address. */ outw(MAC_Page, ioaddr + HP_PAGING); @@ -191,10 +190,11 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) unsigned char inval = inb(ioaddr + 8 + i); dev->dev_addr[i] = inval; checksum += inval; - printk(" %2.2x", inval); } checksum += inb(ioaddr + 14); + printk("%s", print_mac(mac, dev->dev_addr)); + if (checksum != 0xff) { printk(" bad checksum %2.2x.\n", checksum); retval = -ENODEV; diff --git a/drivers/net/hp.c b/drivers/net/hp.c index 635b13c2e2aa..c649a8019beb 100644 --- a/drivers/net/hp.c +++ b/drivers/net/hp.c @@ -86,8 +86,6 @@ static int __init do_hp_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return hp_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -129,6 +127,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) int i, retval, board_id, wordmode; const char *name; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -160,7 +159,9 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + + printk(" %s", print_mac(mac, dev->dev_addr)); /* Snarf the interrupt now. Someday this could be moved to open(). 
*/ if (dev->irq < 2) { diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 8caa591c5649..e4fde17e2841 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -404,8 +404,6 @@ struct net_device * __init hp100_probe(int unit) if (!dev) return ERR_PTR(-ENODEV); - SET_MODULE_OWNER(dev); - #ifdef HP100_DEBUG_B hp100_outw(0x4200, TRACE); printk("hp100: %s: probe\n", dev->name); @@ -2095,9 +2093,9 @@ static void hp100_set_multicast_list(struct net_device *dev) addrs = dmi->dmi_addr; if ((*addrs & 0x01) == 0x01) { /* multicast address? */ #ifdef HP100_DEBUG - printk("hp100: %s: multicast = %02x:%02x:%02x:%02x:%02x:%02x, ", - dev->name, addrs[0], addrs[1], addrs[2], - addrs[3], addrs[4], addrs[5]); + DECLARE_MAC_BUF(mac); + printk("hp100: %s: multicast = %s, ", + dev->name, print_mac(mac, addrs)); #endif for (j = idx = 0; j < 6; j++) { idx ^= *addrs++ & 0x3f; @@ -2843,7 +2841,6 @@ static int __init hp100_eisa_probe (struct device *gendev) if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &edev->dev); err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL); @@ -2896,7 +2893,6 @@ static int __devinit hp100_pci_probe (struct pci_dev *pdev, goto out0; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); pci_read_config_word(pdev, PCI_COMMAND, &pci_command); @@ -2993,7 +2989,6 @@ static int __init hp100_isa_init(void) return -ENOMEM; } - SET_MODULE_OWNER(dev); err = hp100_isa_probe(dev, hp100_port[i]); if (!err) diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c index c991cb82ff22..be6e5bc7c881 100644 --- a/drivers/net/hplance.c +++ b/drivers/net/hplance.c @@ -141,7 +141,6 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d) dev->poll_controller = lance_poll; #endif dev->hard_start_xmit = &lance_start_xmit; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->dma = 0; diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index f970bfbb9db2..b96cf2dcb109 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -103,6 +103,7 @@ static int __devinit hydra_init(struct zorro_dev *z) int start_page, stop_page; int j; int err; + DECLARE_MAC_BUF(mac); static u32 hydra_offsets[16] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, @@ -112,7 +113,6 @@ static int __devinit hydra_init(struct zorro_dev *z) dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); for(j = 0; j < ETHER_ADDR_LEN; j++) dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); @@ -163,10 +163,8 @@ static int __devinit hydra_init(struct zorro_dev *z) zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: Hydra at 0x%08lx, address " - "%02x:%02x:%02x:%02x:%02x:%02x (hydra.c " HYDRA_VERSION ")\n", - dev->name, z->resource.start, dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5]); + "%s (hydra.c " HYDRA_VERSION ")\n", + dev->name, z->resource.start, print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/ibm_emac/Kconfig b/drivers/net/ibm_emac/Kconfig new file mode 100644 index 000000000000..f61c48047dc0 --- /dev/null +++ b/drivers/net/ibm_emac/Kconfig @@ -0,0 +1,70 @@ +config IBM_EMAC + tristate "PowerPC 4xx on-chip Ethernet support" + depends on 4xx && !PPC_MERGE + help + This driver supports the PowerPC 4xx EMAC family of on-chip + Ethernet controllers. 
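The hamradio hunks above (6pack, baycom_epp, bpqether, dmascc, hdlcdrv, scc and yam) all make the same conversion: the per-device hard_header/rebuild_header function pointers are dropped in favour of a single const struct header_ops, either the shared ax25_header_ops or a small driver-local table such as sp_header_ops. A minimal sketch of that pattern, using hypothetical foo_* names rather than any driver in this diff, might look like:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Same signature the patch gives sp_header()/ax_header(): daddr/saddr are const. */
static int foo_header(struct sk_buff *skb, struct net_device *dev,
		      unsigned short type, const void *daddr,
		      const void *saddr, unsigned len)
{
	return 0;		/* build the link-level header here */
}

static int foo_rebuild_header(struct sk_buff *skb)
{
	return 0;		/* resolve the destination address here */
}

/* One shared, const ops table replaces the two per-device pointers. */
static const struct header_ops foo_header_ops = {
	.create  = foo_header,
	.rebuild = foo_rebuild_header,
};

static void foo_setup(struct net_device *dev)
{
	/* Previously: dev->hard_header    = foo_header;
	 *             dev->rebuild_header = foo_rebuild_header; */
	dev->header_ops = &foo_header_ops;
}

Drivers that used the stock AX.25 helpers simply point header_ops at the exported ax25_header_ops instead of defining their own table, as the baycom, bpqether, dmascc, hdlcdrv, scc and yam hunks do.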
+ +config IBM_EMAC_RXB + int "Number of receive buffers" + depends on IBM_EMAC + default "128" + +config IBM_EMAC_TXB + int "Number of transmit buffers" + depends on IBM_EMAC + default "64" + +config IBM_EMAC_POLL_WEIGHT + int "MAL NAPI polling weight" + depends on IBM_EMAC + default "32" + +config IBM_EMAC_RX_COPY_THRESHOLD + int "RX skb copy threshold (bytes)" + depends on IBM_EMAC + default "256" + +config IBM_EMAC_RX_SKB_HEADROOM + int "Additional RX skb headroom (bytes)" + depends on IBM_EMAC + default "0" + help + Additional receive skb headroom. Note, that driver + will always reserve at least 2 bytes to make IP header + aligned, so usually there is no need to add any additional + headroom. + + If unsure, set to 0. + +config IBM_EMAC_PHY_RX_CLK_FIX + bool "PHY Rx clock workaround" + depends on IBM_EMAC && (405EP || 440GX || 440EP || 440GR) + help + Enable this if EMAC attached to a PHY which doesn't generate + RX clock if there is no link, if this is the case, you will + see "TX disable timeout" or "RX disable timeout" in the system + log. + + If unsure, say N. + +config IBM_EMAC_DEBUG + bool "Debugging" + depends on IBM_EMAC + default n + +config IBM_EMAC_ZMII + bool + depends on IBM_EMAC && (NP405H || NP405L || 44x) + default y + +config IBM_EMAC_RGMII + bool + depends on IBM_EMAC && 440GX + default y + +config IBM_EMAC_TAH + bool + depends on IBM_EMAC && 440GX + default y + diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index f752e5fc65ba..73664f226f32 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c @@ -353,10 +353,9 @@ static void emac_hash_mc(struct ocp_enet_private *dev) for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { int bit; - DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL, - dev->def->index, - dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], - dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); + DECLARE_MAC_BUF(mac); + DBG2("%d: mc %s" NL, + dev->def->index, print_mac(mac, dmi->dmi_addr)); bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26); gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f); @@ -1843,9 +1842,14 @@ static int emac_ethtool_nway_reset(struct net_device *ndev) return res; } -static int emac_ethtool_get_stats_count(struct net_device *ndev) +static int emac_get_sset_count(struct net_device *ndev, int sset) { - return EMAC_ETHTOOL_STATS_COUNT; + switch (sset) { + case ETH_SS_STATS: + return EMAC_ETHTOOL_STATS_COUNT; + default: + return -EOPNOTSUPP; + } } static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, @@ -1876,7 +1880,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strcpy(info->version, DRV_VERSION); info->fw_version[0] = '\0'; sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index); - info->n_stats = emac_ethtool_get_stats_count(ndev); info->regdump_len = emac_ethtool_get_regs_len(ndev); } @@ -1896,12 +1899,10 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_rx_csum = emac_ethtool_get_rx_csum, .get_strings = emac_ethtool_get_strings, - .get_stats_count = emac_ethtool_get_stats_count, + .get_sset_count = emac_get_sset_count, .get_ethtool_stats = emac_ethtool_get_ethtool_stats, .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, }; static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -1942,6 +1943,7 @@ static int __init emac_probe(struct ocp_device *ocpdev) struct ocp_device *maldev; struct ocp_enet_private *dev; int err, i; + 
DECLARE_MAC_BUF(mac); DBG("%d: probe" NL, ocpdev->def->index); @@ -1962,7 +1964,6 @@ static int __init emac_probe(struct ocp_device *ocpdev) dev->ndev = ndev; dev->ldev = &ocpdev->dev; dev->def = ocpdev->def; - SET_MODULE_OWNER(ndev); /* Find MAL device we are connected to */ maldev = @@ -2191,10 +2192,8 @@ static int __init emac_probe(struct ocp_device *ocpdev) ocp_set_drvdata(ocpdev, dev); - printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", - ndev->name, dev->def->index, - ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], - ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + printk("%s: emac%d, MAC %s\n", + ndev->name, dev->def->index, print_mac(mac, ndev->dev_addr)); if (dev->phy.address >= 0) printk("%s: found %s PHY (0x%02x)\n", ndev->name, diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c index 92f970d402df..1f70906cfb98 100644 --- a/drivers/net/ibm_emac/ibm_emac_debug.c +++ b/drivers/net/ibm_emac/ibm_emac_debug.c @@ -132,7 +132,7 @@ void emac_dbg_register(int idx, struct ocp_enet_private *dev) { unsigned long flags; - if (idx >= sizeof(__emacs) / sizeof(__emacs[0])) { + if (idx >= ARRAY_SIZE(__emacs)) { printk(KERN_WARNING "invalid index %d when registering EMAC for debugging\n", idx); @@ -148,7 +148,7 @@ void mal_dbg_register(int idx, struct ibm_ocp_mal *mal) { unsigned long flags; - if (idx >= sizeof(__mals) / sizeof(__mals[0])) { + if (idx >= ARRAY_SIZE(__mals)) { printk(KERN_WARNING "invalid index %d when registering MAL for debugging\n", idx); @@ -167,11 +167,11 @@ void emac_dbg_dump_all(void) local_irq_save(flags); - for (i = 0; i < sizeof(__mals) / sizeof(__mals[0]); ++i) + for (i = 0; i < ARRAY_SIZE(__mals); ++i) if (__mals[i]) emac_mal_dump(__mals[i]); - for (i = 0; i < sizeof(__emacs) / sizeof(__emacs[0]); ++i) + for (i = 0; i < ARRAY_SIZE(__emacs); ++i) if (__emacs[i]) emac_mac_dump(i, __emacs[i]); diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c index cabd9846a5ee..4e49e8c4f871 100644 --- a/drivers/net/ibm_emac/ibm_emac_mal.c +++ b/drivers/net/ibm_emac/ibm_emac_mal.c @@ -207,10 +207,10 @@ static irqreturn_t mal_serr(int irq, void *dev_instance) static inline void mal_schedule_poll(struct ibm_ocp_mal *mal) { - if (likely(netif_rx_schedule_prep(&mal->poll_dev))) { + if (likely(napi_schedule_prep(&mal->napi))) { MAL_DBG2("%d: schedule_poll" NL, mal->def->index); mal_disable_eob_irq(mal); - __netif_rx_schedule(&mal->poll_dev); + __napi_schedule(&mal->napi); } else MAL_DBG2("%d: already in poll" NL, mal->def->index); } @@ -273,11 +273,11 @@ static irqreturn_t mal_rxde(int irq, void *dev_instance) return IRQ_HANDLED; } -static int mal_poll(struct net_device *ndev, int *budget) +static int mal_poll(struct napi_struct *napi, int budget) { - struct ibm_ocp_mal *mal = ndev->priv; + struct ibm_ocp_mal *mal = container_of(napi, struct ibm_ocp_mal, napi); struct list_head *l; - int rx_work_limit = min(ndev->quota, *budget), received = 0, done; + int received = 0; MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget, rx_work_limit); @@ -295,38 +295,34 @@ static int mal_poll(struct net_device *ndev, int *budget) list_for_each(l, &mal->poll_list) { struct mal_commac *mc = list_entry(l, struct mal_commac, poll_list); - int n = mc->ops->poll_rx(mc->dev, rx_work_limit); + int n = mc->ops->poll_rx(mc->dev, budget); if (n) { received += n; - rx_work_limit -= n; - if (rx_work_limit <= 0) { - done = 0; + budget -= n; + if (budget <= 0) goto more_work; // XXX What if this is the last one 
? - } } } /* We need to disable IRQs to protect from RXDE IRQ here */ local_irq_disable(); - __netif_rx_complete(ndev); + __napi_complete(napi); mal_enable_eob_irq(mal); local_irq_enable(); - done = 1; - /* Check for "rotting" packet(s) */ list_for_each(l, &mal->poll_list) { struct mal_commac *mc = list_entry(l, struct mal_commac, poll_list); if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) { MAL_DBG2("%d: rotting packet" NL, mal->def->index); - if (netif_rx_reschedule(ndev, received)) + if (napi_reschedule(napi)) mal_disable_eob_irq(mal); else MAL_DBG2("%d: already in poll list" NL, mal->def->index); - if (rx_work_limit > 0) + if (budget > 0) goto again; else goto more_work; @@ -335,12 +331,8 @@ static int mal_poll(struct net_device *ndev, int *budget) } more_work: - ndev->quota -= received; - *budget -= received; - - MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget, - done ? 0 : 1); - return done ? 0 : 1; + MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, budget, received); + return received; } static void mal_reset(struct ibm_ocp_mal *mal) @@ -425,11 +417,8 @@ static int __init mal_probe(struct ocp_device *ocpdev) mal->def = ocpdev->def; INIT_LIST_HEAD(&mal->poll_list); - set_bit(__LINK_STATE_START, &mal->poll_dev.state); - mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; - mal->poll_dev.poll = mal_poll; - mal->poll_dev.priv = mal; - atomic_set(&mal->poll_dev.refcnt, 1); + mal->napi.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; + mal->napi.poll = mal_poll; INIT_LIST_HEAD(&mal->list); @@ -520,11 +509,8 @@ static void __exit mal_remove(struct ocp_device *ocpdev) MAL_DBG("%d: remove" NL, mal->def->index); - /* Syncronize with scheduled polling, - stolen from net/core/dev.c:dev_close() - */ - clear_bit(__LINK_STATE_START, &mal->poll_dev.state); - netif_poll_disable(&mal->poll_dev); + /* Synchronize with scheduled polling */ + napi_disable(&mal->napi); if (!list_empty(&mal->list)) { /* This is *very* bad */ diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h index 64bc338acc6c..8f54d621994d 100644 --- a/drivers/net/ibm_emac/ibm_emac_mal.h +++ b/drivers/net/ibm_emac/ibm_emac_mal.h @@ -195,7 +195,7 @@ struct ibm_ocp_mal { dcr_host_t dcrhost; struct list_head poll_list; - struct net_device poll_dev; + struct napi_struct napi; struct list_head list; u32 tx_chan_mask; diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig new file mode 100644 index 000000000000..0d3e7380bad0 --- /dev/null +++ b/drivers/net/ibm_newemac/Kconfig @@ -0,0 +1,63 @@ +config IBM_NEW_EMAC + tristate "IBM EMAC Ethernet support" + depends on PPC_DCR && PPC_MERGE + help + This driver supports the IBM EMAC family of Ethernet controllers + typically found on 4xx embedded PowerPC chips, but also on the + Axon southbridge for Cell. + +config IBM_NEW_EMAC_RXB + int "Number of receive buffers" + depends on IBM_NEW_EMAC + default "128" + +config IBM_NEW_EMAC_TXB + int "Number of transmit buffers" + depends on IBM_NEW_EMAC + default "64" + +config IBM_NEW_EMAC_POLL_WEIGHT + int "MAL NAPI polling weight" + depends on IBM_NEW_EMAC + default "32" + +config IBM_NEW_EMAC_RX_COPY_THRESHOLD + int "RX skb copy threshold (bytes)" + depends on IBM_NEW_EMAC + default "256" + +config IBM_NEW_EMAC_RX_SKB_HEADROOM + int "Additional RX skb headroom (bytes)" + depends on IBM_NEW_EMAC + default "0" + help + Additional receive skb headroom. 
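Both the gianfar hunks earlier in this diff and the ibm_emac MAL hunks just above follow the same NAPI rework: the old dev->poll/dev->weight/dev->quota interface gives way to a struct napi_struct embedded in the driver's private data, registered with netif_napi_add(), enabled and disabled around open/close, and completed with netif_rx_complete() once less than the full budget has been consumed. A minimal sketch of the new shape, with hypothetical foo_* names and the RX-cleanup call stubbed out, might be:

#include <linux/netdevice.h>

struct foo_private {
	struct net_device *dev;
	struct napi_struct napi;	/* replaces dev->poll / dev->weight */
};

/* New-style poll handler: takes the napi context and a budget,
 * and returns how many packets were actually processed. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_private *priv = container_of(napi, struct foo_private, napi);
	int work_done = 0;

	/* work_done = foo_clean_rx_ring(priv->dev, budget); */

	if (work_done < budget) {
		/* Done for now: leave polled mode, re-enable RX interrupts. */
		netif_rx_complete(priv->dev, napi);
	}
	return work_done;
}

static void foo_init(struct net_device *dev, struct foo_private *priv)
{
	priv->dev = dev;
	netif_napi_add(dev, &priv->napi, foo_poll, 64);
}

The interrupt handler then calls netif_rx_schedule(dev, &priv->napi) instead of scheduling the device itself, and open/close bracket the ring with napi_enable()/napi_disable(), exactly as gfar_enet_open() and gfar_close() do above.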
Note, that driver + will always reserve at least 2 bytes to make IP header + aligned, so usually there is no need to add any additional + headroom. + + If unsure, set to 0. + +config IBM_NEW_EMAC_DEBUG + bool "Debugging" + depends on IBM_NEW_EMAC + default n + +# The options below has to be select'ed by the respective +# processor types or platforms + +config IBM_NEW_EMAC_ZMII + bool + default n + +config IBM_NEW_EMAC_RGMII + bool + default n + +config IBM_NEW_EMAC_TAH + bool + default n + +config IBM_NEW_EMAC_EMAC4 + bool + default n diff --git a/drivers/net/ibm_newemac/Makefile b/drivers/net/ibm_newemac/Makefile new file mode 100644 index 000000000000..0b5c99512762 --- /dev/null +++ b/drivers/net/ibm_newemac/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the PowerPC 4xx on-chip ethernet driver +# + +obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac.o + +ibm_newemac-y := mal.o core.o phy.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_ZMII) += zmii.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_RGMII) += rgmii.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_TAH) += tah.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_DEBUG) += debug.o diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c new file mode 100644 index 000000000000..8ea500961871 --- /dev/null +++ b/drivers/net/ibm_newemac/core.c @@ -0,0 +1,2906 @@ +/* + * drivers/net/ibm_newemac/core.c + * + * Driver for PowerPC 4xx on-chip ethernet controller. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Matt Porter <mporter@kernel.crashing.org> + * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org> + * Armin Kuster <akuster@mvista.com> + * Johnnie Peters <jpeters@mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/crc32.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/bitops.h> +#include <linux/workqueue.h> + +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/dma.h> +#include <asm/uaccess.h> + +#include "core.h" + +/* + * Lack of dma_unmap_???? calls is intentional. + * + * API-correct usage requires additional support state information to be + * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to + * EMAC design (e.g. TX buffer passed from network stack can be split into + * several BDs, dma_map_single/dma_map_page can be used to map particular BD), + * maintaining such information will add additional overhead. + * Current DMA API implementation for 4xx processors only ensures cache coherency + * and dma_unmap_???? routines are empty and are likely to stay this way. + * I decided to omit dma_unmap_??? calls because I don't want to add additional + * complexity just for the sake of following some abstract API, when it doesn't + * add any real benefit to the driver. I understand that this decision maybe + * controversial, but I really tried to make code API-correct and efficient + * at the same time and didn't come up with code I liked :(. 
--ebs + */ + +#define DRV_NAME "emac" +#define DRV_VERSION "3.54" +#define DRV_DESC "PPC 4xx OCP EMAC driver" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR + ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>"); +MODULE_LICENSE("GPL"); + +/* + * PPC64 doesn't (yet) have a cacheable_memcpy + */ +#ifdef CONFIG_PPC64 +#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n)) +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4) + +/* If packet size is less than this number, we allocate small skb and copy packet + * contents into it instead of just sending original big skb up + */ +#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD + +/* Since multiple EMACs share MDIO lines in various ways, we need + * to avoid re-using the same PHY ID in cases where the arch didn't + * setup precise phy_map entries + * + * XXX This is something that needs to be reworked as we can have multiple + * EMAC "sets" (multiple ASICs containing several EMACs) though we can + * probably require in that case to have explicit PHY IDs in the device-tree + */ +static u32 busy_phy_map; +static DEFINE_MUTEX(emac_phy_map_lock); + +/* This is the wait queue used to wait on any event related to probe, that + * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc... + */ +static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait); + +/* Having stable interface names is a doomed idea. However, it would be nice + * if we didn't have completely random interface names at boot too :-) It's + * just a matter of making everybody's life easier. Since we are doing + * threaded probing, it's a bit harder though. The base idea here is that + * we make up a list of all emacs in the device-tree before we register the + * driver. Every emac will then wait for the previous one in the list to + * initialize before itself. We should also keep that list ordered by + * cell_index. + * That list is only 4 entries long, meaning that additional EMACs don't + * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased. + */ + +#define EMAC_BOOT_LIST_SIZE 4 +static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE]; + +/* How long should I wait for dependent devices ? */ +#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5) + +/* I don't want to litter system log with timeout errors + * when we have brain-damaged PHY. + */ +static inline void emac_report_timeout_error(struct emac_instance *dev, + const char *error) +{ + if (net_ratelimit()) + printk(KERN_ERR "%s: %s\n", dev->ndev->name, error); +} + +/* PHY polling intervals */ +#define PHY_POLL_LINK_ON HZ +#define PHY_POLL_LINK_OFF (HZ / 5) + +/* Graceful stop timeouts in us. 
+ * We should allow up to 1 frame time (full-duplex, ignoring collisions) + */ +#define STOP_TIMEOUT_10 1230 +#define STOP_TIMEOUT_100 124 +#define STOP_TIMEOUT_1000 13 +#define STOP_TIMEOUT_1000_JUMBO 73 + +/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */ +static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = { + "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum", + "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom", + "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu", + "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet", + "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error", + "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range", + "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun", + "rx_bad_packet", "rx_runt_packet", "rx_short_event", + "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long", + "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors", + "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral", + "tx_bd_excessive_collisions", "tx_bd_late_collision", + "tx_bd_multple_collisions", "tx_bd_single_collision", + "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe", + "tx_errors" +}; + +static irqreturn_t emac_irq(int irq, void *dev_instance); +static void emac_clean_tx_ring(struct emac_instance *dev); +static void __emac_set_multicast_list(struct emac_instance *dev); + +static inline int emac_phy_supports_gige(int phy_mode) +{ + return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_RGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline int emac_phy_gpcs(int phy_mode) +{ + return phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline void emac_tx_enable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "tx_enable" NL); + + r = in_be32(&p->mr0); + if (!(r & EMAC_MR0_TXE)) + out_be32(&p->mr0, r | EMAC_MR0_TXE); +} + +static void emac_tx_disable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "tx_disable" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_TXE) { + int n = dev->stop_timeout; + out_be32(&p->mr0, r & ~EMAC_MR0_TXE); + while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, "TX disable timeout"); + } +} + +static void emac_rx_enable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) + goto out; + + DBG(dev, "rx_enable" NL); + + r = in_be32(&p->mr0); + if (!(r & EMAC_MR0_RXE)) { + if (unlikely(!(r & EMAC_MR0_RXI))) { + /* Wait if previous async disable is still in progress */ + int n = dev->stop_timeout; + while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, + "RX disable timeout"); + } + out_be32(&p->mr0, r | EMAC_MR0_RXE); + } + out: + ; +} + +static void emac_rx_disable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "rx_disable" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_RXE) { + int n = dev->stop_timeout; + out_be32(&p->mr0, r & ~EMAC_MR0_RXE); + while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, "RX disable timeout"); + } +} + +static inline void 
emac_netif_stop(struct emac_instance *dev) +{ + netif_tx_lock_bh(dev->ndev); + dev->no_mcast = 1; + netif_tx_unlock_bh(dev->ndev); + dev->ndev->trans_start = jiffies; /* prevent tx timeout */ + mal_poll_disable(dev->mal, &dev->commac); + netif_tx_disable(dev->ndev); +} + +static inline void emac_netif_start(struct emac_instance *dev) +{ + netif_tx_lock_bh(dev->ndev); + dev->no_mcast = 0; + if (dev->mcast_pending && netif_running(dev->ndev)) + __emac_set_multicast_list(dev); + netif_tx_unlock_bh(dev->ndev); + + netif_wake_queue(dev->ndev); + + /* NOTE: unconditional netif_wake_queue is only appropriate + * so long as all callers are assured to have free tx slots + * (taken from tg3... though the case where that is wrong is + * not terribly harmful) + */ + mal_poll_enable(dev->mal, &dev->commac); +} + +static inline void emac_rx_disable_async(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "rx_disable_async" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_RXE) + out_be32(&p->mr0, r & ~EMAC_MR0_RXE); +} + +static int emac_reset(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + int n = 20; + + DBG(dev, "reset" NL); + + if (!dev->reset_failed) { + /* 40x erratum suggests stopping RX channel before reset, + * we stop TX as well + */ + emac_rx_disable(dev); + emac_tx_disable(dev); + } + + out_be32(&p->mr0, EMAC_MR0_SRST); + while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n) + --n; + + if (n) { + dev->reset_failed = 0; + return 0; + } else { + emac_report_timeout_error(dev, "reset timeout"); + dev->reset_failed = 1; + return -ETIMEDOUT; + } +} + +static void emac_hash_mc(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u16 gaht[4] = { 0 }; + struct dev_mc_list *dmi; + + DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count); + + for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { + int bit; + DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL, + dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], + dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); + + bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26); + gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f); + } + out_be32(&p->gaht1, gaht[0]); + out_be32(&p->gaht2, gaht[1]); + out_be32(&p->gaht3, gaht[2]); + out_be32(&p->gaht4, gaht[3]); +} + +static inline u32 emac_iff2rmr(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + u32 r; + + r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE; + + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r |= EMAC4_RMR_BASE; + else + r |= EMAC_RMR_BASE; + + if (ndev->flags & IFF_PROMISC) + r |= EMAC_RMR_PME; + else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32) + r |= EMAC_RMR_PMME; + else if (ndev->mc_count > 0) + r |= EMAC_RMR_MAE; + + return r; +} + +static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT; + + DBG2(dev, "__emac_calc_base_mr1" NL); + + switch(tx_size) { + case 2048: + ret |= EMAC_MR1_TFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, tx_size); + } + + switch(rx_size) { + case 16384: + ret |= EMAC_MR1_RFS_16K; + break; + case 4096: + ret |= EMAC_MR1_RFS_4K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, rx_size); + } + + return ret; +} + +static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR 
| + EMAC4_MR1_OBCI(dev->opb_bus_freq); + + DBG2(dev, "__emac4_calc_base_mr1" NL); + + switch(tx_size) { + case 4096: + ret |= EMAC4_MR1_TFS_4K; + break; + case 2048: + ret |= EMAC4_MR1_TFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, tx_size); + } + + switch(rx_size) { + case 16384: + ret |= EMAC4_MR1_RFS_16K; + break; + case 4096: + ret |= EMAC4_MR1_RFS_4K; + break; + case 2048: + ret |= EMAC4_MR1_RFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, rx_size); + } + + return ret; +} + +static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + return emac_has_feature(dev, EMAC_FTR_EMAC4) ? + __emac4_calc_base_mr1(dev, tx_size, rx_size) : + __emac_calc_base_mr1(dev, tx_size, rx_size); +} + +static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4; + else + return ((size >> 6) - 1) << EMAC_TRTR_SHIFT; +} + +static inline u32 emac_calc_rwmr(struct emac_instance *dev, + unsigned int low, unsigned int high) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return (low << 22) | ( (high & 0x3ff) << 6); + else + return (low << 23) | ( (high & 0x1ff) << 7); +} + +static int emac_configure(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + struct net_device *ndev = dev->ndev; + int tx_size, rx_size; + u32 r, mr1 = 0; + + DBG(dev, "configure" NL); + + if (emac_reset(dev) < 0) + return -ETIMEDOUT; + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_reset(dev->tah_dev); + + DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n", + dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause); + + /* Default fifo sizes */ + tx_size = dev->tx_fifo_size; + rx_size = dev->rx_fifo_size; + + /* Check for full duplex */ + if (dev->phy.duplex == DUPLEX_FULL) + mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001; + + /* Adjust fifo sizes, mr1 and timeouts based on link speed */ + dev->stop_timeout = STOP_TIMEOUT_10; + switch (dev->phy.speed) { + case SPEED_1000: + if (emac_phy_gpcs(dev->phy.mode)) { + mr1 |= EMAC_MR1_MF_1000GPCS | + EMAC_MR1_MF_IPPA(dev->phy.address); + + /* Put some arbitrary OUI, Manuf & Rev IDs so we can + * identify this GPCS PHY later. 
+ */ + out_be32(&p->ipcr, 0xdeadbeef); + } else + mr1 |= EMAC_MR1_MF_1000; + + /* Extended fifo sizes */ + tx_size = dev->tx_fifo_size_gige; + rx_size = dev->rx_fifo_size_gige; + + if (dev->ndev->mtu > ETH_DATA_LEN) { + mr1 |= EMAC_MR1_JPSM; + dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO; + } else + dev->stop_timeout = STOP_TIMEOUT_1000; + break; + case SPEED_100: + mr1 |= EMAC_MR1_MF_100; + dev->stop_timeout = STOP_TIMEOUT_100; + break; + default: /* make gcc happy */ + break; + } + + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port, + dev->phy.speed); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed); + + /* on 40x erratum forces us to NOT use integrated flow control, + * let's hope it works on 44x ;) + */ + if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) && + dev->phy.duplex == DUPLEX_FULL) { + if (dev->phy.pause) + mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP; + else if (dev->phy.asym_pause) + mr1 |= EMAC_MR1_APP; + } + + /* Add base settings & fifo sizes & program MR1 */ + mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size); + out_be32(&p->mr1, mr1); + + /* Set individual MAC address */ + out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); + out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]); + + /* VLAN Tag Protocol ID */ + out_be32(&p->vtpid, 0x8100); + + /* Receive mode register */ + r = emac_iff2rmr(ndev); + if (r & EMAC_RMR_MAE) + emac_hash_mc(dev); + out_be32(&p->rmr, r); + + /* FIFOs thresholds */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1, + tx_size / 2 / dev->fifo_entry_size); + else + r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1, + tx_size / 2 / dev->fifo_entry_size); + out_be32(&p->tmr1, r); + out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2)); + + /* PAUSE frame is sent when RX FIFO reaches its high-water mark, + there should be still enough space in FIFO to allow the our link + partner time to process this frame and also time to send PAUSE + frame itself. 
+ + Here is the worst case scenario for the RX FIFO "headroom" + (from "The Switch Book") (100Mbps, without preamble, inter-frame gap): + + 1) One maximum-length frame on TX 1522 bytes + 2) One PAUSE frame time 64 bytes + 3) PAUSE frame decode time allowance 64 bytes + 4) One maximum-length frame on RX 1522 bytes + 5) Round-trip propagation delay of the link (100Mb) 15 bytes + ---------- + 3187 bytes + + I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes) + low-water mark to RX_FIFO_SIZE / 8 (512 bytes) + */ + r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size, + rx_size / 4 / dev->fifo_entry_size); + out_be32(&p->rwmr, r); + + /* Set PAUSE timer to the maximum */ + out_be32(&p->ptr, 0xffff); + + /* IRQ sources */ + r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE | + EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE | + EMAC_ISR_IRE | EMAC_ISR_TE; + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE | + EMAC4_ISR_RXOE | */; + out_be32(&p->iser, r); + + /* We need to take GPCS PHY out of isolate mode after EMAC reset */ + if (emac_phy_gpcs(dev->phy.mode)) + emac_mii_reset_phy(&dev->phy); + + return 0; +} + +static void emac_reinitialize(struct emac_instance *dev) +{ + DBG(dev, "reinitialize" NL); + + emac_netif_stop(dev); + if (!emac_configure(dev)) { + emac_tx_enable(dev); + emac_rx_enable(dev); + } + emac_netif_start(dev); +} + +static void emac_full_tx_reset(struct emac_instance *dev) +{ + DBG(dev, "full_tx_reset" NL); + + emac_tx_disable(dev); + mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); + emac_clean_tx_ring(dev); + dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0; + + emac_configure(dev); + + mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); + emac_tx_enable(dev); + emac_rx_enable(dev); +} + +static void emac_reset_work(struct work_struct *work) +{ + struct emac_instance *dev = container_of(work, struct emac_instance, reset_work); + + DBG(dev, "reset_work" NL); + + mutex_lock(&dev->link_lock); + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + mutex_unlock(&dev->link_lock); +} + +static void emac_tx_timeout(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "tx_timeout" NL); + + schedule_work(&dev->reset_work); +} + + +static inline int emac_phy_done(struct emac_instance *dev, u32 stacr) +{ + int done = !!(stacr & EMAC_STACR_OC); + + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + done = !done; + + return done; +}; + +static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r = 0; + int n, err = -ETIMEDOUT; + + mutex_lock(&dev->mdio_lock); + + DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg); + + /* Enable proper MDIO port */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_get_mdio(dev->zmii_dev, dev->zmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); + + /* Wait for management interface to become idle */ + n = 10; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait idle\n"); + goto bail; + } + } + + /* Issue read command */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_STACR_BASE(dev->opb_bus_freq); + else + r = EMAC_STACR_BASE(dev->opb_bus_freq); + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + r |= EMAC_STACR_OC; + if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR)) + r |= EMACX_STACR_STAC_READ; + else + r |= 
EMAC_STACR_STAC_READ; + r |= (reg & EMAC_STACR_PRA_MASK) + | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT); + out_be32(&p->stacr, r); + + /* Wait for read to complete */ + n = 100; + while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait complete\n"); + goto bail; + } + } + + if (unlikely(r & EMAC_STACR_PHYE)) { + DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg); + err = -EREMOTEIO; + goto bail; + } + + r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK); + + DBG2(dev, "mdio_read -> %04x" NL, r); + err = 0; + bail: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_put_mdio(dev->zmii_dev, dev->zmii_port); + mutex_unlock(&dev->mdio_lock); + + return err == 0 ? r : err; +} + +static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg, + u16 val) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r = 0; + int n, err = -ETIMEDOUT; + + mutex_lock(&dev->mdio_lock); + + DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val); + + /* Enable proper MDIO port */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_get_mdio(dev->zmii_dev, dev->zmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); + + /* Wait for management interface to be idle */ + n = 10; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait idle\n"); + goto bail; + } + } + + /* Issue write command */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_STACR_BASE(dev->opb_bus_freq); + else + r = EMAC_STACR_BASE(dev->opb_bus_freq); + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + r |= EMAC_STACR_OC; + if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR)) + r |= EMACX_STACR_STAC_WRITE; + else + r |= EMAC_STACR_STAC_WRITE; + r |= (reg & EMAC_STACR_PRA_MASK) | + ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) | + (val << EMAC_STACR_PHYD_SHIFT); + out_be32(&p->stacr, r); + + /* Wait for write to complete */ + n = 100; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait complete\n"); + goto bail; + } + } + err = 0; + bail: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_put_mdio(dev->zmii_dev, dev->zmii_port); + mutex_unlock(&dev->mdio_lock); +} + +static int emac_mdio_read(struct net_device *ndev, int id, int reg) +{ + struct emac_instance *dev = netdev_priv(ndev); + int res; + + res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev, + (u8) id, (u8) reg); + return res; +} + +static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val) +{ + struct emac_instance *dev = netdev_priv(ndev); + + __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev, + (u8) id, (u8) reg, (u16) val); +} + +/* Tx lock BH */ +static void __emac_set_multicast_list(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 rmr = emac_iff2rmr(dev->ndev); + + DBG(dev, "__multicast %08x" NL, rmr); + + /* I decided to relax register access rules here to avoid + * full EMAC reset. + * + * There is a real problem with EMAC4 core if we use MWSW_001 bit + * in MR1 register and do a full EMAC reset. 
+ * One TX BD status update is delayed and, after EMAC reset, it + * never happens, resulting in TX hung (it'll be recovered by TX + * timeout handler eventually, but this is just gross). + * So we either have to do full TX reset or try to cheat here :) + * + * The only required change is to RX mode register, so I *think* all + * we need is just to stop RX channel. This seems to work on all + * tested SoCs. --ebs + * + * If we need the full reset, we might just trigger the workqueue + * and do it async... a bit nasty but should work --BenH + */ + dev->mcast_pending = 0; + emac_rx_disable(dev); + if (rmr & EMAC_RMR_MAE) + emac_hash_mc(dev); + out_be32(&p->rmr, rmr); + emac_rx_enable(dev); +} + +/* Tx lock BH */ +static void emac_set_multicast_list(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "multicast" NL); + + BUG_ON(!netif_running(dev->ndev)); + + if (dev->no_mcast) { + dev->mcast_pending = 1; + return; + } + __emac_set_multicast_list(dev); +} + +static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) +{ + int rx_sync_size = emac_rx_sync_size(new_mtu); + int rx_skb_size = emac_rx_skb_size(new_mtu); + int i, ret = 0; + + mutex_lock(&dev->link_lock); + emac_netif_stop(dev); + emac_rx_disable(dev); + mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); + + if (dev->rx_sg_skb) { + ++dev->estats.rx_dropped_resize; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } + + /* Make a first pass over RX ring and mark BDs ready, dropping + * non-processed packets on the way. We need this as a separate pass + * to simplify error recovery in the case of allocation failure later. + */ + for (i = 0; i < NUM_RX_BUFF; ++i) { + if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST) + ++dev->estats.rx_dropped_resize; + + dev->rx_desc[i].data_len = 0; + dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | + (i == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); + } + + /* Reallocate RX ring only if bigger skb buffers are required */ + if (rx_skb_size <= dev->rx_skb_size) + goto skip; + + /* Second pass, allocate new skbs */ + for (i = 0; i < NUM_RX_BUFF; ++i) { + struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC); + if (!skb) { + ret = -ENOMEM; + goto oom; + } + + BUG_ON(!dev->rx_skb[i]); + dev_kfree_skb(dev->rx_skb[i]); + + skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); + dev->rx_desc[i].data_ptr = + dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size, + DMA_FROM_DEVICE) + 2; + dev->rx_skb[i] = skb; + } + skip: + /* Check if we need to change "Jumbo" bit in MR1 */ + if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) { + /* This is to prevent starting RX channel in emac_rx_enable() */ + set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + + dev->ndev->mtu = new_mtu; + emac_full_tx_reset(dev); + } + + mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu)); + oom: + /* Restart RX */ + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + dev->rx_slot = 0; + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_rx_enable(dev); + emac_netif_start(dev); + mutex_unlock(&dev->link_lock); + + return ret; +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct emac_instance *dev = netdev_priv(ndev); + int ret = 0; + + if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu) + return -EINVAL; + + DBG(dev, "change_mtu(%d)" NL, new_mtu); + + if (netif_running(ndev)) { + /* Check if we really need to reinitalize RX ring */ + if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu)) + ret = emac_resize_rx_ring(dev, new_mtu); + } + + if (!ret) { + ndev->mtu = new_mtu; + dev->rx_skb_size = emac_rx_skb_size(new_mtu); + dev->rx_sync_size = emac_rx_sync_size(new_mtu); + } + + return ret; +} + +static void emac_clean_tx_ring(struct emac_instance *dev) +{ + int i; + + for (i = 0; i < NUM_TX_BUFF; ++i) { + if (dev->tx_skb[i]) { + dev_kfree_skb(dev->tx_skb[i]); + dev->tx_skb[i] = NULL; + if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY) + ++dev->estats.tx_dropped; + } + dev->tx_desc[i].ctrl = 0; + dev->tx_desc[i].data_ptr = 0; + } +} + +static void emac_clean_rx_ring(struct emac_instance *dev) +{ + int i; + + for (i = 0; i < NUM_RX_BUFF; ++i) + if (dev->rx_skb[i]) { + dev->rx_desc[i].ctrl = 0; + dev_kfree_skb(dev->rx_skb[i]); + dev->rx_skb[i] = NULL; + dev->rx_desc[i].data_ptr = 0; + } + + if (dev->rx_sg_skb) { + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } +} + +static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, + gfp_t flags) +{ + struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); + if (unlikely(!skb)) + return -ENOMEM; + + dev->rx_skb[slot] = skb; + dev->rx_desc[slot].data_len = 0; + + skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); + dev->rx_desc[slot].data_ptr = + dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size, + DMA_FROM_DEVICE) + 2; + wmb(); + dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | + (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0); + + return 0; +} + +static void emac_print_link_status(struct emac_instance *dev) +{ + if (netif_carrier_ok(dev->ndev)) + printk(KERN_INFO "%s: link is up, %d %s%s\n", + dev->ndev->name, dev->phy.speed, + dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX", + dev->phy.pause ? ", pause enabled" : + dev->phy.asym_pause ? 
", asymmetric pause enabled" : ""); + else + printk(KERN_INFO "%s: link is down\n", dev->ndev->name); +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_open(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int err, i; + + DBG(dev, "open" NL); + + /* Setup error IRQ handler */ + err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev); + if (err) { + printk(KERN_ERR "%s: failed to request IRQ %d\n", + ndev->name, dev->emac_irq); + return err; + } + + /* Allocate RX ring */ + for (i = 0; i < NUM_RX_BUFF; ++i) + if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) { + printk(KERN_ERR "%s: failed to allocate RX ring\n", + ndev->name); + goto oom; + } + + dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0; + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + dev->rx_sg_skb = NULL; + + mutex_lock(&dev->link_lock); + + /* XXX Start PHY polling now. Shouldn't wr do like sungem instead and + * always poll the PHY even when the iface is down ? That would allow + * things like laptop-net to work. --BenH + */ + if (dev->phy.address >= 0) { + int link_poll_interval; + if (dev->phy.def->ops->poll_link(&dev->phy)) { + dev->phy.def->ops->read_link(&dev->phy); + netif_carrier_on(dev->ndev); + link_poll_interval = PHY_POLL_LINK_ON; + } else { + netif_carrier_off(dev->ndev); + link_poll_interval = PHY_POLL_LINK_OFF; + } + dev->link_polling = 1; + wmb(); + schedule_delayed_work(&dev->link_work, link_poll_interval); + emac_print_link_status(dev); + } else + netif_carrier_on(dev->ndev); + + emac_configure(dev); + mal_poll_add(dev->mal, &dev->commac); + mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); + mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu)); + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_tx_enable(dev); + emac_rx_enable(dev); + emac_netif_start(dev); + + mutex_unlock(&dev->link_lock); + + return 0; + oom: + emac_clean_rx_ring(dev); + free_irq(dev->emac_irq, dev); + + return -ENOMEM; +} + +/* BHs disabled */ +#if 0 +static int emac_link_differs(struct emac_instance *dev) +{ + u32 r = in_be32(&dev->emacp->mr1); + + int duplex = r & EMAC_MR1_FDE ? 
DUPLEX_FULL : DUPLEX_HALF; + int speed, pause, asym_pause; + + if (r & EMAC_MR1_MF_1000) + speed = SPEED_1000; + else if (r & EMAC_MR1_MF_100) + speed = SPEED_100; + else + speed = SPEED_10; + + switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) { + case (EMAC_MR1_EIFC | EMAC_MR1_APP): + pause = 1; + asym_pause = 0; + break; + case EMAC_MR1_APP: + pause = 0; + asym_pause = 1; + break; + default: + pause = asym_pause = 0; + } + return speed != dev->phy.speed || duplex != dev->phy.duplex || + pause != dev->phy.pause || asym_pause != dev->phy.asym_pause; +} +#endif + +static void emac_link_timer(struct work_struct *work) +{ + struct emac_instance *dev = + container_of((struct delayed_work *)work, + struct emac_instance, link_work); + int link_poll_interval; + + mutex_lock(&dev->link_lock); + + DBG2(dev, "link timer" NL); + + if (dev->phy.def->ops->poll_link(&dev->phy)) { + if (!netif_carrier_ok(dev->ndev)) { + /* Get new link parameters */ + dev->phy.def->ops->read_link(&dev->phy); + + netif_carrier_on(dev->ndev); + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + emac_print_link_status(dev); + } + link_poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(dev->ndev)) { + emac_reinitialize(dev); + netif_carrier_off(dev->ndev); + netif_tx_disable(dev->ndev); + emac_print_link_status(dev); + } + link_poll_interval = PHY_POLL_LINK_OFF; + } + schedule_delayed_work(&dev->link_work, link_poll_interval); + + mutex_unlock(&dev->link_lock); +} + +static void emac_force_link_update(struct emac_instance *dev) +{ + netif_carrier_off(dev->ndev); + if (dev->link_polling) { + cancel_rearming_delayed_work(&dev->link_work); + if (dev->link_polling) + schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF); + } +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_close(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "close" NL); + + if (dev->phy.address >= 0) + cancel_rearming_delayed_work(&dev->link_work); + + emac_netif_stop(dev); + flush_scheduled_work(); + + emac_rx_disable(dev); + emac_tx_disable(dev); + mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); + mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); + mal_poll_del(dev->mal, &dev->commac); + + emac_clean_tx_ring(dev); + emac_clean_rx_ring(dev); + + free_irq(dev->emac_irq, dev); + + return 0; +} + +static inline u16 emac_tx_csum(struct emac_instance *dev, + struct sk_buff *skb) +{ + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) && + skb->ip_summed == CHECKSUM_PARTIAL) { + ++dev->stats.tx_packets_csum; + return EMAC_TX_CTRL_TAH_CSUM; + } + return 0; +} + +static inline int emac_xmit_finish(struct emac_instance *dev, int len) +{ + struct emac_regs __iomem *p = dev->emacp; + struct net_device *ndev = dev->ndev; + + /* Send the packet out.
If the if makes a significant perf + * difference, then we can store the TMR0 value in "dev" + * instead + */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + out_be32(&p->tmr0, EMAC4_TMR0_XMIT); + else + out_be32(&p->tmr0, EMAC_TMR0_XMIT); + + if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) { + netif_stop_queue(ndev); + DBG2(dev, "stopped TX queue" NL); + } + + ndev->trans_start = jiffies; + ++dev->stats.tx_packets; + dev->stats.tx_bytes += len; + + return 0; +} + +/* Tx lock BH */ +static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + unsigned int len = skb->len; + int slot; + + u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | + MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb); + + slot = dev->tx_slot++; + if (dev->tx_slot == NUM_TX_BUFF) { + dev->tx_slot = 0; + ctrl |= MAL_TX_CTRL_WRAP; + } + + DBG2(dev, "xmit(%u) %d" NL, len, slot); + + dev->tx_skb[slot] = skb; + dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev, + skb->data, len, + DMA_TO_DEVICE); + dev->tx_desc[slot].data_len = (u16) len; + wmb(); + dev->tx_desc[slot].ctrl = ctrl; + + return emac_xmit_finish(dev, len); +} + +#ifdef CONFIG_IBM_NEW_EMAC_TAH +static inline int emac_xmit_split(struct emac_instance *dev, int slot, + u32 pd, int len, int last, u16 base_ctrl) +{ + while (1) { + u16 ctrl = base_ctrl; + int chunk = min(len, MAL_MAX_TX_SIZE); + len -= chunk; + + slot = (slot + 1) % NUM_TX_BUFF; + + if (last && !len) + ctrl |= MAL_TX_CTRL_LAST; + if (slot == NUM_TX_BUFF - 1) + ctrl |= MAL_TX_CTRL_WRAP; + + dev->tx_skb[slot] = NULL; + dev->tx_desc[slot].data_ptr = pd; + dev->tx_desc[slot].data_len = (u16) chunk; + dev->tx_desc[slot].ctrl = ctrl; + ++dev->tx_cnt; + + if (!len) + break; + + pd += chunk; + } + return slot; +} + +/* Tx lock BH disabled (SG version for TAH equipped EMACs) */ +static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int len = skb->len, chunk; + int slot, i; + u16 ctrl; + u32 pd; + + /* This is common "fast" path */ + if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE)) + return emac_start_xmit(skb, ndev); + + len -= skb->data_len; + + /* Note, this is only an *estimation*, we can still run out of empty + * slots because of the additional fragmentation into + * MAL_MAX_TX_SIZE-sized chunks + */ + if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF)) + goto stop_queue; + + ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | + emac_tx_csum(dev, skb); + slot = dev->tx_slot; + + /* skb data */ + dev->tx_skb[slot] = NULL; + chunk = min(len, MAL_MAX_TX_SIZE); + dev->tx_desc[slot].data_ptr = pd = + dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE); + dev->tx_desc[slot].data_len = (u16) chunk; + len -= chunk; + if (unlikely(len)) + slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags, + ctrl); + /* skb fragments */ + for (i = 0; i < nr_frags; ++i) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = frag->size; + + if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) + goto undo_frame; + + pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len, + DMA_TO_DEVICE); + + slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1, + ctrl); + } + + DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot); + + /* Attach skb to the last slot so we don't release it too early */ + dev->tx_skb[slot] = skb; + + /* 
Send the packet out */ + if (dev->tx_slot == NUM_TX_BUFF - 1) + ctrl |= MAL_TX_CTRL_WRAP; + wmb(); + dev->tx_desc[dev->tx_slot].ctrl = ctrl; + dev->tx_slot = (slot + 1) % NUM_TX_BUFF; + + return emac_xmit_finish(dev, skb->len); + + undo_frame: + /* Well, too bad. Our previous estimation was overly optimistic. + * Undo everything. + */ + while (slot != dev->tx_slot) { + dev->tx_desc[slot].ctrl = 0; + --dev->tx_cnt; + if (--slot < 0) + slot = NUM_TX_BUFF - 1; + } + ++dev->estats.tx_undo; + + stop_queue: + netif_stop_queue(ndev); + DBG2(dev, "stopped TX queue" NL); + return 1; +} +#else +# define emac_start_xmit_sg emac_start_xmit +#endif /* !defined(CONFIG_IBM_NEW_EMAC_TAH) */ + +/* Tx lock BHs */ +static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl) +{ + struct emac_error_stats *st = &dev->estats; + + DBG(dev, "BD TX error %04x" NL, ctrl); + + ++st->tx_bd_errors; + if (ctrl & EMAC_TX_ST_BFCS) + ++st->tx_bd_bad_fcs; + if (ctrl & EMAC_TX_ST_LCS) + ++st->tx_bd_carrier_loss; + if (ctrl & EMAC_TX_ST_ED) + ++st->tx_bd_excessive_deferral; + if (ctrl & EMAC_TX_ST_EC) + ++st->tx_bd_excessive_collisions; + if (ctrl & EMAC_TX_ST_LC) + ++st->tx_bd_late_collision; + if (ctrl & EMAC_TX_ST_MC) + ++st->tx_bd_multple_collisions; + if (ctrl & EMAC_TX_ST_SC) + ++st->tx_bd_single_collision; + if (ctrl & EMAC_TX_ST_UR) + ++st->tx_bd_underrun; + if (ctrl & EMAC_TX_ST_SQE) + ++st->tx_bd_sqe; +} + +static void emac_poll_tx(void *param) +{ + struct emac_instance *dev = param; + u32 bad_mask; + + DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot); + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + bad_mask = EMAC_IS_BAD_TX_TAH; + else + bad_mask = EMAC_IS_BAD_TX; + + netif_tx_lock_bh(dev->ndev); + if (dev->tx_cnt) { + u16 ctrl; + int slot = dev->ack_slot, n = 0; + again: + ctrl = dev->tx_desc[slot].ctrl; + if (!(ctrl & MAL_TX_CTRL_READY)) { + struct sk_buff *skb = dev->tx_skb[slot]; + ++n; + + if (skb) { + dev_kfree_skb(skb); + dev->tx_skb[slot] = NULL; + } + slot = (slot + 1) % NUM_TX_BUFF; + + if (unlikely(ctrl & bad_mask)) + emac_parse_tx_error(dev, ctrl); + + if (--dev->tx_cnt) + goto again; + } + if (n) { + dev->ack_slot = slot; + if (netif_queue_stopped(dev->ndev) && + dev->tx_cnt < EMAC_TX_WAKEUP_THRESH) + netif_wake_queue(dev->ndev); + + DBG2(dev, "tx %d pkts" NL, n); + } + } + netif_tx_unlock_bh(dev->ndev); +} + +static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot, + int len) +{ + struct sk_buff *skb = dev->rx_skb[slot]; + + DBG2(dev, "recycle %d %d" NL, slot, len); + + if (len) + dma_map_single(&dev->ofdev->dev, skb->data - 2, + EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE); + + dev->rx_desc[slot].data_len = 0; + wmb(); + dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | + (slot == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); +} + +static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl) +{ + struct emac_error_stats *st = &dev->estats; + + DBG(dev, "BD RX error %04x" NL, ctrl); + + ++st->rx_bd_errors; + if (ctrl & EMAC_RX_ST_OE) + ++st->rx_bd_overrun; + if (ctrl & EMAC_RX_ST_BP) + ++st->rx_bd_bad_packet; + if (ctrl & EMAC_RX_ST_RP) + ++st->rx_bd_runt_packet; + if (ctrl & EMAC_RX_ST_SE) + ++st->rx_bd_short_event; + if (ctrl & EMAC_RX_ST_AE) + ++st->rx_bd_alignment_error; + if (ctrl & EMAC_RX_ST_BFCS) + ++st->rx_bd_bad_fcs; + if (ctrl & EMAC_RX_ST_PTL) + ++st->rx_bd_packet_too_long; + if (ctrl & EMAC_RX_ST_ORE) + ++st->rx_bd_out_of_range; + if (ctrl & EMAC_RX_ST_IRE) + ++st->rx_bd_in_range; +} + +static inline void emac_rx_csum(struct emac_instance *dev, + struct sk_buff *skb, u16 ctrl) +{ +#ifdef CONFIG_IBM_NEW_EMAC_TAH + if (!ctrl && dev->tah_dev) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + ++dev->stats.rx_packets_csum; + } +#endif +} + +static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) +{ + if (likely(dev->rx_sg_skb != NULL)) { + int len = dev->rx_desc[slot].data_len; + int tot_len = dev->rx_sg_skb->len + len; + + if (unlikely(tot_len + 2 > dev->rx_skb_size)) { + ++dev->estats.rx_dropped_mtu; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } else { + cacheable_memcpy(dev->rx_sg_skb->tail, + dev->rx_skb[slot]->data, len); + skb_put(dev->rx_sg_skb, len); + emac_recycle_rx_skb(dev, slot, len); + return 0; + } + } + emac_recycle_rx_skb(dev, slot, 0); + return -1; +} + +/* NAPI poll context */ +static int emac_poll_rx(void *param, int budget) +{ + struct emac_instance *dev = param; + int slot = dev->rx_slot, received = 0; + + DBG2(dev, "poll_rx(%d)" NL, budget); + + again: + while (budget > 0) { + int len; + struct sk_buff *skb; + u16 ctrl = dev->rx_desc[slot].ctrl; + + if (ctrl & MAL_RX_CTRL_EMPTY) + break; + + skb = dev->rx_skb[slot]; + mb(); + len = dev->rx_desc[slot].data_len; + + if (unlikely(!MAL_IS_SINGLE_RX(ctrl))) + goto sg; + + ctrl &= EMAC_BAD_RX_MASK; + if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) { + emac_parse_rx_error(dev, ctrl); + ++dev->estats.rx_dropped_error; + emac_recycle_rx_skb(dev, slot, 0); + len = 0; + goto next; + } + + if (len && len < EMAC_RX_COPY_THRESH) { + struct sk_buff *copy_skb = + alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC); + if (unlikely(!copy_skb)) + goto oom; + + skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2); + cacheable_memcpy(copy_skb->data - 2, skb->data - 2, + len + 2); + emac_recycle_rx_skb(dev, slot, len); + skb = copy_skb; + } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) + goto oom; + + skb_put(skb, len); + push_packet: + skb->dev = dev->ndev; + skb->protocol = eth_type_trans(skb, dev->ndev); + emac_rx_csum(dev, skb, ctrl); + + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + ++dev->estats.rx_dropped_stack; + next: + ++dev->stats.rx_packets; + skip: + dev->stats.rx_bytes += len; + slot = (slot + 1) % NUM_RX_BUFF; + --budget; + ++received; + continue; + sg: + if (ctrl & MAL_RX_CTRL_FIRST) { + BUG_ON(dev->rx_sg_skb); + if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) { + DBG(dev, "rx OOM %d" NL, slot); + ++dev->estats.rx_dropped_oom; + emac_recycle_rx_skb(dev, slot, 0); + } else { + dev->rx_sg_skb = skb; + skb_put(skb, len); + } + } else if (!emac_rx_sg_append(dev, slot) && + (ctrl & MAL_RX_CTRL_LAST)) { + + skb = dev->rx_sg_skb; + dev->rx_sg_skb = NULL; + + ctrl &= EMAC_BAD_RX_MASK; + if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) { + 
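+ /* Bad status on the last buffer of a scatter-gather packet: the whole reassembled skb is dropped, not just this fragment */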
emac_parse_rx_error(dev, ctrl); + ++dev->estats.rx_dropped_error; + dev_kfree_skb(skb); + len = 0; + } else + goto push_packet; + } + goto skip; + oom: + DBG(dev, "rx OOM %d" NL, slot); + /* Drop the packet and recycle skb */ + ++dev->estats.rx_dropped_oom; + emac_recycle_rx_skb(dev, slot, 0); + goto next; + } + + if (received) { + DBG2(dev, "rx %d BDs" NL, received); + dev->rx_slot = slot; + } + + if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) { + mb(); + if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) { + DBG2(dev, "rx restart" NL); + received = 0; + goto again; + } + + if (dev->rx_sg_skb) { + DBG2(dev, "dropping partial rx packet" NL); + ++dev->estats.rx_dropped_error; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } + + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_rx_enable(dev); + dev->rx_slot = 0; + } + return received; +} + +/* NAPI poll context */ +static int emac_peek_rx(void *param) +{ + struct emac_instance *dev = param; + + return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY); +} + +/* NAPI poll context */ +static int emac_peek_rx_sg(void *param) +{ + struct emac_instance *dev = param; + + int slot = dev->rx_slot; + while (1) { + u16 ctrl = dev->rx_desc[slot].ctrl; + if (ctrl & MAL_RX_CTRL_EMPTY) + return 0; + else if (ctrl & MAL_RX_CTRL_LAST) + return 1; + + slot = (slot + 1) % NUM_RX_BUFF; + + /* I'm just being paranoid here :) */ + if (unlikely(slot == dev->rx_slot)) + return 0; + } +} + +/* Hard IRQ */ +static void emac_rxde(void *param) +{ + struct emac_instance *dev = param; + + ++dev->estats.rx_stopped; + emac_rx_disable_async(dev); +} + +/* Hard IRQ */ +static irqreturn_t emac_irq(int irq, void *dev_instance) +{ + struct emac_instance *dev = dev_instance; + struct emac_regs __iomem *p = dev->emacp; + struct emac_error_stats *st = &dev->estats; + u32 isr; + + spin_lock(&dev->lock); + + isr = in_be32(&p->isr); + out_be32(&p->isr, isr); + + DBG(dev, "isr = %08x" NL, isr); + + if (isr & EMAC4_ISR_TXPE) + ++st->tx_parity; + if (isr & EMAC4_ISR_RXPE) + ++st->rx_parity; + if (isr & EMAC4_ISR_TXUE) + ++st->tx_underrun; + if (isr & EMAC4_ISR_RXOE) + ++st->rx_fifo_overrun; + if (isr & EMAC_ISR_OVR) + ++st->rx_overrun; + if (isr & EMAC_ISR_BP) + ++st->rx_bad_packet; + if (isr & EMAC_ISR_RP) + ++st->rx_runt_packet; + if (isr & EMAC_ISR_SE) + ++st->rx_short_event; + if (isr & EMAC_ISR_ALE) + ++st->rx_alignment_error; + if (isr & EMAC_ISR_BFCS) + ++st->rx_bad_fcs; + if (isr & EMAC_ISR_PTLE) + ++st->rx_packet_too_long; + if (isr & EMAC_ISR_ORE) + ++st->rx_out_of_range; + if (isr & EMAC_ISR_IRE) + ++st->rx_in_range; + if (isr & EMAC_ISR_SQE) + ++st->tx_sqe; + if (isr & EMAC_ISR_TE) + ++st->tx_errors; + + spin_unlock(&dev->lock); + + return IRQ_HANDLED; +} + +static struct net_device_stats *emac_stats(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct emac_stats *st = &dev->stats; + struct emac_error_stats *est = &dev->estats; + struct net_device_stats *nst = &dev->nstats; + unsigned long flags; + + DBG2(dev, "stats" NL); + + /* Compute "legacy" statistics */ + spin_lock_irqsave(&dev->lock, flags); + nst->rx_packets = (unsigned long)st->rx_packets; + nst->rx_bytes = (unsigned long)st->rx_bytes; + nst->tx_packets = (unsigned long)st->tx_packets; + nst->tx_bytes = (unsigned long)st->tx_bytes; + nst->rx_dropped = (unsigned long)(est->rx_dropped_oom + + est->rx_dropped_error + + est->rx_dropped_resize + + est->rx_dropped_mtu); + 
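+ /* tx_dropped counts BDs reclaimed by emac_clean_tx_ring() while still marked READY */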
nst->tx_dropped = (unsigned long)est->tx_dropped; + + nst->rx_errors = (unsigned long)est->rx_bd_errors; + nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun + + est->rx_fifo_overrun + + est->rx_overrun); + nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error + + est->rx_alignment_error); + nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs + + est->rx_bad_fcs); + nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet + + est->rx_bd_short_event + + est->rx_bd_packet_too_long + + est->rx_bd_out_of_range + + est->rx_bd_in_range + + est->rx_runt_packet + + est->rx_short_event + + est->rx_packet_too_long + + est->rx_out_of_range + + est->rx_in_range); + + nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors); + nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun + + est->tx_underrun); + nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss; + nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral + + est->tx_bd_excessive_collisions + + est->tx_bd_late_collision + + est->tx_bd_multple_collisions); + spin_unlock_irqrestore(&dev->lock, flags); + return nst; +} + +static struct mal_commac_ops emac_commac_ops = { + .poll_tx = &emac_poll_tx, + .poll_rx = &emac_poll_rx, + .peek_rx = &emac_peek_rx, + .rxde = &emac_rxde, +}; + +static struct mal_commac_ops emac_commac_sg_ops = { + .poll_tx = &emac_poll_tx, + .poll_rx = &emac_poll_rx, + .peek_rx = &emac_peek_rx_sg, + .rxde = &emac_rxde, +}; + +/* Ethtool support */ +static int emac_ethtool_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + + cmd->supported = dev->phy.features; + cmd->port = PORT_MII; + cmd->phy_address = dev->phy.address; + cmd->transceiver = + dev->phy.address >= 0 ? 
XCVR_EXTERNAL : XCVR_INTERNAL; + + mutex_lock(&dev->link_lock); + cmd->advertising = dev->phy.advertising; + cmd->autoneg = dev->phy.autoneg; + cmd->speed = dev->phy.speed; + cmd->duplex = dev->phy.duplex; + mutex_unlock(&dev->link_lock); + + return 0; +} + +static int emac_ethtool_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + u32 f = dev->phy.features; + + DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL, + cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); + + /* Basic sanity checks */ + if (dev->phy.address < 0) + return -EOPNOTSUPP; + if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) + return -EINVAL; + if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE) { + switch (cmd->speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_HALF + && !(f & SUPPORTED_10baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL + && !(f & SUPPORTED_10baseT_Full)) + return -EINVAL; + break; + case SPEED_100: + if (cmd->duplex == DUPLEX_HALF + && !(f & SUPPORTED_100baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL + && !(f & SUPPORTED_100baseT_Full)) + return -EINVAL; + break; + case SPEED_1000: + if (cmd->duplex == DUPLEX_HALF + && !(f & SUPPORTED_1000baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL + && !(f & SUPPORTED_1000baseT_Full)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + mutex_lock(&dev->link_lock); + dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed, + cmd->duplex); + mutex_unlock(&dev->link_lock); + + } else { + if (!(f & SUPPORTED_Autoneg)) + return -EINVAL; + + mutex_lock(&dev->link_lock); + dev->phy.def->ops->setup_aneg(&dev->phy, + (cmd->advertising & f) | + (dev->phy.advertising & + (ADVERTISED_Pause | + ADVERTISED_Asym_Pause))); + mutex_unlock(&dev->link_lock); + } + emac_force_link_update(dev); + + return 0; +} + +static void emac_ethtool_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *rp) +{ + rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF; + rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF; +} + +static void emac_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + struct emac_instance *dev = netdev_priv(ndev); + + mutex_lock(&dev->link_lock); + if ((dev->phy.features & SUPPORTED_Autoneg) && + (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause))) + pp->autoneg = 1; + + if (dev->phy.duplex == DUPLEX_FULL) { + if (dev->phy.pause) + pp->rx_pause = pp->tx_pause = 1; + else if (dev->phy.asym_pause) + pp->tx_pause = 1; + } + mutex_unlock(&dev->link_lock); +} + +static u32 emac_ethtool_get_rx_csum(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + return dev->tah_dev != 0; +} + +static int emac_get_regs_len(struct emac_instance *dev) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return sizeof(struct emac_ethtool_regs_subhdr) + + EMAC4_ETHTOOL_REGS_SIZE; + else + return sizeof(struct emac_ethtool_regs_subhdr) + + EMAC_ETHTOOL_REGS_SIZE; +} + +static int emac_ethtool_get_regs_len(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int size; + + size = sizeof(struct emac_ethtool_regs_hdr) + + emac_get_regs_len(dev) + mal_get_regs_len(dev->mal); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + size += zmii_get_regs_len(dev->zmii_dev); + if (emac_has_feature(dev, 
EMAC_FTR_HAS_RGMII)) + size += rgmii_get_regs_len(dev->rgmii_dev); + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + size += tah_get_regs_len(dev->tah_dev); + + return size; +} + +static void *emac_dump_regs(struct emac_instance *dev, void *buf) +{ + struct emac_ethtool_regs_subhdr *hdr = buf; + + hdr->index = dev->cell_index; + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { + hdr->version = EMAC4_ETHTOOL_REGS_VER; + memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE); + return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE); + } else { + hdr->version = EMAC_ETHTOOL_REGS_VER; + memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE); + return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE); + } +} + +static void emac_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *buf) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct emac_ethtool_regs_hdr *hdr = buf; + + hdr->components = 0; + buf = hdr + 1; + + buf = mal_dump_regs(dev->mal, buf); + buf = emac_dump_regs(dev, buf); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) { + hdr->components |= EMAC_ETHTOOL_REGS_ZMII; + buf = zmii_dump_regs(dev->zmii_dev, buf); + } + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) { + hdr->components |= EMAC_ETHTOOL_REGS_RGMII; + buf = rgmii_dump_regs(dev->rgmii_dev, buf); + } + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) { + hdr->components |= EMAC_ETHTOOL_REGS_TAH; + buf = tah_dump_regs(dev->tah_dev, buf); + } +} + +static int emac_ethtool_nway_reset(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int res = 0; + + DBG(dev, "nway_reset" NL); + + if (dev->phy.address < 0) + return -EOPNOTSUPP; + + mutex_lock(&dev->link_lock); + if (!dev->phy.autoneg) { + res = -EINVAL; + goto out; + } + + dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising); + out: + mutex_unlock(&dev->link_lock); + emac_force_link_update(dev); + return res; +} + +static int emac_ethtool_get_stats_count(struct net_device *ndev) +{ + return EMAC_ETHTOOL_STATS_COUNT; +} + +static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, + u8 * buf) +{ + if (stringset == ETH_SS_STATS) + memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys)); +} + +static void emac_ethtool_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, + u64 * tmp_stats) +{ + struct emac_instance *dev = netdev_priv(ndev); + + memcpy(tmp_stats, &dev->stats, sizeof(dev->stats)); + tmp_stats += sizeof(dev->stats) / sizeof(u64); + memcpy(tmp_stats, &dev->estats, sizeof(dev->estats)); +} + +static void emac_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct emac_instance *dev = netdev_priv(ndev); + + strcpy(info->driver, "ibm_emac"); + strcpy(info->version, DRV_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", + dev->cell_index, dev->ofdev->node->full_name); + info->n_stats = emac_ethtool_get_stats_count(ndev); + info->regdump_len = emac_ethtool_get_regs_len(ndev); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_settings = emac_ethtool_get_settings, + .set_settings = emac_ethtool_set_settings, + .get_drvinfo = emac_ethtool_get_drvinfo, + + .get_regs_len = emac_ethtool_get_regs_len, + .get_regs = emac_ethtool_get_regs, + + .nway_reset = emac_ethtool_nway_reset, + + .get_ringparam = emac_ethtool_get_ringparam, + .get_pauseparam = emac_ethtool_get_pauseparam, + + .get_rx_csum = emac_ethtool_get_rx_csum, + + .get_strings = emac_ethtool_get_strings, + .get_stats_count = 
emac_ethtool_get_stats_count, + .get_ethtool_stats = emac_ethtool_get_ethtool_stats, + + .get_link = ethtool_op_get_link, + .get_tx_csum = ethtool_op_get_tx_csum, + .get_sg = ethtool_op_get_sg, +}; + +static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + uint16_t *data = (uint16_t *) & rq->ifr_ifru; + + DBG(dev, "ioctl %08x" NL, cmd); + + if (dev->phy.address < 0) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCDEVPRIVATE: + data[0] = dev->phy.address; + /* Fall through */ + case SIOCGMIIREG: + case SIOCDEVPRIVATE + 1: + data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]); + return 0; + + case SIOCSMIIREG: + case SIOCDEVPRIVATE + 2: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + emac_mdio_write(ndev, dev->phy.address, data[1], data[2]); + return 0; + default: + return -EOPNOTSUPP; + } +} + +struct emac_depentry { + u32 phandle; + struct device_node *node; + struct of_device *ofdev; + void *drvdata; +}; + +#define EMAC_DEP_MAL_IDX 0 +#define EMAC_DEP_ZMII_IDX 1 +#define EMAC_DEP_RGMII_IDX 2 +#define EMAC_DEP_TAH_IDX 3 +#define EMAC_DEP_MDIO_IDX 4 +#define EMAC_DEP_PREV_IDX 5 +#define EMAC_DEP_COUNT 6 + +static int __devinit emac_check_deps(struct emac_instance *dev, + struct emac_depentry *deps) +{ + int i, there = 0; + struct device_node *np; + + for (i = 0; i < EMAC_DEP_COUNT; i++) { + /* no dependency on that item, allright */ + if (deps[i].phandle == 0) { + there++; + continue; + } + /* special case for blist as the dependency might go away */ + if (i == EMAC_DEP_PREV_IDX) { + np = *(dev->blist - 1); + if (np == NULL) { + deps[i].phandle = 0; + there++; + continue; + } + if (deps[i].node == NULL) + deps[i].node = of_node_get(np); + } + if (deps[i].node == NULL) + deps[i].node = of_find_node_by_phandle(deps[i].phandle); + if (deps[i].node == NULL) + continue; + if (deps[i].ofdev == NULL) + deps[i].ofdev = of_find_device_by_node(deps[i].node); + if (deps[i].ofdev == NULL) + continue; + if (deps[i].drvdata == NULL) + deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev); + if (deps[i].drvdata != NULL) + there++; + } + return (there == EMAC_DEP_COUNT); +} + +static void emac_put_deps(struct emac_instance *dev) +{ + if (dev->mal_dev) + of_dev_put(dev->mal_dev); + if (dev->zmii_dev) + of_dev_put(dev->zmii_dev); + if (dev->rgmii_dev) + of_dev_put(dev->rgmii_dev); + if (dev->mdio_dev) + of_dev_put(dev->mdio_dev); + if (dev->tah_dev) + of_dev_put(dev->tah_dev); +} + +static int __devinit emac_of_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + /* We are only intereted in device addition */ + if (action == BUS_NOTIFY_BOUND_DRIVER) + wake_up_all(&emac_probe_wait); + return 0; +} + +static struct notifier_block emac_of_bus_notifier = { + .notifier_call = emac_of_bus_notify +}; + +static int __devinit emac_wait_deps(struct emac_instance *dev) +{ + struct emac_depentry deps[EMAC_DEP_COUNT]; + int i, err; + + memset(&deps, 0, sizeof(deps)); + + deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph; + deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph; + deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph; + if (dev->tah_ph) + deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph; + if (dev->mdio_ph) + deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph; + if (dev->blist && dev->blist > emac_boot_list) + deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu; + bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier); + wait_event_timeout(emac_probe_wait, + emac_check_deps(dev, deps), + 
EMAC_PROBE_DEP_TIMEOUT); + bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier); + err = emac_check_deps(dev, deps) ? 0 : -ENODEV; + for (i = 0; i < EMAC_DEP_COUNT; i++) { + if (deps[i].node) + of_node_put(deps[i].node); + if (err && deps[i].ofdev) + of_dev_put(deps[i].ofdev); + } + if (err == 0) { + dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev; + dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev; + dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev; + dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev; + dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev; + } + if (deps[EMAC_DEP_PREV_IDX].ofdev) + of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); + return err; +} + +static int __devinit emac_read_uint_prop(struct device_node *np, const char *name, + u32 *val, int fatal) +{ + int len; + const u32 *prop = of_get_property(np, name, &len); + if (prop == NULL || len < sizeof(u32)) { + if (fatal) + printk(KERN_ERR "%s: missing %s property\n", + np->full_name, name); + return -ENODEV; + } + *val = *prop; + return 0; +} + +static int __devinit emac_init_phy(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->node; + struct net_device *ndev = dev->ndev; + u32 phy_map, adv; + int i; + + dev->phy.dev = ndev; + dev->phy.mode = dev->phy_mode; + + /* PHY-less configuration. + * XXX I probably should move these settings to the dev tree + */ + if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) { + emac_reset(dev); + + /* PHY-less configuration. + * XXX I probably should move these settings to the dev tree + */ + dev->phy.address = -1; + dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII; + dev->phy.pause = 1; + + return 0; + } + + mutex_lock(&emac_phy_map_lock); + phy_map = dev->phy_map | busy_phy_map; + + DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map); + + dev->phy.mdio_read = emac_mdio_read; + dev->phy.mdio_write = emac_mdio_write; + + /* Configure EMAC with defaults so we can at least use MDIO + * This is needed mostly for 440GX + */ + if (emac_phy_gpcs(dev->phy.mode)) { + /* XXX + * Make GPCS PHY address equal to EMAC index. + * We probably should take into account busy_phy_map + * and/or phy_map here. + * + * Note that the busy_phy_map is currently global + * while it should probably be per-ASIC... 
+ */ + dev->phy.address = dev->cell_index; + } + + emac_configure(dev); + + if (dev->phy_address != 0xffffffff) + phy_map = ~(1 << dev->phy_address); + + for (i = 0; i < 0x20; phy_map >>= 1, ++i) + if (!(phy_map & 1)) { + int r; + busy_phy_map |= 1 << i; + + /* Quick check if there is a PHY at the address */ + r = emac_mdio_read(dev->ndev, i, MII_BMCR); + if (r == 0xffff || r < 0) + continue; + if (!emac_mii_phy_probe(&dev->phy, i)) + break; + } + mutex_unlock(&emac_phy_map_lock); + if (i == 0x20) { + printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); + return -ENXIO; + } + + /* Init PHY */ + if (dev->phy.def->ops->init) + dev->phy.def->ops->init(&dev->phy); + + /* Disable any PHY features not supported by the platform */ + dev->phy.def->features &= ~dev->phy_feat_exc; + + /* Setup initial link parameters */ + if (dev->phy.features & SUPPORTED_Autoneg) { + adv = dev->phy.features; + if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x)) + adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + /* Restart autonegotiation */ + dev->phy.def->ops->setup_aneg(&dev->phy, adv); + } else { + u32 f = dev->phy.def->features; + int speed = SPEED_10, fd = DUPLEX_HALF; + + /* Select highest supported speed/duplex */ + if (f & SUPPORTED_1000baseT_Full) { + speed = SPEED_1000; + fd = DUPLEX_FULL; + } else if (f & SUPPORTED_1000baseT_Half) + speed = SPEED_1000; + else if (f & SUPPORTED_100baseT_Full) { + speed = SPEED_100; + fd = DUPLEX_FULL; + } else if (f & SUPPORTED_100baseT_Half) + speed = SPEED_100; + else if (f & SUPPORTED_10baseT_Full) + fd = DUPLEX_FULL; + + /* Force link parameters */ + dev->phy.def->ops->setup_forced(&dev->phy, speed, fd); + } + return 0; +} + +static int __devinit emac_init_config(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->node; + const void *p; + unsigned int plen; + const char *pm, *phy_modes[] = { + [PHY_MODE_NA] = "", + [PHY_MODE_MII] = "mii", + [PHY_MODE_RMII] = "rmii", + [PHY_MODE_SMII] = "smii", + [PHY_MODE_RGMII] = "rgmii", + [PHY_MODE_TBI] = "tbi", + [PHY_MODE_GMII] = "gmii", + [PHY_MODE_RTBI] = "rtbi", + [PHY_MODE_SGMII] = "sgmii", + }; + + /* Read config from device-tree */ + if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0)) + dev->max_mtu = 1500; + if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0)) + dev->rx_fifo_size = 2048; + if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0)) + dev->tx_fifo_size = 2048; + if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0)) + dev->rx_fifo_size_gige = dev->rx_fifo_size; + if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0)) + dev->tx_fifo_size_gige = dev->tx_fifo_size; + if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0)) + dev->phy_address = 0xffffffff; + if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0)) + dev->phy_map = 0xffffffff; + if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0)) + dev->tah_ph = 0; + if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0)) + dev->tah_ph = 0; + if (emac_read_uint_prop(np, "mdio-device", 
&dev->mdio_ph, 0)) + dev->mdio_ph = 0; + if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0)) + dev->zmii_ph = 0;; + if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0)) + dev->zmii_port = 0xffffffff;; + if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0)) + dev->rgmii_ph = 0;; + if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0)) + dev->rgmii_port = 0xffffffff;; + if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0)) + dev->fifo_entry_size = 16; + if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0)) + dev->mal_burst_size = 256; + + /* PHY mode needs some decoding */ + dev->phy_mode = PHY_MODE_NA; + pm = of_get_property(np, "phy-mode", &plen); + if (pm != NULL) { + int i; + for (i = 0; i < ARRAY_SIZE(phy_modes); i++) + if (!strcasecmp(pm, phy_modes[i])) { + dev->phy_mode = i; + break; + } + } + + /* Backward compat with non-final DT */ + if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) { + u32 nmode = *(const u32 *)pm; + if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII) + dev->phy_mode = nmode; + } + + /* Check EMAC version */ + if (of_device_is_compatible(np, "ibm,emac4")) + dev->features |= EMAC_FTR_EMAC4; + if (of_device_is_compatible(np, "ibm,emac-axon") + || of_device_is_compatible(np, "ibm,emac-440epx")) + dev->features |= EMAC_FTR_HAS_AXON_STACR + | EMAC_FTR_STACR_OC_INVERT; + if (of_device_is_compatible(np, "ibm,emac-440spe")) + dev->features |= EMAC_FTR_STACR_OC_INVERT; + + /* Fixup some feature bits based on the device tree and verify + * we have support for them compiled in + */ + if (dev->tah_ph != 0) { +#ifdef CONFIG_IBM_NEW_EMAC_TAH + dev->features |= EMAC_FTR_HAS_TAH; +#else + printk(KERN_ERR "%s: TAH support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + if (dev->zmii_ph != 0) { +#ifdef CONFIG_IBM_NEW_EMAC_ZMII + dev->features |= EMAC_FTR_HAS_ZMII; +#else + printk(KERN_ERR "%s: ZMII support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + if (dev->rgmii_ph != 0) { +#ifdef CONFIG_IBM_NEW_EMAC_RGMII + dev->features |= EMAC_FTR_HAS_RGMII; +#else + printk(KERN_ERR "%s: RGMII support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + /* Read MAC-address */ + p = of_get_property(np, "local-mac-address", NULL); + if (p == NULL) { + printk(KERN_ERR "%s: Can't find local-mac-address property\n", + np->full_name); + return -ENXIO; + } + memcpy(dev->ndev->dev_addr, p, 6); + + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); + DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); + DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); + DBG(dev, "max_mtu : %d\n", dev->max_mtu); + DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq); + + return 0; +} + +static int __devinit emac_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct net_device *ndev; + struct emac_instance *dev; + struct device_node *np = ofdev->node; + struct device_node **blist = NULL; + int err, i; + + /* Find ourselves in the bootlist if we are there */ + for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) + if (emac_boot_list[i] == np) + blist = &emac_boot_list[i]; + + /* Allocate our net_device structure */ + err = -ENOMEM; + ndev = alloc_etherdev(sizeof(struct emac_instance)); + if (!ndev) { + printk(KERN_ERR "%s: could not allocate ethernet device!\n", + np->full_name); + goto err_gone; + } + dev = netdev_priv(ndev); + dev->ndev = ndev; + dev->ofdev = 
ofdev; + dev->blist = blist; + SET_NETDEV_DEV(ndev, &ofdev->dev); + + /* Initialize some embedded data structures */ + mutex_init(&dev->mdio_lock); + mutex_init(&dev->link_lock); + spin_lock_init(&dev->lock); + INIT_WORK(&dev->reset_work, emac_reset_work); + + /* Init various config data based on device-tree */ + err = emac_init_config(dev); + if (err != 0) + goto err_free; + + /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */ + dev->emac_irq = irq_of_parse_and_map(np, 0); + dev->wol_irq = irq_of_parse_and_map(np, 1); + if (dev->emac_irq == NO_IRQ) { + printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); + goto err_free; + } + ndev->irq = dev->emac_irq; + + /* Map EMAC regs */ + if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_irq_unmap; + } + // TODO : request_mem_region + dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs)); + if (dev->emacp == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + err = -ENOMEM; + goto err_irq_unmap; + } + + /* Wait for dependent devices */ + err = emac_wait_deps(dev); + if (err) { + printk(KERN_ERR + "%s: Timeout waiting for dependent devices\n", + np->full_name); + /* display more info about what's missing ? */ + goto err_reg_unmap; + } + dev->mal = dev_get_drvdata(&dev->mal_dev->dev); + if (dev->mdio_dev != NULL) + dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev); + + /* Register with MAL */ + dev->commac.ops = &emac_commac_ops; + dev->commac.dev = dev; + dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan); + dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan); + err = mal_register_commac(dev->mal, &dev->commac); + if (err) { + printk(KERN_ERR "%s: failed to register with mal %s!\n", + np->full_name, dev->mal_dev->node->full_name); + goto err_rel_deps; + } + dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); + dev->rx_sync_size = emac_rx_sync_size(ndev->mtu); + + /* Get pointers to BD rings */ + dev->tx_desc = + dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan); + dev->rx_desc = + dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan); + + DBG(dev, "tx_desc %p" NL, dev->tx_desc); + DBG(dev, "rx_desc %p" NL, dev->rx_desc); + + /* Clean rings */ + memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor)); + memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor)); + + /* Attach to ZMII, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) && + (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0) + goto err_unreg_commac; + + /* Attach to RGMII, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) && + (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0) + goto err_detach_zmii; + + /* Attach to TAH, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) && + (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0) + goto err_detach_rgmii; + + /* Set some link defaults before we can find out real parameters */ + dev->phy.speed = SPEED_100; + dev->phy.duplex = DUPLEX_FULL; + dev->phy.autoneg = AUTONEG_DISABLE; + dev->phy.pause = dev->phy.asym_pause = 0; + dev->stop_timeout = STOP_TIMEOUT_100; + INIT_DELAYED_WORK(&dev->link_work, emac_link_timer); + + /* Find PHY if any */ + err = emac_init_phy(dev); + if (err != 0) + goto err_detach_tah; + + /* Fill in the driver function table */ + ndev->open = &emac_open; +#ifdef CONFIG_IBM_NEW_EMAC_TAH + if (dev->tah_dev) { + 
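+ /* A TAH is attached: use the scatter-gather xmit path and advertise SG and IP checksum offload */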
ndev->hard_start_xmit = &emac_start_xmit_sg; + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + } else +#endif + ndev->hard_start_xmit = &emac_start_xmit; + ndev->tx_timeout = &emac_tx_timeout; + ndev->watchdog_timeo = 5 * HZ; + ndev->stop = &emac_close; + ndev->get_stats = &emac_stats; + ndev->set_multicast_list = &emac_set_multicast_list; + ndev->do_ioctl = &emac_ioctl; + if (emac_phy_supports_gige(dev->phy_mode)) { + ndev->change_mtu = &emac_change_mtu; + dev->commac.ops = &emac_commac_sg_ops; + } + SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); + + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + err = register_netdev(ndev); + if (err) { + printk(KERN_ERR "%s: failed to register net device (%d)!\n", + np->full_name, err); + goto err_detach_tah; + } + + /* Set our drvdata last as we don't want them visible until we are + * fully initialized + */ + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + /* There's a new kid in town ! Let's tell everybody */ + wake_up_all(&emac_probe_wait); + + + printk(KERN_INFO + "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + ndev->name, dev->cell_index, np->full_name, + ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], + ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + + if (dev->phy.address >= 0) + printk("%s: found %s PHY (0x%02x)\n", ndev->name, + dev->phy.def->name, dev->phy.address); + + emac_dbg_register(dev); + + /* Life is good */ + return 0; + + /* I have a bad feeling about this ... */ + + err_detach_tah: + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_detach(dev->tah_dev, dev->tah_port); + err_detach_rgmii: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_detach(dev->rgmii_dev, dev->rgmii_port); + err_detach_zmii: + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_detach(dev->zmii_dev, dev->zmii_port); + err_unreg_commac: + mal_unregister_commac(dev->mal, &dev->commac); + err_rel_deps: + emac_put_deps(dev); + err_reg_unmap: + iounmap(dev->emacp); + err_irq_unmap: + if (dev->wol_irq != NO_IRQ) + irq_dispose_mapping(dev->wol_irq); + if (dev->emac_irq != NO_IRQ) + irq_dispose_mapping(dev->emac_irq); + err_free: + free_netdev(ndev); + err_gone: + /* if we were on the bootlist, remove us as we won't show up and + * wake up all waiters to notify them in case they were waiting + * on us + */ + if (blist) { + *blist = NULL; + wake_up_all(&emac_probe_wait); + } + return err; +} + +static int __devexit emac_remove(struct of_device *ofdev) +{ + struct emac_instance *dev = dev_get_drvdata(&ofdev->dev); + + DBG(dev, "remove" NL); + + dev_set_drvdata(&ofdev->dev, NULL); + + unregister_netdev(dev->ndev); + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_detach(dev->tah_dev, dev->tah_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_detach(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_detach(dev->zmii_dev, dev->zmii_port); + + mal_unregister_commac(dev->mal, &dev->commac); + emac_put_deps(dev); + + emac_dbg_unregister(dev); + iounmap(dev->emacp); + + if (dev->wol_irq != NO_IRQ) + irq_dispose_mapping(dev->wol_irq); + if (dev->emac_irq != NO_IRQ) + irq_dispose_mapping(dev->emac_irq); + + free_netdev(dev->ndev); + + return 0; +} + +/* XXX Features in here should be replaced by properties...
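+ (for instance, EMAC_FTR_EMAC4 and the STACR access quirks are currently inferred from compatible strings in emac_init_config() rather than from dedicated properties)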
*/ +static struct of_device_id emac_match[] = +{ + { + .type = "network", + .compatible = "ibm,emac", + }, + { + .type = "network", + .compatible = "ibm,emac4", + }, + {}, +}; + +static struct of_platform_driver emac_driver = { + .name = "emac", + .match_table = emac_match, + + .probe = emac_probe, + .remove = emac_remove, +}; + +static void __init emac_make_bootlist(void) +{ + struct device_node *np = NULL; + int j, max, i = 0, k; + int cell_indices[EMAC_BOOT_LIST_SIZE]; + + /* Collect EMACs */ + while((np = of_find_all_nodes(np)) != NULL) { + const u32 *idx; + + if (of_match_node(emac_match, np) == NULL) + continue; + if (of_get_property(np, "unused", NULL)) + continue; + idx = of_get_property(np, "cell-index", NULL); + if (idx == NULL) + continue; + cell_indices[i] = *idx; + emac_boot_list[i++] = of_node_get(np); + if (i >= EMAC_BOOT_LIST_SIZE) { + of_node_put(np); + break; + } + } + max = i; + + /* Bubble sort them (doh, what a creative algorithm :-) */ + for (i = 0; max > 1 && (i < (max - 1)); i++) + for (j = i; j < max; j++) { + if (cell_indices[i] > cell_indices[j]) { + np = emac_boot_list[i]; + emac_boot_list[i] = emac_boot_list[j]; + emac_boot_list[j] = np; + k = cell_indices[i]; + cell_indices[i] = cell_indices[j]; + cell_indices[j] = k; + } + } +} + +static int __init emac_init(void) +{ + int rc; + + printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n"); + + /* Init debug stuff */ + emac_init_debug(); + + /* Build EMAC boot list */ + emac_make_bootlist(); + + /* Init submodules */ + rc = mal_init(); + if (rc) + goto err; + rc = zmii_init(); + if (rc) + goto err_mal; + rc = rgmii_init(); + if (rc) + goto err_zmii; + rc = tah_init(); + if (rc) + goto err_rgmii; + rc = of_register_platform_driver(&emac_driver); + if (rc) + goto err_tah; + + return 0; + + err_tah: + tah_exit(); + err_rgmii: + rgmii_exit(); + err_zmii: + zmii_exit(); + err_mal: + mal_exit(); + err: + return rc; +} + +static void __exit emac_exit(void) +{ + int i; + + of_unregister_platform_driver(&emac_driver); + + tah_exit(); + rgmii_exit(); + zmii_exit(); + mal_exit(); + emac_fini_debug(); + + /* Destroy EMAC boot list */ + for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) + if (emac_boot_list[i]) + of_node_put(emac_boot_list[i]); +} + +module_init(emac_init); +module_exit(emac_exit); diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h new file mode 100644 index 000000000000..4011803117ca --- /dev/null +++ b/drivers/net/ibm_newemac/core.h @@ -0,0 +1,355 @@ +/* + * drivers/net/ibm_newemac/core.h + * + * Driver for PowerPC 4xx on-chip ethernet controller. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Armin Kuster <akuster@mvista.com> + * Johnnie Peters <jpeters@mvista.com> + * Copyright 2000, 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_CORE_H +#define __IBM_NEWEMAC_CORE_H + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> + +#include <asm/of_platform.h> +#include <asm/io.h> +#include <asm/dcr.h> + +#include "emac.h" +#include "phy.h" +#include "zmii.h" +#include "rgmii.h" +#include "mal.h" +#include "tah.h" +#include "debug.h" + +#define NUM_TX_BUFF CONFIG_IBM_NEW_EMAC_TXB +#define NUM_RX_BUFF CONFIG_IBM_NEW_EMAC_RXB + +/* Simple sanity check */ +#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256 +#error Invalid number of buffer descriptors (greater than 256) +#endif + +#define EMAC_MIN_MTU 46 + +/* Maximum L2 header length (VLAN tagged, no FCS) */ +#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4) + +/* RX BD size for the given MTU */ +static inline int emac_rx_size(int mtu) +{ + if (mtu > ETH_DATA_LEN) + return MAL_MAX_RX_SIZE; + else + return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD); +} + +#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment()) + +#define EMAC_RX_SKB_HEADROOM \ + EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM) + +/* Size of RX skb for the given MTU */ +static inline int emac_rx_skb_size(int mtu) +{ + int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu)); + return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM; +} + +/* RX DMA sync size */ +static inline int emac_rx_sync_size(int mtu) +{ + return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2); +} + +/* Driver statistcs is split into two parts to make it more cache friendly: + * - normal statistics (packet count, etc) + * - error statistics + * + * When statistics is requested by ethtool, these parts are concatenated, + * normal one goes first. + * + * Please, keep these structures in sync with emac_stats_keys. 
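+ *
+ * As a sketch only (not the driver's actual ethtool code), the
+ * concatenation boils down to copying both blocks back to back into
+ * the u64 array ('buf') that ethtool supplies, with 'dev' being the
+ * emac_instance:
+ *
+ *	memcpy(buf, &dev->stats, sizeof(struct emac_stats));
+ *	memcpy(buf + sizeof(struct emac_stats) / sizeof(u64),
+ *	       &dev->estats, sizeof(struct emac_error_stats));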
+ */ + +/* Normal TX/RX Statistics */ +struct emac_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets_csum; + u64 tx_packets_csum; +}; + +/* Error statistics */ +struct emac_error_stats { + u64 tx_undo; + + /* Software RX Errors */ + u64 rx_dropped_stack; + u64 rx_dropped_oom; + u64 rx_dropped_error; + u64 rx_dropped_resize; + u64 rx_dropped_mtu; + u64 rx_stopped; + /* BD reported RX errors */ + u64 rx_bd_errors; + u64 rx_bd_overrun; + u64 rx_bd_bad_packet; + u64 rx_bd_runt_packet; + u64 rx_bd_short_event; + u64 rx_bd_alignment_error; + u64 rx_bd_bad_fcs; + u64 rx_bd_packet_too_long; + u64 rx_bd_out_of_range; + u64 rx_bd_in_range; + /* EMAC IRQ reported RX errors */ + u64 rx_parity; + u64 rx_fifo_overrun; + u64 rx_overrun; + u64 rx_bad_packet; + u64 rx_runt_packet; + u64 rx_short_event; + u64 rx_alignment_error; + u64 rx_bad_fcs; + u64 rx_packet_too_long; + u64 rx_out_of_range; + u64 rx_in_range; + + /* Software TX Errors */ + u64 tx_dropped; + /* BD reported TX errors */ + u64 tx_bd_errors; + u64 tx_bd_bad_fcs; + u64 tx_bd_carrier_loss; + u64 tx_bd_excessive_deferral; + u64 tx_bd_excessive_collisions; + u64 tx_bd_late_collision; + u64 tx_bd_multple_collisions; + u64 tx_bd_single_collision; + u64 tx_bd_underrun; + u64 tx_bd_sqe; + /* EMAC IRQ reported TX errors */ + u64 tx_parity; + u64 tx_underrun; + u64 tx_sqe; + u64 tx_errors; +}; + +#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \ + sizeof(struct emac_error_stats)) \ + / sizeof(u64)) + +struct emac_instance { + struct net_device *ndev; + struct resource rsrc_regs; + struct emac_regs __iomem *emacp; + struct of_device *ofdev; + struct device_node **blist; /* bootlist entry */ + + /* MAL linkage */ + u32 mal_ph; + struct of_device *mal_dev; + u32 mal_rx_chan; + u32 mal_tx_chan; + struct mal_instance *mal; + struct mal_commac commac; + + /* PHY infos */ + u32 phy_mode; + u32 phy_map; + u32 phy_address; + u32 phy_feat_exc; + struct mii_phy phy; + struct mutex link_lock; + struct delayed_work link_work; + int link_polling; + + /* Shared MDIO if any */ + u32 mdio_ph; + struct of_device *mdio_dev; + struct emac_instance *mdio_instance; + struct mutex mdio_lock; + + /* ZMII infos if any */ + u32 zmii_ph; + u32 zmii_port; + struct of_device *zmii_dev; + + /* RGMII infos if any */ + u32 rgmii_ph; + u32 rgmii_port; + struct of_device *rgmii_dev; + + /* TAH infos if any */ + u32 tah_ph; + u32 tah_port; + struct of_device *tah_dev; + + /* IRQs */ + int wol_irq; + int emac_irq; + + /* OPB bus frequency in Mhz */ + u32 opb_bus_freq; + + /* Cell index within an ASIC (for clk mgmnt) */ + u32 cell_index; + + /* Max supported MTU */ + u32 max_mtu; + + /* Feature bits (from probe table) */ + unsigned int features; + + /* Tx and Rx fifo sizes & other infos in bytes */ + u32 tx_fifo_size; + u32 tx_fifo_size_gige; + u32 rx_fifo_size; + u32 rx_fifo_size_gige; + u32 fifo_entry_size; + u32 mal_burst_size; /* move to MAL ? 
*/ + + /* Descriptor management + */ + struct mal_descriptor *tx_desc; + int tx_cnt; + int tx_slot; + int ack_slot; + + struct mal_descriptor *rx_desc; + int rx_slot; + struct sk_buff *rx_sg_skb; /* 1 */ + int rx_skb_size; + int rx_sync_size; + + struct sk_buff *tx_skb[NUM_TX_BUFF]; + struct sk_buff *rx_skb[NUM_RX_BUFF]; + + /* Stats + */ + struct emac_error_stats estats; + struct net_device_stats nstats; + struct emac_stats stats; + + /* Misc + */ + int reset_failed; + int stop_timeout; /* in us */ + int no_mcast; + int mcast_pending; + struct work_struct reset_work; + spinlock_t lock; +}; + +/* + * Features of various EMAC implementations + */ + +/* + * No flow control on 40x according to the original driver + */ +#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001 +/* + * Cell is an EMAC4 + */ +#define EMAC_FTR_EMAC4 0x00000002 +/* + * For the 440SPe, AMCC inexplicably changed the polarity of + * the "operation complete" bit in the MII control register. + */ +#define EMAC_FTR_STACR_OC_INVERT 0x00000004 +/* + * Set if we have a TAH. + */ +#define EMAC_FTR_HAS_TAH 0x00000008 +/* + * Set if we have a ZMII. + */ +#define EMAC_FTR_HAS_ZMII 0x00000010 +/* + * Set if we have a RGMII. + */ +#define EMAC_FTR_HAS_RGMII 0x00000020 +/* + * Set if we have axon-type STACR + */ +#define EMAC_FTR_HAS_AXON_STACR 0x00000040 + + +/* Right now, we don't quite handle the always/possible masks on the + * most optimal way as we don't have a way to say something like + * always EMAC4. Patches welcome. + */ +enum { + EMAC_FTRS_ALWAYS = 0, + + EMAC_FTRS_POSSIBLE = +#ifdef CONFIG_IBM_NEW_EMAC_EMAC4 + EMAC_FTR_EMAC4 | EMAC_FTR_HAS_AXON_STACR | + EMAC_FTR_STACR_OC_INVERT | +#endif +#ifdef CONFIG_IBM_NEW_EMAC_TAH + EMAC_FTR_HAS_TAH | +#endif +#ifdef CONFIG_IBM_NEW_EMAC_ZMII + EMAC_FTR_HAS_ZMII | +#endif +#ifdef CONFIG_IBM_NEW_EMAC_RGMII + EMAC_FTR_HAS_RGMII | +#endif + 0, +}; + +static inline int emac_has_feature(struct emac_instance *dev, + unsigned long feature) +{ + return (EMAC_FTRS_ALWAYS & feature) || + (EMAC_FTRS_POSSIBLE & dev->features & feature); +} + + +/* Ethtool get_regs complex data. + * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH + * when available. + * + * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr, + * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers. + * Each register component is preceded with emac_ethtool_regs_subhdr. + * Order of the optional headers follows their relative bit posititions + * in emac_ethtool_regs_hdr.components + */ +#define EMAC_ETHTOOL_REGS_ZMII 0x00000001 +#define EMAC_ETHTOOL_REGS_RGMII 0x00000002 +#define EMAC_ETHTOOL_REGS_TAH 0x00000004 + +struct emac_ethtool_regs_hdr { + u32 components; +}; + +struct emac_ethtool_regs_subhdr { + u32 version; + u32 index; +}; + +#endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c new file mode 100644 index 000000000000..170524ee0f19 --- /dev/null +++ b/drivers/net/ibm_newemac/debug.c @@ -0,0 +1,238 @@ +/* + * drivers/net/ibm_newemac/debug.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. + * + * Copyright (c) 2004, 2005 Zultys Technologies + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/sysrq.h> +#include <asm/io.h> + +#include "core.h" + +static spinlock_t emac_dbg_lock = SPIN_LOCK_UNLOCKED; + +static void emac_desc_dump(struct emac_instance *p) +{ + int i; + printk("** EMAC %s TX BDs **\n" + " tx_cnt = %d tx_slot = %d ack_slot = %d\n", + p->ofdev->node->full_name, + p->tx_cnt, p->tx_slot, p->ack_slot); + for (i = 0; i < NUM_TX_BUFF / 2; ++i) + printk + ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", + i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ', + p->tx_desc[i].ctrl, p->tx_desc[i].data_len, + NUM_TX_BUFF / 2 + i, + p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr, + p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ', + p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl, + p->tx_desc[NUM_TX_BUFF / 2 + i].data_len); + + printk("** EMAC %s RX BDs **\n" + " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" + " rx_sg_skb = 0x%p\n", + p->ofdev->node->full_name, + p->rx_slot, p->commac.flags, p->rx_skb_size, + p->rx_sync_size, p->rx_sg_skb); + for (i = 0; i < NUM_RX_BUFF / 2; ++i) + printk + ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", + i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ', + p->rx_desc[i].ctrl, p->rx_desc[i].data_len, + NUM_RX_BUFF / 2 + i, + p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr, + p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ', + p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl, + p->rx_desc[NUM_RX_BUFF / 2 + i].data_len); +} + +static void emac_mac_dump(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + + printk("** EMAC %s registers **\n" + "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" + "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" + "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n" + "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x " + "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n" + "LSA = %04x%08x IPGVR = 0x%04x\n" + "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" + "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n", + dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), + in_be32(&p->tmr0), in_be32(&p->tmr1), + in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), + in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), + in_be32(&p->vtci), + in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3), + in_be32(&p->iaht4), + in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3), + in_be32(&p->gaht4), + in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), + in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), + in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr) + ); + + emac_desc_dump(dev); +} + +static void emac_mal_dump(struct mal_instance *mal) +{ + int i; + + printk("** MAL %s Registers **\n" + "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" + "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" + "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", + mal->ofdev->node->full_name, + get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), + get_mal_dcrn(mal, MAL_IER), + get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), + get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR), + get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR), + get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR) + ); + + printk("TX|"); + for (i = 0; i < mal->num_tx_chans; ++i) { + if (i && !(i % 4)) + printk("\n "); + printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i))); + } + printk("\nRX|"); + for (i = 0; i < 
mal->num_rx_chans; ++i) { + if (i && !(i % 4)) + printk("\n "); + printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i))); + } + printk("\n "); + for (i = 0; i < mal->num_rx_chans; ++i) { + u32 r = get_mal_dcrn(mal, MAL_RCBS(i)); + if (i && !(i % 3)) + printk("\n "); + printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16); + } + printk("\n"); +} + +static struct emac_instance *__emacs[4]; +static struct mal_instance *__mals[1]; + +void emac_dbg_register(struct emac_instance *dev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__emacs); i++) + if (__emacs[i] == NULL) { + __emacs[i] = dev; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void emac_dbg_unregister(struct emac_instance *dev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__emacs); i++) + if (__emacs[i] == dev) { + __emacs[i] = NULL; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void mal_dbg_register(struct mal_instance *mal) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__mals); i++) + if (__mals[i] == NULL) { + __mals[i] = mal; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void mal_dbg_unregister(struct mal_instance *mal) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__mals); i++) + if (__mals[i] == mal) { + __mals[i] = NULL; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void emac_dbg_dump_all(void) +{ + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&emac_dbg_lock, flags); + + for (i = 0; i < ARRAY_SIZE(__mals); ++i) + if (__mals[i]) + emac_mal_dump(__mals[i]); + + for (i = 0; i < ARRAY_SIZE(__emacs); ++i) + if (__emacs[i]) + emac_mac_dump(__emacs[i]); + + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +#if defined(CONFIG_MAGIC_SYSRQ) +static void emac_sysrq_handler(int key, struct tty_struct *tty) +{ + emac_dbg_dump_all(); +} + +static struct sysrq_key_op emac_sysrq_op = { + .handler = emac_sysrq_handler, + .help_msg = "emaC", + .action_msg = "Show EMAC(s) status", +}; + +int __init emac_init_debug(void) +{ + return register_sysrq_key('c', &emac_sysrq_op); +} + +void __exit emac_fini_debug(void) +{ + unregister_sysrq_key('c', &emac_sysrq_op); +} + +#else +int __init emac_init_debug(void) +{ + return 0; +} +void __exit emac_fini_debug(void) +{ +} +#endif /* CONFIG_MAGIC_SYSRQ */ diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h new file mode 100644 index 000000000000..1dd2dcbc157f --- /dev/null +++ b/drivers/net/ibm_newemac/debug.h @@ -0,0 +1,78 @@ +/* + * drivers/net/ibm_newemac/debug.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. + * + * Copyright (c) 2004, 2005 Zultys Technologies + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_DEBUG_H +#define __IBM_NEWEMAC_DEBUG_H + +#include <linux/init.h> + +#include "core.h" + +#if defined(CONFIG_IBM_NEW_EMAC_DEBUG) + +struct emac_instance; +struct mal_instance; + +extern void emac_dbg_register(struct emac_instance *dev); +extern void emac_dbg_unregister(struct emac_instance *dev); +extern void mal_dbg_register(struct mal_instance *mal); +extern void mal_dbg_unregister(struct mal_instance *mal); +extern int emac_init_debug(void) __init; +extern void emac_fini_debug(void) __exit; +extern void emac_dbg_dump_all(void); + +# define DBG_LEVEL 1 + +#else + +# define emac_dbg_register(x) do { } while(0) +# define emac_dbg_unregister(x) do { } while(0) +# define mal_dbg_register(x) do { } while(0) +# define mal_dbg_unregister(x) do { } while(0) +# define emac_init_debug() do { } while(0) +# define emac_fini_debug() do { } while(0) +# define emac_dbg_dump_all() do { } while(0) + +# define DBG_LEVEL 0 + +#endif + +#define EMAC_DBG(dev, name, fmt, arg...) \ + printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg) + +#if DBG_LEVEL > 0 +# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) +# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x) +# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x) +# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x) +# define NL "\n" +#else +# define DBG(f,x...) ((void)0) +# define MAL_DBG(d,f,x...) ((void)0) +# define ZMII_DBG(d,f,x...) ((void)0) +# define RGMII_DBG(d,f,x...) ((void)0) +#endif +#if DBG_LEVEL > 1 +# define DBG2(d,f,x...) DBG(d,f, ##x) +# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x) +# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x) +# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x) +#else +# define DBG2(f,x...) ((void)0) +# define MAL_DBG2(d,f,x...) ((void)0) +# define ZMII_DBG2(d,f,x...) ((void)0) +# define RGMII_DBG2(d,f,x...) ((void)0) +#endif + +#endif /* __IBM_NEWEMAC_DEBUG_H */ diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h new file mode 100644 index 000000000000..bef92efeeadc --- /dev/null +++ b/drivers/net/ibm_newemac/emac.h @@ -0,0 +1,268 @@ +/* + * drivers/net/ibm_newemac/emac.h + * + * Register definitions for PowerPC 4xx on-chip ethernet contoller + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Matt Porter <mporter@kernel.crashing.org> + * Armin Kuster <akuster@mvista.com> + * Copyright 2002-2004 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_H +#define __IBM_NEWEMAC_H + +#include <linux/types.h> + +/* EMAC registers Write Access rules */ +struct emac_regs { + u32 mr0; /* special */ + u32 mr1; /* Reset */ + u32 tmr0; /* special */ + u32 tmr1; /* special */ + u32 rmr; /* Reset */ + u32 isr; /* Always */ + u32 iser; /* Reset */ + u32 iahr; /* Reset, R, T */ + u32 ialr; /* Reset, R, T */ + u32 vtpid; /* Reset, R, T */ + u32 vtci; /* Reset, R, T */ + u32 ptr; /* Reset, T */ + u32 iaht1; /* Reset, R */ + u32 iaht2; /* Reset, R */ + u32 iaht3; /* Reset, R */ + u32 iaht4; /* Reset, R */ + u32 gaht1; /* Reset, R */ + u32 gaht2; /* Reset, R */ + u32 gaht3; /* Reset, R */ + u32 gaht4; /* Reset, R */ + u32 lsah; + u32 lsal; + u32 ipgvr; /* Reset, T */ + u32 stacr; /* special */ + u32 trtr; /* special */ + u32 rwmr; /* Reset */ + u32 octx; + u32 ocrx; + u32 ipcr; +}; + +/* + * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY) + */ +#define PHY_MODE_NA 0 +#define PHY_MODE_MII 1 +#define PHY_MODE_RMII 2 +#define PHY_MODE_SMII 3 +#define PHY_MODE_RGMII 4 +#define PHY_MODE_TBI 5 +#define PHY_MODE_GMII 6 +#define PHY_MODE_RTBI 7 +#define PHY_MODE_SGMII 8 + + +#define EMAC_ETHTOOL_REGS_VER 0 +#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32)) +#define EMAC4_ETHTOOL_REGS_VER 1 +#define EMAC4_ETHTOOL_REGS_SIZE sizeof(struct emac_regs) + +/* EMACx_MR0 */ +#define EMAC_MR0_RXI 0x80000000 +#define EMAC_MR0_TXI 0x40000000 +#define EMAC_MR0_SRST 0x20000000 +#define EMAC_MR0_TXE 0x10000000 +#define EMAC_MR0_RXE 0x08000000 +#define EMAC_MR0_WKE 0x04000000 + +/* EMACx_MR1 */ +#define EMAC_MR1_FDE 0x80000000 +#define EMAC_MR1_ILE 0x40000000 +#define EMAC_MR1_VLE 0x20000000 +#define EMAC_MR1_EIFC 0x10000000 +#define EMAC_MR1_APP 0x08000000 +#define EMAC_MR1_IST 0x01000000 + +#define EMAC_MR1_MF_MASK 0x00c00000 +#define EMAC_MR1_MF_10 0x00000000 +#define EMAC_MR1_MF_100 0x00400000 +#define EMAC_MR1_MF_1000 0x00800000 +#define EMAC_MR1_MF_1000GPCS 0x00c00000 +#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6) + +#define EMAC_MR1_RFS_4K 0x00300000 +#define EMAC_MR1_RFS_16K 0x00000000 +#define EMAC_MR1_TFS_2K 0x00080000 +#define EMAC_MR1_TR0_MULT 0x00008000 +#define EMAC_MR1_JPSM 0x00000000 +#define EMAC_MR1_MWSW_001 0x00000000 +#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT) + + +#define EMAC4_MR1_RFS_2K 0x00100000 +#define EMAC4_MR1_RFS_4K 0x00180000 +#define EMAC4_MR1_RFS_16K 0x00280000 +#define EMAC4_MR1_TFS_2K 0x00020000 +#define EMAC4_MR1_TFS_4K 0x00030000 +#define EMAC4_MR1_TR 0x00008000 +#define EMAC4_MR1_MWSW_001 0x00001000 +#define EMAC4_MR1_JPSM 0x00000800 +#define EMAC4_MR1_OBCI_MASK 0x00000038 +#define EMAC4_MR1_OBCI_50 0x00000000 +#define EMAC4_MR1_OBCI_66 0x00000008 +#define EMAC4_MR1_OBCI_83 0x00000010 +#define EMAC4_MR1_OBCI_100 0x00000018 +#define EMAC4_MR1_OBCI_100P 0x00000020 +#define EMAC4_MR1_OBCI(freq) ((freq) <= 50 ? EMAC4_MR1_OBCI_50 : \ + (freq) <= 66 ? EMAC4_MR1_OBCI_66 : \ + (freq) <= 83 ? EMAC4_MR1_OBCI_83 : \ + (freq) <= 100 ? 
EMAC4_MR1_OBCI_100 : \ + EMAC4_MR1_OBCI_100P) + +/* EMACx_TMR0 */ +#define EMAC_TMR0_GNP 0x80000000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC4_TMR0_TFAE_2_32 0x00000001 +#define EMAC4_TMR0_TFAE_4_64 0x00000002 +#define EMAC4_TMR0_TFAE_8_128 0x00000003 +#define EMAC4_TMR0_TFAE_16_256 0x00000004 +#define EMAC4_TMR0_TFAE_32_512 0x00000005 +#define EMAC4_TMR0_TFAE_64_1024 0x00000006 +#define EMAC4_TMR0_TFAE_128_2048 0x00000007 +#define EMAC4_TMR0_DEFAULT EMAC4_TMR0_TFAE_2_32 +#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT) +#define EMAC4_TMR0_XMIT (EMAC_TMR0_GNP | EMAC4_TMR0_DEFAULT) + +/* EMACx_TMR1 */ + +#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16)) +#define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14)) + +/* EMACx_RMR */ +#define EMAC_RMR_SP 0x80000000 +#define EMAC_RMR_SFCS 0x40000000 +#define EMAC_RMR_RRP 0x20000000 +#define EMAC_RMR_RFP 0x10000000 +#define EMAC_RMR_ROP 0x08000000 +#define EMAC_RMR_RPIR 0x04000000 +#define EMAC_RMR_PPP 0x02000000 +#define EMAC_RMR_PME 0x01000000 +#define EMAC_RMR_PMME 0x00800000 +#define EMAC_RMR_IAE 0x00400000 +#define EMAC_RMR_MIAE 0x00200000 +#define EMAC_RMR_BAE 0x00100000 +#define EMAC_RMR_MAE 0x00080000 +#define EMAC_RMR_BASE 0x00000000 +#define EMAC4_RMR_RFAF_2_32 0x00000001 +#define EMAC4_RMR_RFAF_4_64 0x00000002 +#define EMAC4_RMR_RFAF_8_128 0x00000003 +#define EMAC4_RMR_RFAF_16_256 0x00000004 +#define EMAC4_RMR_RFAF_32_512 0x00000005 +#define EMAC4_RMR_RFAF_64_1024 0x00000006 +#define EMAC4_RMR_RFAF_128_2048 0x00000007 +#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048 + +/* EMACx_ISR & EMACx_ISER */ +#define EMAC4_ISR_TXPE 0x20000000 +#define EMAC4_ISR_RXPE 0x10000000 +#define EMAC4_ISR_TXUE 0x08000000 +#define EMAC4_ISR_RXOE 0x04000000 +#define EMAC_ISR_OVR 0x02000000 +#define EMAC_ISR_PP 0x01000000 +#define EMAC_ISR_BP 0x00800000 +#define EMAC_ISR_RP 0x00400000 +#define EMAC_ISR_SE 0x00200000 +#define EMAC_ISR_ALE 0x00100000 +#define EMAC_ISR_BFCS 0x00080000 +#define EMAC_ISR_PTLE 0x00040000 +#define EMAC_ISR_ORE 0x00020000 +#define EMAC_ISR_IRE 0x00010000 +#define EMAC_ISR_SQE 0x00000080 +#define EMAC_ISR_TE 0x00000040 +#define EMAC_ISR_MOS 0x00000002 +#define EMAC_ISR_MOF 0x00000001 + +/* EMACx_STACR */ +#define EMAC_STACR_PHYD_MASK 0xffff +#define EMAC_STACR_PHYD_SHIFT 16 +#define EMAC_STACR_OC 0x00008000 +#define EMAC_STACR_PHYE 0x00004000 +#define EMAC_STACR_STAC_MASK 0x00003000 +#define EMAC_STACR_STAC_READ 0x00001000 +#define EMAC_STACR_STAC_WRITE 0x00002000 +#define EMAC_STACR_OPBC_MASK 0x00000C00 +#define EMAC_STACR_OPBC_50 0x00000000 +#define EMAC_STACR_OPBC_66 0x00000400 +#define EMAC_STACR_OPBC_83 0x00000800 +#define EMAC_STACR_OPBC_100 0x00000C00 +#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \ + (freq) <= 66 ? EMAC_STACR_OPBC_66 : \ + (freq) <= 83 ? 
EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100) +#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb) +#define EMAC4_STACR_BASE(opb) 0x00000000 +#define EMAC_STACR_PCDA_MASK 0x1f +#define EMAC_STACR_PCDA_SHIFT 5 +#define EMAC_STACR_PRA_MASK 0x1f +#define EMACX_STACR_STAC_MASK 0x00003800 +#define EMACX_STACR_STAC_READ 0x00001000 +#define EMACX_STACR_STAC_WRITE 0x00000800 +#define EMACX_STACR_STAC_IND_ADDR 0x00002000 +#define EMACX_STACR_STAC_IND_READ 0x00003800 +#define EMACX_STACR_STAC_IND_READINC 0x00003000 +#define EMACX_STACR_STAC_IND_WRITE 0x00002800 + + +/* EMACx_TRTR */ +#define EMAC_TRTR_SHIFT_EMAC4 27 +#define EMAC_TRTR_SHIFT 24 + +/* EMAC specific TX descriptor control fields (write access) */ +#define EMAC_TX_CTRL_GFCS 0x0200 +#define EMAC_TX_CTRL_GP 0x0100 +#define EMAC_TX_CTRL_ISA 0x0080 +#define EMAC_TX_CTRL_RSA 0x0040 +#define EMAC_TX_CTRL_IVT 0x0020 +#define EMAC_TX_CTRL_RVT 0x0010 +#define EMAC_TX_CTRL_TAH_CSUM 0x000e + +/* EMAC specific TX descriptor status fields (read access) */ +#define EMAC_TX_ST_BFCS 0x0200 +#define EMAC_TX_ST_LCS 0x0080 +#define EMAC_TX_ST_ED 0x0040 +#define EMAC_TX_ST_EC 0x0020 +#define EMAC_TX_ST_LC 0x0010 +#define EMAC_TX_ST_MC 0x0008 +#define EMAC_TX_ST_SC 0x0004 +#define EMAC_TX_ST_UR 0x0002 +#define EMAC_TX_ST_SQE 0x0001 +#define EMAC_IS_BAD_TX (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \ + EMAC_TX_ST_EC | EMAC_TX_ST_LC | \ + EMAC_TX_ST_MC | EMAC_TX_ST_UR) +#define EMAC_IS_BAD_TX_TAH (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \ + EMAC_TX_ST_EC | EMAC_TX_ST_LC) + +/* EMAC specific RX descriptor status fields (read access) */ +#define EMAC_RX_ST_OE 0x0200 +#define EMAC_RX_ST_PP 0x0100 +#define EMAC_RX_ST_BP 0x0080 +#define EMAC_RX_ST_RP 0x0040 +#define EMAC_RX_ST_SE 0x0020 +#define EMAC_RX_ST_AE 0x0010 +#define EMAC_RX_ST_BFCS 0x0008 +#define EMAC_RX_ST_PTL 0x0004 +#define EMAC_RX_ST_ORE 0x0002 +#define EMAC_RX_ST_IRE 0x0001 +#define EMAC_RX_TAH_BAD_CSUM 0x0003 +#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \ + EMAC_RX_ST_RP | EMAC_RX_ST_SE | \ + EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \ + EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \ + EMAC_RX_ST_IRE ) +#endif /* __IBM_NEWEMAC_H */ diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c new file mode 100644 index 000000000000..58854117b1a9 --- /dev/null +++ b/drivers/net/ibm_newemac/mal.c @@ -0,0 +1,711 @@ +/* + * drivers/net/ibm_newemac/mal.c + * + * Memory Access Layer (MAL) support + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Benjamin Herrenschmidt <benh@kernel.crashing.org>, + * David Gibson <hermes@gibson.dropbear.id.au>, + * + * Armin Kuster <akuster@mvista.com> + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include <linux/delay.h> + +#include "core.h" + +static int mal_count; + +int __devinit mal_register_commac(struct mal_instance *mal, + struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "reg(%08x, %08x)" NL, + commac->tx_chan_mask, commac->rx_chan_mask); + + /* Don't let multiple commacs claim the same channel(s) */ + if ((mal->tx_chan_mask & commac->tx_chan_mask) || + (mal->rx_chan_mask & commac->rx_chan_mask)) { + spin_unlock_irqrestore(&mal->lock, flags); + printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n", + mal->index); + return -EBUSY; + } + + mal->tx_chan_mask |= commac->tx_chan_mask; + mal->rx_chan_mask |= commac->rx_chan_mask; + list_add(&commac->list, &mal->list); + + spin_unlock_irqrestore(&mal->lock, flags); + + return 0; +} + +void __devexit mal_unregister_commac(struct mal_instance *mal, + struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "unreg(%08x, %08x)" NL, + commac->tx_chan_mask, commac->rx_chan_mask); + + mal->tx_chan_mask &= ~commac->tx_chan_mask; + mal->rx_chan_mask &= ~commac->rx_chan_mask; + list_del_init(&commac->list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size) +{ + BUG_ON(channel < 0 || channel >= mal->num_rx_chans || + size > MAL_MAX_RX_SIZE); + + MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size); + + if (size & 0xf) { + printk(KERN_WARNING + "mal%d: incorrect RX size %lu for the channel %d\n", + mal->index, size, channel); + return -EINVAL; + } + + set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4); + return 0; +} + +int mal_tx_bd_offset(struct mal_instance *mal, int channel) +{ + BUG_ON(channel < 0 || channel >= mal->num_tx_chans); + + return channel * NUM_TX_BUFF; +} + +int mal_rx_bd_offset(struct mal_instance *mal, int channel) +{ + BUG_ON(channel < 0 || channel >= mal->num_rx_chans); + return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF; +} + +void mal_enable_tx_channel(struct mal_instance *mal, int channel) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "enable_tx(%d)" NL, channel); + + set_mal_dcrn(mal, MAL_TXCASR, + get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel)); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_disable_tx_channel(struct mal_instance *mal, int channel) +{ + set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel)); + + MAL_DBG(mal, "disable_tx(%d)" NL, channel); +} + +void mal_enable_rx_channel(struct mal_instance *mal, int channel) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "enable_rx(%d)" NL, channel); + + set_mal_dcrn(mal, MAL_RXCASR, + get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel)); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_disable_rx_channel(struct mal_instance *mal, int channel) +{ + set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel)); + + MAL_DBG(mal, "disable_rx(%d)" NL, channel); +} + +void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "poll_add(%p)" NL, commac); + + /* starts disabled */ + set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); + + list_add_tail(&commac->poll_list, &mal->poll_list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + 
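+	/* Unlink this commac from the MAL poll list under the instance lock */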
spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "poll_del(%p)" NL, commac); + + list_del(&commac->poll_list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +/* synchronized by mal_poll() */ +static inline void mal_enable_eob_irq(struct mal_instance *mal) +{ + MAL_DBG2(mal, "enable_irq" NL); + + // XXX might want to cache MAL_CFG as the DCR read can be slooooow + set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); +} + +/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */ +static inline void mal_disable_eob_irq(struct mal_instance *mal) +{ + // XXX might want to cache MAL_CFG as the DCR read can be slooooow + set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE); + + MAL_DBG2(mal, "disable_irq" NL); +} + +static irqreturn_t mal_serr(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 esr = get_mal_dcrn(mal, MAL_ESR); + + /* Clear the error status register */ + set_mal_dcrn(mal, MAL_ESR, esr); + + MAL_DBG(mal, "SERR %08x" NL, esr); + + if (esr & MAL_ESR_EVB) { + if (esr & MAL_ESR_DE) { + /* We ignore Descriptor error, + * TXDE or RXDE interrupt will be generated anyway. + */ + return IRQ_HANDLED; + } + + if (esr & MAL_ESR_PEIN) { + /* PLB error, it's probably buggy hardware or + * incorrect physical address in BD (i.e. bug) + */ + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: system error, " + "PLB (ESR = 0x%08x)\n", + mal->index, esr); + return IRQ_HANDLED; + } + + /* OPB error, it's probably buggy hardware or incorrect + * EBC setup + */ + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: system error, OPB (ESR = 0x%08x)\n", + mal->index, esr); + } + return IRQ_HANDLED; +} + +static inline void mal_schedule_poll(struct mal_instance *mal) +{ + if (likely(napi_schedule_prep(&mal->napi))) { + MAL_DBG2(mal, "schedule_poll" NL); + mal_disable_eob_irq(mal); + __napi_schedule(&mal->napi); + } else + MAL_DBG2(mal, "already in poll" NL); +} + +static irqreturn_t mal_txeob(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 r = get_mal_dcrn(mal, MAL_TXEOBISR); + + MAL_DBG2(mal, "txeob %08x" NL, r); + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_TXEOBISR, r); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxeob(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 r = get_mal_dcrn(mal, MAL_RXEOBISR); + + MAL_DBG2(mal, "rxeob %08x" NL, r); + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_RXEOBISR, r); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txde(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 deir = get_mal_dcrn(mal, MAL_TXDEIR); + set_mal_dcrn(mal, MAL_TXDEIR, deir); + + MAL_DBG(mal, "txde %08x" NL, deir); + + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n", + mal->index, deir); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxde(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + struct list_head *l; + + u32 deir = get_mal_dcrn(mal, MAL_RXDEIR); + + MAL_DBG(mal, "rxde %08x" NL, deir); + + list_for_each(l, &mal->list) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + if (deir & mc->rx_chan_mask) { + set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags); + mc->ops->rxde(mc->dev); + } + } + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_RXDEIR, deir); + + return IRQ_HANDLED; +} + +void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac) +{ + /* Spinlock-type semantics: only one caller 
disable poll at a time */ + while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags)) + msleep(1); + + /* Synchronize with the MAL NAPI poller. */ + napi_disable(&mal->napi); +} + +void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) +{ + smp_wmb(); + clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); + + // XXX might want to kick a poll now... +} + +static int mal_poll(struct napi_struct *napi, int budget) +{ + struct mal_instance *mal = container_of(napi, struct mal_instance, napi); + struct list_head *l; + int received = 0; + unsigned long flags; + + MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget, + rx_work_limit); + again: + /* Process TX skbs */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + mc->ops->poll_tx(mc->dev); + } + + /* Process RX skbs. + * + * We _might_ need something more smart here to enforce polling + * fairness. + */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + int n; + if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) + continue; + n = mc->ops->poll_rx(mc->dev, budget); + if (n) { + received += n; + budget -= n; + if (budget <= 0) + goto more_work; // XXX What if this is the last one ? + } + } + + /* We need to disable IRQs to protect from RXDE IRQ here */ + spin_lock_irqsave(&mal->lock, flags); + __napi_complete(napi); + mal_enable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + + /* Check for "rotting" packet(s) */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) + continue; + if (unlikely(mc->ops->peek_rx(mc->dev) || + test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { + MAL_DBG2(mal, "rotting packet" NL); + if (napi_reschedule(napi)) + mal_disable_eob_irq(mal); + else + MAL_DBG2(mal, "already in poll list" NL); + + if (budget > 0) + goto again; + else + goto more_work; + } + mc->ops->poll_tx(mc->dev); + } + + more_work: + MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received); + return received; +} + +static void mal_reset(struct mal_instance *mal) +{ + int n = 10; + + MAL_DBG(mal, "reset" NL); + + set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR); + + /* Wait for reset to complete (1 system clock) */ + while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) + --n; + + if (unlikely(!n)) + printk(KERN_ERR "mal%d: reset timeout\n", mal->index); +} + +int mal_get_regs_len(struct mal_instance *mal) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct mal_regs); +} + +void *mal_dump_regs(struct mal_instance *mal, void *buf) +{ + struct emac_ethtool_regs_subhdr *hdr = buf; + struct mal_regs *regs = (struct mal_regs *)(hdr + 1); + int i; + + hdr->version = mal->version; + hdr->index = mal->index; + + regs->tx_count = mal->num_tx_chans; + regs->rx_count = mal->num_rx_chans; + + regs->cfg = get_mal_dcrn(mal, MAL_CFG); + regs->esr = get_mal_dcrn(mal, MAL_ESR); + regs->ier = get_mal_dcrn(mal, MAL_IER); + regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR); + regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR); + regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR); + regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR); + regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR); + regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR); + regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR); + regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR); + + for (i = 0; i < regs->tx_count; ++i) + regs->tx_ctpr[i] = 
get_mal_dcrn(mal, MAL_TXCTPR(i)); + + for (i = 0; i < regs->rx_count; ++i) { + regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i)); + regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i)); + } + return regs + 1; +} + +static int __devinit mal_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct mal_instance *mal; + int err = 0, i, bd_size; + int index = mal_count++; + const u32 *prop; + u32 cfg; + + mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL); + if (!mal) { + printk(KERN_ERR + "mal%d: out of memory allocating MAL structure!\n", + index); + return -ENOMEM; + } + mal->index = index; + mal->ofdev = ofdev; + mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1; + + MAL_DBG(mal, "probe" NL); + + prop = of_get_property(ofdev->node, "num-tx-chans", NULL); + if (prop == NULL) { + printk(KERN_ERR + "mal%d: can't find MAL num-tx-chans property!\n", + index); + err = -ENODEV; + goto fail; + } + mal->num_tx_chans = prop[0]; + + prop = of_get_property(ofdev->node, "num-rx-chans", NULL); + if (prop == NULL) { + printk(KERN_ERR + "mal%d: can't find MAL num-rx-chans property!\n", + index); + err = -ENODEV; + goto fail; + } + mal->num_rx_chans = prop[0]; + + mal->dcr_base = dcr_resource_start(ofdev->node, 0); + if (mal->dcr_base == 0) { + printk(KERN_ERR + "mal%d: can't find DCR resource!\n", index); + err = -ENODEV; + goto fail; + } + mal->dcr_host = dcr_map(ofdev->node, mal->dcr_base, 0x100); + if (!DCR_MAP_OK(mal->dcr_host)) { + printk(KERN_ERR + "mal%d: failed to map DCRs !\n", index); + err = -ENODEV; + goto fail; + } + + mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); + mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); + mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2); + mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3); + mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4); + if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || + mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ || + mal->rxde_irq == NO_IRQ) { + printk(KERN_ERR + "mal%d: failed to map interrupts !\n", index); + err = -ENODEV; + goto fail_unmap; + } + + INIT_LIST_HEAD(&mal->poll_list); + mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT; + mal->napi.poll = mal_poll; + INIT_LIST_HEAD(&mal->list); + spin_lock_init(&mal->lock); + + /* Load power-on reset defaults */ + mal_reset(mal); + + /* Set the MAL configuration register */ + cfg = (mal->version == 2) ? 
MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT; + cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA; + + /* Current Axon is not happy with priority being non-0, it can + * deadlock, fix it up here + */ + if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon")) + cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); + + /* Apply configuration */ + set_mal_dcrn(mal, MAL_CFG, cfg); + + /* Allocate space for BD rings */ + BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32); + BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32); + + bd_size = sizeof(struct mal_descriptor) * + (NUM_TX_BUFF * mal->num_tx_chans + + NUM_RX_BUFF * mal->num_rx_chans); + mal->bd_virt = + dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, + GFP_KERNEL); + if (mal->bd_virt == NULL) { + printk(KERN_ERR + "mal%d: out of memory allocating RX/TX descriptors!\n", + index); + err = -ENOMEM; + goto fail_unmap; + } + memset(mal->bd_virt, 0, bd_size); + + for (i = 0; i < mal->num_tx_chans; ++i) + set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + + sizeof(struct mal_descriptor) * + mal_tx_bd_offset(mal, i)); + + for (i = 0; i < mal->num_rx_chans; ++i) + set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma + + sizeof(struct mal_descriptor) * + mal_rx_bd_offset(mal, i)); + + err = request_irq(mal->serr_irq, mal_serr, 0, "MAL SERR", mal); + if (err) + goto fail2; + err = request_irq(mal->txde_irq, mal_txde, 0, "MAL TX DE", mal); + if (err) + goto fail3; + err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); + if (err) + goto fail4; + err = request_irq(mal->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); + if (err) + goto fail5; + err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); + if (err) + goto fail6; + + /* Enable all MAL SERR interrupt sources */ + if (mal->version == 2) + set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS); + else + set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS); + + /* Enable EOB interrupt */ + mal_enable_eob_irq(mal); + + printk(KERN_INFO + "MAL v%d %s, %d TX channels, %d RX channels\n", + mal->version, ofdev->node->full_name, + mal->num_tx_chans, mal->num_rx_chans); + + /* Advertise this instance to the rest of the world */ + wmb(); + dev_set_drvdata(&ofdev->dev, mal); + + mal_dbg_register(mal); + + return 0; + + fail6: + free_irq(mal->rxde_irq, mal); + fail5: + free_irq(mal->txeob_irq, mal); + fail4: + free_irq(mal->txde_irq, mal); + fail3: + free_irq(mal->serr_irq, mal); + fail2: + dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); + fail_unmap: + dcr_unmap(mal->dcr_host, mal->dcr_base, 0x100); + fail: + kfree(mal); + + return err; +} + +static int __devexit mal_remove(struct of_device *ofdev) +{ + struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); + + MAL_DBG(mal, "remove" NL); + + /* Synchronize with scheduled polling */ + napi_disable(&mal->napi); + + if (!list_empty(&mal->list)) { + /* This is *very* bad */ + printk(KERN_EMERG + "mal%d: commac list is not empty on remove!\n", + mal->index); + WARN_ON(1); + } + + dev_set_drvdata(&ofdev->dev, NULL); + + free_irq(mal->serr_irq, mal); + free_irq(mal->txde_irq, mal); + free_irq(mal->txeob_irq, mal); + free_irq(mal->rxde_irq, mal); + free_irq(mal->rxeob_irq, mal); + + mal_reset(mal); + + mal_dbg_unregister(mal); + + dma_free_coherent(&ofdev->dev, + sizeof(struct mal_descriptor) * + (NUM_TX_BUFF * mal->num_tx_chans + + NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt, + mal->bd_dma); + kfree(mal); + + return 0; +} + +static struct of_device_id mal_platform_match[] = +{ + { + .compatible = "ibm,mcmal", + }, + { + .compatible = 
"ibm,mcmal2", + }, + /* Backward compat */ + { + .type = "mcmal-dma", + .compatible = "ibm,mcmal", + }, + { + .type = "mcmal-dma", + .compatible = "ibm,mcmal2", + }, + {}, +}; + +static struct of_platform_driver mal_of_driver = { + .name = "mcmal", + .match_table = mal_platform_match, + + .probe = mal_probe, + .remove = mal_remove, +}; + +int __init mal_init(void) +{ + return of_register_platform_driver(&mal_of_driver); +} + +void mal_exit(void) +{ + of_unregister_platform_driver(&mal_of_driver); +} diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h new file mode 100644 index 000000000000..cb1a16d589fe --- /dev/null +++ b/drivers/net/ibm_newemac/mal.h @@ -0,0 +1,276 @@ +/* + * drivers/net/ibm_newemac/mal.h + * + * Memory Access Layer (MAL) support + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Armin Kuster <akuster@mvista.com> + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_MAL_H +#define __IBM_NEWEMAC_MAL_H + +/* + * There are some variations on the MAL, we express them in this driver as + * MAL Version 1 and 2 though that doesn't match any IBM terminology. + * + * We call MAL 1 the version in 405GP, 405GPR, 405EP, 440EP, 440GR and + * NP405H. + * + * We call MAL 2 the version in 440GP, 440GX, 440SP, 440SPE and Axon + * + * The driver expects a "version" property in the emac node containing + * a number 1 or 2. New device-trees for EMAC capable platforms are thus + * required to include that when porting to arch/powerpc. 
+ */ + +/* MALx DCR registers */ +#define MAL_CFG 0x00 +#define MAL_CFG_SR 0x80000000 +#define MAL_CFG_PLBB 0x00004000 +#define MAL_CFG_OPBBL 0x00000080 +#define MAL_CFG_EOPIE 0x00000004 +#define MAL_CFG_LEA 0x00000002 +#define MAL_CFG_SD 0x00000001 + +/* MAL V1 CFG bits */ +#define MAL1_CFG_PLBP_MASK 0x00c00000 +#define MAL1_CFG_PLBP_10 0x00800000 +#define MAL1_CFG_GA 0x00200000 +#define MAL1_CFG_OA 0x00100000 +#define MAL1_CFG_PLBLE 0x00080000 +#define MAL1_CFG_PLBT_MASK 0x00078000 +#define MAL1_CFG_DEFAULT (MAL1_CFG_PLBP_10 | MAL1_CFG_PLBT_MASK) + +/* MAL V2 CFG bits */ +#define MAL2_CFG_RPP_MASK 0x00c00000 +#define MAL2_CFG_RPP_10 0x00800000 +#define MAL2_CFG_RMBS_MASK 0x00300000 +#define MAL2_CFG_WPP_MASK 0x000c0000 +#define MAL2_CFG_WPP_10 0x00080000 +#define MAL2_CFG_WMBS_MASK 0x00030000 +#define MAL2_CFG_PLBLE 0x00008000 +#define MAL2_CFG_DEFAULT (MAL2_CFG_RMBS_MASK | MAL2_CFG_WMBS_MASK | \ + MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10) + +#define MAL_ESR 0x01 +#define MAL_ESR_EVB 0x80000000 +#define MAL_ESR_CIDT 0x40000000 +#define MAL_ESR_CID_MASK 0x3e000000 +#define MAL_ESR_CID_SHIFT 25 +#define MAL_ESR_DE 0x00100000 +#define MAL_ESR_OTE 0x00040000 +#define MAL_ESR_OSE 0x00020000 +#define MAL_ESR_PEIN 0x00010000 +#define MAL_ESR_DEI 0x00000010 +#define MAL_ESR_OTEI 0x00000004 +#define MAL_ESR_OSEI 0x00000002 +#define MAL_ESR_PBEI 0x00000001 + +/* MAL V1 ESR bits */ +#define MAL1_ESR_ONE 0x00080000 +#define MAL1_ESR_ONEI 0x00000008 + +/* MAL V2 ESR bits */ +#define MAL2_ESR_PTE 0x00800000 +#define MAL2_ESR_PRE 0x00400000 +#define MAL2_ESR_PWE 0x00200000 +#define MAL2_ESR_PTEI 0x00000080 +#define MAL2_ESR_PREI 0x00000040 +#define MAL2_ESR_PWEI 0x00000020 + + +#define MAL_IER 0x02 +#define MAL_IER_DE 0x00000010 +#define MAL_IER_OTE 0x00000004 +#define MAL_IER_OE 0x00000002 +#define MAL_IER_PE 0x00000001 +/* MAL V1 IER bits */ +#define MAL1_IER_NWE 0x00000008 +#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE +#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_OTE | \ + MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) + +/* MAL V2 IER bits */ +#define MAL2_IER_PT 0x00000080 +#define MAL2_IER_PRE 0x00000040 +#define MAL2_IER_PWE 0x00000020 +#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE) +#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_OTE | \ + MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) + + +#define MAL_TXCASR 0x04 +#define MAL_TXCARR 0x05 +#define MAL_TXEOBISR 0x06 +#define MAL_TXDEIR 0x07 +#define MAL_RXCASR 0x10 +#define MAL_RXCARR 0x11 +#define MAL_RXEOBISR 0x12 +#define MAL_RXDEIR 0x13 +#define MAL_TXCTPR(n) ((n) + 0x20) +#define MAL_RXCTPR(n) ((n) + 0x40) +#define MAL_RCBS(n) ((n) + 0x60) + +/* In reality MAL can handle TX buffers up to 4095 bytes long, + * but this isn't a good round number :) --ebs + */ +#define MAL_MAX_TX_SIZE 4080 +#define MAL_MAX_RX_SIZE 4080 + +static inline int mal_rx_size(int len) +{ + len = (len + 0xf) & ~0xf; + return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len; +} + +static inline int mal_tx_chunks(int len) +{ + return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE; +} + +#define MAL_CHAN_MASK(n) (0x80000000 >> (n)) + +/* MAL Buffer Descriptor structure */ +struct mal_descriptor { + u16 ctrl; /* MAL / Commac status control bits */ + u16 data_len; /* Max length is 4K-1 (12 bits) */ + u32 data_ptr; /* pointer to actual data buffer */ +}; + +/* the following defines are for the MadMAL status and control registers. 
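+ * For illustration only (a sketch, not code from this driver), a
+ * single-buffer transmit BD would typically be armed roughly as:
+ *
+ *	desc->ctrl = MAL_TX_CTRL_READY | MAL_TX_CTRL_LAST | MAL_TX_CTRL_INTR;
+ *
+ * with MAL_TX_CTRL_WRAP additionally set on the last descriptor of the ring.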
*/ +/* MADMAL transmit and receive status/control bits */ +#define MAL_RX_CTRL_EMPTY 0x8000 +#define MAL_RX_CTRL_WRAP 0x4000 +#define MAL_RX_CTRL_CM 0x2000 +#define MAL_RX_CTRL_LAST 0x1000 +#define MAL_RX_CTRL_FIRST 0x0800 +#define MAL_RX_CTRL_INTR 0x0400 +#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST) +#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE) + +#define MAL_TX_CTRL_READY 0x8000 +#define MAL_TX_CTRL_WRAP 0x4000 +#define MAL_TX_CTRL_CM 0x2000 +#define MAL_TX_CTRL_LAST 0x1000 +#define MAL_TX_CTRL_INTR 0x0400 + +struct mal_commac_ops { + void (*poll_tx) (void *dev); + int (*poll_rx) (void *dev, int budget); + int (*peek_rx) (void *dev); + void (*rxde) (void *dev); +}; + +struct mal_commac { + struct mal_commac_ops *ops; + void *dev; + struct list_head poll_list; + long flags; +#define MAL_COMMAC_RX_STOPPED 0 +#define MAL_COMMAC_POLL_DISABLED 1 + u32 tx_chan_mask; + u32 rx_chan_mask; + struct list_head list; +}; + +struct mal_instance { + int version; + int dcr_base; + dcr_host_t dcr_host; + + int num_tx_chans; /* Number of TX channels */ + int num_rx_chans; /* Number of RX channels */ + int txeob_irq; /* TX End Of Buffer IRQ */ + int rxeob_irq; /* RX End Of Buffer IRQ */ + int txde_irq; /* TX Descriptor Error IRQ */ + int rxde_irq; /* RX Descriptor Error IRQ */ + int serr_irq; /* MAL System Error IRQ */ + + struct list_head poll_list; + struct napi_struct napi; + + struct list_head list; + u32 tx_chan_mask; + u32 rx_chan_mask; + + dma_addr_t bd_dma; + struct mal_descriptor *bd_virt; + + struct of_device *ofdev; + int index; + spinlock_t lock; +}; + +static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg) +{ + return dcr_read(mal->dcr_host, mal->dcr_base + reg); +} + +static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val) +{ + dcr_write(mal->dcr_host, mal->dcr_base + reg, val); +} + +/* Register MAL devices */ +int mal_init(void); +void mal_exit(void); + +int mal_register_commac(struct mal_instance *mal, + struct mal_commac *commac); +void mal_unregister_commac(struct mal_instance *mal, + struct mal_commac *commac); +int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size); + +/* Returns BD ring offset for a particular channel + (in 'struct mal_descriptor' elements) +*/ +int mal_tx_bd_offset(struct mal_instance *mal, int channel); +int mal_rx_bd_offset(struct mal_instance *mal, int channel); + +void mal_enable_tx_channel(struct mal_instance *mal, int channel); +void mal_disable_tx_channel(struct mal_instance *mal, int channel); +void mal_enable_rx_channel(struct mal_instance *mal, int channel); +void mal_disable_rx_channel(struct mal_instance *mal, int channel); + +void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac); +void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac); + +/* Add/remove EMAC to/from MAL polling list */ +void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac); +void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac); + +/* Ethtool MAL registers */ +struct mal_regs { + u32 tx_count; + u32 rx_count; + + u32 cfg; + u32 esr; + u32 ier; + u32 tx_casr; + u32 tx_carr; + u32 tx_eobisr; + u32 tx_deir; + u32 rx_casr; + u32 rx_carr; + u32 rx_eobisr; + u32 rx_deir; + u32 tx_ctpr[32]; + u32 rx_ctpr[32]; + u32 rcbs[32]; +}; + +int mal_get_regs_len(struct mal_instance *mal); +void *mal_dump_regs(struct mal_instance *mal, void *buf); + +#endif /* __IBM_NEWEMAC_MAL_H */ diff --git 
a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c new file mode 100644 index 000000000000..aa1f0ddf1e3e --- /dev/null +++ b/drivers/net/ibm_newemac/phy.c @@ -0,0 +1,373 @@ +/* + * drivers/net/ibm_newemac/phy.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, PHY support. + * Borrowed from sungem_phy.c, though I only kept the generic MII + * driver for now. + * + * This file should be shared with other drivers or eventually + * merged as the "low level" part of miilib + * + * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) + * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net> + * + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/delay.h> + +#include "emac.h" +#include "phy.h" + +static inline int phy_read(struct mii_phy *phy, int reg) +{ + return phy->mdio_read(phy->dev, phy->address, reg); +} + +static inline void phy_write(struct mii_phy *phy, int reg, int val) +{ + phy->mdio_write(phy->dev, phy->address, reg, val); +} + +int emac_mii_reset_phy(struct mii_phy *phy) +{ + int val; + int limit = 10000; + + val = phy_read(phy, MII_BMCR); + val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); + val |= BMCR_RESET; + phy_write(phy, MII_BMCR, val); + + udelay(300); + + while (limit--) { + val = phy_read(phy, MII_BMCR); + if (val >= 0 && (val & BMCR_RESET) == 0) + break; + udelay(10); + } + if ((val & BMCR_ISOLATE) && limit > 0) + phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); + + return limit <= 0; +} + +static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) +{ + int ctl, adv; + + phy->autoneg = AUTONEG_ENABLE; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + phy->advertising = advertise; + + ctl = phy_read(phy, MII_BMCR); + if (ctl < 0) + return ctl; + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + + /* First clear the PHY */ + phy_write(phy, MII_BMCR, ctl); + + /* Setup standard advertise */ + adv = phy_read(phy, MII_ADVERTISE); + if (adv < 0) + return adv; + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (advertise & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (advertise & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; + phy_write(phy, MII_ADVERTISE, adv); + + if (phy->features & + (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { + adv = phy_read(phy, MII_CTRL1000); + if (adv < 0) + return adv; + adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + if (advertise & ADVERTISED_1000baseT_Full) + adv |= ADVERTISE_1000FULL; + if (advertise & ADVERTISED_1000baseT_Half) + adv |= ADVERTISE_1000HALF; + phy_write(phy, MII_CTRL1000, adv); + } + + /* Start/Restart aneg */ + ctl = phy_read(phy, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) +{ + int ctl; + + phy->autoneg = AUTONEG_DISABLE; + phy->speed = speed; + phy->duplex = fd; + phy->pause = phy->asym_pause = 0; + + ctl = phy_read(phy, MII_BMCR); + if (ctl < 0) + return ctl; + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | 
BMCR_ANENABLE); + + /* First clear the PHY */ + phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + + /* Select speed & duplex */ + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + ctl |= BMCR_SPEED100; + break; + case SPEED_1000: + ctl |= BMCR_SPEED1000; + break; + default: + return -EINVAL; + } + if (fd == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_poll_link(struct mii_phy *phy) +{ + int status; + + /* Clear latched value with dummy read */ + phy_read(phy, MII_BMSR); + status = phy_read(phy, MII_BMSR); + if (status < 0 || (status & BMSR_LSTATUS) == 0) + return 0; + if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE)) + return 0; + return 1; +} + +static int genmii_read_link(struct mii_phy *phy) +{ + if (phy->autoneg == AUTONEG_ENABLE) { + int glpa = 0; + int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); + if (lpa < 0) + return lpa; + + if (phy->features & + (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { + int adv = phy_read(phy, MII_CTRL1000); + glpa = phy_read(phy, MII_STAT1000); + + if (glpa < 0 || adv < 0) + return adv; + + glpa &= adv << 2; + } + + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + + if (glpa & (LPA_1000FULL | LPA_1000HALF)) { + phy->speed = SPEED_1000; + if (glpa & LPA_1000FULL) + phy->duplex = DUPLEX_FULL; + } else if (lpa & (LPA_100FULL | LPA_100HALF)) { + phy->speed = SPEED_100; + if (lpa & LPA_100FULL) + phy->duplex = DUPLEX_FULL; + } else if (lpa & LPA_10FULL) + phy->duplex = DUPLEX_FULL; + + if (phy->duplex == DUPLEX_FULL) { + phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; + phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; + } + } else { + int bmcr = phy_read(phy, MII_BMCR); + if (bmcr < 0) + return bmcr; + + if (bmcr & BMCR_FULLDPLX) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + phy->speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + + phy->pause = phy->asym_pause = 0; + } + return 0; +} + +/* Generic implementation for most 10/100/1000 PHYs */ +static struct mii_phy_ops generic_phy_ops = { + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def genmii_phy_def = { + .phy_id = 0x00000000, + .phy_id_mask = 0x00000000, + .name = "Generic MII", + .ops = &generic_phy_ops +}; + +/* CIS8201 */ +#define MII_CIS8201_10BTCSR 0x16 +#define TENBTCSR_ECHO_DISABLE 0x2000 +#define MII_CIS8201_EPCR 0x17 +#define EPCR_MODE_MASK 0x3000 +#define EPCR_GMII_MODE 0x0000 +#define EPCR_RGMII_MODE 0x1000 +#define EPCR_TBI_MODE 0x2000 +#define EPCR_RTBI_MODE 0x3000 +#define MII_CIS8201_ACSR 0x1c +#define ACSR_PIN_PRIO_SELECT 0x0004 + +static int cis8201_init(struct mii_phy *phy) +{ + int epcr; + + epcr = phy_read(phy, MII_CIS8201_EPCR); + if (epcr < 0) + return epcr; + + epcr &= ~EPCR_MODE_MASK; + + switch (phy->mode) { + case PHY_MODE_TBI: + epcr |= EPCR_TBI_MODE; + break; + case PHY_MODE_RTBI: + epcr |= EPCR_RTBI_MODE; + break; + case PHY_MODE_GMII: + epcr |= EPCR_GMII_MODE; + break; + case PHY_MODE_RGMII: + default: + epcr |= EPCR_RGMII_MODE; + } + + phy_write(phy, MII_CIS8201_EPCR, epcr); + + /* MII regs override strap pins */ + phy_write(phy, MII_CIS8201_ACSR, + phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT); + + /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */ + phy_write(phy, 
MII_CIS8201_10BTCSR, + phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE); + + return 0; +} + +static struct mii_phy_ops cis8201_phy_ops = { + .init = cis8201_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def cis8201_phy_def = { + .phy_id = 0x000fc410, + .phy_id_mask = 0x000ffff0, + .name = "CIS8201 Gigabit Ethernet", + .ops = &cis8201_phy_ops +}; + +static struct mii_phy_def *mii_phy_table[] = { + &cis8201_phy_def, + &genmii_phy_def, + NULL +}; + +int emac_mii_phy_probe(struct mii_phy *phy, int address) +{ + struct mii_phy_def *def; + int i; + u32 id; + + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->address = address; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + + /* Take PHY out of isolate mode and reset it. */ + if (emac_mii_reset_phy(phy)) + return -ENODEV; + + /* Read ID and find matching entry */ + id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2); + for (i = 0; (def = mii_phy_table[i]) != NULL; i++) + if ((id & def->phy_id_mask) == def->phy_id) + break; + /* Should never be NULL (we have a generic entry), but... */ + if (!def) + return -ENODEV; + + phy->def = def; + + /* Determine PHY features if needed */ + phy->features = def->features; + if (!phy->features) { + u16 bmsr = phy_read(phy, MII_BMSR); + if (bmsr & BMSR_ANEGCAPABLE) + phy->features |= SUPPORTED_Autoneg; + if (bmsr & BMSR_10HALF) + phy->features |= SUPPORTED_10baseT_Half; + if (bmsr & BMSR_10FULL) + phy->features |= SUPPORTED_10baseT_Full; + if (bmsr & BMSR_100HALF) + phy->features |= SUPPORTED_100baseT_Half; + if (bmsr & BMSR_100FULL) + phy->features |= SUPPORTED_100baseT_Full; + if (bmsr & BMSR_ESTATEN) { + u16 esr = phy_read(phy, MII_ESTATUS); + if (esr & ESTATUS_1000_TFULL) + phy->features |= SUPPORTED_1000baseT_Full; + if (esr & ESTATUS_1000_THALF) + phy->features |= SUPPORTED_1000baseT_Half; + } + phy->features |= SUPPORTED_MII; + } + + /* Setup default advertising */ + phy->advertising = phy->features; + + return 0; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ibm_newemac/phy.h b/drivers/net/ibm_newemac/phy.h new file mode 100644 index 000000000000..6feca26afedb --- /dev/null +++ b/drivers/net/ibm_newemac/phy.h @@ -0,0 +1,80 @@ +/* + * drivers/net/ibm_newemac/phy.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, PHY support + * + * Benjamin Herrenschmidt <benh@kernel.crashing.org> + * February 2003 + * + * Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This file basically duplicates sungem_phy.{c,h} with different PHYs + * supported. 
I'm looking into merging that in a single mii layer more + * flexible than mii.c + */ + +#ifndef __IBM_NEWEMAC_PHY_H +#define __IBM_NEWEMAC_PHY_H + +struct mii_phy; + +/* Operations supported by any kind of PHY */ +struct mii_phy_ops { + int (*init) (struct mii_phy * phy); + int (*suspend) (struct mii_phy * phy, int wol_options); + int (*setup_aneg) (struct mii_phy * phy, u32 advertise); + int (*setup_forced) (struct mii_phy * phy, int speed, int fd); + int (*poll_link) (struct mii_phy * phy); + int (*read_link) (struct mii_phy * phy); +}; + +/* Structure used to statically define an mii/gii based PHY */ +struct mii_phy_def { + u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ + u32 phy_id_mask; /* Significant bits */ + u32 features; /* Ethtool SUPPORTED_* defines or + 0 for autodetect */ + int magic_aneg; /* Autoneg does all speed test for us */ + const char *name; + const struct mii_phy_ops *ops; +}; + +/* An instance of a PHY, partially borrowed from mii_if_info */ +struct mii_phy { + struct mii_phy_def *def; + u32 advertising; /* Ethtool ADVERTISED_* defines */ + u32 features; /* Copied from mii_phy_def.features + or determined automaticaly */ + int address; /* PHY address */ + int mode; /* PHY mode */ + + /* 1: autoneg enabled, 0: disabled */ + int autoneg; + + /* forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + int asym_pause; + + /* Provided by host chip */ + struct net_device *dev; + int (*mdio_read) (struct net_device * dev, int addr, int reg); + void (*mdio_write) (struct net_device * dev, int addr, int reg, + int val); +}; + +/* Pass in a struct mii_phy with dev, mdio_read and mdio_write + * filled, the remaining fields will be filled on return + */ +int emac_mii_phy_probe(struct mii_phy *phy, int address); +int emac_mii_reset_phy(struct mii_phy *phy); + +#endif /* __IBM_NEWEMAC_PHY_H */ diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c new file mode 100644 index 000000000000..bcd7fc639c40 --- /dev/null +++ b/drivers/net/ibm_newemac/rgmii.c @@ -0,0 +1,323 @@ +/* + * drivers/net/ibm_newemac/rgmii.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Matt Porter <mporter@kernel.crashing.org> + * Copyright 2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <asm/io.h> + +#include "emac.h" +#include "debug.h" + +// XXX FIXME: Axon seems to support a subset of the RGMII, we +// thus need to take that into account and possibly change some +// of the bit settings below that don't seem to quite match the +// AXON spec + +/* RGMIIx_FER */ +#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4)) +#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4)) +#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4)) +#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4)) +#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4)) + +/* RGMIIx_SSR */ +#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) +#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) +#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) + +/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */ +static inline int rgmii_valid_mode(int phy_mode) +{ + return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_RGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline const char *rgmii_mode_name(int mode) +{ + switch (mode) { + case PHY_MODE_RGMII: + return "RGMII"; + case PHY_MODE_TBI: + return "TBI"; + case PHY_MODE_GMII: + return "GMII"; + case PHY_MODE_RTBI: + return "RTBI"; + default: + BUG(); + } +} + +static inline u32 rgmii_mode_mask(int mode, int input) +{ + switch (mode) { + case PHY_MODE_RGMII: + return RGMII_FER_RGMII(input); + case PHY_MODE_TBI: + return RGMII_FER_TBI(input); + case PHY_MODE_GMII: + return RGMII_FER_GMII(input); + case PHY_MODE_RTBI: + return RGMII_FER_RTBI(input); + default: + BUG(); + } +} + +int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + + RGMII_DBG(dev, "attach(%d)" NL, input); + + /* Check if we need to attach to a RGMII */ + if (input < 0 || !rgmii_valid_mode(mode)) { + printk(KERN_ERR "%s: unsupported settings !\n", + ofdev->node->full_name); + return -ENODEV; + } + + mutex_lock(&dev->lock); + + /* Enable this input */ + out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); + + printk(KERN_NOTICE "%s: input %d in %s mode\n", + ofdev->node->full_name, input, rgmii_mode_name(mode)); + + ++dev->users; + + mutex_unlock(&dev->lock); + + return 0; +} + +void rgmii_set_speed(struct of_device *ofdev, int input, int speed) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + u32 ssr; + + mutex_lock(&dev->lock); + + ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input); + + RGMII_DBG(dev, "speed(%d, %d)" NL, input, speed); + + if (speed == SPEED_1000) + ssr |= RGMII_SSR_1000(input); + else if (speed == SPEED_100) + ssr |= RGMII_SSR_100(input); + + out_be32(&p->ssr, ssr); + + mutex_unlock(&dev->lock); +} + +void rgmii_get_mdio(struct of_device *ofdev, int input) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + u32 fer; + + RGMII_DBG2(dev, "get_mdio(%d)" NL, input); + + if (dev->type != RGMII_AXON) + return; + + mutex_lock(&dev->lock); + + fer = in_be32(&p->fer); + fer |= 0x00080000u >> input; + out_be32(&p->fer, fer); + (void)in_be32(&p->fer); + + DBG2(dev, " fer = 0x%08x\n", fer); +} + +void rgmii_put_mdio(struct of_device *ofdev, int input) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + u32 fer; + + RGMII_DBG2(dev, "put_mdio(%d)" NL, input); + + if (dev->type != RGMII_AXON) + return; + + fer = in_be32(&p->fer); + fer 
&= ~(0x00080000u >> input); + out_be32(&p->fer, fer); + (void)in_be32(&p->fer); + + DBG2(dev, " fer = 0x%08x\n", fer); + + mutex_unlock(&dev->lock); +} + +void __devexit rgmii_detach(struct of_device *ofdev, int input) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + + mutex_lock(&dev->lock); + + BUG_ON(!dev || dev->users == 0); + + RGMII_DBG(dev, "detach(%d)" NL, input); + + /* Disable this input */ + out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input)); + + --dev->users; + + mutex_unlock(&dev->lock); +} + +int rgmii_get_regs_len(struct of_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct rgmii_regs); +} + +void *rgmii_dump_regs(struct of_device *ofdev, void *buf) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * rgmii ? if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs)); + return regs + 1; +} + + +static int __devinit rgmii_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct rgmii_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "%s: could not allocate RGMII device!\n", + np->full_name); + goto err_gone; + } + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct rgmii_regs *)ioremap(regs.start, + sizeof(struct rgmii_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* Check for RGMII type */ + if (device_is_compatible(ofdev->node, "ibm,rgmii-axon")) + dev->type = RGMII_AXON; + else + dev->type = RGMII_STANDARD; + + DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", + in_be32(&dev->base->fer), in_be32(&dev->base->ssr)); + + /* Disable all inputs by default */ + out_be32(&dev->base->fer, 0); + + printk(KERN_INFO + "RGMII %s %s initialized\n", + dev->type == RGMII_STANDARD ? 
"standard" : "axon", + ofdev->node->full_name); + + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int __devexit rgmii_remove(struct of_device *ofdev) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static struct of_device_id rgmii_match[] = +{ + { + .type = "rgmii-interface", + .compatible = "ibm,rgmii", + }, + { + .type = "emac-rgmii", + }, + {}, +}; + +static struct of_platform_driver rgmii_driver = { + .name = "emac-rgmii", + .match_table = rgmii_match, + + .probe = rgmii_probe, + .remove = rgmii_remove, +}; + +int __init rgmii_init(void) +{ + return of_register_platform_driver(&rgmii_driver); +} + +void rgmii_exit(void) +{ + of_unregister_platform_driver(&rgmii_driver); +} diff --git a/drivers/net/ibm_newemac/rgmii.h b/drivers/net/ibm_newemac/rgmii.h new file mode 100644 index 000000000000..57806833121e --- /dev/null +++ b/drivers/net/ibm_newemac/rgmii.h @@ -0,0 +1,76 @@ +/* + * drivers/net/ibm_newemac/rgmii.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. + * + * Based on ocp_zmii.h/ibm_emac_zmii.h + * Armin Kuster akuster@mvista.com + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __IBM_NEWEMAC_RGMII_H +#define __IBM_NEWEMAC_RGMII_H + +/* RGMII bridge type */ +#define RGMII_STANDARD 0 +#define RGMII_AXON 1 + +/* RGMII bridge */ +struct rgmii_regs { + u32 fer; /* Function enable register */ + u32 ssr; /* Speed select register */ +}; + +/* RGMII device */ +struct rgmii_instance { + struct rgmii_regs __iomem *base; + + /* Type of RGMII bridge */ + int type; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* number of EMACs using this RGMII bridge */ + int users; + + /* OF device instance */ + struct of_device *ofdev; +}; + +#ifdef CONFIG_IBM_NEW_EMAC_RGMII + +extern int rgmii_init(void); +extern void rgmii_exit(void); +extern int rgmii_attach(struct of_device *ofdev, int input, int mode); +extern void rgmii_detach(struct of_device *ofdev, int input); +extern void rgmii_get_mdio(struct of_device *ofdev, int input); +extern void rgmii_put_mdio(struct of_device *ofdev, int input); +extern void rgmii_set_speed(struct of_device *ofdev, int input, int speed); +extern int rgmii_get_regs_len(struct of_device *ofdev); +extern void *rgmii_dump_regs(struct of_device *ofdev, void *buf); + +#else + +# define rgmii_init() 0 +# define rgmii_exit() do { } while(0) +# define rgmii_attach(x,y,z) (-ENXIO) +# define rgmii_detach(x,y) do { } while(0) +# define rgmii_get_mdio(o,i) do { } while (0) +# define rgmii_put_mdio(o,i) do { } while (0) +# define rgmii_set_speed(x,y,z) do { } while(0) +# define rgmii_get_regs_len(x) 0 +# define rgmii_dump_regs(x,buf) (buf) +#endif /* !CONFIG_IBM_NEW_EMAC_RGMII */ + +#endif /* __IBM_NEWEMAC_RGMII_H */ diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c new file mode 100644 index 000000000000..e05c7e81efb6 --- /dev/null +++ b/drivers/net/ibm_newemac/tah.c @@ -0,0 +1,173 @@ +/* + * drivers/net/ibm_newemac/tah.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <asm/io.h> + +#include "emac.h" +#include "core.h" + +int __devinit tah_attach(struct of_device *ofdev, int channel) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + + mutex_lock(&dev->lock); + /* Reset has been done at probe() time... 
nothing else to do for now */ + ++dev->users; + mutex_unlock(&dev->lock); + + return 0; +} + +void __devexit tah_detach(struct of_device *ofdev, int channel) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + + mutex_lock(&dev->lock); + --dev->users; + mutex_unlock(&dev->lock); +} + +void tah_reset(struct of_device *ofdev) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_regs *p = dev->base; + int n; + + /* Reset TAH */ + out_be32(&p->mr, TAH_MR_SR); + n = 100; + while ((in_be32(&p->mr) & TAH_MR_SR) && n) + --n; + + if (unlikely(!n)) + printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name); + + /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */ + out_be32(&p->mr, + TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | + TAH_MR_DIG); +} + +int tah_get_regs_len(struct of_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct tah_regs); +} + +void *tah_dump_regs(struct of_device *ofdev, void *buf) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct tah_regs *regs = (struct tah_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * zmii ? if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct tah_regs)); + return regs + 1; +} + +static int __devinit tah_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct tah_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "%s: could not allocate TAH device!\n", + np->full_name); + goto err_gone; + } + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct tah_regs *)ioremap(regs.start, + sizeof(struct tah_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ + tah_reset(ofdev); + + printk(KERN_INFO + "TAH %s initialized\n", ofdev->node->full_name); + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int __devexit tah_remove(struct of_device *ofdev) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static struct of_device_id tah_match[] = +{ + { + .type = "tah", + }, + {}, +}; + +static struct of_platform_driver tah_driver = { + .name = "emac-tah", + .match_table = tah_match, + + .probe = tah_probe, + .remove = tah_remove, +}; + +int __init tah_init(void) +{ + return of_register_platform_driver(&tah_driver); +} + +void tah_exit(void) +{ + of_unregister_platform_driver(&tah_driver); +} diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ibm_newemac/tah.h new file mode 100644 index 000000000000..bc41853b6e26 --- /dev/null +++ b/drivers/net/ibm_newemac/tah.h @@ -0,0 +1,90 @@ +/* + * drivers/net/ibm_newemac/tah.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. + * + * Copyright 2004 MontaVista Software, Inc. 
+ * Matt Porter <mporter@kernel.crashing.org> + * + * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __IBM_NEWEMAC_TAH_H +#define __IBM_NEWEMAC_TAH_H + +/* TAH */ +struct tah_regs { + u32 revid; + u32 pad[3]; + u32 mr; + u32 ssr0; + u32 ssr1; + u32 ssr2; + u32 ssr3; + u32 ssr4; + u32 ssr5; + u32 tsr; +}; + + +/* TAH device */ +struct tah_instance { + struct tah_regs __iomem *base; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* number of EMACs using this TAH */ + int users; + + /* OF device instance */ + struct of_device *ofdev; +}; + + +/* TAH engine */ +#define TAH_MR_CVR 0x80000000 +#define TAH_MR_SR 0x40000000 +#define TAH_MR_ST_256 0x01000000 +#define TAH_MR_ST_512 0x02000000 +#define TAH_MR_ST_768 0x03000000 +#define TAH_MR_ST_1024 0x04000000 +#define TAH_MR_ST_1280 0x05000000 +#define TAH_MR_ST_1536 0x06000000 +#define TAH_MR_TFS_16KB 0x00000000 +#define TAH_MR_TFS_2KB 0x00200000 +#define TAH_MR_TFS_4KB 0x00400000 +#define TAH_MR_TFS_6KB 0x00600000 +#define TAH_MR_TFS_8KB 0x00800000 +#define TAH_MR_TFS_10KB 0x00a00000 +#define TAH_MR_DTFP 0x00100000 +#define TAH_MR_DIG 0x00080000 + +#ifdef CONFIG_IBM_NEW_EMAC_TAH + +extern int tah_init(void); +extern void tah_exit(void); +extern int tah_attach(struct of_device *ofdev, int channel); +extern void tah_detach(struct of_device *ofdev, int channel); +extern void tah_reset(struct of_device *ofdev); +extern int tah_get_regs_len(struct of_device *ofdev); +extern void *tah_dump_regs(struct of_device *ofdev, void *buf); + +#else + +# define tah_init() 0 +# define tah_exit() do { } while(0) +# define tah_attach(x,y) (-ENXIO) +# define tah_detach(x,y) do { } while(0) +# define tah_reset(x) do { } while(0) +# define tah_get_regs_len(x) 0 +# define tah_dump_regs(x,buf) (buf) + +#endif /* !CONFIG_IBM_NEW_EMAC_TAH */ + +#endif /* __IBM_NEWEMAC_TAH_H */ diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c new file mode 100644 index 000000000000..d06312901848 --- /dev/null +++ b/drivers/net/ibm_newemac/zmii.c @@ -0,0 +1,322 @@ +/* + * drivers/net/ibm_newemac/zmii.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Armin Kuster <akuster@mvista.com> + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <asm/io.h> + +#include "emac.h" +#include "core.h" + +/* ZMIIx_FER */ +#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4)) +#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \ + ZMII_FER_MDI(2) | ZMII_FER_MDI(3)) + +#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4)) +#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4)) +#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4)) + +/* ZMIIx_SSR */ +#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4)) +#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4)) +#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4)) + +/* ZMII only supports MII, RMII and SMII + * we also support autodetection for backward compatibility + */ +static inline int zmii_valid_mode(int mode) +{ + return mode == PHY_MODE_MII || + mode == PHY_MODE_RMII || + mode == PHY_MODE_SMII || + mode == PHY_MODE_NA; +} + +static inline const char *zmii_mode_name(int mode) +{ + switch (mode) { + case PHY_MODE_MII: + return "MII"; + case PHY_MODE_RMII: + return "RMII"; + case PHY_MODE_SMII: + return "SMII"; + default: + BUG(); + } +} + +static inline u32 zmii_mode_mask(int mode, int input) +{ + switch (mode) { + case PHY_MODE_MII: + return ZMII_FER_MII(input); + case PHY_MODE_RMII: + return ZMII_FER_RMII(input); + case PHY_MODE_SMII: + return ZMII_FER_SMII(input); + default: + return 0; + } +} + +int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_regs *p = dev->base; + + ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); + + if (!zmii_valid_mode(*mode)) + /* Probably an EMAC connected to RGMII, + * but it still may need ZMII for MDIO so + * we don't fail here. + */ + return 0; + + mutex_lock(&dev->lock); + + /* Autodetect ZMII mode if not specified. + * This is only for backward compatibility with the old driver. + * Please, always specify PHY mode in your board port to avoid + * any surprises. + */ + if (dev->mode == PHY_MODE_NA) { + if (*mode == PHY_MODE_NA) { + u32 r = dev->fer_save; + + ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r); + + if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1))) + dev->mode = PHY_MODE_MII; + else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1))) + dev->mode = PHY_MODE_RMII; + else + dev->mode = PHY_MODE_SMII; + } else + dev->mode = *mode; + + printk(KERN_NOTICE "%s: bridge in %s mode\n", + ofdev->node->full_name, zmii_mode_name(dev->mode)); + } else { + /* All inputs must use the same mode */ + if (*mode != PHY_MODE_NA && *mode != dev->mode) { + printk(KERN_ERR + "%s: invalid mode %d specified for input %d\n", + ofdev->node->full_name, *mode, input); + mutex_unlock(&dev->lock); + return -EINVAL; + } + } + + /* Report back correct PHY mode, + * it may be used during PHY initialization. 
+ */ + *mode = dev->mode; + + /* Enable this input */ + out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input)); + ++dev->users; + + mutex_unlock(&dev->lock); + + return 0; +} + +void zmii_get_mdio(struct of_device *ofdev, int input) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + u32 fer; + + ZMII_DBG2(dev, "get_mdio(%d)" NL, input); + + mutex_lock(&dev->lock); + + fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL; + out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input)); +} + +void zmii_put_mdio(struct of_device *ofdev, int input) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + ZMII_DBG2(dev, "put_mdio(%d)" NL, input); + mutex_unlock(&dev->lock); +} + + +void zmii_set_speed(struct of_device *ofdev, int input, int speed) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + u32 ssr; + + mutex_lock(&dev->lock); + + ssr = in_be32(&dev->base->ssr); + + ZMII_DBG(dev, "speed(%d, %d)" NL, input, speed); + + if (speed == SPEED_100) + ssr |= ZMII_SSR_SP(input); + else + ssr &= ~ZMII_SSR_SP(input); + + out_be32(&dev->base->ssr, ssr); + + mutex_unlock(&dev->lock); +} + +void __devexit zmii_detach(struct of_device *ofdev, int input) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + BUG_ON(!dev || dev->users == 0); + + mutex_lock(&dev->lock); + + ZMII_DBG(dev, "detach(%d)" NL, input); + + /* Disable this input */ + out_be32(&dev->base->fer, + in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input)); + + --dev->users; + + mutex_unlock(&dev->lock); +} + +int zmii_get_regs_len(struct of_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct zmii_regs); +} + +void *zmii_dump_regs(struct of_device *ofdev, void *buf) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * zmii ? 
if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs)); + return regs + 1; +} + +static int __devinit zmii_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct zmii_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "%s: could not allocate ZMII device!\n", + np->full_name); + goto err_gone; + } + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + dev->mode = PHY_MODE_NA; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct zmii_regs *)ioremap(regs.start, + sizeof(struct zmii_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* We may need FER value for autodetection later */ + dev->fer_save = in_be32(&dev->base->fer); + + /* Disable all inputs by default */ + out_be32(&dev->base->fer, 0); + + printk(KERN_INFO + "ZMII %s initialized\n", ofdev->node->full_name); + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int __devexit zmii_remove(struct of_device *ofdev) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static struct of_device_id zmii_match[] = +{ + { + .compatible = "ibm,zmii", + }, + /* For backward compat with old DT */ + { + .type = "emac-zmii", + }, + {}, +}; + +static struct of_platform_driver zmii_driver = { + .name = "emac-zmii", + .match_table = zmii_match, + + .probe = zmii_probe, + .remove = zmii_remove, +}; + +int __init zmii_init(void) +{ + return of_register_platform_driver(&zmii_driver); +} + +void zmii_exit(void) +{ + of_unregister_platform_driver(&zmii_driver); +} diff --git a/drivers/net/ibm_newemac/zmii.h b/drivers/net/ibm_newemac/zmii.h new file mode 100644 index 000000000000..82a9968b1f74 --- /dev/null +++ b/drivers/net/ibm_newemac/zmii.h @@ -0,0 +1,73 @@ +/* + * drivers/net/ibm_newemac/zmii.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * + * Based on original work by + * Armin Kuster <akuster@mvista.com> + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_ZMII_H +#define __IBM_NEWEMAC_ZMII_H + +/* ZMII bridge registers */ +struct zmii_regs { + u32 fer; /* Function enable reg */ + u32 ssr; /* Speed select reg */ + u32 smiirs; /* SMII status reg */ +}; + +/* ZMII device */ +struct zmii_instance { + struct zmii_regs __iomem *base; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* subset of PHY_MODE_XXXX */ + int mode; + + /* number of EMACs using this ZMII bridge */ + int users; + + /* FER value left by firmware */ + u32 fer_save; + + /* OF device instance */ + struct of_device *ofdev; +}; + +#ifdef CONFIG_IBM_NEW_EMAC_ZMII + +extern int zmii_init(void); +extern void zmii_exit(void); +extern int zmii_attach(struct of_device *ofdev, int input, int *mode); +extern void zmii_detach(struct of_device *ofdev, int input); +extern void zmii_get_mdio(struct of_device *ofdev, int input); +extern void zmii_put_mdio(struct of_device *ofdev, int input); +extern void zmii_set_speed(struct of_device *ofdev, int input, int speed); +extern int zmii_get_regs_len(struct of_device *ocpdev); +extern void *zmii_dump_regs(struct of_device *ofdev, void *buf); + +#else +# define zmii_init() 0 +# define zmii_exit() do { } while(0) +# define zmii_attach(x,y,z) (-ENXIO) +# define zmii_detach(x,y) do { } while(0) +# define zmii_get_mdio(x,y) do { } while(0) +# define zmii_put_mdio(x,y) do { } while(0) +# define zmii_set_speed(x,y,z) do { } while(0) +# define zmii_get_regs_len(x) 0 +# define zmii_dump_regs(x,buf) (buf) +#endif /* !CONFIG_IBM_NEW_EMAC_ZMII */ + +#endif /* __IBM_NEWEMAC_ZMII_H */ diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index fe85d6fcba33..91d83aca6bc7 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c @@ -591,7 +591,7 @@ static void irqrx_handler(struct net_device *dev) skb = dev_alloc_skb(rda.length + 2); if (skb == NULL) - priv->stat.rx_dropped++; + dev->stats.rx_dropped++; else { /* copy out data */ @@ -606,8 +606,8 @@ static void irqrx_handler(struct net_device *dev) /* bookkeeping */ dev->last_rx = jiffies; - priv->stat.rx_packets++; - priv->stat.rx_bytes += rda.length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += rda.length; /* pass to the upper layers */ netif_rx(skb); @@ -617,11 +617,11 @@ static void irqrx_handler(struct net_device *dev) /* otherwise check error status bits and increase statistics */ else { - priv->stat.rx_errors++; + dev->stats.rx_errors++; if (rda.status & RCREG_FAER) - priv->stat.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rda.status & RCREG_CRCR) - priv->stat.rx_crc_errors++; + dev->stats.rx_crc_errors++; } /* descriptor processed, will become new last descriptor in queue */ @@ -656,8 +656,8 @@ static void irqtx_handler(struct net_device *dev) memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); /* update statistics */ - priv->stat.tx_packets++; - priv->stat.tx_bytes += tda.length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += tda.length; /* update our pointers */ priv->txused[priv->currtxdescr] = 0; @@ -680,15 +680,15 @@ static void irqtxerr_handler(struct net_device *dev) memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); /* update statistics */ - priv->stat.tx_errors++; + dev->stats.tx_errors++; if (tda.status & (TCREG_NCRS | TCREG_CRSL)) - priv->stat.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tda.status & TCREG_EXC) - priv->stat.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (tda.status & 
TCREG_OWC) - priv->stat.tx_window_errors++; + dev->stats.tx_window_errors++; if (tda.status & TCREG_FU) - priv->stat.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; /* update our pointers */ priv->txused[priv->currtxdescr] = 0; @@ -824,7 +824,7 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev) if (priv->txusedcnt >= TXBUFCNT) { retval = -EIO; - priv->stat.tx_dropped++; + dev->stats.tx_dropped++; goto tx_done; } @@ -876,14 +876,6 @@ tx_done: return retval; } -/* return pointer to Ethernet statistics */ - -static struct net_device_stats *ibmlana_stats(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - return &priv->stat; -} - /* switch receiver mode. */ static void ibmlana_set_multicast_list(struct net_device *dev) @@ -906,8 +898,7 @@ static int ibmlana_probe(struct net_device *dev) int base = 0, irq = 0, iobase = 0, memlen = 0; ibmlana_priv *priv; ibmlana_medium medium; - - SET_MODULE_OWNER(dev); + DECLARE_MAC_BUF(mac); /* can't work without an MCA bus ;-) */ if (MCA_bus == 0) @@ -980,7 +971,6 @@ static int ibmlana_probe(struct net_device *dev) dev->stop = ibmlana_close; dev->hard_start_xmit = ibmlana_tx; dev->do_ioctl = NULL; - dev->get_stats = ibmlana_stats; dev->set_multicast_list = ibmlana_set_multicast_list; dev->flags |= IFF_MULTICAST; @@ -992,11 +982,10 @@ static int ibmlana_probe(struct net_device *dev) /* print config */ printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, " - "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n", + "MAC address %s.\n", dev->name, priv->realirq, dev->base_addr, dev->mem_start, dev->mem_end - 1, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + print_mac(mac, dev->dev_addr)); printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]); /* reset board */ diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h index 6b58bab9e308..aa3ddbdee4bb 100644 --- a/drivers/net/ibmlana.h +++ b/drivers/net/ibmlana.h @@ -26,7 +26,6 @@ typedef enum { typedef struct { unsigned int slot; /* MCA-Slot-# */ - struct net_device_stats stat; /* packet statistics */ int realirq; /* memorizes actual IRQ, even when currently not allocated */ ibmlana_medium medium; /* physical cannector */ diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index d96eb7229548..7d7758f3ad8c 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -28,7 +28,6 @@ /**************************************************************************/ /* TODO: - - remove frag processing code - no longer needed - add support for sysfs - possibly remove procfs support */ @@ -47,6 +46,9 @@ #include <linux/mm.h> #include <linux/ethtool.h> #include <linux/proc_fs.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <net/net_namespace.h> #include <asm/semaphore.h> #include <asm/hvcall.h> #include <asm/atomic.h> @@ -83,9 +85,8 @@ static int ibmveth_open(struct net_device *dev); static int ibmveth_close(struct net_device *dev); static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); -static int ibmveth_poll(struct net_device *dev, int *budget); +static int ibmveth_poll(struct napi_struct *napi, int budget); static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); static void ibmveth_set_multicast_list(struct net_device *dev); static int ibmveth_change_mtu(struct net_device *dev, int new_mtu); static void ibmveth_proc_register_driver(void); @@ -97,7 +98,7 @@ static 
void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); static struct kobj_type ktype_veth_pool; #ifdef CONFIG_PROC_FS -#define IBMVETH_PROC_DIR "net/ibmveth" +#define IBMVETH_PROC_DIR "ibmveth" static struct proc_dir_entry *ibmveth_proc_dir; #endif @@ -110,20 +111,49 @@ MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(ibmveth_driver_version); +struct ibmveth_stat { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat) +#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off)) + +struct ibmveth_stat ibmveth_stats[] = { + { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, + { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, + { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) }, + { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) }, + { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, + { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, + { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, + { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, +}; + /* simple methods of getting data from the current rxq entry */ +static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter) +{ + return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off; +} + +static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) +{ + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT; +} + static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) { - return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle); + return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle); } static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) { - return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid); + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID); } static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) { - return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset); + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK); } static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) @@ -131,6 +161,11 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); } +static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) +{ + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD); +} + /* setup the initial settings for a buffer pool */ static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) { @@ -228,9 +263,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc correlator = ((u64)pool->index << 32) | index; *(u64*)skb->data = correlator; - desc.desc = 0; - desc.fields.valid = 1; - desc.fields.length = pool->buff_size; + desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; desc.fields.address = dma_addr; lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); @@ -371,9 +404,8 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) return; } - desc.desc = 0; - desc.fields.valid = 1; - desc.fields.length = adapter->rx_buff_pool[pool].buff_size; + desc.fields.flags_len = IBMVETH_BUF_VALID | + 
adapter->rx_buff_pool[pool].buff_size; desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); @@ -480,6 +512,8 @@ static int ibmveth_open(struct net_device *netdev) ibmveth_debug_printk("open starting\n"); + napi_enable(&adapter->napi); + for(i = 0; i<IbmVethNumBufferPools; i++) rxq_entries += adapter->rx_buff_pool[i].size; @@ -489,6 +523,7 @@ static int ibmveth_open(struct net_device *netdev) if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM; } @@ -498,6 +533,7 @@ static int ibmveth_open(struct net_device *netdev) if(!adapter->rx_queue.queue_addr) { ibmveth_error_printk("unable to allocate rx queue pages\n"); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM; } @@ -514,6 +550,7 @@ static int ibmveth_open(struct net_device *netdev) (dma_mapping_error(adapter->rx_queue.queue_dma))) { ibmveth_error_printk("unable to map filter or buffer list pages\n"); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM; } @@ -524,9 +561,7 @@ static int ibmveth_open(struct net_device *netdev) memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); mac_address = mac_address >> 16; - rxq_desc.desc = 0; - rxq_desc.fields.valid = 1; - rxq_desc.fields.length = adapter->rx_queue.queue_len; + rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; rxq_desc.fields.address = adapter->rx_queue.queue_dma; ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr); @@ -545,6 +580,7 @@ static int ibmveth_open(struct net_device *netdev) rxq_desc.desc, mac_address); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENONET; } @@ -555,6 +591,7 @@ static int ibmveth_open(struct net_device *netdev) ibmveth_error_printk("unable to alloc pool\n"); adapter->rx_buff_pool[i].active = 0; ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM ; } } @@ -567,6 +604,7 @@ static int ibmveth_open(struct net_device *netdev) } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return rc; } @@ -587,6 +625,8 @@ static int ibmveth_close(struct net_device *netdev) ibmveth_debug_printk("close starting\n"); + napi_disable(&adapter->napi); + if (!adapter->pool_config) netif_stop_queue(netdev); @@ -634,12 +674,164 @@ static u32 netdev_get_link(struct net_device *dev) { return 1; } +static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + + if (data) + adapter->rx_csum = 1; + else { + /* + * Since the ibmveth firmware interface does not have the concept of + * separate tx/rx checksum offload enable, if rx checksum is disabled + * we also have to disable tx checksum offload. Once we disable rx + * checksum offload, we are no longer allowed to send tx buffers that + * are not properly checksummed. 
+ */ + adapter->rx_csum = 0; + dev->features &= ~NETIF_F_IP_CSUM; + } +} + +static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + + if (data) { + dev->features |= NETIF_F_IP_CSUM; + adapter->rx_csum = 1; + } else + dev->features &= ~NETIF_F_IP_CSUM; +} + +static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, + void (*done) (struct net_device *, u32)) +{ + struct ibmveth_adapter *adapter = dev->priv; + u64 set_attr, clr_attr, ret_attr; + long ret; + int rc1 = 0, rc2 = 0; + int restart = 0; + + if (netif_running(dev)) { + restart = 1; + adapter->pool_config = 1; + ibmveth_close(dev); + adapter->pool_config = 0; + } + + set_attr = 0; + clr_attr = 0; + + if (data) + set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + else + clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + + ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && + !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && + (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { + ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + set_attr, &ret_attr); + + if (ret != H_SUCCESS) { + rc1 = -EIO; + ibmveth_error_printk("unable to change checksum offload settings." + " %d rc=%ld\n", data, ret); + + ret = h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + } else + done(dev, data); + } else { + rc1 = -EIO; + ibmveth_error_printk("unable to change checksum offload settings." + " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr); + } + + if (restart) + rc2 = ibmveth_open(dev); + + return rc1 ? rc1 : rc2; +} + +static int ibmveth_set_rx_csum(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + + if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum)) + return 0; + + return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags); +} + +static int ibmveth_set_tx_csum(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + int rc = 0; + + if (data && (dev->features & NETIF_F_IP_CSUM)) + return 0; + if (!data && !(dev->features & NETIF_F_IP_CSUM)) + return 0; + + if (data && !adapter->rx_csum) + rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags); + else + ibmveth_set_tx_csum_flags(dev, data); + + return rc; +} + +static u32 ibmveth_get_rx_csum(struct net_device *dev) +{ + struct ibmveth_adapter *adapter = dev->priv; + return adapter->rx_csum; +} + +static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN); +} + +static int ibmveth_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ibmveth_stats); + default: + return -EOPNOTSUPP; + } +} + +static void ibmveth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + int i; + struct ibmveth_adapter *adapter = dev->priv; + + for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++) + data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset); +} + static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, .get_settings = netdev_get_settings, .get_link = netdev_get_link, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ibmveth_set_tx_csum, + .get_rx_csum = 
ibmveth_get_rx_csum, + .set_rx_csum = ibmveth_set_rx_csum, + .get_strings = ibmveth_get_strings, + .get_sset_count = ibmveth_get_sset_count, + .get_ethtool_stats = ibmveth_get_ethtool_stats, }; static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -652,9 +844,8 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev->priv; - union ibmveth_buf_desc desc[IbmVethMaxSendFrags]; + union ibmveth_buf_desc desc; unsigned long lpar_rc; - int nfrags = 0, curfrag; unsigned long correlator; unsigned long flags; unsigned int retry_count; @@ -664,83 +855,48 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_send_failed = 0; unsigned int tx_map_failed = 0; + desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; + desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); - if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) { + if (skb->ip_summed == CHECKSUM_PARTIAL && + ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { + ibmveth_error_printk("tx: failed to checksum packet\n"); tx_dropped++; goto out; } - memset(&desc, 0, sizeof(desc)); - - /* nfrags = number of frags after the initial fragment */ - nfrags = skb_shinfo(skb)->nr_frags; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; - if(nfrags) - adapter->tx_multidesc_send++; + desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); - /* map the initial fragment */ - desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len; - desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, - desc[0].fields.length, DMA_TO_DEVICE); - desc[0].fields.valid = 1; + /* Need to zero out the checksum */ + buf[0] = 0; + buf[1] = 0; + } - if(dma_mapping_error(desc[0].fields.address)) { - ibmveth_error_printk("tx: unable to map initial fragment\n"); + if (dma_mapping_error(desc.fields.address)) { + ibmveth_error_printk("tx: unable to map xmit buffer\n"); tx_map_failed++; tx_dropped++; goto out; } - curfrag = nfrags; - - /* map fragments past the initial portion if there are any */ - while(curfrag--) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag]; - desc[curfrag+1].fields.address - = dma_map_single(&adapter->vdev->dev, - page_address(frag->page) + frag->page_offset, - frag->size, DMA_TO_DEVICE); - desc[curfrag+1].fields.length = frag->size; - desc[curfrag+1].fields.valid = 1; - - if(dma_mapping_error(desc[curfrag+1].fields.address)) { - ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag); - tx_map_failed++; - tx_dropped++; - /* Free all the mappings we just created */ - while(curfrag < nfrags) { - dma_unmap_single(&adapter->vdev->dev, - desc[curfrag+1].fields.address, - desc[curfrag+1].fields.length, - DMA_TO_DEVICE); - curfrag++; - } - goto out; - } - } - /* send the frame. 
Arbitrarily set retrycount to 1024 */ correlator = 0; retry_count = 1024; do { lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, - desc[0].desc, - desc[1].desc, - desc[2].desc, - desc[3].desc, - desc[4].desc, - desc[5].desc, - correlator, - &correlator); + desc.desc, 0, 0, 0, 0, 0, + correlator, &correlator); } while ((lpar_rc == H_BUSY) && (retry_count--)); if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { - int i; ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); - for(i = 0; i < 6; i++) { - ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i, - desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address); - } + ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", + (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0, + skb->len, desc.fields.address); tx_send_failed++; tx_dropped++; } else { @@ -749,16 +905,13 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) netdev->trans_start = jiffies; } - do { - dma_unmap_single(&adapter->vdev->dev, - desc[nfrags].fields.address, - desc[nfrags].fields.length, DMA_TO_DEVICE); - } while(--nfrags >= 0); + dma_unmap_single(&adapter->vdev->dev, desc.fields.address, + skb->len, DMA_TO_DEVICE); out: spin_lock_irqsave(&adapter->stats_lock, flags); - adapter->stats.tx_dropped += tx_dropped; - adapter->stats.tx_bytes += tx_bytes; - adapter->stats.tx_packets += tx_packets; + netdev->stats.tx_dropped += tx_dropped; + netdev->stats.tx_bytes += tx_bytes; + netdev->stats.tx_packets += tx_packets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; spin_unlock_irqrestore(&adapter->stats_lock, flags); @@ -767,80 +920,72 @@ out: spin_lock_irqsave(&adapter->stats_lock, flags); return 0; } -static int ibmveth_poll(struct net_device *netdev, int *budget) +static int ibmveth_poll(struct napi_struct *napi, int budget) { - struct ibmveth_adapter *adapter = netdev->priv; - int max_frames_to_process = netdev->quota; + struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); + struct net_device *netdev = adapter->netdev; int frames_processed = 0; - int more_work = 1; unsigned long lpar_rc; restart_poll: do { - struct net_device *netdev = adapter->netdev; + struct sk_buff *skb; - if(ibmveth_rxq_pending_buffer(adapter)) { - struct sk_buff *skb; + if (!ibmveth_rxq_pending_buffer(adapter)) + break; - rmb(); + rmb(); + if (!ibmveth_rxq_buffer_valid(adapter)) { + wmb(); /* suggested by larson1 */ + adapter->rx_invalid_buffer++; + ibmveth_debug_printk("recycling invalid buffer\n"); + ibmveth_rxq_recycle_buffer(adapter); + } else { + int length = ibmveth_rxq_frame_length(adapter); + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); - if(!ibmveth_rxq_buffer_valid(adapter)) { - wmb(); /* suggested by larson1 */ - adapter->rx_invalid_buffer++; - ibmveth_debug_printk("recycling invalid buffer\n"); - ibmveth_rxq_recycle_buffer(adapter); - } else { - int length = ibmveth_rxq_frame_length(adapter); - int offset = ibmveth_rxq_frame_offset(adapter); - skb = ibmveth_rxq_get_buffer(adapter); + skb = ibmveth_rxq_get_buffer(adapter); + if (csum_good) + skb->ip_summed = CHECKSUM_UNNECESSARY; - ibmveth_rxq_harvest_buffer(adapter); + ibmveth_rxq_harvest_buffer(adapter); - skb_reserve(skb, offset); - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, netdev); + skb_reserve(skb, offset); + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); - netif_receive_skb(skb); 
/* send it up */ + netif_receive_skb(skb); /* send it up */ - adapter->stats.rx_packets++; - adapter->stats.rx_bytes += length; - frames_processed++; - netdev->last_rx = jiffies; - } - } else { - more_work = 0; + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += length; + frames_processed++; + netdev->last_rx = jiffies; } - } while(more_work && (frames_processed < max_frames_to_process)); + } while (frames_processed < budget); ibmveth_replenish_task(adapter); - if(more_work) { - /* more work to do - return that we are not done yet */ - netdev->quota -= frames_processed; - *budget -= frames_processed; - return 1; - } - - /* we think we are done - reenable interrupts, then check once more to make sure we are done */ - lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); + if (frames_processed < budget) { + /* We think we are done - reenable interrupts, + * then check once more to make sure we are done. + */ + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_ENABLE); - ibmveth_assert(lpar_rc == H_SUCCESS); + ibmveth_assert(lpar_rc == H_SUCCESS); - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); - if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed)) - { - lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); - ibmveth_assert(lpar_rc == H_SUCCESS); - more_work = 1; - goto restart_poll; + if (ibmveth_rxq_pending_buffer(adapter) && + netif_rx_reschedule(netdev, napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); + goto restart_poll; + } } - netdev->quota -= frames_processed; - *budget -= frames_processed; - - /* we really are done */ - return 0; + return frames_processed; } static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) @@ -849,20 +994,15 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) struct ibmveth_adapter *adapter = netdev->priv; unsigned long lpar_rc; - if(netif_rx_schedule_prep(netdev)) { - lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); ibmveth_assert(lpar_rc == H_SUCCESS); - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } return IRQ_HANDLED; } -static struct net_device_stats *ibmveth_get_stats(struct net_device *dev) -{ - struct ibmveth_adapter *adapter = dev->priv; - return &adapter->stats; -} - static void ibmveth_set_multicast_list(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev->priv; @@ -962,8 +1102,10 @@ static void ibmveth_poll_controller(struct net_device *dev) static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) { int rc, i; + long ret; struct net_device *netdev; - struct ibmveth_adapter *adapter = NULL; + struct ibmveth_adapter *adapter; + u64 set_attr, ret_attr; unsigned char *mac_addr_p; unsigned int *mcastFilterSize_p; @@ -994,10 +1136,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ if(!netdev) return -ENOMEM; - SET_MODULE_OWNER(netdev); - adapter = netdev->priv; - memset(adapter, 0, sizeof(adapter)); dev->dev.driver_data = netdev; adapter->vdev = dev; @@ -1005,6 +1144,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ adapter->mcastFilterSize= *mcastFilterSize_p; adapter->pool_config = 0; + netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); + /* Some older boxes running PHYP non-natively have 
an OF that returns a 8-byte local-mac-address field (and the first 2 bytes have to be ignored) while newer boxes' OF return @@ -1021,11 +1162,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ netdev->irq = dev->irq; netdev->open = ibmveth_open; - netdev->poll = ibmveth_poll; - netdev->weight = 16; netdev->stop = ibmveth_close; netdev->hard_start_xmit = ibmveth_start_xmit; - netdev->get_stats = ibmveth_get_stats; netdev->set_multicast_list = ibmveth_set_multicast_list; netdev->do_ioctl = ibmveth_ioctl; netdev->ethtool_ops = &netdev_ethtool_ops; @@ -1045,7 +1183,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ pool_count[i], pool_size[i], pool_active[i]); kobj->parent = &dev->dev.kobj; - sprintf(kobj->name, "pool%d", i); + kobject_set_name(kobj, "pool%d", i); kobj->ktype = &ktype_veth_pool; kobject_register(kobj); } @@ -1058,6 +1196,22 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ ibmveth_debug_printk("registering netdev...\n"); + ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr); + + if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && + !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && + (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { + set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + + ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr); + + if (ret == H_SUCCESS) { + adapter->rx_csum = 1; + netdev->features |= NETIF_F_IP_CSUM; + } else + ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr); + } + rc = register_netdev(netdev); if(rc) { @@ -1093,15 +1247,14 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) #ifdef CONFIG_PROC_FS static void ibmveth_proc_register_driver(void) { - ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL); + ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net); if (ibmveth_proc_dir) { - SET_MODULE_OWNER(ibmveth_proc_dir); } } static void ibmveth_proc_unregister_driver(void) { - remove_proc_entry(IBMVETH_PROC_DIR, NULL); + remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net); } static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) @@ -1128,22 +1281,16 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v) struct ibmveth_adapter *adapter = seq->private; char *current_mac = ((char*) &adapter->netdev->dev_addr); char *firmware_mac = ((char*) &adapter->mac_addr) ; + DECLARE_MAC_BUF(mac); seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); - seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", - current_mac[0], current_mac[1], current_mac[2], - current_mac[3], current_mac[4], current_mac[5]); - seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", - firmware_mac[0], firmware_mac[1], firmware_mac[2], - firmware_mac[3], firmware_mac[4], firmware_mac[5]); + seq_printf(seq, "Current MAC: %s\n", print_mac(mac, current_mac)); + seq_printf(seq, "Firmware MAC: %s\n", print_mac(mac, firmware_mac)); seq_printf(seq, "\nAdapter Statistics:\n"); - seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized); - seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send); - seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed); - seq_printf(seq, " vio_map_single failres: %ld\n", adapter->tx_map_failed); + seq_printf(seq, " TX: vio_map_single failres: %ld\n", adapter->tx_map_failed); seq_printf(seq, " send failures: 
%ld\n", adapter->tx_send_failed); seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles); seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem); @@ -1196,7 +1343,6 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) } else { entry->data = (void *) adapter; entry->proc_fops = &ibmveth_proc_fops; - SET_MODULE_OWNER(entry); } } return; @@ -1280,24 +1426,28 @@ const char * buf, size_t count) int i; /* Make sure there is a buffer pool with buffers that can hold a packet of the size of the MTU */ - for(i = 0; i<IbmVethNumBufferPools; i++) { + for (i = 0; i < IbmVethNumBufferPools; i++) { if (pool == &adapter->rx_buff_pool[i]) continue; if (!adapter->rx_buff_pool[i].active) continue; - if (mtu < adapter->rx_buff_pool[i].buff_size) { - pool->active = 0; - h_free_logical_lan_buffer(adapter-> - vdev-> - unit_address, - pool-> - buff_size); - } + if (mtu <= adapter->rx_buff_pool[i].buff_size) + break; } - if (pool->active) { + + if (i == IbmVethNumBufferPools) { ibmveth_error_printk("no active pool >= MTU\n"); return -EPERM; } + + pool->active = 0; + if (netif_running(netdev)) { + adapter->pool_config = 1; + ibmveth_close(netdev); + adapter->pool_config = 0; + if ((rc = ibmveth_open(netdev))) + return rc; + } } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index bb69ccae8ace..41f61cd18852 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h @@ -25,8 +25,6 @@ #ifndef _IBMVETH_H #define _IBMVETH_H -#define IbmVethMaxSendFrags 6 - /* constants for H_MULTICAST_CTRL */ #define IbmVethMcastReceptionModifyBit 0x80000UL #define IbmVethMcastReceptionEnableBit 0x20000UL @@ -41,6 +39,12 @@ #define IbmVethMcastRemoveFilter 0x2UL #define IbmVethMcastClearFilterTable 0x3UL +#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000ULL +#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00ULL +#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004ULL +#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002ULL +#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001ULL + /* hcall macros */ #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac) @@ -67,15 +71,27 @@ static inline long h_send_logical_lan(unsigned long unit_address, return rc; } +static inline long h_illan_attributes(unsigned long unit_address, + unsigned long reset_mask, unsigned long set_mask, + unsigned long *ret_attributes) +{ + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address, + reset_mask, set_mask); + + *ret_attributes = retbuf[0]; + + return rc; +} + #define h_multicast_ctrl(ua, cmd, mac) \ plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac) #define h_change_logical_lan_mac(ua, mac) \ plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) -#define h_free_logical_lan_buffer(ua, bufsize) \ - plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize) - #define IbmVethNumBufferPools 5 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ #define IBMVETH_MAX_MTU 68 @@ -115,6 +131,7 @@ struct ibmveth_rx_q { struct ibmveth_adapter { struct vio_dev *vdev; struct net_device *netdev; + struct napi_struct napi; struct net_device_stats stats; unsigned int mcastFilterSize; unsigned long mac_addr; @@ -125,6 +142,7 @@ struct ibmveth_adapter { struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; struct 
ibmveth_rx_q rx_queue; int pool_config; + int rx_csum; /* adapter specific stats */ u64 replenish_task_cycles; @@ -133,20 +151,19 @@ struct ibmveth_adapter { u64 replenish_add_buff_success; u64 rx_invalid_buffer; u64 rx_no_buffer; - u64 tx_multidesc_send; - u64 tx_linearized; - u64 tx_linearize_failed; u64 tx_map_failed; u64 tx_send_failed; spinlock_t stats_lock; }; struct ibmveth_buf_desc_fields { - u32 valid : 1; - u32 toggle : 1; - u32 reserved : 6; - u32 length : 24; - u32 address; + u32 flags_len; +#define IBMVETH_BUF_VALID 0x80000000 +#define IBMVETH_BUF_TOGGLE 0x40000000 +#define IBMVETH_BUF_NO_CSUM 0x02000000 +#define IBMVETH_BUF_CSUM_GOOD 0x01000000 +#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF + u32 address; }; union ibmveth_buf_desc { @@ -155,12 +172,16 @@ union ibmveth_buf_desc { }; struct ibmveth_rx_q_entry { - u16 toggle : 1; - u16 valid : 1; - u16 reserved : 14; - u16 offset; - u32 length; - u64 correlator; + u32 flags_off; +#define IBMVETH_RXQ_TOGGLE 0x80000000 +#define IBMVETH_RXQ_TOGGLE_SHIFT 31 +#define IBMVETH_RXQ_VALID 0x40000000 +#define IBMVETH_RXQ_NO_CSUM 0x02000000 +#define IBMVETH_RXQ_CSUM_GOOD 0x01000000 +#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF + + u32 length; + u64 correlator; }; #endif /* _IBMVETH_H */ diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index f5c3598e59af..15949d3df17e 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -34,12 +34,12 @@ #include <linux/init.h> #include <linux/moduleparam.h> #include <net/pkt_sched.h> +#include <net/net_namespace.h> #define TX_TIMEOUT (2*HZ) #define TX_Q_LIMIT 32 struct ifb_private { - struct net_device_stats stats; struct tasklet_struct ifb_tasklet; int tasklet_pending; /* mostly debug stats leave in for now */ @@ -60,7 +60,6 @@ static int numifbs = 2; static void ri_tasklet(unsigned long dev); static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *ifb_get_stats(struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); @@ -69,7 +68,7 @@ static void ri_tasklet(unsigned long dev) struct net_device *_dev = (struct net_device *)dev; struct ifb_private *dp = netdev_priv(_dev); - struct net_device_stats *stats = &dp->stats; + struct net_device_stats *stats = &_dev->stats; struct sk_buff *skb; dp->st_task_enter++; @@ -97,7 +96,7 @@ static void ri_tasklet(unsigned long dev) stats->tx_packets++; stats->tx_bytes +=skb->len; - skb->dev = __dev_get_by_index(skb->iif); + skb->dev = __dev_get_by_index(&init_net, skb->iif); if (!skb->dev) { dev_kfree_skb(skb); stats->tx_dropped++; @@ -139,7 +138,6 @@ resched: static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. 
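 *
 * (Editor's annotation, not part of the original patch: ifb now uses the
 * statistics block embedded in struct net_device, so the private stats
 * copy and the ifb_get_stats() hook are dropped in this hunk, and
 * ri_tasklet() and ifb_xmit() update dev->stats directly.)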
*/ - dev->get_stats = ifb_get_stats; dev->hard_start_xmit = ifb_xmit; dev->open = &ifb_open; dev->stop = &ifb_close; @@ -151,14 +149,13 @@ static void ifb_setup(struct net_device *dev) dev->change_mtu = NULL; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - SET_MODULE_OWNER(dev); random_ether_addr(dev->dev_addr); } static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); - struct net_device_stats *stats = &dp->stats; + struct net_device_stats *stats = &dev->stats; int ret = 0; u32 from = G_TC_FROM(skb->tc_verd); @@ -185,19 +182,6 @@ static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } -static struct net_device_stats *ifb_get_stats(struct net_device *dev) -{ - struct ifb_private *dp = netdev_priv(dev); - struct net_device_stats *stats = &dp->stats; - - pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n", - dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter, - dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr, - dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch); - - return stats; -} - static int ifb_close(struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 3ca1e8ece548..373f72cdbe8e 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -5,7 +5,7 @@ * * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. * - * Copyright (C) 1999, 2000, 2001, 2003 Ralf Baechle + * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. * * References: @@ -48,6 +48,7 @@ #ifdef CONFIG_SERIAL_8250 #include <linux/serial_core.h> #include <linux/serial_8250.h> +#include <linux/serial_reg.h> #endif #include <linux/netdevice.h> @@ -61,12 +62,7 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/sn/types.h> -#include <asm/sn/sn0/addrs.h> -#include <asm/sn/sn0/hubni.h> -#include <asm/sn/sn0/hubio.h> -#include <asm/sn/klconfig.h> #include <asm/sn/ioc3.h> -#include <asm/sn/sn0/ip27.h> #include <asm/pci/bridge.h> /* @@ -94,6 +90,9 @@ struct ioc3_private { u32 emcr, ehar_h, ehar_l; spinlock_t ioc3_lock; struct mii_if_info mii; + unsigned long flags; +#define IOC3_FLAG_RX_CHECKSUMS 1 + struct pci_dev *pdev; /* Members used by autonegotiation */ @@ -444,18 +443,12 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip) */ static void ioc3_get_eaddr(struct ioc3_private *ip) { - int i; - + DECLARE_MAC_BUF(mac); ioc3_get_eaddr_nic(ip); - printk("Ethernet address is "); - for (i = 0; i < 6; i++) { - printk("%02x", priv_netdev(ip)->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk(".\n"); + printk("Ethernet address is %s.\n", + print_mac(mac, priv_netdev(ip)->dev_addr)); } static void __ioc3_set_mac_address(struct net_device *dev) @@ -520,8 +513,6 @@ static struct net_device_stats *ioc3_get_stats(struct net_device *dev) return &ip->stats; } -#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM - static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) { struct ethhdr *eh = eth_hdr(skb); @@ -589,7 +580,6 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) if (csum == 0xffff) skb->ip_summed = CHECKSUM_UNNECESSARY; } -#endif /* CONFIG_SGI_IOC3_ETH_HW_RX_CSUM */ static inline void ioc3_rx(struct ioc3_private *ip) { @@ -624,9 +614,9 @@ static inline void ioc3_rx(struct ioc3_private *ip) goto next; } -#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM - ioc3_tcpudp_checksum(skb, w0 & 
ERXBUF_IPCKSUM_MASK,len); -#endif + if (likely(ip->flags & IOC3_FLAG_RX_CHECKSUMS)) + ioc3_tcpudp_checksum(skb, + w0 & ERXBUF_IPCKSUM_MASK, len); netif_rx(skb); @@ -1151,13 +1141,41 @@ static int ioc3_is_menet(struct pci_dev *pdev) * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working * around ioc3 oddities in this respect. * - * The IOC3 serials use a 22MHz clock rate with an additional divider by 3. + * The IOC3 serials use a 22MHz clock rate with an additional divider which + * can be programmed in the SCR register if the DLAB bit is set. + * + * Register to interrupt zero because we share the interrupt with + * the serial driver which we don't properly support yet. + * + * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been + * registered. */ +static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart) +{ +#define COSMISC_CONSTANT 6 + + struct uart_port port = { + .irq = 0, + .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, + .iotype = UPIO_MEM, + .regshift = 0, + .uartclk = (22000000 << 1) / COSMISC_CONSTANT, + + .membase = (unsigned char __iomem *) uart, + .mapbase = (unsigned long) uart, + }; + unsigned char lcr; + + lcr = uart->iu_lcr; + uart->iu_lcr = lcr | UART_LCR_DLAB; + uart->iu_scr = COSMISC_CONSTANT, + uart->iu_lcr = lcr; + uart->iu_lcr; + serial8250_register_port(&port); +} static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) { - struct uart_port port; - /* * We need to recognice and treat the fourth MENET serial as it * does not have an SuperIO chip attached to it, therefore attempting @@ -1171,24 +1189,35 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) return; /* - * Register to interrupt zero because we share the interrupt with - * the serial driver which we don't properly support yet. - * - * Can't use UPF_IOREMAP as the whole of IOC3 resources have already - * been registered. + * Switch IOC3 to PIO mode. It probably already was but let's be + * paranoid */ - memset(&port, 0, sizeof(port)); - port.irq = 0; - port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; - port.iotype = UPIO_MEM; - port.regshift = 0; - port.uartclk = 22000000 / 3; - - port.membase = (unsigned char *) &ioc3->sregs.uarta; - serial8250_register_port(&port); - - port.membase = (unsigned char *) &ioc3->sregs.uartb; - serial8250_register_port(&port); + ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL; + ioc3->gpcr_s; + ioc3->gppr_6 = 0; + ioc3->gppr_6; + ioc3->gppr_7 = 0; + ioc3->gppr_7; + ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN; + ioc3->sscr_a; + ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN; + ioc3->sscr_b; + /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. 
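 *
 * (Editor's annotation, not part of the original patch: the writes below
 * mask every per-event UART A and B interrupt source, leave only the
 * summary SIO_IR_SA_INT and SIO_IR_SB_INT bits enabled, clear SSCR_A/B,
 * and then hand both ports to the 8250 core via ioc3_8250_register().)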
*/ + ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | + SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | + SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | + SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); + ioc3->sio_iec |= SIO_IR_SA_INT; + ioc3->sscr_a = 0; + ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | + SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | + SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | + SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); + ioc3->sio_iec |= SIO_IR_SB_INT; + ioc3->sscr_b = 0; + + ioc3_8250_register(&ioc3->sregs.uarta); + ioc3_8250_register(&ioc3->sregs.uartb); } #endif @@ -1238,7 +1267,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); ip = netdev_priv(dev); @@ -1298,9 +1326,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->set_multicast_list = ioc3_set_multicast_list; dev->set_mac_address = ioc3_set_mac_address; dev->ethtool_ops = &ioc3_ethtool_ops; -#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM dev->features = NETIF_F_IP_CSUM; -#endif sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); @@ -1390,7 +1416,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) uint32_t w0 = 0; int produce; -#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM /* * IOC3 has a fairly simple minded checksumming hardware which simply * adds up the 1's complement checksum for the entire packet and @@ -1438,7 +1463,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); } -#endif /* CONFIG_SGI_IOC3_ETH_HW_TX_CSUM */ spin_lock_irq(&ip->ioc3_lock); @@ -1593,12 +1617,37 @@ static u32 ioc3_get_link(struct net_device *dev) return rc; } +static u32 ioc3_get_rx_csum(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + return ip->flags & IOC3_FLAG_RX_CHECKSUMS; +} + +static int ioc3_set_rx_csum(struct net_device *dev, u32 data) +{ + struct ioc3_private *ip = netdev_priv(dev); + + spin_lock_bh(&ip->ioc3_lock); + if (data) + ip->flags |= IOC3_FLAG_RX_CHECKSUMS; + else + ip->flags &= ~IOC3_FLAG_RX_CHECKSUMS; + spin_unlock_bh(&ip->ioc3_lock); + + return 0; +} + static const struct ethtool_ops ioc3_ethtool_ops = { .get_drvinfo = ioc3_get_drvinfo, .get_settings = ioc3_get_settings, .set_settings = ioc3_set_settings, .nway_reset = ioc3_nway_reset, .get_link = ioc3_get_link, + .get_rx_csum = ioc3_get_rx_csum, + .set_rx_csum = ioc3_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum }; static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c new file mode 100644 index 000000000000..59898ce54dcf --- /dev/null +++ b/drivers/net/ipg.c @@ -0,0 +1,2332 @@ +/* + * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter + * + * Copyright (C) 2003, 2007 IC Plus Corp + * + * Original Author: + * + * Craig Rich + * Sundance Technology, Inc. + * www.sundanceti.com + * craig_rich@sundanceti.com + * + * Current Maintainer: + * + * Sorbica Shieh. 
+ * http://www.icplus.com.tw + * sorbica@icplus.com.tw + * + * Jesse Huang + * http://www.icplus.com.tw + * jesse@icplus.com.tw + */ +#include <linux/crc32.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/mutex.h> + +#include <asm/div64.h> + +#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) +#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) +#define IPG_RESET_MASK \ + (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ + IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ + IPG_AC_AUTO_INIT) + +#define ipg_w32(val32,reg) iowrite32((val32), ioaddr + (reg)) +#define ipg_w16(val16,reg) iowrite16((val16), ioaddr + (reg)) +#define ipg_w8(val8,reg) iowrite8((val8), ioaddr + (reg)) + +#define ipg_r32(reg) ioread32(ioaddr + (reg)) +#define ipg_r16(reg) ioread16(ioaddr + (reg)) +#define ipg_r8(reg) ioread8(ioaddr + (reg)) + +#define JUMBO_FRAME_4k_ONLY +enum { + netdev_io_size = 128 +}; + +#include "ipg.h" +#define DRV_NAME "ipg" + +MODULE_AUTHOR("IC Plus Corp. 2003"); +MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver " + DrvVer); +MODULE_LICENSE("GPL"); + +static const char *ipg_brand_name[] = { + "IC PLUS IP1000 1000/100/10 based NIC", + "Sundance Technology ST2021 based NIC", + "Tamarack Microelectronics TC9020/9021 based NIC", + "Tamarack Microelectronics TC9020/9021 based NIC", + "D-Link NIC", + "D-Link NIC IP1000A" +}; + +static struct pci_device_id ipg_pci_tbl[] __devinitdata = { + { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, + { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, + { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, + { PCI_VDEVICE(DLINK, 0x9021), 3 }, + { PCI_VDEVICE(DLINK, 0x4000), 4 }, + { PCI_VDEVICE(DLINK, 0x4020), 5 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); + +static inline void __iomem *ipg_ioaddr(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + return sp->ioaddr; +} + +#ifdef IPG_DEBUG +static void ipg_dump_rfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + u32 offset; + + IPG_DEBUG_MSG("_dump_rfdlist\n"); + + printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current); + printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty); + printk(KERN_INFO "RFDList start address = %16.16lx\n", + (unsigned long) sp->rxd_map); + printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n", + ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; + printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i, + offset, (unsigned long) sp->rxd[i].next_desc); + offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; + printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i, + offset, (unsigned long) sp->rxd[i].rfs); + offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; + printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, + offset, (unsigned long) sp->rxd[i].frag_info); + } +} + +static void ipg_dump_tfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + u32 offset; + + IPG_DEBUG_MSG("_dump_tfdlist\n"); + + printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current); + printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty); + printk(KERN_INFO "TFDList start address = %16.16lx\n", + (unsigned long) sp->txd_map); + printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n", + ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); 
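/*
 * Editor's annotation, not part of the original patch: the loop below walks
 * the driver's copy of the TFD ring and prints, for every descriptor, its
 * byte offset within the ring together with the NextPtr, TFC and frag_info
 * fields, the transmit-side counterpart of ipg_dump_rfdlist() above.  The
 * two halves of the hardware list pointer printed just above could, as a
 * purely hypothetical sketch, be recombined as
 *
 *	u64 ptr = ((u64) ipg_r32(IPG_TFDLISTPTR1) << 32) | ipg_r32(IPG_TFDLISTPTR0);
 *
 * since the driver programs the upper register with zero for 32-bit
 * descriptor addresses (see init_tfdlist() below).
 */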
+ + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; + printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i, + offset, (unsigned long) sp->txd[i].next_desc); + + offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; + printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i, + offset, (unsigned long) sp->txd[i].tfc); + offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; + printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, + offset, (unsigned long) sp->txd[i].frag_info); + } +} +#endif + +static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) +{ + ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); + ndelay(IPG_PC_PHYCTRLWAIT_NS); +} + +static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) +{ + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); +} + +static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); +} + +static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | + phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); +} + +static u16 read_phy_bit(void __iomem * ioaddr, u8 phyctrlpolarity) +{ + u16 bit_data; + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); + + bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); + + return bit_data; +} + +/* + * Read a register from the Physical Layer device located + * on the IPG NIC, using the IPG PHYCTRL register. + */ +static int mdio_read(struct net_device * dev, int phy_id, int phy_reg) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + /* + * The GMII mangement frame structure for a read is as follows: + * + * |Preamble|st|op|phyad|regad|ta| data |idle| + * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | + * + * <32 1s> = 32 consecutive logic 1 values + * A = bit of Physical Layer device address (MSB first) + * R = bit of register address (MSB first) + * z = High impedance state + * D = bit of read data (MSB first) + * + * Transmission order is 'Preamble' field first, bits transmitted + * left to right (first to last). + */ + struct { + u32 field; + unsigned int len; + } p[] = { + { GMII_PREAMBLE, 32 }, /* Preamble */ + { GMII_ST, 2 }, /* ST */ + { GMII_READ, 2 }, /* OP */ + { phy_id, 5 }, /* PHYAD */ + { phy_reg, 5 }, /* REGAD */ + { 0x0000, 2 }, /* TA */ + { 0x0000, 16 }, /* DATA */ + { 0x0000, 1 } /* IDLE */ + }; + unsigned int i, j; + u8 polarity, data; + + polarity = ipg_r8(PHY_CTRL); + polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); + + /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ + for (j = 0; j < 5; j++) { + for (i = 0; i < p[j].len; i++) { + /* For each variable length field, the MSB must be + * transmitted first. Rotate through the field bits, + * starting with the MSB, and move each bit into the + * the 1st (2^1) bit position (this is the bit position + * corresponding to the MgmtData bit of the PhyCtrl + * register for the IPG). + * + * Example: ST = 01; + * + * First write a '0' to bit 1 of the PhyCtrl + * register, then write a '1' to bit 1 of the + * PhyCtrl register. + * + * To do this, right shift the MSB of ST by the value: + * [field length - 1 - #ST bits already written] + * then left shift this result by 1. 
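 *
 * (Editor's annotation, not part of the original patch: as a worked
 * example, take ST = 01 as above, so p[j].len = 2.  Iteration i = 0
 * computes (01 >> 1) << 1 = 0 and iteration i = 1 computes
 * (01 >> 0) << 1 = 2, i.e. the MgmtData bit (bit 1 of PhyCtrl) is driven
 * low first and then high, exactly as the example describes.)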
+ */ + data = (p[j].field >> (p[j].len - 1 - i)) << 1; + data &= IPG_PC_MGMTDATA; + data |= polarity | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, data); + } + } + + send_three_state(ioaddr, polarity); + + read_phy_bit(ioaddr, polarity); + + /* + * For a read cycle, the bits for the next two fields (TA and + * DATA) are driven by the PHY (the IPG reads these bits). + */ + for (i = 0; i < p[6].len; i++) { + p[6].field |= + (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); + } + + send_three_state(ioaddr, polarity); + send_three_state(ioaddr, polarity); + send_three_state(ioaddr, polarity); + send_end(ioaddr, polarity); + + /* Return the value of the DATA field. */ + return p[6].field; +} + +/* + * Write to a register from the Physical Layer device located + * on the IPG NIC, using the IPG PHYCTRL register. + */ +static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + /* + * The GMII mangement frame structure for a read is as follows: + * + * |Preamble|st|op|phyad|regad|ta| data |idle| + * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | + * + * <32 1s> = 32 consecutive logic 1 values + * A = bit of Physical Layer device address (MSB first) + * R = bit of register address (MSB first) + * z = High impedance state + * D = bit of write data (MSB first) + * + * Transmission order is 'Preamble' field first, bits transmitted + * left to right (first to last). + */ + struct { + u32 field; + unsigned int len; + } p[] = { + { GMII_PREAMBLE, 32 }, /* Preamble */ + { GMII_ST, 2 }, /* ST */ + { GMII_WRITE, 2 }, /* OP */ + { phy_id, 5 }, /* PHYAD */ + { phy_reg, 5 }, /* REGAD */ + { 0x0002, 2 }, /* TA */ + { val & 0xffff, 16 }, /* DATA */ + { 0x0000, 1 } /* IDLE */ + }; + unsigned int i, j; + u8 polarity, data; + + polarity = ipg_r8(PHY_CTRL); + polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); + + /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ + for (j = 0; j < 7; j++) { + for (i = 0; i < p[j].len; i++) { + /* For each variable length field, the MSB must be + * transmitted first. Rotate through the field bits, + * starting with the MSB, and move each bit into the + * the 1st (2^1) bit position (this is the bit position + * corresponding to the MgmtData bit of the PhyCtrl + * register for the IPG). + * + * Example: ST = 01; + * + * First write a '0' to bit 1 of the PhyCtrl + * register, then write a '1' to bit 1 of the + * PhyCtrl register. + * + * To do this, right shift the MSB of ST by the value: + * [field length - 1 - #ST bits already written] + * then left shift this result by 1. + */ + data = (p[j].field >> (p[j].len - 1 - i)) << 1; + data &= IPG_PC_MGMTDATA; + data |= polarity | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, data); + } + } + + /* The last cycle is a tri-state, so read from the PHY. 
*/ + for (j = 7; j < 8; j++) { + for (i = 0; i < p[j].len; i++) { + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); + + p[j].field |= ((ipg_r8(PHY_CTRL) & + IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); + } + } +} + +/* Set LED_Mode JES20040127EEPROM */ +static void ipg_set_led_mode(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + u32 mode; + + mode = ipg_r32(ASIC_CTRL); + mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); + + if ((sp->LED_Mode & 0x03) > 1) + mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ + + if ((sp->LED_Mode & 0x01) == 1) + mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ + + if ((sp->LED_Mode & 0x08) == 8) + mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ + + ipg_w32(mode, ASIC_CTRL); +} + +/* Set PHYSet JES20040127EEPROM */ +static void ipg_set_phy_set(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + int physet; + + physet = ipg_r8(PHY_SET); + physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); + physet |= ((sp->LED_Mode & 0x70) >> 4); + ipg_w8(physet, PHY_SET); +} + +static int ipg_reset(struct net_device *dev, u32 resetflags) +{ + /* Assert functional resets via the IPG AsicCtrl + * register as specified by the 'resetflags' input + * parameter. + */ + void __iomem *ioaddr = ipg_ioaddr(dev); //JES20040127EEPROM: + unsigned int timeout_count = 0; + + IPG_DEBUG_MSG("_reset\n"); + + ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); + + /* Delay added to account for problem with 10Mbps reset. */ + mdelay(IPG_AC_RESETWAIT); + + while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { + mdelay(IPG_AC_RESETWAIT); + if (++timeout_count > IPG_AC_RESET_TIMEOUT) + return -ETIME; + } + /* Set LED Mode in Asic Control JES20040127EEPROM */ + ipg_set_led_mode(dev); + + /* Set PHYSet Register Value JES20040127EEPROM */ + ipg_set_phy_set(dev); + return 0; +} + +/* Find the GMII PHY address. */ +static int ipg_find_phyaddr(struct net_device *dev) +{ + unsigned int phyaddr, i; + + for (i = 0; i < 32; i++) { + u32 status; + + /* Search for the correct PHY address among 32 possible. */ + phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; + + /* 10/22/03 Grace change verify from GMII_PHY_STATUS to + GMII_PHY_ID1 + */ + + status = mdio_read(dev, phyaddr, MII_BMSR); + + if ((status != 0xFFFF) && (status != 0)) + return phyaddr; + } + + return 0x1f; +} + +/* + * Configure IPG based on result of IEEE 802.3 PHY + * auto-negotiation. + */ +static int ipg_config_autoneg(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int txflowcontrol; + unsigned int rxflowcontrol; + unsigned int fullduplex; + unsigned int gig; + u32 mac_ctrl_val; + u32 asicctrl; + u8 phyctrl; + + IPG_DEBUG_MSG("_config_autoneg\n"); + + asicctrl = ipg_r32(ASIC_CTRL); + phyctrl = ipg_r8(PHY_CTRL); + mac_ctrl_val = ipg_r32(MAC_CTRL); + + /* Set flags for use in resolving auto-negotation, assuming + * non-1000Mbps, half duplex, no flow control. + */ + fullduplex = 0; + txflowcontrol = 0; + rxflowcontrol = 0; + gig = 0; + + /* To accomodate a problem in 10Mbps operation, + * set a global flag if PHY running in 10Mbps mode. + */ + sp->tenmbpsmode = 0; + + printk(KERN_INFO "%s: Link speed = ", dev->name); + + /* Determine actual speed of operation. 
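 *
 * (Editor's annotation, not part of the original patch: the switch below
 * decodes the IPG_PC_LINK_SPEED field of PhyCtrl into 10/100/1000 Mbps
 * and sets the tenmbpsmode workaround flag for 10 Mbps links; the
 * IPG_PC_DUPLEX_STATUS bit then selects full duplex plus TX/RX flow
 * control before MAC_CTRL is rewritten.)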
*/ + switch (phyctrl & IPG_PC_LINK_SPEED) { + case IPG_PC_LINK_SPEED_10MBPS: + printk("10Mbps.\n"); + printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n", + dev->name); + sp->tenmbpsmode = 1; + break; + case IPG_PC_LINK_SPEED_100MBPS: + printk("100Mbps.\n"); + break; + case IPG_PC_LINK_SPEED_1000MBPS: + printk("1000Mbps.\n"); + gig = 1; + break; + default: + printk("undefined!\n"); + return 0; + } + + if (phyctrl & IPG_PC_DUPLEX_STATUS) { + fullduplex = 1; + txflowcontrol = 1; + rxflowcontrol = 1; + } + + /* Configure full duplex, and flow control. */ + if (fullduplex == 1) { + /* Configure IPG for full duplex operation. */ + printk(KERN_INFO "%s: setting full duplex, ", dev->name); + + mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; + + if (txflowcontrol == 1) { + printk("TX flow control"); + mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; + } else { + printk("no TX flow control"); + mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; + } + + if (rxflowcontrol == 1) { + printk(", RX flow control."); + mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; + } else { + printk(", no RX flow control."); + mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; + } + + printk("\n"); + } else { + /* Configure IPG for half duplex operation. */ + printk(KERN_INFO "%s: setting half duplex, " + "no TX flow control, no RX flow control.\n", dev->name); + + mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD & + ~IPG_MC_TX_FLOW_CONTROL_ENABLE & + ~IPG_MC_RX_FLOW_CONTROL_ENABLE; + } + ipg_w32(mac_ctrl_val, MAC_CTRL); + return 0; +} + +/* Determine and configure multicast operation and set + * receive mode for IPG. + */ +static void ipg_nic_set_multicast_list(struct net_device *dev) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + struct dev_mc_list *mc_list_ptr; + unsigned int hashindex; + u32 hashtable[2]; + u8 receivemode; + + IPG_DEBUG_MSG("_nic_set_multicast_list\n"); + + receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; + + if (dev->flags & IFF_PROMISC) { + /* NIC to be configured in promiscuous mode. */ + receivemode = IPG_RM_RECEIVEALLFRAMES; + } else if ((dev->flags & IFF_ALLMULTI) || + (dev->flags & IFF_MULTICAST & + (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) { + /* NIC to be configured to receive all multicast + * frames. */ + receivemode |= IPG_RM_RECEIVEMULTICAST; + } else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) { + /* NIC to be configured to receive selected + * multicast addresses. */ + receivemode |= IPG_RM_RECEIVEMULTICASTHASH; + } + + /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. + * The IPG applies a cyclic-redundancy-check (the same CRC + * used to calculate the frame data FCS) to the destination + * address all incoming multicast frames whose destination + * address has the multicast bit set. The least significant + * 6 bits of the CRC result are used as an addressing index + * into the hash table. If the value of the bit addressed by + * this index is a 1, the frame is passed to the host system. + */ + + /* Clear hashtable. */ + hashtable[0] = 0x00000000; + hashtable[1] = 0x00000000; + + /* Cycle through all multicast addresses to filter. */ + for (mc_list_ptr = dev->mc_list; + mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) { + /* Calculate CRC result for each multicast address. */ + hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, + ETH_ALEN); + + /* Use only the least significant 6 bits. */ + hashindex = hashindex & 0x3F; + + /* Within "hashtable", set bit number "hashindex" + * to a logic 1. 
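 *
 * (Editor's annotation, not part of the original patch: the resulting
 * 64-bit table is kept as two 32-bit words, which are written out to the
 * HASHTABLE_0 and HASHTABLE_1 registers below.)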
+ */ + set_bit(hashindex, (void *)hashtable); + } + + /* Write the value of the hashtable, to the 4, 16 bit + * HASHTABLE IPG registers. + */ + ipg_w32(hashtable[0], HASHTABLE_0); + ipg_w32(hashtable[1], HASHTABLE_1); + + ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); + + IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); +} + +static int ipg_io_config(struct net_device *dev) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + u32 origmacctrl; + u32 restoremacctrl; + + IPG_DEBUG_MSG("_io_config\n"); + + origmacctrl = ipg_r32(MAC_CTRL); + + restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; + + /* Based on compilation option, determine if FCS is to be + * stripped on receive frames by IPG. + */ + if (!IPG_STRIP_FCS_ON_RX) + restoremacctrl |= IPG_MC_RCV_FCS; + + /* Determine if transmitter and/or receiver are + * enabled so we may restore MACCTRL correctly. + */ + if (origmacctrl & IPG_MC_TX_ENABLED) + restoremacctrl |= IPG_MC_TX_ENABLE; + + if (origmacctrl & IPG_MC_RX_ENABLED) + restoremacctrl |= IPG_MC_RX_ENABLE; + + /* Transmitter and receiver must be disabled before setting + * IFSSelect. + */ + ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & + IPG_MC_RSVD_MASK, MAC_CTRL); + + /* Now that transmitter and receiver are disabled, write + * to IFSSelect. + */ + ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); + + /* Set RECEIVEMODE register. */ + ipg_nic_set_multicast_list(dev); + + ipg_w16(IPG_MAX_RXFRAME_SIZE, MAX_FRAME_SIZE); + + ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); + ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); + ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); + ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); + ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); + ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); + ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | + IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | + IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | + IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); + ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); + ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); + + /* IPG multi-frag frame bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); + + /* IPG TX poll now bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); + + /* IPG RX poll now bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); + + /* Now restore MACCTRL to original setting. */ + ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); + + /* Disable unused RMON statistics. */ + ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); + + /* Disable unused MIB statistics. */ + ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | + IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | + IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | + IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | + IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | + IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); + + return 0; +} + +/* + * Create a receive buffer within system memory and update + * NIC private structure appropriately. 
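 *
 * (Editor's annotation, not part of the original patch: ipg_get_rxbuff()
 * allocates a buffer of IPG_RXSUPPORT_SIZE plus NET_IP_ALIGN bytes,
 * reserves NET_IP_ALIGN so the IP header is aligned, records the sk_buff
 * in sp->RxBuff[entry], maps it for DMA and encodes the fragment length
 * into the upper bits of the descriptor's frag_info field.)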
+ */ +static int ipg_get_rxbuff(struct net_device *dev, int entry) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct ipg_rx *rxfd = sp->rxd + entry; + struct sk_buff *skb; + u64 rxfragsize; + + IPG_DEBUG_MSG("_get_rxbuff\n"); + + skb = netdev_alloc_skb(dev, IPG_RXSUPPORT_SIZE + NET_IP_ALIGN); + if (!skb) { + sp->RxBuff[entry] = NULL; + return -ENOMEM; + } + + /* Adjust the data start location within the buffer to + * align IP address field to a 16 byte boundary. + */ + skb_reserve(skb, NET_IP_ALIGN); + + /* Associate the receive buffer with the IPG NIC. */ + skb->dev = dev; + + /* Save the address of the sk_buff structure. */ + sp->RxBuff[entry] = skb; + + rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + + /* Set the RFD fragment length. */ + rxfragsize = IPG_RXFRAG_SIZE; + rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); + + return 0; +} + +static int init_rfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_init_rfdlist\n"); + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + struct ipg_rx *rxfd = sp->rxd + i; + + if (sp->RxBuff[i]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + IPG_DEV_KFREE_SKB(sp->RxBuff[i]); + sp->RxBuff[i] = NULL; + } + + /* Clear out the RFS field. */ + rxfd->rfs = 0x0000000000000000; + + if (ipg_get_rxbuff(dev, i) < 0) { + /* + * A receive buffer was not ready, break the + * RFD list here. + */ + IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n"); + + /* Just in case we cannot allocate a single RFD. + * Should not occur. + */ + if (i == 0) { + printk(KERN_ERR "%s: No memory available" + " for RFD list.\n", dev->name); + return -ENOMEM; + } + } + + rxfd->next_desc = cpu_to_le64(sp->rxd_map + + sizeof(struct ipg_rx)*(i + 1)); + } + sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); + + sp->rx_current = 0; + sp->rx_dirty = 0; + + /* Write the location of the RFDList to the IPG. */ + ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); + ipg_w32(0x00000000, RFD_LIST_PTR_1); + + return 0; +} + +static void init_tfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_init_tfdlist\n"); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + struct ipg_tx *txfd = sp->txd + i; + + txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); + + if (sp->TxBuff[i]) { + IPG_DEV_KFREE_SKB(sp->TxBuff[i]); + sp->TxBuff[i] = NULL; + } + + txfd->next_desc = cpu_to_le64(sp->txd_map + + sizeof(struct ipg_tx)*(i + 1)); + } + sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); + + sp->tx_current = 0; + sp->tx_dirty = 0; + + /* Write the location of the TFDList to the IPG. */ + IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n", + (u32) sp->txd_map); + ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); + ipg_w32(0x00000000, TFD_LIST_PTR_1); + + sp->ResetCurrentTFD = 1; +} + +/* + * Free all transmit buffers which have already been transfered + * via DMA to the IPG. 
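 *
 * (Editor's annotation, not part of the original patch: ipg_nic_txfree()
 * advances from tx_dirty towards tx_current, stops at the descriptor the
 * hardware is still working on, unmaps and frees each completed buffer,
 * and wakes the transmit queue once ring space is available again.)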
+ */ +static void ipg_nic_txfree(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int curr; + u64 txd_map; + unsigned int released, pending; + + txd_map = (u64)sp->txd_map; + curr = ipg_r32(TFD_LIST_PTR_0) - + do_div(txd_map, sizeof(struct ipg_tx)) - 1; + + IPG_DEBUG_MSG("_nic_txfree\n"); + + pending = sp->tx_current - sp->tx_dirty; + + for (released = 0; released < pending; released++) { + unsigned int dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; + struct sk_buff *skb = sp->TxBuff[dirty]; + struct ipg_tx *txfd = sp->txd + dirty; + + IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc); + + /* Look at each TFD's TFC field beginning + * at the last freed TFD up to the current TFD. + * If the TFDDone bit is set, free the associated + * buffer. + */ + if (dirty == curr) + break; + + /* Setup TFDDONE for compatible issue. */ + txfd->tfc |= cpu_to_le64(IPG_TFC_TFDDONE); + + /* Free the transmit buffer. */ + if (skb) { + pci_unmap_single(sp->pdev, + le64_to_cpu(txfd->frag_info & ~IPG_TFI_FRAGLEN), + skb->len, PCI_DMA_TODEVICE); + + IPG_DEV_KFREE_SKB(skb); + + sp->TxBuff[dirty] = NULL; + } + } + + sp->tx_dirty += released; + + if (netif_queue_stopped(dev) && + (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { + netif_wake_queue(dev); + } +} + +static void ipg_tx_timeout(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + + ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | + IPG_AC_FIFO); + + spin_lock_irq(&sp->lock); + + /* Re-configure after DMA reset. */ + if (ipg_io_config(dev) < 0) { + printk(KERN_INFO "%s: Error during re-configuration.\n", + dev->name); + } + + init_tfdlist(dev); + + spin_unlock_irq(&sp->lock); + + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, + MAC_CTRL); +} + +/* + * For TxComplete interrupts, free all transmit + * buffers which have already been transfered via DMA + * to the IPG. + */ +static void ipg_nic_txcleanup(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_nic_txcleanup\n"); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + /* Reading the TXSTATUS register clears the + * TX_COMPLETE interrupt. + */ + u32 txstatusdword = ipg_r32(TX_STATUS); + + IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword); + + /* Check for Transmit errors. Error bits only valid if + * TX_COMPLETE bit in the TXSTATUS register is a 1. + */ + if (!(txstatusdword & IPG_TS_TX_COMPLETE)) + break; + + /* If in 10Mbps mode, indicate transmit is ready. */ + if (sp->tenmbpsmode) { + netif_wake_queue(dev); + } + + /* Transmit error, increment stat counters. */ + if (txstatusdword & IPG_TS_TX_ERROR) { + IPG_DEBUG_MSG("Transmit error.\n"); + sp->stats.tx_errors++; + } + + /* Late collision, re-enable transmitter. */ + if (txstatusdword & IPG_TS_LATE_COLLISION) { + IPG_DEBUG_MSG("Late collision on transmit.\n"); + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + + /* Maximum collisions, re-enable transmitter. */ + if (txstatusdword & IPG_TS_TX_MAX_COLL) { + IPG_DEBUG_MSG("Maximum collisions on transmit.\n"); + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + + /* Transmit underrun, reset and re-enable + * transmitter. 
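 *
 * (Editor's annotation, not part of the original patch: the underrun path
 * below bumps tx_fifo_errors, resets the TX, DMA, FIFO and network
 * blocks, re-runs ipg_io_config(), rebuilds the TFD list and finally
 * re-enables the transmitter through MAC_CTRL.)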
+ */ + if (txstatusdword & IPG_TS_TX_UNDERRUN) { + IPG_DEBUG_MSG("Transmitter underrun.\n"); + sp->stats.tx_fifo_errors++; + ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | + IPG_AC_NETWORK | IPG_AC_FIFO); + + /* Re-configure after DMA reset. */ + if (ipg_io_config(dev) < 0) { + printk(KERN_INFO + "%s: Error during re-configuration.\n", + dev->name); + } + init_tfdlist(dev); + + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + } + + ipg_nic_txfree(dev); +} + +/* Provides statistical information about the IPG NIC. */ +struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + u16 temp1; + u16 temp2; + + IPG_DEBUG_MSG("_nic_get_stats\n"); + + /* Check to see if the NIC has been initialized via nic_open, + * before trying to read statistic registers. + */ + if (!test_bit(__LINK_STATE_START, &dev->state)) + return &sp->stats; + + sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); + sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); + sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); + sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); + temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); + sp->stats.rx_errors += temp1; + sp->stats.rx_missed_errors += temp1; + temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + + ipg_r32(IPG_LATECOLLISIONS); + temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); + sp->stats.collisions += temp1; + sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); + sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + + ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; + sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); + + /* detailed tx_errors */ + sp->stats.tx_carrier_errors += temp2; + + /* detailed rx_errors */ + sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + + ipg_r16(IPG_FRAMETOOLONGERRRORS); + sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); + + /* Unutilized IPG statistic registers. */ + ipg_r32(IPG_MCSTFRAMESRCVDOK); + + return &sp->stats; +} + +/* Restore used receive buffers. */ +static int ipg_nic_rxrestore(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + const unsigned int curr = sp->rx_current; + unsigned int dirty = sp->rx_dirty; + + IPG_DEBUG_MSG("_nic_rxrestore\n"); + + for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { + unsigned int entry = dirty % IPG_RFDLIST_LENGTH; + + /* rx_copybreak may poke hole here and there. */ + if (sp->RxBuff[entry]) + continue; + + /* Generate a new receive buffer to replace the + * current buffer (which will be released by the + * Linux system). + */ + if (ipg_get_rxbuff(dev, entry) < 0) { + IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n"); + + break; + } + + /* Reset the RFS field. */ + sp->rxd[entry].rfs = 0x0000000000000000; + } + sp->rx_dirty = dirty; + + return 0; +} + +#ifdef JUMBO_FRAME + +/* use jumboindex and jumbosize to control jumbo frame status + initial status is jumboindex=-1 and jumbosize=0 + 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. + 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving + 3. 
jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump + previous receiving and need to continue dumping the current one +*/ +enum { + NormalPacket, + ErrorPacket +}; + +enum { + Frame_NoStart_NoEnd = 0, + Frame_WithStart = 1, + Frame_WithEnd = 10, + Frame_WithStart_WithEnd = 11 +}; + +inline void ipg_nic_rx_free_skb(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; + + if (sp->RxBuff[entry]) { + struct ipg_rx *rxfd = sp->rxd + entry; + + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); + sp->RxBuff[entry] = NULL; + } +} + +inline int ipg_nic_rx_check_frame_type(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); + int type = Frame_NoStart_NoEnd; + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) + type += Frame_WithStart; + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) + type += Frame_WithEnd; + return type; +} + +inline int ipg_nic_rx_check_error(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; + struct ipg_rx *rxfd = sp->rxd + entry; + + if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & + (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | + IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { + IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", + (unsigned long) rxfd->rfs); + + /* Increment general receive error statistic. */ + sp->stats.rx_errors++; + + /* Increment detailed receive error statistics. */ + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { + IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); + + sp->stats.rx_fifo_errors++; + } + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { + IPG_DEBUG_MSG("RX runt occured.\n"); + sp->stats.rx_length_errors++; + } + + /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, + * error count handled by a IPG statistic register. + */ + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { + IPG_DEBUG_MSG("RX alignment error occured.\n"); + sp->stats.rx_frame_errors++; + } + + /* Do nothing for IPG_RFS_RXFCSERROR, error count + * handled by a IPG statistic register. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. 
+ */ + if (sp->RxBuff[entry]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); + sp->RxBuff[entry] = NULL; + } + return ErrorPacket; + } + return NormalPacket; +} + +static void ipg_nic_rx_with_start_and_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + struct sk_buff *skb; + int framelen; + + if (jumbo->FoundStart) { + IPG_DEV_KFREE_SKB(jumbo->skb); + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + } + + // 1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) != NormalPacket) + return; + + skb = sp->RxBuff[entry]; + if (!skb) + return; + + // accept this frame and send to upper layer + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + if (framelen > IPG_RXFRAG_SIZE) + framelen = IPG_RXFRAG_SIZE; + + skb_put(skb, framelen); + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_NONE; + netif_rx(skb); + dev->last_rx = jiffies; + sp->RxBuff[entry] = NULL; +} + +static void ipg_nic_rx_with_start(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + struct pci_dev *pdev = sp->pdev; + struct sk_buff *skb; + + // 1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) != NormalPacket) + return; + + // accept this frame and send to upper layer + skb = sp->RxBuff[entry]; + if (!skb) + return; + + if (jumbo->FoundStart) + IPG_DEV_KFREE_SKB(jumbo->skb); + + pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + skb_put(skb, IPG_RXFRAG_SIZE); + + jumbo->FoundStart = 1; + jumbo->CurrentSize = IPG_RXFRAG_SIZE; + jumbo->skb = skb; + + sp->RxBuff[entry] = NULL; + dev->last_rx = jiffies; +} + +static void ipg_nic_rx_with_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + + //1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) == NormalPacket) { + struct sk_buff *skb = sp->RxBuff[entry]; + + if (!skb) + return; + + if (jumbo->FoundStart) { + int framelen, endframelen; + + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + + endframeLen = framelen - jumbo->CurrentSize; + /* + if (framelen > IPG_RXFRAG_SIZE) + framelen=IPG_RXFRAG_SIZE; + */ + if (framelen > IPG_RXSUPPORT_SIZE) + IPG_DEV_KFREE_SKB(jumbo->skb); + else { + memcpy(skb_put(jumbo->skb, endframeLen), + skb->data, endframeLen); + + jumbo->skb->protocol = + eth_type_trans(jumbo->skb, dev); + + jumbo->skb->ip_summed = CHECKSUM_NONE; + netif_rx(jumbo->skb); + } + } + + dev->last_rx = jiffies; + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + + ipg_nic_rx_free_skb(dev); + } else { + IPG_DEV_KFREE_SKB(jumbo->skb); + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + } +} + +static void ipg_nic_rx_no_start_no_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + + //1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) == NormalPacket) { + struct sk_buff *skb = sp->RxBuff[entry]; + + if (skb) { + if (jumbo->FoundStart) { + jumbo->CurrentSize += IPG_RXFRAG_SIZE; + if (jumbo->CurrentSize <= IPG_RXSUPPORT_SIZE) { + memcpy(skb_put(jumbo->skb, + IPG_RXFRAG_SIZE), + skb->data, IPG_RXFRAG_SIZE); + } + } + dev->last_rx = jiffies; 
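/*
 * Editor's annotation, not part of the original patch: by this point the
 * middle fragment has either been appended to the jumbo frame under
 * assembly or silently discarded (no start fragment seen, or the frame
 * has outgrown IPG_RXSUPPORT_SIZE), so the per-descriptor receive buffer
 * can be released below.
 */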
+ ipg_nic_rx_free_skb(dev); + } + } else { + IPG_DEV_KFREE_SKB(jumbo->skb); + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + } +} + +static int ipg_nic_rx(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int curr = sp->rx_current; + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_nic_rx\n"); + + for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { + unsigned int entry = curr % IPG_RFDLIST_LENGTH; + struct ipg_rx *rxfd = sp->rxd + entry; + + if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE))) + break; + + switch (ipg_nic_rx_check_frame_type(dev)) { + case Frame_WithStart_WithEnd: + ipg_nic_rx_with_start_and_end(dev, tp, rxfd, entry); + break; + case Frame_WithStart: + ipg_nic_rx_with_start(dev, tp, rxfd, entry); + break; + case Frame_WithEnd: + ipg_nic_rx_with_end(dev, tp, rxfd, entry); + break; + case Frame_NoStart_NoEnd: + ipg_nic_rx_no_start_no_end(dev, tp, rxfd, entry); + break; + } + } + + sp->rx_current = curr; + + if (i == IPG_MAXRFDPROCESS_COUNT) { + /* There are more RFDs to process, however the + * allocated amount of RFD processing time has + * expired. Assert Interrupt Requested to make + * sure we come back to process the remaining RFDs. + */ + ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); + } + + ipg_nic_rxrestore(dev); + + return 0; +} + +#else +static int ipg_nic_rx(struct net_device *dev) +{ + /* Transfer received Ethernet frames to higher network layers. */ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int curr = sp->rx_current; + void __iomem *ioaddr = sp->ioaddr; + struct ipg_rx *rxfd; + unsigned int i; + + IPG_DEBUG_MSG("_nic_rx\n"); + +#define __RFS_MASK \ + cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) + + for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { + unsigned int entry = curr % IPG_RFDLIST_LENGTH; + struct sk_buff *skb = sp->RxBuff[entry]; + unsigned int framelen; + + rxfd = sp->rxd + entry; + + if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) + break; + + /* Get received frame length. */ + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + + /* Check for jumbo frame arrival with too small + * RXFRAG_SIZE. + */ + if (framelen > IPG_RXFRAG_SIZE) { + IPG_DEBUG_MSG + ("RFS FrameLen > allocated fragment size.\n"); + + framelen = IPG_RXFRAG_SIZE; + } + + if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs & + (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | + IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))))) { + + IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", + (unsigned long int) rxfd->rfs); + + /* Increment general receive error statistic. */ + sp->stats.rx_errors++; + + /* Increment detailed receive error statistics. */ + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXFIFOOVERRUN)) { + IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); + sp->stats.rx_fifo_errors++; + } + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXRUNTFRAME)) { + IPG_DEBUG_MSG("RX runt occured.\n"); + sp->stats.rx_length_errors++; + } + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXOVERSIZEDFRAME)) ; + /* Do nothing, error count handled by a IPG + * statistic register. + */ + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXALIGNMENTERROR)) { + IPG_DEBUG_MSG("RX alignment error occured.\n"); + sp->stats.rx_frame_errors++; + } + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXFCSERROR)) ; + /* Do nothing, error count handled by a IPG + * statistic register. 
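 *
 * (Editor's annotation, not part of the original patch: these error
 * counts are picked up from the hardware MIB registers instead;
 * ipg_nic_get_stats() folds IPG_FRAMECHECKSEQERRORS into rx_crc_errors
 * and IPG_FRAMETOOLONGERRRORS into rx_length_errors.)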
+ */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. + */ + if (skb) { + u64 info = rxfd->frag_info; + + pci_unmap_single(sp->pdev, + le64_to_cpu(info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + IPG_DEV_KFREE_SKB(skb); + } + } else { + + /* Adjust the new buffer length to accomodate the size + * of the received frame. + */ + skb_put(skb, framelen); + + /* Set the buffer's protocol field to Ethernet. */ + skb->protocol = eth_type_trans(skb, dev); + + /* If the frame contains an IP/TCP/UDP frame, + * determine if upper layer must check IP/TCP/UDP + * checksums. + * + * NOTE: DO NOT RELY ON THE TCP/UDP CHECKSUM + * VERIFICATION FOR SILICON REVISIONS B3 + * AND EARLIER! + * + if ((le64_to_cpu(rxfd->rfs & + (IPG_RFS_TCPDETECTED | IPG_RFS_UDPDETECTED | + IPG_RFS_IPDETECTED))) && + !(le64_to_cpu(rxfd->rfs & + (IPG_RFS_TCPERROR | IPG_RFS_UDPERROR | + IPG_RFS_IPERROR)))) { + * Indicate IP checksums were performed + * by the IPG. + * + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + */ + { + /* The IPG encountered an error with (or + * there were no) IP/TCP/UDP checksums. + * This may or may not indicate an invalid + * IP/TCP/UDP frame was received. Let the + * upper layer decide. + */ + skb->ip_summed = CHECKSUM_NONE; + } + + /* Hand off frame for higher layer processing. + * The function netif_rx() releases the sk_buff + * when processing completes. + */ + netif_rx(skb); + + /* Record frame receive time (jiffies = Linux + * kernel current time stamp). + */ + dev->last_rx = jiffies; + } + + /* Assure RX buffer is not reused by IPG. */ + sp->RxBuff[entry] = NULL; + } + + /* + * If there are more RFDs to proces and the allocated amount of RFD + * processing time has expired, assert Interrupt Requested to make + * sure we come back to process the remaining RFDs. + */ + if (i == IPG_MAXRFDPROCESS_COUNT) + ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); + +#ifdef IPG_DEBUG + /* Check if the RFD list contained no receive frame data. */ + if (!i) + sp->EmptyRFDListCount++; +#endif + while ((le64_to_cpu(rxfd->rfs & IPG_RFS_RFDDONE)) && + !((le64_to_cpu(rxfd->rfs & IPG_RFS_FRAMESTART)) && + (le64_to_cpu(rxfd->rfs & IPG_RFS_FRAMEEND)))) { + unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; + + rxfd = sp->rxd + entry; + + IPG_DEBUG_MSG("Frame requires multiple RFDs.\n"); + + /* An unexpected event, additional code needed to handle + * properly. So for the time being, just disregard the + * frame. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. + */ + if (sp->RxBuff[entry]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); + } + + /* Assure RX buffer is not reused by IPG. */ + sp->RxBuff[entry] = NULL; + } + + sp->rx_current = curr; + + /* Check to see if there are a minimum number of used + * RFDs before restoring any (should improve performance.) 
+ */ + if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) + ipg_nic_rxrestore(dev); + + return 0; +} +#endif + +static void ipg_reset_after_host_error(struct work_struct *work) +{ + struct ipg_nic_private *sp = + container_of(work, struct ipg_nic_private, task.work); + struct net_device *dev = sp->dev; + + IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL)); + + /* + * Acknowledge HostError interrupt by resetting + * IPG DMA and HOST. + */ + ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); + + init_rfdlist(dev); + init_tfdlist(dev); + + if (ipg_io_config(dev) < 0) { + printk(KERN_INFO "%s: Cannot recover from PCI error.\n", + dev->name); + schedule_delayed_work(&sp->task, HZ); + } +} + +static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) +{ + struct net_device *dev = dev_inst; + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int handled = 0; + u16 status; + + IPG_DEBUG_MSG("_interrupt_handler\n"); + +#ifdef JUMBO_FRAME + ipg_nic_rxrestore(dev); +#endif + /* Get interrupt source information, and acknowledge + * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, + * IntRequested, MacControlFrame, LinkEvent) interrupts + * if issued. Also, all IPG interrupts are disabled by + * reading IntStatusAck. + */ + status = ipg_r16(INT_STATUS_ACK); + + IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status); + + /* Shared IRQ of remove event. */ + if (!(status & IPG_IS_RSVD_MASK)) + goto out_enable; + + handled = 1; + + if (unlikely(!netif_running(dev))) + goto out; + + spin_lock(&sp->lock); + + /* If RFDListEnd interrupt, restore all used RFDs. */ + if (status & IPG_IS_RFD_LIST_END) { + IPG_DEBUG_MSG("RFDListEnd Interrupt.\n"); + + /* The RFD list end indicates an RFD was encountered + * with a 0 NextPtr, or with an RFDDone bit set to 1 + * (indicating the RFD is not read for use by the + * IPG.) Try to restore all RFDs. + */ + ipg_nic_rxrestore(dev); + +#ifdef IPG_DEBUG + /* Increment the RFDlistendCount counter. */ + sp->RFDlistendCount++; +#endif + } + + /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or + * IntRequested interrupt, process received frames. */ + if ((status & IPG_IS_RX_DMA_PRIORITY) || + (status & IPG_IS_RFD_LIST_END) || + (status & IPG_IS_RX_DMA_COMPLETE) || + (status & IPG_IS_INT_REQUESTED)) { +#ifdef IPG_DEBUG + /* Increment the RFD list checked counter if interrupted + * only to check the RFD list. */ + if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | + IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & + (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | + IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | + IPG_IS_UPDATE_STATS))) + sp->RFDListCheckedCount++; +#endif + + ipg_nic_rx(dev); + } + + /* If TxDMAComplete interrupt, free used TFDs. */ + if (status & IPG_IS_TX_DMA_COMPLETE) + ipg_nic_txfree(dev); + + /* TxComplete interrupts indicate one of numerous actions. + * Determine what action to take based on TXSTATUS register. + */ + if (status & IPG_IS_TX_COMPLETE) + ipg_nic_txcleanup(dev); + + /* If UpdateStats interrupt, update Linux Ethernet statistics */ + if (status & IPG_IS_UPDATE_STATS) + ipg_nic_get_stats(dev); + + /* If HostError interrupt, reset IPG. */ + if (status & IPG_IS_HOST_ERROR) { + IPG_DDEBUG_MSG("HostError Interrupt\n"); + + schedule_delayed_work(&sp->task, 0); + } + + /* If LinkEvent interrupt, resolve autonegotiation. 
*/ + if (status & IPG_IS_LINK_EVENT) { + if (ipg_config_autoneg(dev) < 0) + printk(KERN_INFO "%s: Auto-negotiation error.\n", + dev->name); + } + + /* If MACCtrlFrame interrupt, do nothing. */ + if (status & IPG_IS_MAC_CTRL_FRAME) + IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n"); + + /* If RxComplete interrupt, do nothing. */ + if (status & IPG_IS_RX_COMPLETE) + IPG_DEBUG_MSG("RxComplete interrupt.\n"); + + /* If RxEarly interrupt, do nothing. */ + if (status & IPG_IS_RX_EARLY) + IPG_DEBUG_MSG("RxEarly interrupt.\n"); + +out_enable: + /* Re-enable IPG interrupts. */ + ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | + IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | + IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); + + spin_unlock(&sp->lock); +out: + return IRQ_RETVAL(handled); +} + +static void ipg_rx_clear(struct ipg_nic_private *sp) +{ + unsigned int i; + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + if (sp->RxBuff[i]) { + struct ipg_rx *rxfd = sp->rxd + i; + + IPG_DEV_KFREE_SKB(sp->RxBuff[i]); + sp->RxBuff[i] = NULL; + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + } + } +} + +static void ipg_tx_clear(struct ipg_nic_private *sp) +{ + unsigned int i; + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + if (sp->TxBuff[i]) { + struct ipg_tx *txfd = sp->txd + i; + + pci_unmap_single(sp->pdev, + le64_to_cpu(txfd->frag_info & ~IPG_TFI_FRAGLEN), + sp->TxBuff[i]->len, PCI_DMA_TODEVICE); + + IPG_DEV_KFREE_SKB(sp->TxBuff[i]); + + sp->TxBuff[i] = NULL; + } + } +} + +static int ipg_nic_open(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + struct pci_dev *pdev = sp->pdev; + int rc; + + IPG_DEBUG_MSG("_nic_open\n"); + + sp->rx_buf_sz = IPG_RXSUPPORT_SIZE; + + /* Check for interrupt line conflicts, and request interrupt + * line for IPG. + * + * IMPORTANT: Disable IPG interrupts prior to registering + * IRQ. + */ + ipg_w16(0x0000, INT_ENABLE); + + /* Register the interrupt line to be used by the IPG within + * the Linux system. + */ + rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED, + dev->name, dev); + if (rc < 0) { + printk(KERN_INFO "%s: Error when requesting interrupt.\n", + dev->name); + goto out; + } + + dev->irq = pdev->irq; + + rc = -ENOMEM; + + sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, + &sp->rxd_map, GFP_KERNEL); + if (!sp->rxd) + goto err_free_irq_0; + + sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, + &sp->txd_map, GFP_KERNEL); + if (!sp->txd) + goto err_free_rx_1; + + rc = init_rfdlist(dev); + if (rc < 0) { + printk(KERN_INFO "%s: Error during configuration.\n", + dev->name); + goto err_free_tx_2; + } + + init_tfdlist(dev); + + rc = ipg_io_config(dev); + if (rc < 0) { + printk(KERN_INFO "%s: Error during configuration.\n", + dev->name); + goto err_release_tfdlist_3; + } + + /* Resolve autonegotiation. */ + if (ipg_config_autoneg(dev) < 0) + printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name); + +#ifdef JUMBO_FRAME + /* initialize JUMBO Frame control variable */ + sp->Jumbo.FoundStart = 0; + sp->Jumbo.CurrentSize = 0; + sp->Jumbo.skb = 0; + dev->mtu = IPG_TXFRAG_SIZE; +#endif + + /* Enable transmit and receive operation of the IPG. 
*/ + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + + netif_start_queue(dev); +out: + return rc; + +err_release_tfdlist_3: + ipg_tx_clear(sp); + ipg_rx_clear(sp); +err_free_tx_2: + dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); +err_free_rx_1: + dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); +err_free_irq_0: + free_irq(pdev->irq, dev); + goto out; +} + +static int ipg_nic_stop(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + struct pci_dev *pdev = sp->pdev; + + IPG_DEBUG_MSG("_nic_stop\n"); + + netif_stop_queue(dev); + + IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount); + IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount); + IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount); + IPG_DUMPTFDLIST(dev); + + do { + (void) ipg_r16(INT_STATUS_ACK); + + ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); + + synchronize_irq(pdev->irq); + } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); + + ipg_rx_clear(sp); + + ipg_tx_clear(sp); + + pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); + pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); + + free_irq(pdev->irq, dev); + + return 0; +} + +static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; + unsigned long flags; + struct ipg_tx *txfd; + + IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); + + /* If in 10Mbps mode, stop the transmit queue so + * no more transmit frames are accepted. + */ + if (sp->tenmbpsmode) + netif_stop_queue(dev); + + if (sp->ResetCurrentTFD) { + sp->ResetCurrentTFD = 0; + entry = 0; + } + + txfd = sp->txd + entry; + + sp->TxBuff[entry] = skb; + + /* Clear all TFC fields, except TFDDONE. */ + txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); + + /* Specify the TFC field within the TFD. */ + txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | + (IPG_TFC_FRAMEID & cpu_to_le64(sp->tx_current)) | + (IPG_TFC_FRAGCOUNT & (1 << 24))); + + /* Request TxComplete interrupts at an interval defined + * by the constant IPG_FRAMESBETWEENTXCOMPLETES. + * Request TxComplete interrupt for every frame + * if in 10Mbps mode to accomodate problem with 10Mbps + * processing. + */ + if (sp->tenmbpsmode) + txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); + else if (!((sp->tx_current - sp->tx_dirty + 1) > + IPG_FRAMESBETWEENTXDMACOMPLETES)) { + txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); + } + /* Based on compilation option, determine if FCS is to be + * appended to transmit frame by IPG. + */ + if (!(IPG_APPEND_FCS_ON_TX)) + txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); + + /* Based on compilation option, determine if IP, TCP and/or + * UDP checksums are to be added to transmit frame by IPG. + */ + if (IPG_ADD_IPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); + + if (IPG_ADD_TCPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); + + if (IPG_ADD_UDPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); + + /* Based on compilation option, determine if VLAN tag info is to be + * inserted into transmit frame by IPG. 
+ */ + if (IPG_INSERT_MANUAL_VLAN_TAG) { + txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | + ((u64) IPG_MANUAL_VLAN_VID << 32) | + ((u64) IPG_MANUAL_VLAN_CFI << 44) | + ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); + } + + /* The fragment start location within system memory is defined + * by the sk_buff structure's data field. The physical address + * of this location within the system's virtual memory space + * is determined using the IPG_HOST2BUS_MAP function. + */ + txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE)); + + /* The length of the fragment within system memory is defined by + * the sk_buff structure's len field. + */ + txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & + ((u64) (skb->len & 0xffff) << 48)); + + /* Clear the TFDDone bit last to indicate the TFD is ready + * for transfer to the IPG. + */ + txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); + + spin_lock_irqsave(&sp->lock, flags); + + sp->tx_current++; + + mmiowb(); + + ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); + + if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) + netif_stop_queue(dev); + + spin_unlock_irqrestore(&sp->lock, flags); + + return NETDEV_TX_OK; +} + +static void ipg_set_phy_default_param(unsigned char rev, + struct net_device *dev, int phy_address) +{ + unsigned short length; + unsigned char revision; + unsigned short *phy_param; + unsigned short address, value; + + phy_param = &DefaultPhyParam[0]; + length = *phy_param & 0x00FF; + revision = (unsigned char)((*phy_param) >> 8); + phy_param++; + while (length != 0) { + if (rev == revision) { + while (length > 1) { + address = *phy_param; + value = *(phy_param + 1); + phy_param += 2; + mdio_write(dev, phy_address, address, value); + length -= 4; + } + break; + } else { + phy_param += length / 2; + length = *phy_param & 0x00FF; + revision = (unsigned char)((*phy_param) >> 8); + phy_param++; + } + } +} + +/* JES20040127EEPROM */ +static int read_eeprom(struct net_device *dev, int eep_addr) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + unsigned int i; + int ret = 0; + u16 value; + + value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); + ipg_w16(value, EEPROM_CTRL); + + for (i = 0; i < 1000; i++) { + u16 data; + + mdelay(10); + data = ipg_r16(EEPROM_CTRL); + if (!(data & IPG_EC_EEPROM_BUSY)) { + ret = ipg_r16(EEPROM_DATA); + break; + } + } + return ret; +} + +static void ipg_init_mii(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct mii_if_info *mii_if = &sp->mii_if; + int phyaddr; + + mii_if->dev = dev; + mii_if->mdio_read = mdio_read; + mii_if->mdio_write = mdio_write; + mii_if->phy_id_mask = 0x1f; + mii_if->reg_num_mask = 0x1f; + + mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); + + if (phyaddr != 0x1f) { + u16 mii_phyctrl, mii_1000cr; + u8 revisionid = 0; + + mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); + mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | + GMII_PHY_1000BASETCONTROL_PreferMaster; + mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); + + mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); + + /* Set default phyparam */ + pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); + ipg_set_phy_default_param(revisionid, dev, phyaddr); + + /* Reset PHY */ + mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; + mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); + + } +} + +static int ipg_hw_init(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + int rc; + + /* Read/Write
and Reset EEPROM Value Jesse20040128EEPROM_VALUE */ + /* Read LED Mode Configuration from EEPROM */ + sp->LED_Mode = read_eeprom(dev, 6); + + /* Reset all functions within the IPG. Do not assert + * RST_OUT as not compatible with some PHYs. + */ + rc = ipg_reset(dev, IPG_RESET_MASK); + if (rc < 0) + goto out; + + ipg_init_mii(dev); + + /* Read MAC Address from EEPROM */ + for (i = 0; i < 3; i++) + sp->station_addr[i] = read_eeprom(dev, 16 + i); + + for (i = 0; i < 3; i++) + ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); + + /* Set station address in ethernet_device structure. */ + dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; + dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; + dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; + dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; + dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; + dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; +out: + return rc; +} + +static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) +{ + /* Function to accomodate changes to Maximum Transfer Unit + * (or MTU) of IPG NIC. Cannot use default function since + * the default will not allow for MTU > 1500 bytes. + */ + + IPG_DEBUG_MSG("_nic_change_mtu\n"); + + /* Check that the new MTU value is between 68 (14 byte header, 46 + * byte payload, 4 byte FCS) and IPG_MAX_RXFRAME_SIZE, which + * corresponds to the MAXFRAMESIZE register in the IPG. + */ + if ((new_mtu < 68) || (new_mtu > IPG_MAX_RXFRAME_SIZE)) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_ethtool_gset(&sp->mii_if, cmd); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_ethtool_sset(&sp->mii_if, cmd); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_nway_reset(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_nway_restart(&sp->mii_if); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static struct ethtool_ops ipg_ethtool_ops = { + .get_settings = ipg_get_settings, + .set_settings = ipg_set_settings, + .nway_reset = ipg_nway_reset, +}; + +static void ipg_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct ipg_nic_private *sp = netdev_priv(dev); + + IPG_DEBUG_MSG("_remove\n"); + + /* Un-register Ethernet device. 
*/ + unregister_netdev(dev); + + pci_iounmap(pdev, sp->ioaddr); + + pci_release_regions(pdev); + + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __devinit ipg_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + unsigned int i = id->driver_data; + struct ipg_nic_private *sp; + struct net_device *dev; + void __iomem *ioaddr; + int rc; + + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; + + printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]); + + pci_set_master(pdev); + + rc = pci_set_dma_mask(pdev, DMA_40BIT_MASK); + if (rc < 0) { + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (rc < 0) { + printk(KERN_ERR "%s: DMA config failed.\n", + pci_name(pdev)); + goto err_disable_0; + } + } + + /* + * Initialize net device. + */ + dev = alloc_etherdev(sizeof(struct ipg_nic_private)); + if (!dev) { + printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev)); + rc = -ENOMEM; + goto err_disable_0; + } + + sp = netdev_priv(dev); + spin_lock_init(&sp->lock); + mutex_init(&sp->mii_mutex); + + /* Declare IPG NIC functions for Ethernet device methods. + */ + dev->open = &ipg_nic_open; + dev->stop = &ipg_nic_stop; + dev->hard_start_xmit = &ipg_nic_hard_start_xmit; + dev->get_stats = &ipg_nic_get_stats; + dev->set_multicast_list = &ipg_nic_set_multicast_list; + dev->do_ioctl = ipg_ioctl; + dev->tx_timeout = ipg_tx_timeout; + dev->change_mtu = &ipg_nic_change_mtu; + + SET_NETDEV_DEV(dev, &pdev->dev); + SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_free_dev_1; + + ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); + if (!ioaddr) { + printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev)); + rc = -EIO; + goto err_release_regions_2; + } + + /* Save the pointer to the PCI device information. */ + sp->ioaddr = ioaddr; + sp->pdev = pdev; + sp->dev = dev; + + INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); + + pci_set_drvdata(pdev, dev); + + rc = ipg_hw_init(dev); + if (rc < 0) + goto err_unmap_3; + + rc = register_netdev(dev); + if (rc < 0) + goto err_unmap_3; + + printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name); +out: + return rc; + +err_unmap_3: + pci_iounmap(pdev, ioaddr); +err_release_regions_2: + pci_release_regions(pdev); +err_free_dev_1: + free_netdev(dev); +err_disable_0: + pci_disable_device(pdev); + goto out; +} + +static struct pci_driver ipg_pci_driver = { + .name = IPG_DRIVER_NAME, + .id_table = ipg_pci_tbl, + .probe = ipg_probe, + .remove = __devexit_p(ipg_remove), +}; + +static int __init ipg_init_module(void) +{ + return pci_register_driver(&ipg_pci_driver); +} + +static void __exit ipg_exit_module(void) +{ + pci_unregister_driver(&ipg_pci_driver); +} + +module_init(ipg_init_module); +module_exit(ipg_exit_module); diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h new file mode 100644 index 000000000000..1952d0dfd314 --- /dev/null +++ b/drivers/net/ipg.h @@ -0,0 +1,856 @@ +/* + * + * ipg.h + * + * Include file for Gigabit Ethernet device driver for Network + * Interface Cards (NICs) utilizing the Tamarack Microelectronics + * Inc. IPG Gigabit or Triple Speed Ethernet Media Access + * Controller. + * + * Craig Rich + * Sundance Technology, Inc. 
+ * 1485 Saratoga Avenue + * Suite 200 + * San Jose, CA 95129 + * 408 873 4117 + * www.sundanceti.com + * craig_rich@sundanceti.com + */ +#ifndef __LINUX_IPG_H +#define __LINUX_IPG_H + +#include <linux/version.h> +#include <linux/module.h> + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <asm/io.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/version.h> +#include <asm/bitops.h> +/*#include <asm/spinlock.h>*/ + +#define DrvVer "2.09d" + +#define IPG_DEV_KFREE_SKB(skb) dev_kfree_skb_irq(skb) + +/* + * Constants + */ + +/* GMII based PHY IDs */ +#define NS 0x2000 +#define MARVELL 0x0141 +#define ICPLUS_PHY 0x243 + +/* NIC Physical Layer Device MII register fields. */ +#define MII_PHY_SELECTOR_IEEE8023 0x0001 +#define MII_PHY_TECHABILITYFIELD 0x1FE0 + +/* GMII_PHY_1000 need to set to prefer master */ +#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 + +/* NIC Physical Layer Device GMII constants. */ +#define GMII_PREAMBLE 0xFFFFFFFF +#define GMII_ST 0x1 +#define GMII_READ 0x2 +#define GMII_WRITE 0x1 +#define GMII_TA_READ_MASK 0x1 +#define GMII_TA_WRITE 0x2 + +/* I/O register offsets. */ +enum ipg_regs { + DMA_CTRL = 0x00, + RX_DMA_STATUS = 0x08, // Unused + reserved + TFD_LIST_PTR_0 = 0x10, + TFD_LIST_PTR_1 = 0x14, + TX_DMA_BURST_THRESH = 0x18, + TX_DMA_URGENT_THRESH = 0x19, + TX_DMA_POLL_PERIOD = 0x1a, + RFD_LIST_PTR_0 = 0x1c, + RFD_LIST_PTR_1 = 0x20, + RX_DMA_BURST_THRESH = 0x24, + RX_DMA_URGENT_THRESH = 0x25, + RX_DMA_POLL_PERIOD = 0x26, + DEBUG_CTRL = 0x2c, + ASIC_CTRL = 0x30, + FIFO_CTRL = 0x38, // Unused + FLOW_OFF_THRESH = 0x3c, + FLOW_ON_THRESH = 0x3e, + EEPROM_DATA = 0x48, + EEPROM_CTRL = 0x4a, + EXPROM_ADDR = 0x4c, // Unused + EXPROM_DATA = 0x50, // Unused + WAKE_EVENT = 0x51, // Unused + COUNTDOWN = 0x54, // Unused + INT_STATUS_ACK = 0x5a, + INT_ENABLE = 0x5c, + INT_STATUS = 0x5e, // Unused + TX_STATUS = 0x60, + MAC_CTRL = 0x6c, + VLAN_TAG = 0x70, // Unused + PHY_SET = 0x75, // JES20040127EEPROM + PHY_CTRL = 0x76, + STATION_ADDRESS_0 = 0x78, + STATION_ADDRESS_1 = 0x7a, + STATION_ADDRESS_2 = 0x7c, + MAX_FRAME_SIZE = 0x86, + RECEIVE_MODE = 0x88, + HASHTABLE_0 = 0x8c, + HASHTABLE_1 = 0x90, + RMON_STATISTICS_MASK = 0x98, + STATISTICS_MASK = 0x9c, + RX_JUMBO_FRAMES = 0xbc, // Unused + TCP_CHECKSUM_ERRORS = 0xc0, // Unused + IP_CHECKSUM_ERRORS = 0xc2, // Unused + UDP_CHECKSUM_ERRORS = 0xc4, // Unused + TX_JUMBO_FRAMES = 0xf4 // Unused +}; + +/* Ethernet MIB statistic register offsets. */ +#define IPG_OCTETRCVOK 0xA8 +#define IPG_MCSTOCTETRCVDOK 0xAC +#define IPG_BCSTOCTETRCVOK 0xB0 +#define IPG_FRAMESRCVDOK 0xB4 +#define IPG_MCSTFRAMESRCVDOK 0xB8 +#define IPG_BCSTFRAMESRCVDOK 0xBE +#define IPG_MACCONTROLFRAMESRCVD 0xC6 +#define IPG_FRAMETOOLONGERRRORS 0xC8 +#define IPG_INRANGELENGTHERRORS 0xCA +#define IPG_FRAMECHECKSEQERRORS 0xCC +#define IPG_FRAMESLOSTRXERRORS 0xCE +#define IPG_OCTETXMTOK 0xD0 +#define IPG_MCSTOCTETXMTOK 0xD4 +#define IPG_BCSTOCTETXMTOK 0xD8 +#define IPG_FRAMESXMTDOK 0xDC +#define IPG_MCSTFRAMESXMTDOK 0xE0 +#define IPG_FRAMESWDEFERREDXMT 0xE4 +#define IPG_LATECOLLISIONS 0xE8 +#define IPG_MULTICOLFRAMES 0xEC +#define IPG_SINGLECOLFRAMES 0xF0 +#define IPG_BCSTFRAMESXMTDOK 0xF6 +#define IPG_CARRIERSENSEERRORS 0xF8 +#define IPG_MACCONTROLFRAMESXMTDOK 0xFA +#define IPG_FRAMESABORTXSCOLLS 0xFC +#define IPG_FRAMESWEXDEFERRAL 0xFE + +/* RMON statistic register offsets. 
*/ +#define IPG_ETHERSTATSCOLLISIONS 0x100 +#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 +#define IPG_ETHERSTATSPKTSTRANSMIT 0x108 +#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C +#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 +#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 +#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 +#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C +#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 +#define IPG_ETHERSTATSCRCALIGNERRORS 0x124 +#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 +#define IPG_ETHERSTATSFRAGMENTS 0x12C +#define IPG_ETHERSTATSJABBERS 0x130 +#define IPG_ETHERSTATSOCTETS 0x134 +#define IPG_ETHERSTATSPKTS 0x138 +#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C +#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 +#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 +#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 +#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C +#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 + +/* RMON statistic register equivalents. */ +#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 +#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 +#define IPG_ETHERSTATSMULTICASTPKTS 0xB8 +#define IPG_ETHERSTATSBROADCASTPKTS 0xBE +#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 +#define IPG_ETHERSTATSDROPEVENTS 0xCE + +/* Serial EEPROM offsets */ +#define IPG_EEPROM_CONFIGPARAM 0x00 +#define IPG_EEPROM_ASICCTRL 0x01 +#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 +#define IPG_EEPROM_SUBSYSTEMID 0x03 +#define IPG_EEPROM_STATIONADDRESS0 0x10 +#define IPG_EEPROM_STATIONADDRESS1 0x11 +#define IPG_EEPROM_STATIONADDRESS2 0x12 + +/* Register & data structure bit masks */ + +/* PCI register masks. */ + +/* IOBaseAddress */ +#define IPG_PIB_RSVD_MASK 0xFFFFFE01 +#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 +#define IPG_PIB_IOBASEADDRIND 0x00000001 + +/* MemBaseAddress */ +#define IPG_PMB_RSVD_MASK 0xFFFFFE07 +#define IPG_PMB_MEMBASEADDRIND 0x00000001 +#define IPG_PMB_MEMMAPTYPE 0x00000006 +#define IPG_PMB_MEMMAPTYPE0 0x00000002 +#define IPG_PMB_MEMMAPTYPE1 0x00000004 +#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 + +/* ConfigStatus */ +#define IPG_CS_RSVD_MASK 0xFFB0 +#define IPG_CS_CAPABILITIES 0x0010 +#define IPG_CS_66MHZCAPABLE 0x0020 +#define IPG_CS_FASTBACK2BACK 0x0080 +#define IPG_CS_DATAPARITYREPORTED 0x0100 +#define IPG_CS_DEVSELTIMING 0x0600 +#define IPG_CS_SIGNALEDTARGETABORT 0x0800 +#define IPG_CS_RECEIVEDTARGETABORT 0x1000 +#define IPG_CS_RECEIVEDMASTERABORT 0x2000 +#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 +#define IPG_CS_DETECTEDPARITYERROR 0x8000 + +/* TFD data structure masks. 
*/ + +/* TFDList, TFC */ +#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF +#define IPG_TFC_FRAMEID 0x000000000000FFFF +#define IPG_TFC_WORDALIGN 0x0000000000030000 +#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 +#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 +#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 +#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 +#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 +#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 +#define IPG_TFC_TXINDICATE 0x0000000000400000 +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 +#define IPG_TFC_FRAGCOUNT 0x000000000F000000 +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 +#define IPG_TFC_TFDDONE 0x0000000080000000 +#define IPG_TFC_VID 0x00000FFF00000000 +#define IPG_TFC_CFI 0x0000100000000000 +#define IPG_TFC_USERPRIORITY 0x0000E00000000000 + +/* TFDList, FragInfo */ +#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF +#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF +#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL + +/* RFD data structure masks. */ + +/* RFDList, RFS */ +#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF +#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF +#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 +#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 +#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 +#define IPG_RFS_RXFCSERROR 0x0000000000080000 +#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 +#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 +#define IPG_RFS_VLANDETECTED 0x0000000000400000 +#define IPG_RFS_TCPDETECTED 0x0000000000800000 +#define IPG_RFS_TCPERROR 0x0000000001000000 +#define IPG_RFS_UDPDETECTED 0x0000000002000000 +#define IPG_RFS_UDPERROR 0x0000000004000000 +#define IPG_RFS_IPDETECTED 0x0000000008000000 +#define IPG_RFS_IPERROR 0x0000000010000000 +#define IPG_RFS_FRAMESTART 0x0000000020000000 +#define IPG_RFS_FRAMEEND 0x0000000040000000 +#define IPG_RFS_RFDDONE 0x0000000080000000 +#define IPG_RFS_TCI 0x0000FFFF00000000 + +/* RFDList, FragInfo */ +#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF +#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF +#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL + +/* I/O Register masks. 
*/ + +/* RMON Statistics Mask */ +#define IPG_RZ_ALL 0x0FFFFFFF + +/* Statistics Mask */ +#define IPG_SM_ALL 0x0FFFFFFF +#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 +#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 +#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 +#define IPG_SM_RXJUMBOFRAMES 0x00000008 +#define IPG_SM_TCPCHECKSUMERRORS 0x00000010 +#define IPG_SM_IPCHECKSUMERRORS 0x00000020 +#define IPG_SM_UDPCHECKSUMERRORS 0x00000040 +#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 +#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 +#define IPG_SM_INRANGELENGTHERRORS 0x00000200 +#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 +#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 +#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 +#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 +#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 +#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 +#define IPG_SM_LATECOLLISIONS 0x00010000 +#define IPG_SM_MULTICOLFRAMES 0x00020000 +#define IPG_SM_SINGLECOLFRAMES 0x00040000 +#define IPG_SM_TXJUMBOFRAMES 0x00080000 +#define IPG_SM_CARRIERSENSEERRORS 0x00100000 +#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 +#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 +#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 + +/* Countdown */ +#define IPG_CD_RSVD_MASK 0x0700FFFF +#define IPG_CD_COUNT 0x0000FFFF +#define IPG_CD_COUNTDOWNSPEED 0x01000000 +#define IPG_CD_COUNTDOWNMODE 0x02000000 +#define IPG_CD_COUNTINTENABLED 0x04000000 + +/* TxDMABurstThresh */ +#define IPG_TB_RSVD_MASK 0xFF + +/* TxDMAUrgentThresh */ +#define IPG_TU_RSVD_MASK 0xFF + +/* TxDMAPollPeriod */ +#define IPG_TP_RSVD_MASK 0xFF + +/* RxDMAUrgentThresh */ +#define IPG_RU_RSVD_MASK 0xFF + +/* RxDMAPollPeriod */ +#define IPG_RP_RSVD_MASK 0xFF + +/* ReceiveMode */ +#define IPG_RM_RSVD_MASK 0x3F +#define IPG_RM_RECEIVEUNICAST 0x01 +#define IPG_RM_RECEIVEMULTICAST 0x02 +#define IPG_RM_RECEIVEBROADCAST 0x04 +#define IPG_RM_RECEIVEALLFRAMES 0x08 +#define IPG_RM_RECEIVEMULTICASTHASH 0x10 +#define IPG_RM_RECEIVEIPMULTICAST 0x20 + +/* PhySet JES20040127EEPROM*/ +#define IPG_PS_MEM_LENB9B 0x01 +#define IPG_PS_MEM_LEN9 0x02 +#define IPG_PS_NON_COMPDET 0x04 + +/* PhyCtrl */ +#define IPG_PC_RSVD_MASK 0xFF +#define IPG_PC_MGMTCLK_LO 0x00 +#define IPG_PC_MGMTCLK_HI 0x01 +#define IPG_PC_MGMTCLK 0x01 +#define IPG_PC_MGMTDATA 0x02 +#define IPG_PC_MGMTDIR 0x04 +#define IPG_PC_DUPLEX_POLARITY 0x08 +#define IPG_PC_DUPLEX_STATUS 0x10 +#define IPG_PC_LINK_POLARITY 0x20 +#define IPG_PC_LINK_SPEED 0xC0 +#define IPG_PC_LINK_SPEED_10MBPS 0x40 +#define IPG_PC_LINK_SPEED_100MBPS 0x80 +#define IPG_PC_LINK_SPEED_1000MBPS 0xC0 + +/* DMACtrl */ +#define IPG_DC_RSVD_MASK 0xC07D9818 +#define IPG_DC_RX_DMA_COMPLETE 0x00000008 +#define IPG_DC_RX_DMA_POLL_NOW 0x00000010 +#define IPG_DC_TX_DMA_COMPLETE 0x00000800 +#define IPG_DC_TX_DMA_POLL_NOW 0x00001000 +#define IPG_DC_TX_DMA_IN_PROG 0x00008000 +#define IPG_DC_RX_EARLY_DISABLE 0x00010000 +#define IPG_DC_MWI_DISABLE 0x00040000 +#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 +#define IPG_DC_TX_BURST_LIMIT 0x00700000 +#define IPG_DC_TARGET_ABORT 0x40000000 +#define IPG_DC_MASTER_ABORT 0x80000000 + +/* ASICCtrl */ +#define IPG_AC_RSVD_MASK 0x07FFEFF2 +#define IPG_AC_EXP_ROM_SIZE 0x00000002 +#define IPG_AC_PHY_SPEED10 0x00000010 +#define IPG_AC_PHY_SPEED100 0x00000020 +#define IPG_AC_PHY_SPEED1000 0x00000040 +#define IPG_AC_PHY_MEDIA 0x00000080 +#define IPG_AC_FORCED_CFG 0x00000700 +#define IPG_AC_D3RESETDISABLE 0x00000800 +#define IPG_AC_SPEED_UP_MODE 0x00002000 +#define 
IPG_AC_LED_MODE 0x00004000 +#define IPG_AC_RST_OUT_POLARITY 0x00008000 +#define IPG_AC_GLOBAL_RESET 0x00010000 +#define IPG_AC_RX_RESET 0x00020000 +#define IPG_AC_TX_RESET 0x00040000 +#define IPG_AC_DMA 0x00080000 +#define IPG_AC_FIFO 0x00100000 +#define IPG_AC_NETWORK 0x00200000 +#define IPG_AC_HOST 0x00400000 +#define IPG_AC_AUTO_INIT 0x00800000 +#define IPG_AC_RST_OUT 0x01000000 +#define IPG_AC_INT_REQUEST 0x02000000 +#define IPG_AC_RESET_BUSY 0x04000000 +#define IPG_AC_LED_SPEED 0x08000000 //JES20040127EEPROM +#define IPG_AC_LED_MODE_BIT_1 0x20000000 //JES20040127EEPROM + +/* EepromCtrl */ +#define IPG_EC_RSVD_MASK 0x83FF +#define IPG_EC_EEPROM_ADDR 0x00FF +#define IPG_EC_EEPROM_OPCODE 0x0300 +#define IPG_EC_EEPROM_SUBCOMMAD 0x0000 +#define IPG_EC_EEPROM_WRITEOPCODE 0x0100 +#define IPG_EC_EEPROM_READOPCODE 0x0200 +#define IPG_EC_EEPROM_ERASEOPCODE 0x0300 +#define IPG_EC_EEPROM_BUSY 0x8000 + +/* FIFOCtrl */ +#define IPG_FC_RSVD_MASK 0xC001 +#define IPG_FC_RAM_TEST_MODE 0x0001 +#define IPG_FC_TRANSMITTING 0x4000 +#define IPG_FC_RECEIVING 0x8000 + +/* TxStatus */ +#define IPG_TS_RSVD_MASK 0xFFFF00DD +#define IPG_TS_TX_ERROR 0x00000001 +#define IPG_TS_LATE_COLLISION 0x00000004 +#define IPG_TS_TX_MAX_COLL 0x00000008 +#define IPG_TS_TX_UNDERRUN 0x00000010 +#define IPG_TS_TX_IND_REQD 0x00000040 +#define IPG_TS_TX_COMPLETE 0x00000080 +#define IPG_TS_TX_FRAMEID 0xFFFF0000 + +/* WakeEvent */ +#define IPG_WE_WAKE_PKT_ENABLE 0x01 +#define IPG_WE_MAGIC_PKT_ENABLE 0x02 +#define IPG_WE_LINK_EVT_ENABLE 0x04 +#define IPG_WE_WAKE_POLARITY 0x08 +#define IPG_WE_WAKE_PKT_EVT 0x10 +#define IPG_WE_MAGIC_PKT_EVT 0x20 +#define IPG_WE_LINK_EVT 0x40 +#define IPG_WE_WOL_ENABLE 0x80 + +/* IntEnable */ +#define IPG_IE_RSVD_MASK 0x1FFE +#define IPG_IE_HOST_ERROR 0x0002 +#define IPG_IE_TX_COMPLETE 0x0004 +#define IPG_IE_MAC_CTRL_FRAME 0x0008 +#define IPG_IE_RX_COMPLETE 0x0010 +#define IPG_IE_RX_EARLY 0x0020 +#define IPG_IE_INT_REQUESTED 0x0040 +#define IPG_IE_UPDATE_STATS 0x0080 +#define IPG_IE_LINK_EVENT 0x0100 +#define IPG_IE_TX_DMA_COMPLETE 0x0200 +#define IPG_IE_RX_DMA_COMPLETE 0x0400 +#define IPG_IE_RFD_LIST_END 0x0800 +#define IPG_IE_RX_DMA_PRIORITY 0x1000 + +/* IntStatus */ +#define IPG_IS_RSVD_MASK 0x1FFF +#define IPG_IS_INTERRUPT_STATUS 0x0001 +#define IPG_IS_HOST_ERROR 0x0002 +#define IPG_IS_TX_COMPLETE 0x0004 +#define IPG_IS_MAC_CTRL_FRAME 0x0008 +#define IPG_IS_RX_COMPLETE 0x0010 +#define IPG_IS_RX_EARLY 0x0020 +#define IPG_IS_INT_REQUESTED 0x0040 +#define IPG_IS_UPDATE_STATS 0x0080 +#define IPG_IS_LINK_EVENT 0x0100 +#define IPG_IS_TX_DMA_COMPLETE 0x0200 +#define IPG_IS_RX_DMA_COMPLETE 0x0400 +#define IPG_IS_RFD_LIST_END 0x0800 +#define IPG_IS_RX_DMA_PRIORITY 0x1000 + +/* MACCtrl */ +#define IPG_MC_RSVD_MASK 0x7FE33FA3 +#define IPG_MC_IFS_SELECT 0x00000003 +#define IPG_MC_IFS_4352BIT 0x00000003 +#define IPG_MC_IFS_1792BIT 0x00000002 +#define IPG_MC_IFS_1024BIT 0x00000001 +#define IPG_MC_IFS_96BIT 0x00000000 +#define IPG_MC_DUPLEX_SELECT 0x00000020 +#define IPG_MC_DUPLEX_SELECT_FD 0x00000020 +#define IPG_MC_DUPLEX_SELECT_HD 0x00000000 +#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 +#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 +#define IPG_MC_RCV_FCS 0x00000200 +#define IPG_MC_FIFO_LOOPBACK 0x00000400 +#define IPG_MC_MAC_LOOPBACK 0x00000800 +#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 +#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 +#define IPG_MC_COLLISION_DETECT 0x00010000 +#define IPG_MC_CARRIER_SENSE 0x00020000 +#define IPG_MC_STATISTICS_ENABLE 0x00200000 +#define IPG_MC_STATISTICS_DISABLE 
0x00400000 +#define IPG_MC_STATISTICS_ENABLED 0x00800000 +#define IPG_MC_TX_ENABLE 0x01000000 +#define IPG_MC_TX_DISABLE 0x02000000 +#define IPG_MC_TX_ENABLED 0x04000000 +#define IPG_MC_RX_ENABLE 0x08000000 +#define IPG_MC_RX_DISABLE 0x10000000 +#define IPG_MC_RX_ENABLED 0x20000000 +#define IPG_MC_PAUSED 0x40000000 + +/* + * Tune + */ + +/* Miscellaneous Constants. */ +#define TRUE 1 +#define FALSE 0 + +/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ +#define IPG_APPEND_FCS_ON_TX TRUE + +/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS strip on RX. */ +#define IPG_STRIP_FCS_ON_RX TRUE + +/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with + * Ethernet errors. + */ +#define IPG_DROP_ON_RX_ETH_ERRORS TRUE + +/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually + * (via TFC). + */ +#define IPG_INSERT_MANUAL_VLAN_TAG FALSE + +/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ +#define IPG_ADD_IPCHECKSUM_ON_TX FALSE + +/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. + * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. + */ +#define IPG_ADD_TCPCHECKSUM_ON_TX FALSE + +/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. + * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. + */ +#define IPG_ADD_UDPCHECKSUM_ON_TX FALSE + +/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx + * constants as desired. + */ +#define IPG_MANUAL_VLAN_VID 0xABC +#define IPG_MANUAL_VLAN_CFI 0x1 +#define IPG_MANUAL_VLAN_USERPRIORITY 0x5 + +#define IPG_IO_REG_RANGE 0xFF +#define IPG_MEM_REG_RANGE 0x154 +#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" +#define IPG_NIC_PHY_ADDRESS 0x01 +#define IPG_DMALIST_ALIGN_PAD 0x07 +#define IPG_MULTICAST_HASHTABLE_SIZE 0x40 + +/* Number of miliseconds to wait after issuing a software reset. + * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. + */ +#define IPG_AC_RESETWAIT 0x05 + +/* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */ +#define IPG_AC_RESET_TIMEOUT 0x0A + +/* Minimum number of nanoseconds used to toggle MDC clock during + * MII/GMII register access. + */ +#define IPG_PC_PHYCTRLWAIT_NS 200 + +#define IPG_TFDLIST_LENGTH 0x100 + +/* Number of frames between TxDMAComplete interrupt. + * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH + */ +#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 + +#ifdef JUMBO_FRAME + +# ifdef JUMBO_FRAME_SIZE_2K +# define JUMBO_FRAME_SIZE 2048 +# define __IPG_RXFRAG_SIZE 2048 +# else +# ifdef JUMBO_FRAME_SIZE_3K +# define JUMBO_FRAME_SIZE 3072 +# define __IPG_RXFRAG_SIZE 3072 +# else +# ifdef JUMBO_FRAME_SIZE_4K +# define JUMBO_FRAME_SIZE 4096 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_5K +# define JUMBO_FRAME_SIZE 5120 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_6K +# define JUMBO_FRAME_SIZE 6144 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_7K +# define JUMBO_FRAME_SIZE 7168 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_8K +# define JUMBO_FRAME_SIZE 8192 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_9K +# define JUMBO_FRAME_SIZE 9216 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_10K +# define JUMBO_FRAME_SIZE 10240 +# define __IPG_RXFRAG_SIZE 4088 +# else +# define JUMBO_FRAME_SIZE 4096 +# endif +# endif +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif + +/* Size of allocated received buffers. Nominally 0x0600. 
+ * Define larger if expecting jumbo frames. + */ +#ifdef JUMBO_FRAME +//IPG_TXFRAG_SIZE must <= 0x2b00, or TX will crash +#define IPG_TXFRAG_SIZE JUMBO_FRAME_SIZE +#endif + +/* Size of allocated received buffers. Nominally 0x0600. + * Define larger if expecting jumbo frames. + */ +#ifdef JUMBO_FRAME +//4088=4096-8 +#define IPG_RXFRAG_SIZE __IPG_RXFRAG_SIZE +#define IPG_RXSUPPORT_SIZE IPG_MAX_RXFRAME_SIZE +#else +#define IPG_RXFRAG_SIZE 0x0600 +#define IPG_RXSUPPORT_SIZE IPG_RXFRAG_SIZE +#endif + +/* IPG_MAX_RXFRAME_SIZE <= IPG_RXFRAG_SIZE */ +#ifdef JUMBO_FRAME +#define IPG_MAX_RXFRAME_SIZE JUMBO_FRAME_SIZE +#else +#define IPG_MAX_RXFRAME_SIZE 0x0600 +#endif + +#define IPG_RFDLIST_LENGTH 0x100 + +/* Maximum number of RFDs to process per interrupt. + * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH + */ +#define IPG_MAXRFDPROCESS_COUNT 0x80 + +/* Minimum margin between last freed RFD, and current RFD. + * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH + */ +#define IPG_MINUSEDRFDSTOFREE 0x80 + +/* specify the jumbo frame maximum size + * per unit is 0x600 (the RxBuffer size that one RFD can carry) + */ +#define MAX_JUMBOSIZE 0x8 // max is 12K + +/* Key register values loaded at driver start up. */ + +/* TXDMAPollPeriod is specified in 320ns increments. + * + * Value Time + * --------------------- + * 0x00-0x01 320ns + * 0x03 ~1us + * 0x1F ~10us + * 0xFF ~82us + */ +#define IPG_TXDMAPOLLPERIOD_VALUE 0x26 + +/* TxDMAUrgentThresh specifies the minimum amount of + * data in the transmit FIFO before asserting an + * urgent transmit DMA request. + * + * Value Min TxFIFO occupied space before urgent TX request + * --------------------------------------------------------------- + * 0x00-0x04 128 bytes (1024 bits) + * 0x27 1248 bytes (~10000 bits) + * 0x30 1536 bytes (12288 bits) + * 0xFF 8192 bytes (65535 bits) + */ +#define IPG_TXDMAURGENTTHRESH_VALUE 0x04 + +/* TxDMABurstThresh specifies the minimum amount of + * free space in the transmit FIFO before asserting an + * transmit DMA request. + * + * Value Min TxFIFO free space before TX request + * ---------------------------------------------------- + * 0x00-0x08 256 bytes + * 0x30 1536 bytes + * 0xFF 8192 bytes + */ +#define IPG_TXDMABURSTTHRESH_VALUE 0x30 + +/* RXDMAPollPeriod is specified in 320ns increments. + * + * Value Time + * --------------------- + * 0x00-0x01 320ns + * 0x03 ~1us + * 0x1F ~10us + * 0xFF ~82us + */ +#define IPG_RXDMAPOLLPERIOD_VALUE 0x01 + +/* RxDMAUrgentThresh specifies the minimum amount of + * free space within the receive FIFO before asserting + * a urgent receive DMA request. + * + * Value Min RxFIFO free space before urgent RX request + * --------------------------------------------------------------- + * 0x00-0x04 128 bytes (1024 bits) + * 0x27 1248 bytes (~10000 bits) + * 0x30 1536 bytes (12288 bits) + * 0xFF 8192 bytes (65535 bits) + */ +#define IPG_RXDMAURGENTTHRESH_VALUE 0x30 + +/* RxDMABurstThresh specifies the minimum amount of + * occupied space within the receive FIFO before asserting + * a receive DMA request. + * + * Value Min TxFIFO free space before TX request + * ---------------------------------------------------- + * 0x00-0x08 256 bytes + * 0x30 1536 bytes + * 0xFF 8192 bytes + */ +#define IPG_RXDMABURSTTHRESH_VALUE 0x30 + +/* FlowOnThresh specifies the maximum amount of occupied + * space in the receive FIFO before a PAUSE frame with + * maximum pause time transmitted. 
+ * + * Value Max RxFIFO occupied space before PAUSE + * --------------------------------------------------- + * 0x0000 0 bytes + * 0x0740 29,696 bytes + * 0x07FF 32,752 bytes + */ +#define IPG_FLOWONTHRESH_VALUE 0x0740 + +/* FlowOffThresh specifies the minimum amount of occupied + * space in the receive FIFO before a PAUSE frame with + * zero pause time is transmitted. + * + * Value Max RxFIFO occupied space before PAUSE + * --------------------------------------------------- + * 0x0000 0 bytes + * 0x00BF 3056 bytes + * 0x07FF 32,752 bytes + */ +#define IPG_FLOWOFFTHRESH_VALUE 0x00BF + +/* + * Miscellaneous macros. + */ + +/* Marco for printing debug statements. +# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " ## args) */ +#ifdef IPG_DEBUG +# define IPG_DEBUG_MSG(args...) +# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args) +# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) +# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) +#else +# define IPG_DEBUG_MSG(args...) +# define IPG_DDEBUG_MSG(args...) +# define IPG_DUMPRFDLIST(args) +# define IPG_DUMPTFDLIST(args) +#endif + +/* + * End miscellaneous macros. + */ + +/* Transmit Frame Descriptor. The IPG supports 15 fragments, + * however Linux requires only a single fragment. Note, each + * TFD field is 64 bits wide. + */ +struct ipg_tx { + u64 next_desc; + u64 tfc; + u64 frag_info; +}; + +/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. + */ +struct ipg_rx { + u64 next_desc; + u64 rfs; + u64 frag_info; +}; + +struct SJumbo { + int FoundStart; + int CurrentSize; + struct sk_buff *skb; +}; +/* Structure of IPG NIC specific data. */ +struct ipg_nic_private { + void __iomem *ioaddr; + struct ipg_tx *txd; + struct ipg_rx *rxd; + dma_addr_t txd_map; + dma_addr_t rxd_map; + struct sk_buff *TxBuff[IPG_TFDLIST_LENGTH]; + struct sk_buff *RxBuff[IPG_RFDLIST_LENGTH]; + unsigned int tx_current; + unsigned int tx_dirty; + unsigned int rx_current; + unsigned int rx_dirty; +// Add by Grace 2005/05/19 +#ifdef JUMBO_FRAME + struct SJumbo Jumbo; +#endif + unsigned int rx_buf_sz; + struct pci_dev *pdev; + struct net_device *dev; + struct net_device_stats stats; + spinlock_t lock; + int tenmbpsmode; + + /*Jesse20040128EEPROM_VALUE */ + u16 LED_Mode; + u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ + + struct mutex mii_mutex; + struct mii_if_info mii_if; + int ResetCurrentTFD; +#ifdef IPG_DEBUG + int RFDlistendCount; + int RFDListCheckedCount; + int EmptyRFDListCount; +#endif + struct delayed_work task; +}; + +//variable record -- index by leading revision/length +//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN +unsigned short DefaultPhyParam[] = { + // 11/12/03 IP1000A v1-3 rev=0x40 + /*-------------------------------------------------------------------------- + (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, + 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, + 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, + --------------------------------------------------------------------------*/ + // 12/17/03 IP1000A v1-4 rev=0x40 + (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + // 01/09/04 IP1000A v1-5 rev=0x41 + (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + 0x0000 +}; + +#endif /* __LINUX_IPG_H */ diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 829da9a1d113..65806956728a 
100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -155,6 +155,41 @@ config KINGSUN_DONGLE To compile it as a module, choose M here: the module will be called kingsun-sir. +config EP7211_DONGLE + tristate "EP7211 I/R support" + depends on IRTTY_SIR && ARCH_EP7211 && IRDA && EXPERIMENTAL + help + Say Y here if you want to build support for the Cirrus logic + EP7211 chipset's infrared module. + +config KSDAZZLE_DONGLE + tristate "KingSun Dazzle IrDA-USB dongle (EXPERIMENTAL)" + depends on IRDA && USB && EXPERIMENTAL + help + Say Y or M here if you want to build support for the KingSun Dazzle + IrDA-USB bridge device driver. + + This USB bridge does not conform to the IrDA-USB device class + specification, and therefore needs its own specific driver. This + dongle supports SIR speeds only (9600 through 115200 bps). + + To compile it as a module, choose M here: the module will be called + ksdazzle-sir. + +config KS959_DONGLE + tristate "KingSun KS-959 IrDA-USB dongle (EXPERIMENTAL)" + depends on IRDA && USB && EXPERIMENTAL + help + Say Y or M here if you want to build support for the KingSun KS-959 + IrDA-USB bridge device driver. + + This USB bridge does not conform to the IrDA-USB device class + specification, and therefore needs its own specific driver. This + dongle supports SIR speeds only (9600 through 57600 bps). + + To compile it as a module, choose M here: the module will be called + ks959-sir. + comment "Old SIR device drivers" config IRPORT_SIR @@ -355,7 +390,7 @@ config WINBOND_FIR config TOSHIBA_FIR tristate "Toshiba Type-O IR Port" - depends on IRDA && PCI && !64BIT + depends on IRDA && PCI && !64BIT && VIRT_TO_BUS help Say Y here if you want to build support for the Toshiba Type-O IR and Donau oboe chipsets. These chipsets are used by the Toshiba diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index 233a2f923730..fefbb5909081 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -45,7 +45,10 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o +obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o +obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o +obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o # The SIR helper module sir-dev-objs := sir_dev.o sir_dongle.o diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c index 9715ab5572e9..ccf6ec548a64 100644 --- a/drivers/net/irda/actisys-sir.c +++ b/drivers/net/irda/actisys-sir.c @@ -67,7 +67,7 @@ static int actisys_reset(struct sir_dev *); /* Note : the 220L doesn't support 38400, but we will fix that below */ static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 }; -#define MAX_SPEEDS (sizeof(baud_rates)/sizeof(baud_rates[0])) +#define MAX_SPEEDS ARRAY_SIZE(baud_rates) static struct dongle_driver act220l = { .owner = THIS_MODULE, diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index f9c889c0dd07..9f584521304a 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -360,10 +360,6 @@ static int ali_ircc_open(int i, chipio_t *info) self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; - - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); - /* Override the network functions we need to use */ dev->hard_start_xmit = ali_ircc_sir_hard_xmit; dev->open = ali_ircc_net_open; diff --git 
a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 3ca47bf6dfec..3e5eca1aa987 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1660,7 +1660,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) } #endif - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pci_dev->dev); dev->hard_start_xmit = toshoboe_hard_xmit; dev->open = toshoboe_net_open; diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c new file mode 100644 index 000000000000..831572429bb9 --- /dev/null +++ b/drivers/net/irda/ep7211-sir.c @@ -0,0 +1,89 @@ +/* + * IR port driver for the Cirrus Logic EP7211 processor. + * + * Copyright 2001, Blue Mug Inc. All rights reserved. + * Copyright 2007, Samuel Ortiz <samuel@sortiz.org> + */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/tty.h> +#include <linux/init.h> +#include <linux/spinlock.h> + +#include <net/irda/irda.h> +#include <net/irda/irda_device.h> + +#include <asm/io.h> +#include <asm/hardware.h> + +#include "sir-dev.h" + +#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */ +#define MAX_DELAY 10000 /* 1 ms */ + +static int ep7211_open(struct sir_dev *dev); +static int ep7211_close(struct sir_dev *dev); +static int ep7211_change_speed(struct sir_dev *dev, unsigned speed); +static int ep7211_reset(struct sir_dev *dev); + +static struct dongle_driver ep7211 = { + .owner = THIS_MODULE, + .driver_name = "EP7211 IR driver", + .type = IRDA_EP7211_DONGLE, + .open = ep7211_open, + .close = ep7211_close, + .reset = ep7211_reset, + .set_speed = ep7211_change_speed, +}; + +static int __init ep7211_sir_init(void) +{ + return irda_register_dongle(&ep7211); +} + +static void __exit ep7211_sir_cleanup(void) +{ + irda_unregister_dongle(&ep7211); +} + +static int ep7211_open(struct sir_dev *dev) +{ + unsigned int syscon; + + /* Turn on the SIR encoder. */ + syscon = clps_readl(SYSCON1); + syscon |= SYSCON1_SIREN; + clps_writel(syscon, SYSCON1); + + return 0; +} + +static int ep7211_close(struct sir_dev *dev) +{ + unsigned int syscon; + + /* Turn off the SIR encoder. 
*/ + syscon = clps_readl(SYSCON1); + syscon &= ~SYSCON1_SIREN; + clps_writel(syscon, SYSCON1); + + return 0; +} + +static int ep7211_change_speed(struct sir_dev *dev, unsigned speed) +{ + return 0; +} + +static int ep7211_reset(struct sir_dev *dev) +{ + return 0; +} + +MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>"); +MODULE_DESCRIPTION("EP7211 IR dongle driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */ + +module_init(ep7211_sir_init); +module_exit(ep7211_sir_cleanup); diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 0ac240ca905b..c6355c00fd7a 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -1561,10 +1561,9 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf struct irda_class_desc *desc; int ret; - desc = kmalloc(sizeof (*desc), GFP_KERNEL); - if (desc == NULL) + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) return NULL; - memset(desc, 0, sizeof(*desc)); /* USB-IrDA class spec 1.0: * 6.1.3: Standard "Get Descriptor" Device Request is not @@ -1617,7 +1616,7 @@ static int irda_usb_probe(struct usb_interface *intf, { struct net_device *net; struct usb_device *dev = interface_to_usbdev(intf); - struct irda_usb_cb *self = NULL; + struct irda_usb_cb *self; struct usb_host_interface *interface; struct irda_class_desc *irda_desc; int ret = -ENOMEM; @@ -1636,7 +1635,6 @@ static int irda_usb_probe(struct usb_interface *intf, if (!net) goto err_out; - SET_MODULE_OWNER(net); SET_NETDEV_DEV(net, &intf->dev); self = net->priv; self->netdev = net; @@ -1655,7 +1653,7 @@ static int irda_usb_probe(struct usb_interface *intf, self->header_length = USB_IRDA_HEADER; } - self->rx_urb = kzalloc(self->max_rx_urb * sizeof(struct urb *), + self->rx_urb = kcalloc(self->max_rx_urb, sizeof(struct urb *), GFP_KERNEL); for (i = 0; i < self->max_rx_urb; i++) { @@ -1715,7 +1713,7 @@ static int irda_usb_probe(struct usb_interface *intf, /* Find IrDA class descriptor */ irda_desc = irda_usb_find_class_desc(intf); ret = -ENODEV; - if (irda_desc == NULL) + if (!irda_desc) goto err_out_3; if (self->needspatch) { @@ -1738,15 +1736,13 @@ static int irda_usb_probe(struct usb_interface *intf, /* Don't change this buffer size and allocation without doing * some heavy and complete testing. 
Don't ask why :-( * Jean II */ - self->speed_buff = kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL); - if (self->speed_buff == NULL) + self->speed_buff = kzalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL); + if (!self->speed_buff) goto err_out_3; - memset(self->speed_buff, 0, IRDA_USB_SPEED_MTU); - self->tx_buff = kzalloc(IRDA_SKB_MAX_MTU + self->header_length, GFP_KERNEL); - if (self->tx_buff == NULL) + if (!self->tx_buff) goto err_out_4; ret = irda_usb_open(self); @@ -1767,12 +1763,11 @@ static int irda_usb_probe(struct usb_interface *intf, /* replace IrDA class descriptor with what patched device is now reporting */ irda_desc = irda_usb_find_class_desc (self->usbintf); - if (irda_desc == NULL) { + if (!irda_desc) { ret = -ENODEV; goto err_out_6; } - if (self->irda_desc) - kfree (self->irda_desc); + kfree(self->irda_desc); self->irda_desc = irda_desc; irda_usb_init_qos(self); } diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c index 3078c419cb02..c79caa5d3d71 100644 --- a/drivers/net/irda/irport.c +++ b/drivers/net/irda/irport.c @@ -164,20 +164,17 @@ irport_open(int i, unsigned int iobase, unsigned int irq) /* Allocate memory if needed */ if (self->tx_buff.truesize > 0) { - self->tx_buff.head = kmalloc(self->tx_buff.truesize, + self->tx_buff.head = kzalloc(self->tx_buff.truesize, GFP_KERNEL); if (self->tx_buff.head == NULL) { IRDA_ERROR("%s(), can't allocate memory for " "transmit buffer!\n", __FUNCTION__); goto err_out4; } - memset(self->tx_buff.head, 0, self->tx_buff.truesize); } self->tx_buff.data = self->tx_buff.head; self->netdev = dev; - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); /* May be overridden by piggyback drivers */ self->interrupt = irport_interrupt; diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index ad1857364d51..6f5f697ec9f8 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -505,10 +505,9 @@ static int irtty_open(struct tty_struct *tty) } /* allocate private device info block */ - priv = kmalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto out_put; - memset(priv, 0, sizeof(*priv)); priv->magic = IRTTY_MAGIC; priv->tty = tty; diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c index bdd5c979bead..648e54b3f00e 100644 --- a/drivers/net/irda/kingsun-sir.c +++ b/drivers/net/irda/kingsun-sir.c @@ -66,7 +66,6 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/module.h> #include <linux/kref.h> #include <linux/usb.h> #include <linux/device.h> @@ -488,7 +487,6 @@ static int kingsun_probe(struct usb_interface *intf, if(!net) goto err_out1; - SET_MODULE_OWNER(net); SET_NETDEV_DEV(net, &intf->dev); kingsun = netdev_priv(net); kingsun->irlap = NULL; @@ -509,12 +507,12 @@ static int kingsun_probe(struct usb_interface *intf, spin_lock_init(&kingsun->lock); /* Allocate input buffer */ - kingsun->in_buf = (__u8 *)kmalloc(kingsun->max_rx, GFP_KERNEL); + kingsun->in_buf = kmalloc(kingsun->max_rx, GFP_KERNEL); if (!kingsun->in_buf) goto free_mem; /* Allocate output buffer */ - kingsun->out_buf = (__u8 *)kmalloc(KINGSUN_FIFO_SIZE, GFP_KERNEL); + kingsun->out_buf = kmalloc(KINGSUN_FIFO_SIZE, GFP_KERNEL); if (!kingsun->out_buf) goto free_mem; diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c new file mode 100644 index 000000000000..8c257a51341a --- /dev/null +++ b/drivers/net/irda/ks959-sir.c @@ -0,0 +1,938 @@ +/***************************************************************************** +* 
+* Filename: ks959-sir.c +* Version: 0.1.2 +* Description: Irda KingSun KS-959 USB Dongle +* Status: Experimental +* Author: Alex VillacĂs Lasso <a_villacis@palosanto.com> +* with help from Domen Puncer <domen@coderock.org> +* +* Based on stir4200, mcs7780, kingsun-sir drivers. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +* +*****************************************************************************/ + +/* + * Following is my most current (2007-07-17) understanding of how the Kingsun + * KS-959 dongle is supposed to work. This information was deduced by + * reverse-engineering and examining the USB traffic captured with USBSnoopy + * from the WinXP driver. Feel free to update here as more of the dongle is + * known. + * + * My most sincere thanks must go to Domen Puncer <domen@coderock.org> for + * invaluable help in cracking the obfuscation and padding required for this + * dongle. + * + * General: This dongle exposes one interface with one interrupt IN endpoint. + * However, the interrupt endpoint is NOT used at all for this dongle. Instead, + * this dongle uses control transfers for everything, including sending and + * receiving the IrDA frame data. Apparently the interrupt endpoint is just a + * dummy to ensure the dongle has a valid interface to present to the PC.And I + * thought the DonShine dongle was weird... In addition, this dongle uses + * obfuscation (?!?!), applied at the USB level, to hide the traffic, both sent + * and received, from the dongle. I call it obfuscation because the XOR keying + * and padding required to produce an USB traffic acceptable for the dongle can + * not be explained by any other technical requirement. + * + * Transmission: To transmit an IrDA frame, the driver must prepare a control + * URB with the following as a setup packet: + * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x09 + * wValue <length of valid data before padding, little endian> + * wIndex 0x0000 + * wLength <length of padded data> + * The payload packet must be manually wrapped and escaped (as in stir4200.c), + * then padded and obfuscated before being sent. Both padding and obfuscation + * are implemented in the procedure obfuscate_tx_buffer(). Suffice to say, the + * designer/programmer of the dongle used his name as a source for the + * obfuscation. WTF?! + * Apparently the dongle cannot handle payloads larger than 256 bytes. The + * driver has to perform fragmentation in order to send anything larger than + * this limit. + * + * Reception: To receive data, the driver must poll the dongle regularly (like + * kingsun-sir.c) with control URBs and the following as a setup packet: + * bRequestType USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x01 + * wValue 0x0200 + * wIndex 0x0000 + * wLength 0x0800 (size of available buffer) + * If there is data to be read, it will be returned as the response payload. 
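[Editorial illustration, not part of the patch: the receive-poll request just described maps directly onto a struct usb_ctrlrequest. A minimal sketch with a hypothetical helper name follows; the two literals correspond to the driver's KINGSUN_REQ_RECV request and its 2048-byte receive buffer.]

#include <linux/usb.h>

/* Sketch only: fill a control setup packet for the KS-959 receive poll,
 * exactly as described above. Names are illustrative, not the driver's. */
static void example_fill_ks959_rx_request(struct usb_ctrlrequest *req)
{
	req->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	req->bRequest     = 0x01;			/* receive request */
	req->wValue       = cpu_to_le16(0x0200);
	req->wIndex       = cpu_to_le16(0x0000);
	req->wLength      = cpu_to_le16(0x0800);	/* available buffer size */
}

[ks959_probe further down pre-fills an identical request into rx_setuprequest, using the KINGSUN_REQ_RECV and KINGSUN_RCV_FIFO_SIZE constants for these two values.]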
+ * This data is (apparently) not padded, but it is obfuscated. To de-obfuscate + * it, the driver must XOR every byte, in sequence, with a value that starts at + * 1 and is incremented with each byte processed, and then with 0x55. The value + * incremented with each byte processed overflows as an unsigned char. The + * resulting bytes form a wrapped SIR frame that is unwrapped and unescaped + * as in stir4200.c The incremented value is NOT reset with each frame, but is + * kept across the entire session with the dongle. Also, the dongle inserts an + * extra garbage byte with value 0x95 (after decoding) every 0xff bytes, which + * must be skipped. + * + * Speed change: To change the speed of the dongle, the driver prepares a + * control URB with the following as a setup packet: + * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x09 + * wValue 0x0200 + * wIndex 0x0001 + * wLength 0x0008 (length of the payload) + * The payload is a 8-byte record, apparently identical to the one used in + * drivers/usb/serial/cypress_m8.c to change speed: + * __u32 baudSpeed; + * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits + * unsigned int : 1; + * unsigned int stopBits : 1; + * unsigned int parityEnable : 1; + * unsigned int parityType : 1; + * unsigned int : 1; + * unsigned int reset : 1; + * unsigned char reserved[3]; // set to 0 + * + * For now only SIR speeds have been observed with this dongle. Therefore, + * nothing is known on what changes (if any) must be done to frame wrapping / + * unwrapping for higher than SIR speeds. This driver assumes no change is + * necessary and announces support for all the way to 57600 bps. Although the + * package announces support for up to 4MBps, tests with a Sony Ericcson K300 + * phone show corruption when receiving large frames at 115200 bps, the highest + * speed announced by the phone. However, transmission at 115200 bps is OK. Go + * figure. Since I don't know whether the phone or the dongle is at fault, max + * announced speed is 57600 bps until someone produces a device that can run + * at higher speeds with this dongle. 
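[Editorial illustration, not part of the patch: the receive de-obfuscation described above boils down to a running XOR counter. A minimal standalone sketch, with illustrative names, could be:]

/* Sketch only: de-obfuscate 'len' received bytes. '*xormask' must start at 0
 * and persist across calls for the whole session, as described above.
 * Returns the number of useful bytes placed in 'out'. */
static unsigned int example_ks959_deobfuscate(const unsigned char *in,
					      unsigned char *out,
					      unsigned int len,
					      unsigned char *xormask)
{
	unsigned int i, n = 0;

	for (i = 0; i < len; i++) {
		unsigned char b;

		(*xormask)++;		/* per-session counter, wraps as unsigned char */
		b = in[i] ^ *xormask ^ 0x55;
		/* when the counter wraps to 0 the byte is dongle-inserted garbage */
		if (*xormask != 0)
			out[n++] = b;
	}
	return n;
}

[The driver's ks959_rcv_irq below performs the same transformation inline, feeding each kept byte straight to async_unwrap_char instead of copying it out.]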
+ */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/usb.h> +#include <linux/device.h> +#include <linux/crc32.h> + +#include <asm/unaligned.h> +#include <asm/byteorder.h> +#include <asm/uaccess.h> + +#include <net/irda/irda.h> +#include <net/irda/wrapper.h> +#include <net/irda/crc.h> + +#define KS959_VENDOR_ID 0x07d0 +#define KS959_PRODUCT_ID 0x4959 + +/* These are the currently known USB ids */ +static struct usb_device_id dongles[] = { + /* KingSun Co,Ltd IrDA/USB Bridge */ + {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, dongles); + +#define KINGSUN_MTT 0x07 +#define KINGSUN_REQ_RECV 0x01 +#define KINGSUN_REQ_SEND 0x09 + +#define KINGSUN_RCV_FIFO_SIZE 2048 /* Max length we can receive */ +#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ +#define KINGSUN_SND_PACKET_SIZE 256 /* Max packet dongle can handle */ + +struct ks959_speedparams { + __le32 baudrate; /* baud rate, little endian */ + __u8 flags; + __u8 reserved[3]; +} __attribute__ ((packed)); + +#define KS_DATA_5_BITS 0x00 +#define KS_DATA_6_BITS 0x01 +#define KS_DATA_7_BITS 0x02 +#define KS_DATA_8_BITS 0x03 + +#define KS_STOP_BITS_1 0x00 +#define KS_STOP_BITS_2 0x08 + +#define KS_PAR_DISABLE 0x00 +#define KS_PAR_EVEN 0x10 +#define KS_PAR_ODD 0x30 +#define KS_RESET 0x80 + +struct ks959_cb { + struct usb_device *usbdev; /* init: probe_irda */ + struct net_device *netdev; /* network layer */ + struct irlap_cb *irlap; /* The link layer we are binded to */ + struct net_device_stats stats; /* network statistics */ + struct qos_info qos; + + struct usb_ctrlrequest *tx_setuprequest; + struct urb *tx_urb; + __u8 *tx_buf_clear; + unsigned int tx_buf_clear_used; + unsigned int tx_buf_clear_sent; + __u8 *tx_buf_xored; + + struct usb_ctrlrequest *rx_setuprequest; + struct urb *rx_urb; + __u8 *rx_buf; + __u8 rx_variable_xormask; + iobuff_t rx_unwrap_buff; + struct timeval rx_time; + + struct usb_ctrlrequest *speed_setuprequest; + struct urb *speed_urb; + struct ks959_speedparams speedparams; + unsigned int new_speed; + + spinlock_t lock; + int receiving; +}; + +/* Procedure to perform the obfuscation/padding expected by the dongle + * + * buf_cleartext (IN) Cleartext version of the IrDA frame to transmit + * len_cleartext (IN) Length of the cleartext version of IrDA frame + * buf_xoredtext (OUT) Obfuscated version of frame built by proc + * len_maxbuf (OUT) Maximum space available at buf_xoredtext + * + * (return) length of obfuscated frame with padding + * + * If not enough space (as indicated by len_maxbuf vs. required padding), + * zero is returned + * + * The value of lookup_string is actually a required portion of the algorithm. + * Seems the designer of the dongle wanted to state who exactly is responsible + * for implementing obfuscation. 
Send your best (or other) wishes to him ]:-) + */ +static unsigned int obfuscate_tx_buffer(const __u8 * buf_cleartext, + unsigned int len_cleartext, + __u8 * buf_xoredtext, + unsigned int len_maxbuf) +{ + unsigned int len_xoredtext; + + /* Calculate required length with padding, check for necessary space */ + len_xoredtext = ((len_cleartext + 7) & ~0x7) + 0x10; + if (len_xoredtext <= len_maxbuf) { + static const __u8 lookup_string[] = "wangshuofei19710"; + __u8 xor_mask; + + /* Unlike the WinXP driver, we *do* clear out the padding */ + memset(buf_xoredtext, 0, len_xoredtext); + + xor_mask = lookup_string[(len_cleartext & 0x0f) ^ 0x06] ^ 0x55; + + while (len_cleartext-- > 0) { + *buf_xoredtext++ = *buf_cleartext++ ^ xor_mask; + } + } else { + len_xoredtext = 0; + } + return len_xoredtext; +} + +/* Callback transmission routine */ +static void ks959_speed_irq(struct urb *urb) +{ + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ks959_speed_irq: urb asynchronously failed - %d", + urb->status); + } +} + +/* Send a control request to change speed of the dongle */ +static int ks959_change_speed(struct ks959_cb *kingsun, unsigned speed) +{ + static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, + 57600, 115200, 576000, 1152000, 4000000, 0 + }; + int err; + unsigned int i; + + if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) + return -ENOMEM; + + /* Check that requested speed is among the supported ones */ + for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; + if (supported_speeds[i] == 0) + return -EOPNOTSUPP; + + memset(&(kingsun->speedparams), 0, sizeof(struct ks959_speedparams)); + kingsun->speedparams.baudrate = cpu_to_le32(speed); + kingsun->speedparams.flags = KS_DATA_8_BITS; + + /* speed_setuprequest pre-filled in ks959_probe */ + usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, + usb_sndctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->speed_setuprequest, + &(kingsun->speedparams), + sizeof(struct ks959_speedparams), ks959_speed_irq, + kingsun); + kingsun->speed_urb->status = 0; + err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); + + return err; +} + +/* Submit one fragment of an IrDA frame to the dongle */ +static void ks959_send_irq(struct urb *urb); +static int ks959_submit_tx_fragment(struct ks959_cb *kingsun) +{ + unsigned int padlen; + unsigned int wraplen; + int ret; + + /* Check whether current plaintext can produce a padded buffer that fits + within the range handled by the dongle */ + wraplen = (KINGSUN_SND_PACKET_SIZE & ~0x7) - 0x10; + if (wraplen > kingsun->tx_buf_clear_used) + wraplen = kingsun->tx_buf_clear_used; + + /* Perform dongle obfuscation. Also remove the portion of the frame that + was just obfuscated and will now be sent to the dongle. 
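+ (with KINGSUN_SND_PACKET_SIZE == 256 the fragment chosen above is at most (256 & ~0x7) - 0x10 = 240 cleartext bytes, which pads out to at most ((240 + 7) & ~0x7) + 0x10 = 256 bytes, exactly the largest packet the dongle accepts).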
*/ + padlen = obfuscate_tx_buffer(kingsun->tx_buf_clear, wraplen, + kingsun->tx_buf_xored, + KINGSUN_SND_PACKET_SIZE); + + /* Calculate how much data can be transmitted in this urb */ + kingsun->tx_setuprequest->wValue = cpu_to_le16(wraplen); + kingsun->tx_setuprequest->wLength = cpu_to_le16(padlen); + /* Rest of the fields were filled in ks959_probe */ + usb_fill_control_urb(kingsun->tx_urb, kingsun->usbdev, + usb_sndctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->tx_setuprequest, + kingsun->tx_buf_xored, padlen, + ks959_send_irq, kingsun); + kingsun->tx_urb->status = 0; + ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); + + /* Remember how much data was sent, in order to update at callback */ + kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0; + return ret; +} + +/* Callback transmission routine */ +static void ks959_send_irq(struct urb *urb) +{ + struct ks959_cb *kingsun = urb->context; + struct net_device *netdev = kingsun->netdev; + int ret = 0; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + err("ks959_send_irq: Network not running!"); + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ks959_send_irq: urb asynchronously failed - %d", + urb->status); + return; + } + + if (kingsun->tx_buf_clear_used > 0) { + /* Update data remaining to be sent */ + if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { + memmove(kingsun->tx_buf_clear, + kingsun->tx_buf_clear + + kingsun->tx_buf_clear_sent, + kingsun->tx_buf_clear_used - + kingsun->tx_buf_clear_sent); + } + kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; + kingsun->tx_buf_clear_sent = 0; + + if (kingsun->tx_buf_clear_used > 0) { + /* There is more data to be sent */ + if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) { + err("ks959_send_irq: failed tx_urb submit: %d", + ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } + } else { + /* All data sent, send next speed && wake network queue */ + if (kingsun->new_speed != -1 && + cpu_to_le32(kingsun->new_speed) != + kingsun->speedparams.baudrate) + ks959_change_speed(kingsun, kingsun->new_speed); + + netif_wake_queue(netdev); + } + } +} + +/* + * Called from net/core when new frame is available. 
+ */ +static int ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ks959_cb *kingsun; + unsigned int wraplen; + int ret = 0; + + if (skb == NULL || netdev == NULL) + return -EINVAL; + + netif_stop_queue(netdev); + + /* the IRDA wrapping routines don't deal with non linear skb */ + SKB_LINEAR_ASSERT(skb); + + kingsun = netdev_priv(netdev); + + spin_lock(&kingsun->lock); + kingsun->new_speed = irda_get_next_speed(skb); + + /* Append data to the end of whatever data remains to be transmitted */ + wraplen = + async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); + kingsun->tx_buf_clear_used = wraplen; + + if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) { + err("ks959_hard_xmit: failed tx_urb submit: %d", ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } else { + kingsun->stats.tx_packets++; + kingsun->stats.tx_bytes += skb->len; + + } + + dev_kfree_skb(skb); + spin_unlock(&kingsun->lock); + + return ret; +} + +/* Receive callback function */ +static void ks959_rcv_irq(struct urb *urb) +{ + struct ks959_cb *kingsun = urb->context; + int ret; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + kingsun->receiving = 0; + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("kingsun_rcv_irq: urb asynchronously failed - %d", + urb->status); + kingsun->receiving = 0; + return; + } + + if (urb->actual_length > 0) { + __u8 *bytes = urb->transfer_buffer; + unsigned int i; + + for (i = 0; i < urb->actual_length; i++) { + /* De-obfuscation implemented here: variable portion of + xormask is incremented, and then used with the encoded + byte for the XOR. The result of the operation is used + to unwrap the SIR frame. */ + kingsun->rx_variable_xormask++; + bytes[i] = + bytes[i] ^ kingsun->rx_variable_xormask ^ 0x55u; + + /* rx_variable_xormask doubles as an index counter so we + can skip the byte at 0xff (wrapped around to 0). + */ + if (kingsun->rx_variable_xormask != 0) { + async_unwrap_char(kingsun->netdev, + &kingsun->stats, + &kingsun->rx_unwrap_buff, + bytes[i]); + } + } + kingsun->netdev->last_rx = jiffies; + do_gettimeofday(&kingsun->rx_time); + kingsun->receiving = + (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; + } + + /* This urb has already been filled in kingsun_net_open. Setup + packet must be re-filled, but it is assumed that urb keeps the + pointer to the initial setup packet, as well as the payload buffer. + Setup packet is already pre-filled at ks959_probe. + */ + urb->status = 0; + ret = usb_submit_urb(urb, GFP_ATOMIC); +} + +/* + * Function kingsun_net_open (dev) + * + * Network device is taken up. Usually this is done by "ifconfig irda0 up" + */ +static int ks959_net_open(struct net_device *netdev) +{ + struct ks959_cb *kingsun = netdev_priv(netdev); + int err = -ENOMEM; + char hwname[16]; + + /* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */ + kingsun->receiving = 0; + + /* Initialize for SIR to copy data directly into skb. 
*/ + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; + kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); + if (!kingsun->rx_unwrap_buff.skb) + goto free_mem; + + skb_reserve(kingsun->rx_unwrap_buff.skb, 1); + kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; + do_gettimeofday(&kingsun->rx_time); + + kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->rx_urb) + goto free_mem; + + kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->tx_urb) + goto free_mem; + + kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->speed_urb) + goto free_mem; + + /* Initialize speed for dongle */ + kingsun->new_speed = 9600; + err = ks959_change_speed(kingsun, 9600); + if (err < 0) + goto free_mem; + + /* + * Now that everything should be initialized properly, + * Open new IrLAP layer instance to take care of us... + */ + sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); + kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); + if (!kingsun->irlap) { + err("ks959-sir: irlap_open failed"); + goto free_mem; + } + + /* Start reception. Setup request already pre-filled in ks959_probe */ + usb_fill_control_urb(kingsun->rx_urb, kingsun->usbdev, + usb_rcvctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->rx_setuprequest, + kingsun->rx_buf, KINGSUN_RCV_FIFO_SIZE, + ks959_rcv_irq, kingsun); + kingsun->rx_urb->status = 0; + err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + if (err) { + err("ks959-sir: first urb-submit failed: %d", err); + goto close_irlap; + } + + netif_start_queue(netdev); + + /* Situation at this point: + - all work buffers allocated + - urbs allocated and ready to fill + - max rx packet known (in max_rx) + - unwrap state machine initialized, in state outside of any frame + - receive request in progress + - IrLAP layer started, about to hand over packets to send + */ + + return 0; + + close_irlap: + irlap_close(kingsun->irlap); + free_mem: + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + if (kingsun->rx_unwrap_buff.skb) { + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + } + return err; +} + +/* + * Function kingsun_net_close (kingsun) + * + * Network device is taken down. Usually this is done by + * "ifconfig irda0 down" + */ +static int ks959_net_close(struct net_device *netdev) +{ + struct ks959_cb *kingsun = netdev_priv(netdev); + + /* Stop transmit processing */ + netif_stop_queue(netdev); + + /* Mop up receive && transmit urb's */ + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->receiving = 0; + + /* Stop and remove instance of IrLAP */ + if (kingsun->irlap) + irlap_close(kingsun->irlap); + + kingsun->irlap = NULL; + + return 0; +} + +/* + * IOCTLs : Extra out-of-band network commands... 
+ */ +static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct if_irda_req *irq = (struct if_irda_req *)rq; + struct ks959_cb *kingsun = netdev_priv(netdev); + int ret = 0; + + switch (cmd) { + case SIOCSBANDWIDTH: /* Set bandwidth */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the device is still there */ + if (netif_device_present(kingsun->netdev)) + return ks959_change_speed(kingsun, irq->ifr_baudrate); + break; + + case SIOCSMEDIABUSY: /* Set media busy */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the IrDA stack is still there */ + if (netif_running(kingsun->netdev)) + irda_device_set_media_busy(kingsun->netdev, TRUE); + break; + + case SIOCGRECEIVING: + /* Only approximately true */ + irq->ifr_receiving = kingsun->receiving; + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +/* + * Get device stats (for /proc/net/dev and ifconfig) + */ +static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev) +{ + struct ks959_cb *kingsun = netdev_priv(netdev); + return &kingsun->stats; +} + +/* + * This routine is called by the USB subsystem for each new device + * in the system. We need to check if the device is ours, and in + * this case start handling it. + */ +static int ks959_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct ks959_cb *kingsun = NULL; + struct net_device *net = NULL; + int ret = -ENOMEM; + + /* Allocate network device container. */ + net = alloc_irdadev(sizeof(*kingsun)); + if (!net) + goto err_out1; + + SET_NETDEV_DEV(net, &intf->dev); + kingsun = netdev_priv(net); + kingsun->netdev = net; + kingsun->usbdev = dev; + kingsun->irlap = NULL; + kingsun->tx_setuprequest = NULL; + kingsun->tx_urb = NULL; + kingsun->tx_buf_clear = NULL; + kingsun->tx_buf_xored = NULL; + kingsun->tx_buf_clear_used = 0; + kingsun->tx_buf_clear_sent = 0; + + kingsun->rx_setuprequest = NULL; + kingsun->rx_urb = NULL; + kingsun->rx_buf = NULL; + kingsun->rx_variable_xormask = 0; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->receiving = 0; + spin_lock_init(&kingsun->lock); + + kingsun->speed_setuprequest = NULL; + kingsun->speed_urb = NULL; + kingsun->speedparams.baudrate = 0; + + /* Allocate input buffer */ + kingsun->rx_buf = kmalloc(KINGSUN_RCV_FIFO_SIZE, GFP_KERNEL); + if (!kingsun->rx_buf) + goto free_mem; + + /* Allocate input setup packet */ + kingsun->rx_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->rx_setuprequest) + goto free_mem; + kingsun->rx_setuprequest->bRequestType = + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->rx_setuprequest->bRequest = KINGSUN_REQ_RECV; + kingsun->rx_setuprequest->wValue = cpu_to_le16(0x0200); + kingsun->rx_setuprequest->wIndex = 0; + kingsun->rx_setuprequest->wLength = cpu_to_le16(KINGSUN_RCV_FIFO_SIZE); + + /* Allocate output buffer */ + kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); + if (!kingsun->tx_buf_clear) + goto free_mem; + kingsun->tx_buf_xored = kmalloc(KINGSUN_SND_PACKET_SIZE, GFP_KERNEL); + if (!kingsun->tx_buf_xored) + goto free_mem; + + /* Allocate and initialize output setup packet */ + kingsun->tx_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->tx_setuprequest) + goto free_mem; + kingsun->tx_setuprequest->bRequestType = + USB_DIR_OUT | 
USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->tx_setuprequest->bRequest = KINGSUN_REQ_SEND; + kingsun->tx_setuprequest->wValue = 0; + kingsun->tx_setuprequest->wIndex = 0; + kingsun->tx_setuprequest->wLength = 0; + + /* Allocate and initialize speed setup packet */ + kingsun->speed_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->speed_setuprequest) + goto free_mem; + kingsun->speed_setuprequest->bRequestType = + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; + kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); + kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); + kingsun->speed_setuprequest->wLength = + cpu_to_le16(sizeof(struct ks959_speedparams)); + + printk(KERN_INFO "KingSun KS-959 IRDA/USB found at address %d, " + "Vendor: %x, Product: %x\n", + dev->devnum, le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); + + /* Initialize QoS for this device */ + irda_init_max_qos_capabilies(&kingsun->qos); + + /* Baud rates known to be supported. Please uncomment if devices (other + than a SonyEriccson K300 phone) can be shown to support higher speed + with this dongle. + */ + kingsun->qos.baud_rate.bits = + IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600; + kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; + irda_qos_bits_to_value(&kingsun->qos); + + /* Override the network functions we need to use */ + net->hard_start_xmit = ks959_hard_xmit; + net->open = ks959_net_open; + net->stop = ks959_net_close; + net->get_stats = ks959_net_get_stats; + net->do_ioctl = ks959_net_ioctl; + + ret = register_netdev(net); + if (ret != 0) + goto free_mem; + + info("IrDA: Registered KingSun KS-959 device %s", net->name); + + usb_set_intfdata(intf, kingsun); + + /* Situation at this point: + - all work buffers allocated + - setup requests pre-filled + - urbs not allocated, set to NULL + - max rx packet known (is KINGSUN_FIFO_SIZE) + - unwrap state machine (partially) initialized, but skb == NULL + */ + + return 0; + + free_mem: + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_setuprequest); + kfree(kingsun->tx_buf_xored); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_setuprequest); + kfree(kingsun->rx_buf); + free_netdev(net); + err_out1: + return ret; +} + +/* + * The current device is removed, the USB layer tell us to shut it down... 
+ */ +static void ks959_disconnect(struct usb_interface *intf) +{ + struct ks959_cb *kingsun = usb_get_intfdata(intf); + + if (!kingsun) + return; + + unregister_netdev(kingsun->netdev); + + /* Mop up receive && transmit urb's */ + if (kingsun->speed_urb != NULL) { + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + } + if (kingsun->tx_urb != NULL) { + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + } + if (kingsun->rx_urb != NULL) { + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + } + + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_setuprequest); + kfree(kingsun->tx_buf_xored); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_setuprequest); + kfree(kingsun->rx_buf); + free_netdev(kingsun->netdev); + + usb_set_intfdata(intf, NULL); +} + +#ifdef CONFIG_PM +/* USB suspend, so power off the transmitter/receiver */ +static int ks959_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct ks959_cb *kingsun = usb_get_intfdata(intf); + + netif_device_detach(kingsun->netdev); + if (kingsun->speed_urb != NULL) + usb_kill_urb(kingsun->speed_urb); + if (kingsun->tx_urb != NULL) + usb_kill_urb(kingsun->tx_urb); + if (kingsun->rx_urb != NULL) + usb_kill_urb(kingsun->rx_urb); + return 0; +} + +/* Coming out of suspend, so reset hardware */ +static int ks959_resume(struct usb_interface *intf) +{ + struct ks959_cb *kingsun = usb_get_intfdata(intf); + + if (kingsun->rx_urb != NULL) { + /* Setup request already filled in ks959_probe */ + usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + } + netif_device_attach(kingsun->netdev); + + return 0; +} +#endif + +/* + * USB device callbacks + */ +static struct usb_driver irda_driver = { + .name = "ks959-sir", + .probe = ks959_probe, + .disconnect = ks959_disconnect, + .id_table = dongles, +#ifdef CONFIG_PM + .suspend = ks959_suspend, + .resume = ks959_resume, +#endif +}; + +/* + * Module insertion + */ +static int __init ks959_init(void) +{ + return usb_register(&irda_driver); +} + +module_init(ks959_init); + +/* + * Module removal + */ +static void __exit ks959_cleanup(void) +{ + /* Deregister the driver and remove all pending instances */ + usb_deregister(&irda_driver); +} + +module_exit(ks959_cleanup); + +MODULE_AUTHOR("Alex VillacĂs Lasso <a_villacis@palosanto.com>"); +MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c new file mode 100644 index 000000000000..d01a28593ce2 --- /dev/null +++ b/drivers/net/irda/ksdazzle-sir.c @@ -0,0 +1,832 @@ +/***************************************************************************** +* +* Filename: ksdazzle.c +* Version: 0.1.2 +* Description: Irda KingSun Dazzle USB Dongle +* Status: Experimental +* Author: Alex VillacĂs Lasso <a_villacis@palosanto.com> +* +* Based on stir4200, mcs7780, kingsun-sir drivers. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +* +*****************************************************************************/ + +/* + * Following is my most current (2007-07-26) understanding of how the Kingsun + * 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This + * information was deduced by examining the USB traffic captured with USBSnoopy + * from the WinXP driver. Feel free to update here as more of the dongle is + * known. + * + * General: This dongle exposes one interface with two interrupt endpoints, one + * IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine + * dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and + * unwrapped manually as in stir4200, kingsun-sir, and ks959-sir. + * + * Transmission: To transmit an IrDA frame, it is necessary to wrap it, then + * split it into multiple segments of up to 7 bytes each, and transmit each in + * sequence. It seems that sending a single big block (like kingsun-sir does) + * won't work with this dongle. Each segment needs to be prefixed with a value + * equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload + * of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9, + * and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the + * payload, not considered by the prefix, are ignored (set to 0 by this + * implementation). + * + * Reception: To receive data, the driver must poll the dongle regularly (like + * kingsun-sir.c) with interrupt URBs. If data is available, it will be returned + * in payloads from 0 to 8 bytes long. When concatenated, these payloads form + * a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir + * + * Speed change: To change the speed of the dongle, the driver prepares a + * control URB with the following as a setup packet: + * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x09 + * wValue 0x0200 + * wIndex 0x0001 + * wLength 0x0008 (length of the payload) + * The payload is a 8-byte record, apparently identical to the one used in + * drivers/usb/serial/cypress_m8.c to change speed: + * __u32 baudSpeed; + * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits + * unsigned int : 1; + * unsigned int stopBits : 1; + * unsigned int parityEnable : 1; + * unsigned int parityType : 1; + * unsigned int : 1; + * unsigned int reset : 1; + * unsigned char reserved[3]; // set to 0 + * + * For now only SIR speeds have been observed with this dongle. Therefore, + * nothing is known on what changes (if any) must be done to frame wrapping / + * unwrapping for higher than SIR speeds. This driver assumes no change is + * necessary and announces support for all the way to 115200 bps. 
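[Editorial illustration, not part of the patch: the 8-byte segment format described above can be shown in a few lines of C; the helper name is hypothetical:]

#include <linux/string.h>

/* Sketch only: pack up to 7 bytes of already-wrapped IrDA data into one
 * 8-byte interrupt-OUT payload, prefixed with 0xF8 + <segment length> and
 * zero-padded, as described above. Returns how many data bytes were taken. */
static unsigned int example_ksdazzle_pack_segment(const unsigned char *data,
						  unsigned int len,
						  unsigned char payload[8])
{
	unsigned int seg = len > 7 ? 7 : len;

	memset(payload, 0, 8);
	payload[0] = 0xf8 + seg;	/* 0xF9 for 1 byte ... 0xFF for 7 bytes */
	memcpy(payload + 1, data, seg);
	return seg;
}

[The driver's ksdazzle_submit_tx_fragment below does the same packing directly into its tx_payload buffer before submitting the interrupt URB.]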
+ */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/usb.h> +#include <linux/device.h> +#include <linux/crc32.h> + +#include <asm/unaligned.h> +#include <asm/byteorder.h> +#include <asm/uaccess.h> + +#include <net/irda/irda.h> +#include <net/irda/wrapper.h> +#include <net/irda/crc.h> + +#define KSDAZZLE_VENDOR_ID 0x07d0 +#define KSDAZZLE_PRODUCT_ID 0x4100 + +/* These are the currently known USB ids */ +static struct usb_device_id dongles[] = { + /* KingSun Co,Ltd IrDA/USB Bridge */ + {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, dongles); + +#define KINGSUN_MTT 0x07 +#define KINGSUN_REQ_RECV 0x01 +#define KINGSUN_REQ_SEND 0x09 + +#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ +#define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */ + +struct ksdazzle_speedparams { + __le32 baudrate; /* baud rate, little endian */ + __u8 flags; + __u8 reserved[3]; +} __attribute__ ((packed)); + +#define KS_DATA_5_BITS 0x00 +#define KS_DATA_6_BITS 0x01 +#define KS_DATA_7_BITS 0x02 +#define KS_DATA_8_BITS 0x03 + +#define KS_STOP_BITS_1 0x00 +#define KS_STOP_BITS_2 0x08 + +#define KS_PAR_DISABLE 0x00 +#define KS_PAR_EVEN 0x10 +#define KS_PAR_ODD 0x30 +#define KS_RESET 0x80 + +#define KINGSUN_EP_IN 0 +#define KINGSUN_EP_OUT 1 + +struct ksdazzle_cb { + struct usb_device *usbdev; /* init: probe_irda */ + struct net_device *netdev; /* network layer */ + struct irlap_cb *irlap; /* The link layer we are binded to */ + struct net_device_stats stats; /* network statistics */ + struct qos_info qos; + + struct urb *tx_urb; + __u8 *tx_buf_clear; + unsigned int tx_buf_clear_used; + unsigned int tx_buf_clear_sent; + __u8 tx_payload[8]; + + struct urb *rx_urb; + __u8 *rx_buf; + iobuff_t rx_unwrap_buff; + + struct usb_ctrlrequest *speed_setuprequest; + struct urb *speed_urb; + struct ksdazzle_speedparams speedparams; + unsigned int new_speed; + + __u8 ep_in; + __u8 ep_out; + + spinlock_t lock; + int receiving; +}; + +/* Callback transmission routine */ +static void ksdazzle_speed_irq(struct urb *urb) +{ + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ksdazzle_speed_irq: urb asynchronously failed - %d", + urb->status); + } +} + +/* Send a control request to change speed of the dongle */ +static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed) +{ + static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, + 57600, 115200, 576000, 1152000, 4000000, 0 + }; + int err; + unsigned int i; + + if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) + return -ENOMEM; + + /* Check that requested speed is among the supported ones */ + for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; + if (supported_speeds[i] == 0) + return -EOPNOTSUPP; + + memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams)); + kingsun->speedparams.baudrate = cpu_to_le32(speed); + kingsun->speedparams.flags = KS_DATA_8_BITS; + + /* speed_setuprequest pre-filled in ksdazzle_probe */ + usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, + usb_sndctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->speed_setuprequest, + &(kingsun->speedparams), + sizeof(struct ksdazzle_speedparams), + ksdazzle_speed_irq, kingsun); + kingsun->speed_urb->status = 0; + err = 
usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); + + return err; +} + +/* Submit one fragment of an IrDA frame to the dongle */ +static void ksdazzle_send_irq(struct urb *urb); +static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun) +{ + unsigned int wraplen; + int ret; + + /* We can send at most 7 bytes of payload at a time */ + wraplen = 7; + if (wraplen > kingsun->tx_buf_clear_used) + wraplen = kingsun->tx_buf_clear_used; + + /* Prepare payload prefix with used length */ + memset(kingsun->tx_payload, 0, 8); + kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen; + memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen); + + usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev, + usb_sndintpipe(kingsun->usbdev, kingsun->ep_out), + kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 1); + kingsun->tx_urb->status = 0; + ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); + + /* Remember how much data was sent, in order to update at callback */ + kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0; + return ret; +} + +/* Callback transmission routine */ +static void ksdazzle_send_irq(struct urb *urb) +{ + struct ksdazzle_cb *kingsun = urb->context; + struct net_device *netdev = kingsun->netdev; + int ret = 0; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + err("ksdazzle_send_irq: Network not running!"); + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ksdazzle_send_irq: urb asynchronously failed - %d", + urb->status); + return; + } + + if (kingsun->tx_buf_clear_used > 0) { + /* Update data remaining to be sent */ + if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { + memmove(kingsun->tx_buf_clear, + kingsun->tx_buf_clear + + kingsun->tx_buf_clear_sent, + kingsun->tx_buf_clear_used - + kingsun->tx_buf_clear_sent); + } + kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; + kingsun->tx_buf_clear_sent = 0; + + if (kingsun->tx_buf_clear_used > 0) { + /* There is more data to be sent */ + if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { + err("ksdazzle_send_irq: failed tx_urb submit: %d", ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } + } else { + /* All data sent, send next speed && wake network queue */ + if (kingsun->new_speed != -1 && + cpu_to_le32(kingsun->new_speed) != + kingsun->speedparams.baudrate) + ksdazzle_change_speed(kingsun, + kingsun->new_speed); + + netif_wake_queue(netdev); + } + } +} + +/* + * Called from net/core when new frame is available. 
+ */ +static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ksdazzle_cb *kingsun; + unsigned int wraplen; + int ret = 0; + + if (skb == NULL || netdev == NULL) + return -EINVAL; + + netif_stop_queue(netdev); + + /* the IRDA wrapping routines don't deal with non linear skb */ + SKB_LINEAR_ASSERT(skb); + + kingsun = netdev_priv(netdev); + + spin_lock(&kingsun->lock); + kingsun->new_speed = irda_get_next_speed(skb); + + /* Append data to the end of whatever data remains to be transmitted */ + wraplen = + async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); + kingsun->tx_buf_clear_used = wraplen; + + if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { + err("ksdazzle_hard_xmit: failed tx_urb submit: %d", ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } else { + kingsun->stats.tx_packets++; + kingsun->stats.tx_bytes += skb->len; + + } + + dev_kfree_skb(skb); + spin_unlock(&kingsun->lock); + + return ret; +} + +/* Receive callback function */ +static void ksdazzle_rcv_irq(struct urb *urb) +{ + struct ksdazzle_cb *kingsun = urb->context; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + kingsun->receiving = 0; + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ksdazzle_rcv_irq: urb asynchronously failed - %d", + urb->status); + kingsun->receiving = 0; + return; + } + + if (urb->actual_length > 0) { + __u8 *bytes = urb->transfer_buffer; + unsigned int i; + + for (i = 0; i < urb->actual_length; i++) { + async_unwrap_char(kingsun->netdev, &kingsun->stats, + &kingsun->rx_unwrap_buff, bytes[i]); + } + kingsun->netdev->last_rx = jiffies; + kingsun->receiving = + (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; + } + + /* This urb has already been filled in ksdazzle_net_open. It is assumed that + urb keeps the pointer to the payload buffer. + */ + urb->status = 0; + usb_submit_urb(urb, GFP_ATOMIC); +} + +/* + * Function ksdazzle_net_open (dev) + * + * Network device is taken up. Usually this is done by "ifconfig irda0 up" + */ +static int ksdazzle_net_open(struct net_device *netdev) +{ + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + int err = -ENOMEM; + char hwname[16]; + + /* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */ + kingsun->receiving = 0; + + /* Initialize for SIR to copy data directly into skb. */ + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; + kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); + if (!kingsun->rx_unwrap_buff.skb) + goto free_mem; + + skb_reserve(kingsun->rx_unwrap_buff.skb, 1); + kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; + + kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->rx_urb) + goto free_mem; + + kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->tx_urb) + goto free_mem; + + kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->speed_urb) + goto free_mem; + + /* Initialize speed for dongle */ + kingsun->new_speed = 9600; + err = ksdazzle_change_speed(kingsun, 9600); + if (err < 0) + goto free_mem; + + /* + * Now that everything should be initialized properly, + * Open new IrLAP layer instance to take care of us... 
+ */ + sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); + kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); + if (!kingsun->irlap) { + err("ksdazzle-sir: irlap_open failed"); + goto free_mem; + } + + /* Start reception. */ + usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev, + usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in), + kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq, + kingsun, 1); + kingsun->rx_urb->status = 0; + err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + if (err) { + err("ksdazzle-sir: first urb-submit failed: %d", err); + goto close_irlap; + } + + netif_start_queue(netdev); + + /* Situation at this point: + - all work buffers allocated + - urbs allocated and ready to fill + - max rx packet known (in max_rx) + - unwrap state machine initialized, in state outside of any frame + - receive request in progress + - IrLAP layer started, about to hand over packets to send + */ + + return 0; + + close_irlap: + irlap_close(kingsun->irlap); + free_mem: + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + if (kingsun->rx_unwrap_buff.skb) { + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + } + return err; +} + +/* + * Function ksdazzle_net_close (dev) + * + * Network device is taken down. Usually this is done by + * "ifconfig irda0 down" + */ +static int ksdazzle_net_close(struct net_device *netdev) +{ + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + + /* Stop transmit processing */ + netif_stop_queue(netdev); + + /* Mop up receive && transmit urb's */ + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->receiving = 0; + + /* Stop and remove instance of IrLAP */ + irlap_close(kingsun->irlap); + + kingsun->irlap = NULL; + + return 0; +} + +/* + * IOCTLs : Extra out-of-band network commands... 
+ */ +static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq, + int cmd) +{ + struct if_irda_req *irq = (struct if_irda_req *)rq; + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + int ret = 0; + + switch (cmd) { + case SIOCSBANDWIDTH: /* Set bandwidth */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the device is still there */ + if (netif_device_present(kingsun->netdev)) + return ksdazzle_change_speed(kingsun, + irq->ifr_baudrate); + break; + + case SIOCSMEDIABUSY: /* Set media busy */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the IrDA stack is still there */ + if (netif_running(kingsun->netdev)) + irda_device_set_media_busy(kingsun->netdev, TRUE); + break; + + case SIOCGRECEIVING: + /* Only approximately true */ + irq->ifr_receiving = kingsun->receiving; + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +/* + * Get device stats (for /proc/net/dev and ifconfig) + */ +static struct net_device_stats *ksdazzle_net_get_stats(struct net_device + *netdev) +{ + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + return &kingsun->stats; +} + +/* + * This routine is called by the USB subsystem for each new device + * in the system. We need to check if the device is ours, and in + * this case start handling it. + */ +static int ksdazzle_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_host_interface *interface; + struct usb_endpoint_descriptor *endpoint; + + struct usb_device *dev = interface_to_usbdev(intf); + struct ksdazzle_cb *kingsun = NULL; + struct net_device *net = NULL; + int ret = -ENOMEM; + int pipe, maxp_in, maxp_out; + __u8 ep_in; + __u8 ep_out; + + /* Check that there really are two interrupt endpoints. Check based on the + one in drivers/usb/input/usbmouse.c + */ + interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints != 2) { + err("ksdazzle: expected 2 endpoints, found %d", + interface->desc.bNumEndpoints); + return -ENODEV; + } + endpoint = &interface->endpoint[KINGSUN_EP_IN].desc; + if (!usb_endpoint_is_int_in(endpoint)) { + err("ksdazzle: endpoint 0 is not interrupt IN"); + return -ENODEV; + } + + ep_in = endpoint->bEndpointAddress; + pipe = usb_rcvintpipe(dev, ep_in); + maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); + if (maxp_in > 255 || maxp_in <= 1) { + err("ksdazzle: endpoint 0 has max packet size %d not in range [2..255]", maxp_in); + return -ENODEV; + } + + endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc; + if (!usb_endpoint_is_int_out(endpoint)) { + err("ksdazzle: endpoint 1 is not interrupt OUT"); + return -ENODEV; + } + + ep_out = endpoint->bEndpointAddress; + pipe = usb_sndintpipe(dev, ep_out); + maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); + + /* Allocate network device container. 
*/ + net = alloc_irdadev(sizeof(*kingsun)); + if (!net) + goto err_out1; + + SET_NETDEV_DEV(net, &intf->dev); + kingsun = netdev_priv(net); + kingsun->netdev = net; + kingsun->usbdev = dev; + kingsun->ep_in = ep_in; + kingsun->ep_out = ep_out; + kingsun->irlap = NULL; + kingsun->tx_urb = NULL; + kingsun->tx_buf_clear = NULL; + kingsun->tx_buf_clear_used = 0; + kingsun->tx_buf_clear_sent = 0; + + kingsun->rx_urb = NULL; + kingsun->rx_buf = NULL; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->receiving = 0; + spin_lock_init(&kingsun->lock); + + kingsun->speed_setuprequest = NULL; + kingsun->speed_urb = NULL; + kingsun->speedparams.baudrate = 0; + + /* Allocate input buffer */ + kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL); + if (!kingsun->rx_buf) + goto free_mem; + + /* Allocate output buffer */ + kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); + if (!kingsun->tx_buf_clear) + goto free_mem; + + /* Allocate and initialize speed setup packet */ + kingsun->speed_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->speed_setuprequest) + goto free_mem; + kingsun->speed_setuprequest->bRequestType = + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; + kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); + kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); + kingsun->speed_setuprequest->wLength = + cpu_to_le16(sizeof(struct ksdazzle_speedparams)); + + printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, " + "Vendor: %x, Product: %x\n", + dev->devnum, le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); + + /* Initialize QoS for this device */ + irda_init_max_qos_capabilies(&kingsun->qos); + + /* Baud rates known to be supported. Please uncomment if devices (other + than a SonyEriccson K300 phone) can be shown to support higher speeds + with this dongle. + */ + kingsun->qos.baud_rate.bits = + IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200; + kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; + irda_qos_bits_to_value(&kingsun->qos); + + /* Override the network functions we need to use */ + net->hard_start_xmit = ksdazzle_hard_xmit; + net->open = ksdazzle_net_open; + net->stop = ksdazzle_net_close; + net->get_stats = ksdazzle_net_get_stats; + net->do_ioctl = ksdazzle_net_ioctl; + + ret = register_netdev(net); + if (ret != 0) + goto free_mem; + + info("IrDA: Registered KingSun/Dazzle device %s", net->name); + + usb_set_intfdata(intf, kingsun); + + /* Situation at this point: + - all work buffers allocated + - setup requests pre-filled + - urbs not allocated, set to NULL + - max rx packet known (is KINGSUN_FIFO_SIZE) + - unwrap state machine (partially) initialized, but skb == NULL + */ + + return 0; + + free_mem: + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_buf); + free_netdev(net); + err_out1: + return ret; +} + +/* + * The current device is removed, the USB layer tell us to shut it down... 
+ */ +static void ksdazzle_disconnect(struct usb_interface *intf) +{ + struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); + + if (!kingsun) + return; + + unregister_netdev(kingsun->netdev); + + /* Mop up receive && transmit urb's */ + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_buf); + free_netdev(kingsun->netdev); + + usb_set_intfdata(intf, NULL); +} + +#ifdef CONFIG_PM +/* USB suspend, so power off the transmitter/receiver */ +static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); + + netif_device_detach(kingsun->netdev); + if (kingsun->speed_urb != NULL) + usb_kill_urb(kingsun->speed_urb); + if (kingsun->tx_urb != NULL) + usb_kill_urb(kingsun->tx_urb); + if (kingsun->rx_urb != NULL) + usb_kill_urb(kingsun->rx_urb); + return 0; +} + +/* Coming out of suspend, so reset hardware */ +static int ksdazzle_resume(struct usb_interface *intf) +{ + struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); + + if (kingsun->rx_urb != NULL) { + /* Setup request already filled in ksdazzle_probe */ + usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + } + netif_device_attach(kingsun->netdev); + + return 0; +} +#endif + +/* + * USB device callbacks + */ +static struct usb_driver irda_driver = { + .name = "ksdazzle-sir", + .probe = ksdazzle_probe, + .disconnect = ksdazzle_disconnect, + .id_table = dongles, +#ifdef CONFIG_PM + .suspend = ksdazzle_suspend, + .resume = ksdazzle_resume, +#endif +}; + +/* + * Module insertion + */ +static int __init ksdazzle_init(void) +{ + return usb_register(&irda_driver); +} + +module_init(ksdazzle_init); + +/* + * Module removal + */ +static void __exit ksdazzle_cleanup(void) +{ + /* Deregister the driver and remove all pending instances */ + usb_deregister(&irda_driver); +} + +module_exit(ksdazzle_cleanup); + +MODULE_AUTHOR("Alex VillacĂs Lasso <a_villacis@palosanto.com>"); +MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 0de867288a47..0b769192d4ce 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -50,7 +50,6 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/module.h> #include <linux/kref.h> #include <linux/usb.h> #include <linux/device.h> @@ -465,7 +464,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) } fcs = ~(crc32_le(~0, buf, new_len)); - if(fcs != le32_to_cpu(get_unaligned((u32 *)(buf+new_len)))) { + if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) { IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); mcs->stats.rx_errors++; mcs->stats.rx_crc_errors++; @@ -899,8 +898,6 @@ static int mcs_probe(struct usb_interface *intf, IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); - /* what is it realy for? 
*/ - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &intf->dev); ret = usb_reset_configuration(udev); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index d96c89751a71..12b9378c587f 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -437,7 +437,6 @@ static int __init nsc_ircc_open(chipio_t *info) self->tx_fifo.tail = self->tx_buff.head; /* Override the network functions we need to use */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = nsc_ircc_hard_xmit_sir; dev->open = nsc_ircc_net_open; dev->stop = nsc_ircc_net_close; diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 9d6c8f391b2d..6078e03de9a8 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -414,7 +414,7 @@ EXPORT_SYMBOL(sirdev_raw_read); int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts) { int ret = -ENXIO; - if (dev->drv->set_dtr_rts != 0) + if (dev->drv->set_dtr_rts) ret = dev->drv->set_dtr_rts(dev, dtr, rts); return ret; } @@ -913,8 +913,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n dev->drv = drv; dev->netdev = ndev; - SET_MODULE_OWNER(ndev); - /* Override the network functions we need to use */ ndev->hard_start_xmit = sirdev_hard_xmit; ndev->open = sirdev_open; diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 36ab98386be0..7e7b5828214a 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -81,7 +81,7 @@ MODULE_LICENSE("GPL"); static int smsc_nopnp = 1; module_param_named(nopnp, smsc_nopnp, bool, 0); -MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings"); +MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true"); #define DMA_INVAL 255 static int ircc_dma = DMA_INVAL; @@ -519,8 +519,6 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u goto err_out1; } - SET_MODULE_OWNER(dev); - dev->hard_start_xmit = smsc_ircc_hard_xmit_sir; #if SMSC_IRCC2_C_NET_TIMEOUT dev->tx_timeout = smsc_ircc_timeout; diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 755aa444a4dd..042bc2f0417d 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -332,7 +332,7 @@ static void fir_eof(struct stir_cb *stir) } fcs = ~(crc32_le(~0, rx_buff->data, len)); - if (fcs != le32_to_cpu(get_unaligned((u32 *)(rx_buff->data+len)))) { + if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) { pr_debug("crc error calc 0x%x len %d\n", fcs, len); stir->stats.rx_errors++; stir->stats.rx_crc_errors++; @@ -1034,7 +1034,6 @@ static int stir_probe(struct usb_interface *intf, if(!net) goto err_out1; - SET_MODULE_OWNER(net); SET_NETDEV_DEV(net, &intf->dev); stir = netdev_priv(net); stir->netdev = net; diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index ff5358574d0a..126ec7c8680e 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -429,9 +429,6 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id) self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); - /* Override the network functions we need to use */ dev->hard_start_xmit = via_ircc_hard_xmit_sir; dev->open = via_ircc_net_open; diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 0538ca9ce058..acd082a96a4f 100644 --- a/drivers/net/irda/vlsi_ir.c +++ 
b/drivers/net/irda/vlsi_ir.c @@ -1584,8 +1584,6 @@ static int vlsi_irda_init(struct net_device *ndev) vlsi_irda_dev_t *idev = ndev->priv; struct pci_dev *pdev = idev->pdev; - SET_MODULE_OWNER(ndev); - ndev->irq = pdev->irq; ndev->base_addr = pci_resource_start(pdev,0); diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index ca12a6096419..c8b9c74eea52 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -537,10 +537,10 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect) */ struct ring_descr_hw { - volatile u16 rd_count; /* tx/rx count [11:0] */ - u16 reserved; + volatile __le16 rd_count; /* tx/rx count [11:0] */ + __le16 reserved; union { - u32 addr; /* [23:0] of the buffer's busaddress */ + __le32 addr; /* [23:0] of the buffer's busaddress */ struct { u8 addr_res[3]; volatile u8 status; /* descriptor status */ diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 5182e800cc18..9fd2451b0fb2 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -232,9 +232,6 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq, self->rx_buff.data = self->rx_buff.head; self->netdev = dev; - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); - /* Override the network functions we need to use */ dev->hard_start_xmit = w83977af_hard_xmit; dev->open = w83977af_net_open; diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c index 0343f12d2ffb..d6ff26af37b3 100644 --- a/drivers/net/isa-skeleton.c +++ b/drivers/net/isa-skeleton.c @@ -133,8 +133,6 @@ static int __init do_netcard_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return netcard_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -194,6 +192,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) static unsigned version_printed; int i; int err = -ENODEV; + DECLARE_MAC_BUF(mac); /* Grab the region so that no one else tries to probe our ioports. */ if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname)) @@ -219,7 +218,9 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + + printk("%s", print_mac(mac, dev->dev_addr)); err = -EAGAIN; #ifdef jumpered_interrupts diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 347d50cd77d4..97bd9dc2e52e 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -196,7 +196,6 @@ struct veth_lpar_connection { struct veth_port { struct device *dev; - struct net_device_stats stats; u64 mac_addr; HvLpIndexMap lpar_map; @@ -822,10 +821,9 @@ static int veth_init_connection(u8 rlp) || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) ) return 0; - cnx = kmalloc(sizeof(*cnx), GFP_KERNEL); + cnx = kzalloc(sizeof(*cnx), GFP_KERNEL); if (! cnx) return -ENOMEM; - memset(cnx, 0, sizeof(*cnx)); cnx->remote_lp = rlp; spin_lock_init(&cnx->lock); @@ -852,14 +850,13 @@ static int veth_init_connection(u8 rlp) if (rc != 0) return rc; - msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL); + msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL); if (! 
msgs) { veth_error("Can't allocate buffers for LPAR %d.\n", rlp); return -ENOMEM; } cnx->msgs = msgs; - memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg)); for (i = 0; i < VETH_NUMBUFFERS; i++) { msgs[i].token = i; @@ -938,9 +935,6 @@ static void veth_release_connection(struct kobject *kobj) static int veth_open(struct net_device *dev) { - struct veth_port *port = (struct veth_port *) dev->priv; - - memset(&port->stats, 0, sizeof (port->stats)); netif_start_queue(dev); return 0; } @@ -951,13 +945,6 @@ static int veth_close(struct net_device *dev) return 0; } -static struct net_device_stats *veth_get_stats(struct net_device *dev) -{ - struct veth_port *port = (struct veth_port *) dev->priv; - - return &port->stats; -} - static int veth_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU)) @@ -1086,7 +1073,6 @@ static struct net_device * __init veth_probe_one(int vlan, dev->open = veth_open; dev->hard_start_xmit = veth_start_xmit; dev->stop = veth_close; - dev->get_stats = veth_get_stats; dev->change_mtu = veth_change_mtu; dev->set_mac_address = NULL; dev->set_multicast_list = veth_set_multicast_list; @@ -1185,7 +1171,6 @@ static void veth_transmit_to_many(struct sk_buff *skb, HvLpIndexMap lpmask, struct net_device *dev) { - struct veth_port *port = (struct veth_port *) dev->priv; int i, success, error; success = error = 0; @@ -1201,11 +1186,11 @@ static void veth_transmit_to_many(struct sk_buff *skb, } if (error) - port->stats.tx_errors++; + dev->stats.tx_errors++; if (success) { - port->stats.tx_packets++; - port->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; } } @@ -1543,8 +1528,8 @@ static void veth_receive(struct veth_lpar_connection *cnx, skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_NONE; netif_rx(skb); /* send it up */ - port->stats.rx_packets++; - port->stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG); /* Ack it */ diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 3569d5b03388..1eee8894c732 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -184,6 +184,7 @@ struct ixgb_adapter { boolean_t rx_csum; /* OS defined structs */ + struct napi_struct napi; struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 52c99d01d568..e8eb0fd6c576 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data) ixgb_cleanup_eeprom(hw); /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ - ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR); + ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); return; } @@ -476,19 +476,19 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw) uint16_t ee_data; ee_data = ixgb_read_eeprom(hw, i); checksum += ee_data; - hw->eeprom[i] = le16_to_cpu(ee_data); + hw->eeprom[i] = cpu_to_le16(ee_data); } if (checksum != (uint16_t) EEPROM_SUM) { DEBUGOUT("ixgb_ee: Checksum invalid.\n"); /* clear the init_ctrl_reg_1 to signify that the cache is * invalidated */ - ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR); + ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); return (FALSE); } - if ((ee_map->init_ctrl_reg_1 & 
le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK)) - != le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) { + if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) + != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { DEBUGOUT("ixgb_ee: Signature invalid.\n"); return(FALSE); } @@ -511,8 +511,8 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) { struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK)) - == le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) { + if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) + == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { return (TRUE); } else { return ixgb_get_eeprom_data(hw); @@ -528,7 +528,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) * Returns: * Word at indexed offset in eeprom, if valid, 0 otherwise. ******************************************************************************/ -uint16_t +__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index) { diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h index ef236b935c15..7908bf3005ed 100644 --- a/drivers/net/ixgb/ixgb_ee.h +++ b/drivers/net/ixgb/ixgb_ee.h @@ -76,22 +76,22 @@ /* EEPROM structure */ struct ixgb_ee_map_type { uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; - uint16_t compatibility; - uint16_t reserved1[4]; - uint32_t pba_number; - uint16_t init_ctrl_reg_1; - uint16_t subsystem_id; - uint16_t subvendor_id; - uint16_t device_id; - uint16_t vendor_id; - uint16_t init_ctrl_reg_2; - uint16_t oem_reserved[16]; - uint16_t swdpins_reg; - uint16_t circuit_ctrl_reg; + __le16 compatibility; + __le16 reserved1[4]; + __le32 pba_number; + __le16 init_ctrl_reg_1; + __le16 subsystem_id; + __le16 subvendor_id; + __le16 device_id; + __le16 vendor_id; + __le16 init_ctrl_reg_2; + __le16 oem_reserved[16]; + __le16 swdpins_reg; + __le16 circuit_ctrl_reg; uint8_t d3_power; uint8_t d0_power; - uint16_t reserved2[28]; - uint16_t checksum; + __le16 reserved2[28]; + __le16 checksum; }; /* EEPROM Functions */ diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index afde84868bea..fddd5844168d 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -94,8 +94,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = { {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)} }; -#define IXGB_STATS_LEN \ - sizeof(ixgb_gstrings_stats) / sizeof(struct ixgb_stats) +#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats) static int ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) @@ -423,7 +422,7 @@ ixgb_get_eeprom(struct net_device *netdev, { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - uint16_t *eeprom_buff; + __le16 *eeprom_buff; int i, max_len, first_word, last_word; int ret_val = 0; @@ -447,7 +446,7 @@ ixgb_get_eeprom(struct net_device *netdev, first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(sizeof(uint16_t) * + eeprom_buff = kmalloc(sizeof(__le16) * (last_word - first_word + 1), GFP_KERNEL); if(!eeprom_buff) return -ENOMEM; @@ -660,9 +659,14 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data) } static int -ixgb_get_stats_count(struct net_device *netdev) +ixgb_get_sset_count(struct net_device *netdev, int sset) { - return IXGB_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + return IXGB_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void @@ -714,17 +718,14 @@ static const struct ethtool_ops 
ixgb_ethtool_ops = { .set_rx_csum = ixgb_set_rx_csum, .get_tx_csum = ixgb_get_tx_csum, .set_tx_csum = ixgb_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_msglevel = ixgb_get_msglevel, .set_msglevel = ixgb_set_msglevel, - .get_tso = ethtool_op_get_tso, .set_tso = ixgb_set_tso, .get_strings = ixgb_get_strings, .phys_id = ixgb_phys_id, - .get_stats_count = ixgb_get_stats_count, + .get_sset_count = ixgb_get_sset_count, .get_ethtool_stats = ixgb_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; void ixgb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h index 40ef5ca88717..af5643324ee3 100644 --- a/drivers/net/ixgb/ixgb_hw.h +++ b/drivers/net/ixgb/ixgb_hw.h @@ -711,7 +711,7 @@ struct ixgb_hw { uint32_t bar2; uint32_t bar3; uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */ - uint16_t eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ + __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ unsigned long io_base; /* Our I/O mapped location */ uint32_t lastLFC; uint32_t lastRFC; @@ -809,7 +809,7 @@ void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); -uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); +__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); /* Everything else */ void ixgb_led_on(struct ixgb_hw *hw); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 991c8833e23c..d444de58ba34 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -97,7 +97,7 @@ static irqreturn_t ixgb_intr(int irq, void *data); static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); #ifdef CONFIG_IXGB_NAPI -static int ixgb_clean(struct net_device *netdev, int *budget); +static int ixgb_clean(struct napi_struct *napi, int budget); static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do); #else @@ -288,7 +288,7 @@ ixgb_up(struct ixgb_adapter *adapter) mod_timer(&adapter->watchdog_timer, jiffies); #ifdef CONFIG_IXGB_NAPI - netif_poll_enable(netdev); + napi_enable(&adapter->napi); #endif ixgb_irq_enable(adapter); @@ -309,7 +309,7 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog) if(kill_watchdog) del_timer_sync(&adapter->watchdog_timer); #ifdef CONFIG_IXGB_NAPI - netif_poll_disable(netdev); + napi_disable(&adapter->napi); #endif adapter->link_speed = 0; adapter->link_duplex = 0; @@ -382,7 +382,6 @@ ixgb_probe(struct pci_dev *pdev, goto err_alloc_etherdev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -421,8 +420,7 @@ ixgb_probe(struct pci_dev *pdev, netdev->tx_timeout = &ixgb_tx_timeout; netdev->watchdog_timeo = 5 * HZ; #ifdef CONFIG_IXGB_NAPI - netdev->poll = &ixgb_clean; - netdev->weight = 64; + netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); #endif netdev->vlan_rx_register = ixgb_vlan_rx_register; netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid; @@ -1746,7 +1744,7 @@ ixgb_intr(int irq, void *data) } #ifdef CONFIG_IXGB_NAPI - if(netif_rx_schedule_prep(netdev)) { + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { /* Disable interrupts and register for poll. The flush of the posted write is intentionally left out. 
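For context, the ixgb hunks above and below are part of the conversion from the old netdev->poll interface to the napi_struct API: the poll routine now receives the napi_struct plus a budget and returns how much work it actually did, instead of adjusting *budget and netdev->quota. A minimal sketch of that pattern against the same-era API, with hypothetical foo_* names standing in for the driver's own helpers:

/* Hedged sketch of the napi_struct poll shape the ixgb conversion follows. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_adapter *adapter = container_of(napi, struct foo_adapter, napi);
	int work_done = 0;

	foo_clean_tx_irq(adapter);
	foo_clean_rx_irq(adapter, &work_done, budget);

	/* Finishing under budget means the ring is drained: leave polling
	 * mode and re-enable the device interrupt. */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		foo_irq_enable(adapter);
	}
	return work_done;
}

/* ...and in probe, replacing the old netdev->poll / netdev->weight setup: */
netif_napi_add(netdev, &adapter->napi, foo_poll, 64);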
@@ -1754,7 +1752,7 @@ ixgb_intr(int irq, void *data) atomic_inc(&adapter->irq_sem); IXGB_WRITE_REG(&adapter->hw, IMC, ~0); - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } #else /* yes, that is actually a & and it is meant to make sure that @@ -1776,27 +1774,23 @@ ixgb_intr(int irq, void *data) **/ static int -ixgb_clean(struct net_device *netdev, int *budget) +ixgb_clean(struct napi_struct *napi, int budget) { - struct ixgb_adapter *adapter = netdev_priv(netdev); - int work_to_do = min(*budget, netdev->quota); + struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); + struct net_device *netdev = adapter->netdev; int tx_cleaned; int work_done = 0; tx_cleaned = ixgb_clean_tx_irq(adapter); - ixgb_clean_rx_irq(adapter, &work_done, work_to_do); - - *budget -= work_done; - netdev->quota -= work_done; + ixgb_clean_rx_irq(adapter, &work_done, budget); /* if no Tx and not enough Rx work done, exit the polling mode */ if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); ixgb_irq_enable(adapter); - return 0; } - return 1; + return work_done; } #endif diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile new file mode 100644 index 000000000000..ccd83d9f579e --- /dev/null +++ b/drivers/net/ixgbe/Makefile @@ -0,0 +1,36 @@ +################################################################################ +# +# Intel 10 Gigabit PCI Express Linux driver +# Copyright(c) 1999 - 2007 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS <linux.nics@intel.com> +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_IXGBE) += ixgbe.o + +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ + ixgbe_82598.o ixgbe_phy.o diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h new file mode 100644 index 000000000000..c160a7d91e21 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe.h @@ -0,0 +1,259 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_H_ +#define _IXGBE_H_ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/netdevice.h> + +#include "ixgbe_type.h" +#include "ixgbe_common.h" + + +#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args) + +#define PFX "ixgbe: " +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __FUNCTION__ , ## args))) + +/* TX/RX descriptor defines */ +#define IXGBE_DEFAULT_TXD 1024 +#define IXGBE_MAX_TXD 4096 +#define IXGBE_MIN_TXD 64 + +#define IXGBE_DEFAULT_RXD 1024 +#define IXGBE_MAX_RXD 4096 +#define IXGBE_MIN_RXD 64 + +#define IXGBE_DEFAULT_RXQ 1 +#define IXGBE_MAX_RXQ 1 +#define IXGBE_MIN_RXQ 1 + +#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */ +#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */ +#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */ +#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */ + +/* flow control */ +#define IXGBE_DEFAULT_FCRTL 0x10000 +#define IXGBE_MIN_FCRTL 0 +#define IXGBE_MAX_FCRTL 0x7FF80 +#define IXGBE_DEFAULT_FCRTH 0x20000 +#define IXGBE_MIN_FCRTH 0 +#define IXGBE_MAX_FCRTH 0x7FFF0 +#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */ +#define IXGBE_MIN_FCPAUSE 0 +#define IXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define IXGBE_RXBUFFER_64 64 /* Used for packet split */ +#define IXGBE_RXBUFFER_128 128 /* Used for packet split */ +#define IXGBE_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBE_RXBUFFER_2048 2048 + +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) + +/* How many Tx Descriptors do we need to call netif_wake_queue? */ +#define IXGBE_TX_QUEUE_WAKE 16 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IXGBE_TX_FLAGS_CSUM (u32)(1) +#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) +#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) +#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbe_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; +}; + +struct ixgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; +}; + +struct ixgbe_queue_stats { + u64 packets; + u64 bytes; +}; + +struct ixgbe_ring { + struct ixgbe_adapter *adapter; /* backlink */ + void *desc; /* descriptor ring memory */ + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + unsigned int count; /* amount of descriptors */ + unsigned int next_to_use; + unsigned int next_to_clean; + + union { + struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_rx_buffer *rx_buffer_info; + }; + + u16 head; + u16 tail; + + /* To protect race between sender and clean_tx_irq */ + spinlock_t tx_lock; + + struct ixgbe_queue_stats stats; + + u32 eims_value; + u16 itr_register; + + char name[IFNAMSIZ + 5]; + u16 work_limit; /* max work per interrupt */ +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 0) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define IXGBE_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define IXGBE_RX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) +#define IXGBE_TX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) +#define IXGBE_TX_CTXTDESC_ADV(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 + +/* board specific private data structure */ +struct ixgbe_adapter { + struct timer_list watchdog_timer; + struct vlan_group *vlgrp; + u16 bd_number; + u16 rx_buf_len; + atomic_t irq_sem; + struct work_struct reset_task; + + /* TX */ + struct ixgbe_ring *tx_ring; /* One per active queue */ + struct napi_struct napi; + u64 restart_queue; + u64 lsc_int; + u64 hw_tso_ctxt; + u64 hw_tso6_ctxt; + u32 tx_timeout_count; + bool detect_tx_hung; + + /* RX */ + struct ixgbe_ring *rx_ring; /* One per active queue */ + u64 hw_csum_tx_good; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 non_eop_descs; + int num_tx_queues; + int num_rx_queues; + struct msix_entry *msix_entries; + + u64 rx_hdr_split; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + u32 flags; +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) + + /* Interrupt Throttle Rate */ + u32 rx_eitr; + u32 tx_eitr; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + + /* structs defined in ixgbe_hw.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbe_hw_stats stats; + char lsc_name[IFNAMSIZ + 5]; + + unsigned long state; + u64 tx_busy; +}; + +enum ixbge_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN +}; + +enum ixgbe_boards { + board_82598AF, + board_82598EB, + board_82598AT, +}; + +extern struct ixgbe_info ixgbe_82598AF_info; +extern struct ixgbe_info ixgbe_82598EB_info; +extern struct ixgbe_info ixgbe_82598AT_info; + +extern char ixgbe_driver_name[]; +extern char ixgbe_driver_version[]; + +extern int ixgbe_up(struct ixgbe_adapter *adapter); +extern void ixgbe_down(struct ixgbe_adapter *adapter); +extern void ixgbe_reset(struct ixgbe_adapter *adapter); +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); +extern void ixgbe_set_ethtool_ops(struct net_device *netdev); +extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rxdr); +extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *txdr); + +#endif /* _IXGBE_H_ */ diff --git 
a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c new file mode 100644 index 000000000000..00ee20125ca9 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -0,0 +1,589 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 + +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); +static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, + bool *autoneg); +static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, + u32 *speed, bool *autoneg); +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up); +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); +static s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up); +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); + + +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +{ + hw->mac.num_rx_queues = IXGBE_82598_MAX_TX_QUEUES; + hw->mac.num_tx_queues = IXGBE_82598_MAX_RX_QUEUES; + hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; + + return 0; +} + +/** + * ixgbe_get_link_settings_82598 - Determines default link settings + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the default link settings by reading the AUTOC register. 
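Stepping back briefly to the helper macros declared in ixgbe.h earlier in this patch: EITR_INTS_PER_SEC_TO_REG() converts a desired interrupt rate into the EITR register value (and, as the comment there notes, the same expression converts back), while IXGBE_DESC_UNUSED() computes free ring slots with wrap-around. A small standalone illustration of that arithmetic; the macros are copied here in parameterized form and the numbers are chosen only for the example:

#include <stdio.h>

/* Parameterized copies of the ixgbe.h helpers, for illustration only. */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 0)
#define DESC_UNUSED(clean, use, count) \
	((((clean) > (use)) ? 0 : (count)) + (clean) - (use) - 1)

int main(void)
{
	/* 8000 interrupts/sec -> register value -> back to interrupts/sec. */
	int reg = EITR_INTS_PER_SEC_TO_REG(8000);

	printf("8000 ints/sec -> EITR reg %d -> %d ints/sec\n",
	       reg, EITR_INTS_PER_SEC_TO_REG(reg));

	/* Ring of 1024 descriptors, next_to_clean = 4, next_to_use = 10. */
	printf("unused descriptors: %d\n", DESC_UNUSED(4, 10, 1024));
	return 0;
}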
+ **/ +static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, + bool *autoneg) +{ + s32 status = 0; + s32 autoc_reg; + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + if (hw->mac.link_settings_loaded) { + autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; + autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; + autoc_reg |= hw->mac.link_attach_type; + autoc_reg |= hw->mac.link_mode_select; + } + + switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc_reg & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc_reg & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + break; + } + + return status; +} + +/** + * ixgbe_get_copper_link_settings_82598 - Determines default link settings + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the default link settings by reading the AUTOC register. + **/ +static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, + u32 *speed, bool *autoneg) +{ + s32 status = IXGBE_ERR_LINK_SETUP; + u16 speed_ability; + + *speed = 0; + *autoneg = true; + + status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); + + if (status == 0) { + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + } + + return status; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598EB_CX4: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82598AT_DUAL_PORT: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * ixgbe_setup_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
+ **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + if (hw->mac.link_settings_loaded) { + autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; + autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; + autoc_reg |= hw->mac.link_attach_type; + autoc_reg |= hw->mac.link_mode_select; + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + msleep(50); + } + + /* Restart link */ + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (hw->phy.autoneg_wait_to_complete) { + if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN || + hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, + "Autonegotiation did not complete.\n"); + } + } + } + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.type = hw->fc.original_type; + ixgbe_setup_fc(hw, 0); + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up) +{ + u32 links_reg; + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + return 0; +} + +/** + * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if auto-negotiation enabled + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, + u32 speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + + /* If speed is 10G, then check for CX4 or XAUI. 
*/ + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && + (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; + else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; + else if (autoneg) { + /* BX mode - Autonegotiate 1G */ + if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; + else /* KX/KX4 mode */ + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN; + } else { + status = IXGBE_ERR_LINK_SETUP; + } + + if (status == 0) { + hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete; + + hw->mac.link_settings_loaded = true; + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + hw->phy.ops.setup(hw); + } + + return status; +} + + +/** + * ixgbe_setup_copper_link_82598 - Setup copper link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. Restart + * phy and wait for autonegotiate to finish. Then synchronize the + * MAC and PHY. + **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) +{ + s32 status; + u32 speed = 0; + bool link_up = false; + + /* Set up MAC */ + hw->phy.ops.setup(hw); + + /* Restart autonegotiation on PHY */ + status = hw->phy.ops.setup(hw); + + /* Synchronize MAC to PHY speed */ + if (status == 0) + status = hw->phy.ops.check(hw, &speed, &link_up); + + return status; +} + +/** + * ixgbe_check_copper_link_82598 - Syncs MAC & PHY link settings + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true if link is up, false otherwise + * + * Reads the mac link, phy link, and synchronizes the MAC to PHY. + **/ +static s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up) +{ + s32 status; + u32 phy_speed = 0; + bool phy_link = false; + + /* This is the speed and link the MAC is set at */ + hw->phy.ops.check(hw, speed, link_up); + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + status = hw->phy.ops.check(hw, &phy_speed, &phy_link); + + if ((status == 0) && (phy_link)) { + /* + * Check current link status of the MACs link's register + * matches that of the speed in the PHY register + */ + if (*speed != phy_speed) { + /* + * The copper PHY requires 82598 attach type to be XAUI + * for 10G and BX for 1G + */ + hw->mac.link_attach_type = + (IXGBE_AUTOC_10G_XAUI | IXGBE_AUTOC_1G_BX); + + /* Synchronize the MAC speed to the PHY speed */ + status = hw->phy.ops.setup_speed(hw, phy_speed, false, + false); + if (status == 0) + hw->phy.ops.check(hw, speed, link_up); + else + status = IXGBE_ERR_LINK_SETUP; + } + } else { + *link_up = phy_link; + } + + return status; +} + +/** + * ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
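None of these setup/check routines is called by name from the core driver; each board variant publishes mac_ops and phy_ops tables (see phy_ops_82598EB, phy_ops_82598AT and phy_ops_82598AF near the end of this file), and callers reach them through the pointers cached in struct ixgbe_hw. A hedged usage sketch of that dispatch, with the speed and wait arguments chosen only as an example:

/* Dispatch through the per-board ops tables wired up below. */
s32 status;

status = hw->phy.ops.setup(hw);
if (status == 0)
	status = hw->phy.ops.setup_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
					 true, true);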
+ **/ +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status; + bool link_up = 0; + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_speed(hw, speed, autoneg, + autoneg_wait_to_complete); + + /* Synchronize MAC to PHY speed */ + if (status == 0) + status = hw->phy.ops.check(hw, &speed, &link_up); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by reseting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + ixgbe_stop_adapter(hw); + + /* + * Power up the Atlas TX lanes if they are currently powered down. + * Atlas TX lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. + */ + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable TX Atlas so packets can be transmitted again */ + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); + + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); + + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); + + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); + } + + /* Reset PHY */ + ixgbe_reset_phy(hw); + + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests before reset + */ + if (ixgbe_disable_pcie_master(hw) != 0) { + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + } + + /* + * Issue global reset to the MAC. This needs to be a SW reset. + * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * AUTOC register which stores link settings gets cleared + * and reloaded from EEPROM after reset. We need to restore + * our stored value from init in case SW changed the attach + * type or speed. If this is the first time and link settings + * have not been stored, store default settings from AUTOC. 
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.link_settings_loaded) { + autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE); + autoc &= ~(IXGBE_AUTOC_LMS_MASK); + autoc |= hw->mac.link_attach_type; + autoc |= hw->mac.link_mode_select; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } else { + hw->mac.link_attach_type = + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); + hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); + hw->mac.link_settings_loaded = true; + } + + /* Store the permanent mac address */ + ixgbe_get_mac_addr(hw, hw->mac.perm_addr); + + return status; +} + +static struct ixgbe_mac_operations mac_ops_82598 = { + .reset = &ixgbe_reset_hw_82598, + .get_media_type = &ixgbe_get_media_type_82598, +}; + +static struct ixgbe_phy_operations phy_ops_82598EB = { + .setup = &ixgbe_setup_copper_link_82598, + .check = &ixgbe_check_copper_link_82598, + .setup_speed = &ixgbe_setup_copper_link_speed_82598, + .get_settings = &ixgbe_get_copper_link_settings_82598, +}; + +struct ixgbe_info ixgbe_82598EB_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .phy_ops = &phy_ops_82598EB, +}; + +static struct ixgbe_phy_operations phy_ops_82598AT = { + .setup = &ixgbe_setup_tnx_phy_link, + .check = &ixgbe_check_tnx_phy_link, + .setup_speed = &ixgbe_setup_tnx_phy_link_speed, + .get_settings = &ixgbe_get_copper_link_settings_82598, +}; + +struct ixgbe_info ixgbe_82598AT_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .phy_ops = &phy_ops_82598AT, +}; + +static struct ixgbe_phy_operations phy_ops_82598AF = { + .setup = &ixgbe_setup_mac_link_82598, + .check = &ixgbe_check_mac_link_82598, + .setup_speed = &ixgbe_setup_mac_link_speed_82598, + .get_settings = &ixgbe_get_link_settings_82598, +}; + +struct ixgbe_info ixgbe_82598AF_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .phy_ops = &phy_ops_82598AF, +}; + diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c new file mode 100644 index 000000000000..512e3b22ed08 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -0,0 +1,1175 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); + +static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); + +static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); + +/** + * ixgbe_start_hw - Prepare hardware for TX/RX + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + u32 ctrl_ext; + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* Identify the PHY */ + ixgbe_identify_phy(hw); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + ixgbe_init_rx_addrs(hw); + + /* Clear the VLAN filter table */ + ixgbe_clear_vfta(hw); + + /* Set up link */ + hw->phy.ops.setup(hw); + + /* Clear statistics registers */ + ixgbe_clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return 0; +} + +/** + * ixgbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by reseting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + /* Reset the hardware */ + hw->mac.ops.reset(hw); + + /* Start the HW */ + ixgbe_start_hw(hw); + + return 0; +} + +/** + * ixgbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
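Because these statistics registers clear on read, reading them in the function below simply discards whatever accumulated before initialization; at run time the driver has to read each register exactly once per update cycle and fold the value into 64-bit software counters (the ixgbe_update_stats() declared in ixgbe.h). A hedged sketch of that accumulation, assuming the ixgbe_hw_stats field names mirror the register names; it is not the driver's actual update routine:

/* Sketch: fold clear-on-read hardware counters into software totals. */
static void foo_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	adapter->stats.rlec    += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.gprc    += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* ...one read-and-add per counter of interest... */

	/* Derived netdev statistics (composition here is illustrative). */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
				       adapter->stats.rlec;
}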
+ **/ +static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + u16 i = 0; + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + + return 0; +} + +/** + * ixgbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return 0; +} + +s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) +{ + s32 ret_val; + u16 data; + + ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num = (u32)(data << 16); + + ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num |= data; + + return 0; +} + +/** + * ixgbe_stop_adapter - Generic stop TX/RX units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. 
The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + u32 number_of_queues; + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + reg_val &= ~(IXGBE_RXCTRL_RXEN); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); + msleep(2); + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + number_of_queues = hw->mac.num_tx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + if (reg_val & IXGBE_TXDCTL_ENABLE) { + reg_val &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); + } + } + + return 0; +} + +/** + * ixgbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + + return 0; +} + +/** + * ixgbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + + return 0; +} + + +/** + * ixgbe_init_eeprom - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); + } + + return 0; +} + +/** + * ixgbe_read_eeprom - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ **/ +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + u32 eerd; + s32 status; + + eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + + IXGBE_EEPROM_READ_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eeprom_eerd_done(hw); + + if (status == 0) + *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_READ_REG_DATA); + else + hw_dbg(hw, "Eeprom read timed out\n"); + + return status; +} + +/** + * ixgbe_poll_eeprom_eerd_done - Poll EERD status + * @hw: pointer to hardware structure + * + * Polls the status bit (bit 1) of the EERD to determine when the read is done. + **/ +static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + if (reg & IXGBE_EEPROM_READ_REG_DONE) { + status = 0; + break; + } + udelay(5); + } + return status; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout; + u32 i; + u32 swsm; + + /* Set timeout value based on size of EEPROM */ + timeout = hw->eeprom.word_size + 1; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = 0; + break; + } + msleep(1); + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + udelay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " + "not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } + + return status; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
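Jumping ahead slightly to the checksum helpers a little further down: ixgbe_calc_eeprom_checksum() sums the covered words and returns IXGBE_EEPROM_SUM minus that sum, so a correctly programmed image has the property that the covered words plus the stored checksum word add up (mod 2^16) to the signature constant, which is what ixgbe_validate_eeprom_checksum() effectively verifies. A standalone illustration of that property with a toy eight-word image and an illustrative signature value (the real constant lives in ixgbe_type.h and the real coverage rules are those in the function below):

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_EEPROM_SUM 0xBABA	/* illustrative signature value */

int main(void)
{
	uint16_t words[8] = { 0x1234, 0xCAFE, 0x0042, 0xFFFF,
			      0x0001, 0xBEEF, 0x8000, 0x0000 /* checksum slot */ };
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 7; i++)
		sum += words[i];

	/* Store the complement so the whole image sums to the signature. */
	words[7] = (uint16_t)(EXAMPLE_EEPROM_SUM - sum);

	for (sum = 0, i = 0; i < 8; i++)
		sum += words[i];

	printf("stored checksum 0x%04X, total 0x%04X (valid if 0x%04X)\n",
	       words[7], sum, EXAMPLE_EEPROM_SUM);
	return 0;
}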
+ **/ +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); +} + +/** + * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (ixgbe_read_eeprom(hw, i, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + ixgbe_read_eeprom(hw, i, &pointer); + + /* Make sure the pointer seems valid */ + if (pointer != 0xFFFF && pointer != 0) { + ixgbe_read_eeprom(hw, pointer, &length); + + if (length != 0xFFFF && length != 0) { + for (j = pointer+1; j <= pointer+length; j++) { + ixgbe_read_eeprom(hw, j, &word); + checksum += word; + } + } + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_eeprom(hw, 0, &checksum); + + if (status == 0) { + checksum = ixgbe_calc_eeprom_checksum(hw); + + ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + + /* + * Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + } else { + hw_dbg(hw, "EEPROM read failed\n"); + } + + return status; +} + +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + else if (IXGBE_IS_BROADCAST(mac_addr)) + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) + status = IXGBE_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * ixgbe_set_rar - Set RX address register + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @index: Receive address register to write + * @vind: Vind to set RAR to + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. 
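A worked example of the byte-order conversion performed by ixgbe_set_rar() below, using the illustrative address 00:A0:C9:12:34:56:

    /* addr[]   = { 0x00, 0xA0, 0xC9, 0x12, 0x34, 0x56 }                  */
    /* rar_low  = 0x00 | 0xA0 << 8 | 0xC9 << 16 | 0x12 << 24 = 0x12C9A000 */
    /* rar_high = 0x34 | 0x56 << 8                           = 0x00005634 */
    /*            (plus the VIND field and IXGBE_RAH_AV as requested)     */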
+ **/ +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, + u32 enable_addr) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order from + * network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | + ((u32)addr[5] << 8) | + ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return 0; +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive addresss registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rx_addrs; + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + ixgbe_get_mac_addr(hw, hw->mac.addr); + + hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. */ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + } + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-15]\n"); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mc_addr_in_rar_count = 0; + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + hw_dbg(hw, " Clearing MTA\n"); + for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + return 0; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initalization + * to mc_filter_type. 
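A worked example of the vector calculation below and of the register/bit split done later in ixgbe_set_mta(); the address bytes are illustrative:

    /* mc_filter_type == 0, mc_addr[4] = 0x1B, mc_addr[5] = 0x2C        */
    /* vector     = (0x1B >> 4) | (0x2C << 4) = 0x001 | 0x2C0 = 0x2C1   */
    /* vector_reg = (0x2C1 >> 5) & 0x7F = 22      -> MTA[22]            */
    /* vector_bit =  0x2C1 & 0x1F      = 1        -> bit 1 of MTA[22]   */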
+ **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + hw_dbg(hw, " bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); +} + +/** + * ixgbe_add_mc_addr - Adds a multicast address. + * @hw: pointer to hardware structure + * @mc_addr: new multicast address + * + * Adds it to unused receive address register or to the multicast table. + **/ +static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 rar_entries = hw->mac.num_rx_addrs; + + hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + mc_addr[0], mc_addr[1], mc_addr[2], + mc_addr[3], mc_addr[4], mc_addr[5]); + + /* + * Place this multicast address in the RAR if there is room, + * else put it in the MTA + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, + mc_addr, 0, IXGBE_RAH_AV); + hw_dbg(hw, "Added a multicast address to RAR[%d]\n", + hw->addr_ctrl.rar_used_count); + hw->addr_ctrl.rar_used_count++; + hw->addr_ctrl.mc_addr_in_rar_count++; + } else { + ixgbe_set_mta(hw, mc_addr); + } + + hw_dbg(hw, "ixgbe_add_mc_addr Complete\n"); +} + +/** + * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @pad: number of bytes between addresses in the list + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unsed receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. 
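A minimal caller sketch (not part of the patch) for the list-update routine defined just below; it assumes a flat array of 6-byte addresses with no padding between entries:

    u8 mc_list[2][6] = {
        { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 },   /* 224.0.0.1, all hosts   */
        { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x02 },   /* 224.0.0.2, all routers */
    };
    ixgbe_update_mc_addr_list(hw, &mc_list[0][0], 2, 0 /* pad */);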
+ **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 pad) +{ + u32 i; + u32 rar_entries = hw->mac.num_rx_addrs; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count; + hw->addr_ctrl.mc_addr_in_rar_count = 0; + hw->addr_ctrl.mta_in_use = 0; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-15]\n"); + for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw_dbg(hw, " Clearing MTA\n"); + for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + /* Add the new addresses */ + for (i = 0; i < mc_addr_count; i++) { + hw_dbg(hw, " Adding the multicast addresses:\n"); + ixgbe_add_mc_addr(hw, mc_addr_list + + (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad))); + } + + /* Enable mta */ + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); + return 0; +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return 0; +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + u32 VftaIndex; + u32 BitOffset; + u32 VftaReg; + u32 VftaByte; + + /* Determine 32-bit word position in array */ + VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex)); + VftaReg &= (~(0x0F << BitOffset)); + VftaReg |= (vind << BitOffset); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg); + + /* Determine the location of the bit for this VLAN id */ + BitOffset = vlan & 0x1F; /* lower five bits */ + + VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex)); + if (vlan_on) + /* Turn on this VLAN id */ + VftaReg |= (1 << BitOffset); + else + /* Turn off this VLAN id */ + VftaReg &= ~(1 << BitOffset); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg); + + return 0; +} + +/** + * ixgbe_setup_fc - Configure flow control settings + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Configures the flow control settings based on SW configuration. + * This function is used for 802.3x flow control configuration only. 
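A hedged usage sketch for the routine defined just below; the threshold and pause-time values are placeholders, not recommendations from this patch:

    hw->fc.original_type = ixgbe_fc_full;   /* symmetric 802.3x pause    */
    hw->fc.high_water    = 0x20000;         /* placeholder water marks   */
    hw->fc.low_water     = 0x10000;
    hw->fc.pause_time    = 0x680;           /* placeholder pause quanta  */
    hw->fc.send_xon      = true;
    ixgbe_setup_fc(hw, 0);                  /* packet buffer 0           */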
+ **/ +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + u32 frctl_reg; + u32 rmcs_reg; + + if (packetbuf_num < 0 || packetbuf_num > 7) + hw_dbg(hw, "Invalid packet buffer number [%d], expected range" + "is 0-7\n", packetbuf_num); + + frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.type = hw->fc.original_type; + + /* + * The possible values of the "flow_control" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames but not + * send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do not + * support receiving pause frames) + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.type) { + case ixgbe_fc_none: + break; + case ixgbe_fc_rx_pause: + /* + * RX Flow control is enabled, + * and TX Flow control is disabled. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * TX Flow control is enabled, and RX Flow control is disabled, + * by a software over-ride. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* + * Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + /* We should never get here. The value should be 0-3. */ + hw_dbg(hw, "Flow control param set incorrectly\n"); + break; + } + + /* Enable 802.3x based flow control settings. */ + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + if (hw->fc.type & ixgbe_fc_tx_pause) { + if (hw->fc.send_xon) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + (hw->fc.low_water | IXGBE_FCRTL_XONE)); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + hw->fc.low_water); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), + (hw->fc.high_water)|IXGBE_FCRTH_FCEN); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + + return 0; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. 
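An illustrative caller for the routine defined just below, assuming a reset path (not shown in this hunk) that must quiesce bus-master traffic before resetting the device:

    if (ixgbe_disable_pcie_master(hw) != 0)
        hw_dbg(hw, "PCI-E master disable timed out, "
               "pending requests may remain\n");
    /* ... proceed with the device reset ... */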
+ **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + u32 ctrl; + s32 i; + s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + ctrl |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { + status = 0; + break; + } + udelay(100); + } + + return status; +} + + +/** + * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify wich semaphore to acquire + * + * Aquires the SWFW semaphore throught the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + u32 fwmask = mask << 5; + s32 timeout = 200; + + while (timeout) { + if (ixgbe_get_eeprom_semaphore(hw)) + return -IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) or other software + * thread currently using resource (swmask) + */ + ixgbe_release_eeprom_semaphore(hw); + msleep(5); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); + return -IXGBE_ERR_SWFW_SYNC; + } + + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); + return 0; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify wich semaphore to release + * + * Releases the SWFW semaphore throught the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * ixgbe_read_analog_reg8- Reads 8 bit 82598 Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs write operation to analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8- Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h new file mode 100644 index 000000000000..de6ddd5d04ad --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -0,0 +1,86 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); + +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom(struct ixgbe_hw *hw); +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); + +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, + u32 enable_addr); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 pad); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); +s32 ixgbe_validate_mac_addr(u8 *mac_addr); + +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num); + +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); + +#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) + +#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\ + writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\ + readl((a)->hw_addr + (reg) + ((offset) << 2))) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#ifdef DEBUG +#define hw_dbg(hw, format, arg...) \ +printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg); +#else +static inline int __attribute__ ((format (printf, 2, 3))) +hw_dbg(struct ixgbe_hw *hw, const char *format, ...) +{ + return 0; +} +#endif + +#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c new file mode 100644 index 000000000000..a4e576a0c543 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -0,0 +1,948 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbe */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/vmalloc.h> +#include <linux/uaccess.h> + +#include "ixgbe.h" + + +#define IXGBE_ALL_RAR_ENTRIES 16 + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ + offsetof(struct ixgbe_adapter, m) +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, + {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, + {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, + {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, + {"lsc_int", IXGBE_STAT(lsc_int)}, + {"tx_busy", IXGBE_STAT(tx_busy)}, + {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, + {"rx_errors", IXGBE_STAT(net_stats.rx_errors)}, + {"tx_errors", IXGBE_STAT(net_stats.tx_errors)}, + {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)}, + {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)}, + {"multicast", IXGBE_STAT(net_stats.multicast)}, + {"broadcast", IXGBE_STAT(stats.bprc)}, + {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, + {"collisions", IXGBE_STAT(net_stats.collisions)}, + {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)}, + {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, + {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, + {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, + {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, + {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, + {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)}, + {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)}, + {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, + {"tx_restart_queue", IXGBE_STAT(restart_queue)}, + {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, + {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, + {"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)}, + {"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)}, + {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, + {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, + {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, + {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, + {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)}, + {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, + {"tx_csum_offload_ctxt", 
IXGBE_STAT(hw_csum_tx_good)}, + {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, + {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, +}; + +#define IXGBE_QUEUE_STATS_LEN \ + ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ + ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN \ + sizeof(ixgbe_gstrings_stats) / sizeof(struct ixgbe_stats) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) + +static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + ecmd->speed = SPEED_10000; + ecmd->duplex = DUPLEX_FULL; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + return 0; +} + +static int ixgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (ecmd->autoneg == AUTONEG_ENABLE || + ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) + return -EINVAL; + + if (netif_running(adapter->netdev)) { + ixgbe_down(adapter); + ixgbe_reset(adapter); + ixgbe_up(adapter); + } else { + ixgbe_reset(adapter); + } + + return 0; +} + +static void ixgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + pause->autoneg = AUTONEG_DISABLE; + + if (hw->fc.type == ixgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.type == ixgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.type == ixgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ixgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + if (pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgbe_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgbe_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgbe_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgbe_fc_none; + + hw->fc.original_type = hw->fc.type; + + if (netif_running(adapter->netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } else { + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_rx_csum(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); +} + +static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (data) + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } else { + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) +{ + if 
(data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int ixgbe_set_tso(struct net_device *netdev, u32 data) +{ + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + return 0; +} + +static u32 ixgbe_get_msglevel(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ixgbe_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 1128 + return IXGBE_REGS_LEN * sizeof(u32); +} + +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + + /* NVM Register */ + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + + /* Interrupt */ + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); + regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); + regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + + /* Flow Control */ + regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); + regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); + regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); + regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); + regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 8; i++) + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); + for (i = 0; i < 8; i++) + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + + /* Receive DMA */ + for (i = 0; i < 64; i++) + regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + for (i = 0; i < 64; i++) + regs_buff[117 + i] = IXGBE_READ_REG(hw, 
IXGBE_RDBAH(i)); + for (i = 0; i < 64; i++) + regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + for (i = 0; i < 64; i++) + regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + for (i = 0; i < 64; i++) + regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + for (i = 0; i < 64; i++) + regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + for (i = 0; i < 8; i++) + regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + + /* Receive */ + regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + for (i = 0; i < 16; i++) + regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); + regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + for (i = 0; i < 8; i++) + regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + + /* Transmit */ + for (i = 0; i < 32; i++) + regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + for (i = 0; i < 32; i++) + regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + for (i = 0; i < 32; i++) + regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + for (i = 0; i < 32; i++) + regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + for (i = 0; i < 32; i++) + regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + for (i = 0; i < 32; i++) + regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + for (i = 0; i < 16; i++) + regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + for (i = 0; i < 8; i++) + regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + + /* Wake Up */ + regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); + regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); + + /* DCE */ + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; 
i++) + regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + for (i = 0; i < 8; i++) + regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); + + /* Statistics */ + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); + regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); + regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); + regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); + for (i = 0; i < 8; i++) + regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); + regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); + regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); + regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); + regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); + regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); + regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); + regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); + for (i = 0; i < 8; i++) + regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); + for (i = 0; i < 8; i++) + regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); + for (i = 0; i < 8; i++) + regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); + for (i = 0; i < 8; i++) + regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); + regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); + regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); + regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); + regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); + regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); + regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); + regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); + regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); + regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); + regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + for (i = 0; i < 8; i++) + regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); + regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); + regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); + regs_buff[956] = IXGBE_GET_STAT(adapter, roc); + regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); + regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); + regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); + regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); + regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); + regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); + regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); + regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); + regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); + regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); + regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); + regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); + regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); + regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); + regs_buff[973] = IXGBE_GET_STAT(adapter, xec); + for (i = 0; i < 16; i++) + regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); + for (i = 0; i < 16; i++) + regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); + for (i = 0; i < 16; i++) + regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); + for (i = 0; i < 16; i++) + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + + /* MAC */ + regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] 
= IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); + regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); + regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + + /* Diagnostic */ + regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); + regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); + regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); + regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2); + regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3); + regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); + regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); + regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); + regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); + regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); + regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2); + regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3); + regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); + regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); + regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); + regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); + regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); + regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); + regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); + regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); + regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 8; i++) + regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); + regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); + 
regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); +} + +static int ixgbe_get_eeprom_len(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ixgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + for (i = 0; i < eeprom_len; i++) { + if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, + &eeprom_buff[i]))) + break; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static void ixgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ixgbe_driver_name, 32); + strncpy(drvinfo->version, ixgbe_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = IXGBE_STATS_LEN; + drvinfo->regdump_len = ixgbe_get_regs_len(netdev); +} + +static void ixgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring = adapter->tx_ring; + struct ixgbe_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IXGBE_MAX_RXD; + ring->tx_max_pending = IXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ixgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_tx_buffer *old_buf; + struct ixgbe_rx_buffer *old_rx_buf; + void *old_desc; + int i, err; + u32 new_rx_count, new_tx_count, old_size; + dma_addr_t old_dma; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + if (netif_running(adapter->netdev)) + ixgbe_down(adapter); + + /* + * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the tx and rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring->count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + /* Save existing descriptor ring */ + old_buf = adapter->tx_ring[i].tx_buffer_info; + old_desc = adapter->tx_ring[i].desc; + old_size = adapter->tx_ring[i].size; + old_dma = adapter->tx_ring[i].dma; + /* Try to allocate a new one */ + adapter->tx_ring[i].tx_buffer_info = NULL; + adapter->tx_ring[i].desc = NULL; + adapter->tx_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(adapter, + &adapter->tx_ring[i]); + if (err) { + /* Restore the old one so at least + the adapter still works, even if + we failed the request */ + adapter->tx_ring[i].tx_buffer_info = old_buf; + adapter->tx_ring[i].desc = old_desc; + adapter->tx_ring[i].size = old_size; + adapter->tx_ring[i].dma = old_dma; + goto err_setup; + } + /* Free the old buffer manually */ + vfree(old_buf); + pci_free_consistent(adapter->pdev, old_size, + old_desc, old_dma); + } + } + + if (new_rx_count != adapter->rx_ring->count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + + old_rx_buf = adapter->rx_ring[i].rx_buffer_info; + old_desc = adapter->rx_ring[i].desc; + old_size = adapter->rx_ring[i].size; + old_dma = adapter->rx_ring[i].dma; + + adapter->rx_ring[i].rx_buffer_info = NULL; + adapter->rx_ring[i].desc = NULL; + adapter->rx_ring[i].dma = 0; + adapter->rx_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(adapter, + &adapter->rx_ring[i]); + if (err) { + adapter->rx_ring[i].rx_buffer_info = old_rx_buf; + adapter->rx_ring[i].desc = old_desc; + adapter->rx_ring[i].size = old_size; + adapter->rx_ring[i].dma = old_dma; + goto err_setup; + } + + vfree(old_rx_buf); + pci_free_consistent(adapter->pdev, old_size, old_desc, + old_dma); + } + } + + err = 0; +err_setup: + if (netif_running(adapter->netdev)) + ixgbe_up(adapter); + + return err; +} + +static int ixgbe_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IXGBE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ixgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u64 *queue_stat; + int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); + int j, k; + int i; + + ixgbe_update_stats(adapter); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + queue_stat = (u64 *)&adapter->tx_ring[j].stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + queue_stat = (u64 *)&adapter->rx_ring[j].stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } +} + +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } +/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + + +static void ixgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + return; +} + +static int ixgbe_nway_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_reset(adapter); + ixgbe_up(adapter); + } + + return 0; +} + +static int ixgbe_phys_id(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); + u32 i; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 400) { + ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); + msleep_interruptible(200); + ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); + msleep_interruptible(200); + } + + /* Restore LED settings */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg); + + return 0; +} + +static int ixgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_eitr == 0) + ec->rx_coalesce_usecs = 0; + else + ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr; + + if (adapter->tx_eitr == 0) + ec->tx_coalesce_usecs = 0; + else + ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr; + + ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; + return 0; +} + +static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 0) && + (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) + return -EINVAL; + if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 0) && + (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) + return -EINVAL; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs == 0) + adapter->rx_eitr = 0; + else + adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs); + + if (ec->tx_coalesce_usecs == 0) + adapter->tx_eitr = 0; + else + adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs); + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_ring[0].work_limit = + ec->tx_max_coalesced_frames_irq; + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } + + return 0; +} + + 
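A worked example of the interrupt-moderation conversion used by ixgbe_get_coalesce() and ixgbe_set_coalesce() above; the ethtool invocation is illustrative:

    /* ethtool -C ethX rx-usecs 125                                       */
    /* set:  adapter->rx_eitr  = 1000000 / 125  = 8000 interrupts/second  */
    /* get:  rx_coalesce_usecs = 1000000 / 8000 = 125 usecs               */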
+static struct ethtool_ops ixgbe_ethtool_ops = { + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_rx_csum = ixgbe_get_rx_csum, + .set_rx_csum = ixgbe_set_rx_csum, + .get_tx_csum = ixgbe_get_tx_csum, + .set_tx_csum = ixgbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, + .get_tso = ethtool_op_get_tso, + .set_tso = ixgbe_set_tso, + .get_strings = ixgbe_get_strings, + .phys_id = ixgbe_phys_id, + .get_sset_count = ixgbe_get_sset_count, + .get_ethtool_stats = ixgbe_get_ethtool_stats, + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, +}; + +void ixgbe_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); +} diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c new file mode 100644 index 000000000000..b75f1c6efc42 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -0,0 +1,2872 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/string.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> + +#include "ixgbe.h" +#include "ixgbe_common.h" + +char ixgbe_driver_name[] = "ixgbe"; +static char ixgbe_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Network Driver"; + +#define DRV_VERSION "1.1.18" +char ixgbe_driver_version[] = DRV_VERSION; +static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_82598AF] = &ixgbe_82598AF_info, + [board_82598EB] = &ixgbe_82598EB_info, + [board_82598AT] = &ixgbe_82598AT_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), + board_82598AF }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), + board_82598AF }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT), + board_82598AT }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), + board_82598EB }, + + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + + +#ifdef DEBUG +/** + * ixgbe_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + struct net_device *netdev = adapter->netdev; + return netdev->name; +} +#endif + +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, + u8 msix_vector) +{ + u32 ivar, index; + + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = (int_alloc_entry >> 2) & 0x1F; + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); + ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); +} + +static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, + struct ixgbe_tx_buffer + *tx_buffer_info) +{ + if (tx_buffer_info->dma) { + pci_unmap_page(adapter->pdev, + tx_buffer_info->dma, + tx_buffer_info->length, PCI_DMA_TODEVICE); + tx_buffer_info->dma = 0; + } + if (tx_buffer_info->skb) { + dev_kfree_skb_any(tx_buffer_info->skb); + tx_buffer_info->skb = NULL; + } + /* tx_buffer_info must be completely set up in the transmit path */ +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + unsigned int eop, + union ixgbe_adv_tx_desc *eop_desc) +{ + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = false; + if (tx_ring->tx_buffer_info[eop].dma && + time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + 
HZ) && + !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { + /* detected Tx unit hang */ + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->tx_buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->wb.status); + return true; + } + + return false; +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct net_device *netdev = adapter->netdev; + union ixgbe_adv_tx_desc *tx_desc, *eop_desc; + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int i, eop; + bool cleaned = false; + int count = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) { + for (cleaned = false; !cleaned;) { + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + cleaned = (i == eop); + + tx_ring->stats.bytes += tx_buffer_info->length; + ixgbe_unmap_and_free_tx_resource(adapter, + tx_buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_ring->stats.packets++; + + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + + /* weight of a sort for tx, avoid endless transmit cleanup */ + if (count++ >= tx_ring->work_limit) + break; + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + spin_lock(&tx_ring->tx_lock); + + if (cleaned && netif_carrier_ok(netdev) && + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) && + !test_bit(__IXGBE_DOWN, &adapter->state)) + netif_wake_queue(netdev); + + spin_unlock(&tx_ring->tx_lock); + + if (adapter->detect_tx_hung) + if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) + netif_stop_queue(netdev); + + if (count >= tx_ring->work_limit) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); + + return cleaned; +} + +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @adapter: board private structure + * @skb: packet to send up + * @is_vlan: packet has a VLAN tag + * @tag: VLAN tag from descriptor + **/ +static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, + struct sk_buff *skb, bool is_vlan, + u16 tag) +{ + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { + if (adapter->vlgrp && is_vlan) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); + else + netif_receive_skb(skb); + } else { + + if (adapter->vlgrp && is_vlan) + vlan_hwaccel_rx(skb, adapter->vlgrp, tag); + else + netif_rx(skb); + } +} + +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, + u32 status_err, + struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set */ + if ((status_err & IXGBE_RXD_STAT_IXSM) || + !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + return; + /* TCP/UDP checksum error bit is set */ + if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_rx_error++; + return; + } + /* It must be a TCP or UDP packet with a valid checksum 
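For orientation, the decision ixgbe_rx_checksum() makes reduces to three cases; the helper below is only an illustrative restatement (rx_csum_ok() is not a driver function) using the same status bits as the surrounding code.

static int rx_csum_ok(u32 status_err, bool rx_csum_enabled)
{
        /* offload disabled, or hardware asked us to ignore its result */
        if ((status_err & IXGBE_RXD_STAT_IXSM) || !rx_csum_enabled)
                return 0;
        /* hardware reported a TCP/UDP or IP checksum error: leave the
         * skb as CHECKSUM_NONE so the stack verifies it in software */
        if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE))
                return 0;
        /* good L4 checksum: the skb may be marked CHECKSUM_UNNECESSARY */
        return !!(status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS));
}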
*/ + if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_rx_good++; +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + + while (cleaned_count--) { + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + + if (!rx_buffer_info->page && + (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { + rx_buffer_info->page = alloc_page(GFP_ATOMIC); + if (!rx_buffer_info->page) { + adapter->alloc_rx_page_failed++; + goto no_buffers; + } + rx_buffer_info->page_dma = + pci_map_page(pdev, rx_buffer_info->page, + 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + } + + if (!rx_buffer_info->skb) { + skb = netdev_alloc_skb(netdev, bufsz); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + rx_buffer_info->skb = skb; + rx_buffer_info->dma = pci_map_single(pdev, skb->data, + bufsz, + PCI_DMA_FROMDEVICE); + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_desc->read.pkt_addr = + cpu_to_le64(rx_buffer_info->page_dma); + rx_desc->read.hdr_addr = + cpu_to_le64(rx_buffer_info->dma); + } else { + rx_desc->read.pkt_addr = + cpu_to_le64(rx_buffer_info->dma); + } + + i++; + if (i == rx_ring->count) + i = 0; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + } +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
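The tail bump described by the comment above is the usual descriptor-ring publish pattern. A condensed, hypothetical helper equivalent to what the code does next; note that the tail register is written with the index of the last initialized descriptor, one slot behind next_to_use.

static void rx_ring_publish(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring, unsigned int i)
{
        rx_ring->next_to_use = i;                       /* where filling resumes   */
        i = (i == 0) ? rx_ring->count - 1 : i - 1;      /* last descriptor written */
        wmb();          /* descriptor stores must be visible before the tail write */
        writel(i, adapter->hw.hw_addr + rx_ring->tail);
}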
+ */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union ixgbe_adv_rx_desc *rx_desc, *next_rxd; + struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; + struct sk_buff *skb; + unsigned int i; + u32 upper_len, len, staterr; + u16 hdr_info, vlan_tag; + bool is_vlan, cleaned = false; + int cleaned_count = 0; + + i = rx_ring->next_to_clean; + upper_len = 0; + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + is_vlan = (staterr & IXGBE_RXD_STAT_VP); + vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); + + while (staterr & IXGBE_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + hdr_info = + le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); + len = + ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT); + if (hdr_info & IXGBE_RXDADV_SPH) + adapter->rx_hdr_split++; + if (len > IXGBE_RX_HDR_SIZE) + len = IXGBE_RX_HDR_SIZE; + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } else + len = le16_to_cpu(rx_desc->wb.upper.length); + + cleaned = true; + skb = rx_buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + rx_buffer_info->skb = NULL; + + if (len && !skb_shinfo(skb)->nr_frags) { + pci_unmap_single(pdev, rx_buffer_info->dma, + adapter->rx_buf_len + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + skb_put(skb, len); + } + + if (upper_len) { + pci_unmap_page(pdev, rx_buffer_info->page_dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + rx_buffer_info->page_dma = 0; + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buffer_info->page, 0, upper_len); + rx_buffer_info->page = NULL; + + skb->len += upper_len; + skb->data_len += upper_len; + skb->truesize += upper_len; + } + + i++; + if (i == rx_ring->count) + i = 0; + next_buffer = &rx_ring->rx_buffer_info[i]; + + next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + + cleaned_count++; + if (staterr & IXGBE_RXD_STAT_EOP) { + rx_ring->stats.packets++; + rx_ring->stats.bytes += skb->len; + } else { + rx_buffer_info->skb = next_buffer->skb; + rx_buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + adapter->non_eop_descs++; + goto next_desc; + } + + if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + ixgbe_rx_checksum(adapter, staterr, skb); + skb->protocol = eth_type_trans(skb, netdev); + ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag); + netdev->last_rx = jiffies; + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + rx_buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + is_vlan = (staterr & IXGBE_RXD_STAT_VP); + vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); + } + + rx_ring->next_to_clean = i; + cleaned_count = IXGBE_DESC_UNUSED(rx_ring); + + if (cleaned_count) + ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + + return cleaned; +} + +#define IXGBE_MAX_INTR 10 +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + 
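When packet split is active, the length bookkeeping in ixgbe_clean_rx_irq() above comes from two places in the write-back descriptor. A small illustrative helper (not part of the driver) that extracts the same two lengths:

static void rx_split_lengths(u16 hdr_info, u16 wb_length,
                             unsigned int *hdr_len, unsigned int *payload_len)
{
        /* header length as reported by hardware, capped like the code above */
        *hdr_len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
        if (*hdr_len > IXGBE_RX_HDR_SIZE)
                *hdr_len = IXGBE_RX_HDR_SIZE;
        /* payload length lands in the page buffer attached as a fragment */
        *payload_len = wb_length;
}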
* + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ + int i, vector = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), + IXGBE_MSIX_VECTOR(vector)); + writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr), + adapter->hw.hw_addr + adapter->tx_ring[i].itr_register); + vector++; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), + IXGBE_MSIX_VECTOR(vector)); + writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr), + adapter->hw.hw_addr + adapter->rx_ring[i].itr_register); + vector++; + } + + vector = adapter->num_tx_queues + adapter->num_rx_queues; + ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MSIX_VECTOR(vector)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950); +} + +static irqreturn_t ixgbe_msix_lsc(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + + if (eicr & IXGBE_EICR_LSC) { + adapter->lsc_int++; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) +{ + struct ixgbe_ring *txr = data; + struct ixgbe_adapter *adapter = txr->adapter; + + ixgbe_clean_tx_irq(adapter, txr); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) +{ + struct ixgbe_ring *rxr = data; + struct ixgbe_adapter *adapter = rxr->adapter; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value); + netif_rx_schedule(adapter->netdev, &adapter->napi); + return IRQ_HANDLED; +} + +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) +{ + struct ixgbe_adapter *adapter = container_of(napi, + struct ixgbe_adapter, napi); + struct net_device *netdev = adapter->netdev; + int work_done = 0; + struct ixgbe_ring *rxr = adapter->rx_ring; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(netdev)) + goto quit_polling; + + ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((work_done < budget) || !netif_running(netdev)) { +quit_polling: + netif_rx_complete(netdev, napi); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, + rxr->eims_value); + } + + return work_done; +} + +/** + * ixgbe_setup_msix - Initialize MSI-X interrupts + * + * ixgbe_setup_msix allocates MSI-X vectors and requests + * interrutps from the kernel. 
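A worked example of the IVAR addressing that ixgbe_set_ivar() performs for ixgbe_configure_msix() above: each 32-bit IVAR register holds four 8-bit allocation entries, so cause entry n lives in IVAR(n / 4) at byte lane n % 4, and the stored value is the MSI-X vector number with IXGBE_IVAR_ALLOC_VAL set. The helper below only recomputes the slot and is not a driver function.

static void ivar_slot(u16 int_alloc_entry, u32 *reg_index, u32 *bit_shift)
{
        *reg_index = (int_alloc_entry >> 2) & 0x1F;     /* which IVAR register  */
        *bit_shift = 8 * (int_alloc_entry & 0x3);       /* which byte inside it */
}
/* e.g. entry 0 -> IVAR(0) bits 7:0, entry 6 -> IVAR(1) bits 23:16 */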
+ **/ +static int ixgbe_setup_msix(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i, int_vector = 0, err = 0; + int max_msix_count; + + /* +1 for the LSC interrupt */ + max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1; + adapter->msix_entries = kcalloc(max_msix_count, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < max_msix_count; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + max_msix_count); + if (err) + goto out; + + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i); + err = request_irq(adapter->msix_entries[int_vector].vector, + &ixgbe_msix_clean_tx, + 0, + adapter->tx_ring[i].name, + &(adapter->tx_ring[i])); + if (err) { + DPRINTK(PROBE, ERR, + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto release_irqs; + } + adapter->tx_ring[i].eims_value = + (1 << IXGBE_MSIX_VECTOR(int_vector)); + adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector); + int_vector++; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + sprintf(adapter->rx_ring[i].name, + "%s-rx%d", netdev->name, i); + else + memcpy(adapter->rx_ring[i].name, + netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[int_vector].vector, + &ixgbe_msix_clean_rx, 0, + adapter->rx_ring[i].name, + &(adapter->rx_ring[i])); + if (err) { + DPRINTK(PROBE, ERR, + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto release_irqs; + } + + adapter->rx_ring[i].eims_value = + (1 << IXGBE_MSIX_VECTOR(int_vector)); + adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector); + int_vector++; + } + + sprintf(adapter->lsc_name, "%s-lsc", netdev->name); + err = request_irq(adapter->msix_entries[int_vector].vector, + &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev); + if (err) { + DPRINTK(PROBE, ERR, + "request_irq for msix_lsc failed: %d\n", err); + goto release_irqs; + } + + /* FIXME: implement netif_napi_remove() instead */ + adapter->napi.poll = ixgbe_clean_rxonly; + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; + return 0; + +release_irqs: + int_vector--; + for (; int_vector >= adapter->num_tx_queues; int_vector--) + free_irq(adapter->msix_entries[int_vector].vector, + &(adapter->rx_ring[int_vector - + adapter->num_tx_queues])); + + for (; int_vector >= 0; int_vector--) + free_irq(adapter->msix_entries[int_vector].vector, + &(adapter->tx_ring[int_vector])); +out: + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + return err; +} + +/** + * ixgbe_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + + if (!eicr) + return IRQ_NONE; /* Not our interrupt */ + + if (eicr & IXGBE_EICR_LSC) { + adapter->lsc_int++; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); + } + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + /* Disable interrupts and register for poll. The flush of the + * posted write is intentionally left out. 
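The vector budget ixgbe_setup_msix() above asks for is one MSI-X vector per Tx queue, one per Rx queue, and a final one for link status change (LSC) interrupts, registered as "<ifname>-tx<i>", "<ifname>-rx<i>" and "<ifname>-lsc". Restated as a trivial helper for clarity; illustrative only.

static int msix_vectors_wanted(int num_tx_queues, int num_rx_queues)
{
        return num_tx_queues + num_rx_queues + 1;       /* +1 for the LSC vector */
}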
*/ + atomic_inc(&adapter->irq_sem); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + __netif_rx_schedule(netdev, &adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues) +{ + struct net_device *netdev = adapter->netdev; + int flags, err; + irqreturn_t(*handler) (int, void *) = &ixgbe_intr; + + flags = IRQF_SHARED; + + err = ixgbe_setup_msix(adapter); + if (!err) + goto request_done; + + /* + * if we can't do MSI-X, fall through and try MSI + * No need to reallocate memory since we're decreasing the number of + * queues. We just won't use the other ones, also it is freed correctly + * on ixgbe_remove. + */ + *num_rx_queues = 1; + + /* do MSI */ + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + flags &= ~IRQF_SHARED; + handler = &ixgbe_intr; + } + + err = request_irq(adapter->pdev->irq, handler, flags, + netdev->name, netdev); + if (err) + DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); + +request_done: + return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + free_irq(adapter->msix_entries[i].vector, + &(adapter->tx_ring[i])); + for (i = 0; i < adapter->num_rx_queues; i++) + free_irq(adapter->msix_entries[i + + adapter->num_tx_queues].vector, + &(adapter->rx_ring[i])); + i = adapter->num_rx_queues + adapter->num_tx_queues; + free_irq(adapter->msix_entries[i].vector, netdev); + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + return; + } + + free_irq(adapter->pdev->irq, netdev); + if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + } +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + IXGBE_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) +{ + if (atomic_dec_and_test(&adapter->irq_sem)) { + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, + (IXGBE_EIMS_ENABLE_MASK & + ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, + IXGBE_EIMS_ENABLE_MASK); + IXGBE_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) 
and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ + int i; + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->rx_eitr) + IXGBE_WRITE_REG(hw, IXGBE_EITR(0), + EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); + + /* for re-triggering the interrupt in non-NAPI mode */ + adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0)); + adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0)); + + ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i); +} + +/** + * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + u64 tdba; + struct ixgbe_hw *hw = &adapter->hw; + u32 i, tdlen; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) { + tdba = adapter->tx_ring[i].dma; + tdlen = adapter->tx_ring[i].count * + sizeof(union ixgbe_adv_tx_desc); + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen); + IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); + adapter->tx_ring[i].head = IXGBE_TDH(i); + adapter->tx_ring[i].tail = IXGBE_TDT(i); + } + + IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT); +} + +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 +/** + * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
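For reference, the per-ring programming in ixgbe_configure_tx() above amounts to this: the 64-bit descriptor base address is split across TDBAL/TDBAH, TDLEN is the ring size in bytes, and head and tail both start at zero, i.e. an empty ring. Condensed into a hypothetical helper:

static void tx_ring_program(struct ixgbe_hw *hw, int i, u64 tdba, u32 count)
{
        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), tdba & DMA_32BIT_MASK);     /* low 32 bits  */
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), tdba >> 32);                /* high 32 bits */
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
                        count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);   /* head == tail == 0 ...       */
        IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);   /* ... means the ring is empty */
}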
+ **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + u64 rdba; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + u32 rdlen, rxctrl, rxcsum; + u32 random[10]; + u32 reta, mrqc; + int i; + u32 fctrl, hlreg0; + u32 srrctl; + u32 pages; + + /* Decide whether to use packet split mode or not */ + if (netdev->mtu > ETH_DATA_LEN) + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; + } else { + if (netdev->mtu <= ETH_DATA_LEN) + adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + adapter->rx_buf_len = ALIGN(max_frame, 1024); + } + + fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); + + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (adapter->netdev->mtu <= ETH_DATA_LEN) + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + else + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0)); + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + srrctl |= ((IXGBE_RX_HDR_SIZE << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + } else { + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) + srrctl |= + IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= + adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); + + rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); + /* disable receives while setting up the descriptors */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rdba = adapter->rx_ring[i].dma; + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); + IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); + adapter->rx_ring[i].head = IXGBE_RDH(i); + adapter->rx_ring[i].tail = IXGBE_RDT(i); + } + + if (adapter->num_rx_queues > 1) { + /* Random 40bytes used as random key in RSS hash function */ + get_random_bytes(&random[0], 40); + + switch (adapter->num_rx_queues) { + case 8: + case 4: + /* Bits [3:0] in each byte refers the Rx queue no */ + reta = 0x00010203; + break; + case 2: + reta = 0x00010001; + break; + default: + reta = 0x00000000; + break; + } + + /* Fill out redirection table */ + for (i = 0; i < 32; i++) { + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta); + if (adapter->num_rx_queues > 4) { + i++; + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, + 0x04050607); + } + } + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]); + + mrqc = IXGBE_MRQC_RSSEN + /* Perform hash on these packet types */ + | 
IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV4_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Multiqueue and packet checksumming are mutually exclusive. */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + } else { + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { + /* Enable IPv4 payload checksum for UDP fragments + * Must be used in conjunction with packet-split. */ + rxcsum |= IXGBE_RXCSUM_IPPCSE; + } else { + /* don't need to clear IPPCSE as it defaults to 0 */ + } + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + } + /* Enable Receives */ + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +} + +static void ixgbe_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 ctrl; + + ixgbe_irq_disable(adapter); + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; + ctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); + } + + ixgbe_irq_enable(adapter); +} + +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* add VID to filter table */ + ixgbe_set_vfta(&adapter->hw, vid, 0, true); +} + +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + ixgbe_irq_enable(adapter); + + /* remove VID from filter table */ + ixgbe_set_vfta(&adapter->hw, vid, 0, false); +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ + ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); + + if (adapter->vlgrp) { + u16 vid; + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); + } + } +} + +/** + * ixgbe_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
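The filtering policy that ixgbe_set_multi() below writes into FCTRL can be summarized as a small mapping from interface flags to the unicast/multicast promiscuous bits (UPE/MPE). The helper is illustrative only, not driver code.

static u32 fctrl_filter_bits(unsigned int if_flags)
{
        if (if_flags & IFF_PROMISC)
                return IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;  /* accept everything       */
        if (if_flags & IFF_ALLMULTI)
                return IXGBE_FCTRL_MPE;                    /* all multicast only      */
        return 0;                                          /* rely on MAC/MTA filters */
}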
+ **/ +static void ixgbe_set_multi(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u8 *mta_list; + u32 fctrl; + int i; + + /* Check for Promiscuous and All Multicast modes */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + if (netdev->flags & IFF_PROMISC) { + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + } else if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + fctrl &= ~IXGBE_FCTRL_UPE; + } else { + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (netdev->mc_count) { + mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return; + + /* Shared function expects packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, + ETH_ALEN); + mc_ptr = mc_ptr->next; + } + + ixgbe_update_mc_addr_list(hw, mta_list, i, 0); + kfree(mta_list); + } else { + ixgbe_update_mc_addr_list(hw, NULL, 0, 0); + } + +} + +static void ixgbe_configure(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + ixgbe_set_multi(netdev); + + ixgbe_restore_vlan(adapter); + + ixgbe_configure_tx(adapter); + ixgbe_configure_rx(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], + (adapter->rx_ring[i].count - 1)); +} + +static int ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + u32 gpie = 0; + struct ixgbe_hw *hw = &adapter->hw; + u32 txdctl, rxdctl, mhadd; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED | + IXGBE_FLAG_MSI_ENABLED)) { + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | + IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); + } else { + /* MSI only */ + gpie = (IXGBE_GPIE_EIAME | + IXGBE_GPIE_PBA_SUPPORT); + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie); + gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + } + + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl); + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl); + } + /* enable all receives */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + ixgbe_configure_msix(adapter); + else + ixgbe_configure_msi_and_legacy(adapter); + + clear_bit(__IXGBE_DOWN, &adapter->state); + napi_enable(&adapter->napi); + ixgbe_irq_enable(adapter); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + mod_timer(&adapter->watchdog_timer, jiffies); + return 0; +} + +int ixgbe_up(struct ixgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some 
things */ + ixgbe_configure(adapter); + + return ixgbe_up_complete(adapter); +} + +void ixgbe_reset(struct ixgbe_adapter *adapter) +{ + if (ixgbe_init_hw(&adapter->hw)) + DPRINTK(PROBE, ERR, "Hardware Error\n"); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 err, num_rx_queues = adapter->num_rx_queues; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ + "suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = ixgbe_request_irq(adapter, &num_rx_queues); + if (err) + return err; + } + + ixgbe_reset(adapter); + + if (netif_running(netdev)) + ixgbe_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer_info; + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + if (rx_buffer_info->dma) { + pci_unmap_single(pdev, rx_buffer_info->dma, + adapter->rx_buf_len, + PCI_DMA_FROMDEVICE); + rx_buffer_info->dma = 0; + } + if (rx_buffer_info->skb) { + dev_kfree_skb(rx_buffer_info->skb); + rx_buffer_info->skb = NULL; + } + if (!rx_buffer_info->page) + continue; + pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + rx_buffer_info->page_dma = 0; + + put_page(rx_buffer_info->page); + rx_buffer_info->page = NULL; + } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); + } + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; 
i++) + ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rxctrl; + + /* signal that we are down to the interrupt handler */ + set_bit(__IXGBE_DOWN, &adapter->state); + + /* disable receives */ + rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, + rxctrl & ~IXGBE_RXCTRL_RXEN); + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + + /* flush both disables */ + IXGBE_WRITE_FLUSH(&adapter->hw); + msleep(10); + + ixgbe_irq_disable(adapter); + + napi_disable(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + ixgbe_reset(adapter); + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); + +} + +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + ixgbe_suspend(pdev, PMSG_SUSPEND); +} + +/** + * ixgbe_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int ixgbe_clean(struct napi_struct *napi, int budget) +{ + struct ixgbe_adapter *adapter = container_of(napi, + struct ixgbe_adapter, napi); + struct net_device *netdev = adapter->netdev; + int tx_cleaned = 0, work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + /* In non-MSIX case, there is no multi-Tx/Rx queue */ + tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); + ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done, + budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((!tx_cleaned && (work_done < budget)) || + !netif_running(adapter->netdev)) { +quit_polling: + netif_rx_complete(netdev, napi); + ixgbe_irq_enable(adapter); + } + + return work_done; +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +static void ixgbe_reset_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter; + adapter = container_of(work, struct ixgbe_adapter, reset_task); + + adapter->tx_timeout_count++; + + ixgbe_down(adapter); + ixgbe_up(adapter); +} + +/** + * ixgbe_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of 
queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) +{ + int i; + + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct ixgbe_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct ixgbe_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i].adapter = adapter; + adapter->rx_ring[i].itr_register = IXGBE_EITR(i); + adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; + } + + return 0; +} + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* default flow control settings */ + hw->fc.original_type = ixgbe_fc_full; + hw->fc.type = ixgbe_fc_full; + + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; + if (hw->mac.ops.reset(hw)) { + dev_err(&pdev->dev, "HW Init failed\n"); + return -EIO; + } + if (hw->phy.ops.setup_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, + false)) { + dev_err(&pdev->dev, "Link Speed setup failed\n"); + return -EIO; + } + + /* initialize eeprom parameters */ + if (ixgbe_init_eeprom(hw)) { + dev_err(&pdev->dev, "EEPROM initialization failed\n"); + return -EIO; + } + + /* Set the default values */ + adapter->num_rx_queues = IXGBE_DEFAULT_RXQ; + adapter->num_tx_queues = 1; + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + + if (ixgbe_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgbe_tx_buffer) * txdr->count; + txdr->tx_buffer_info = vmalloc(size); + if (!txdr->tx_buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + memset(txdr->tx_buffer_info, 0, size); + + /* round up to nearest 4K */ + txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); + if (!txdr->desc) { + vfree(txdr->tx_buffer_info); + DPRINTK(PROBE, ERR, + "Memory allocation failed for the tx desc ring\n"); + return -ENOMEM; + } + + txdr->adapter = adapter; + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + txdr->work_limit = txdr->count; + spin_lock_init(&txdr->tx_lock); + + return 0; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx 
descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; + rxdr->rx_buffer_info = vmalloc(size); + if (!rxdr->rx_buffer_info) { + DPRINTK(PROBE, ERR, + "vmalloc allocation failed for the rx desc ring\n"); + return -ENOMEM; + } + memset(rxdr->rx_buffer_info, 0, size); + + desc_len = sizeof(union ixgbe_adv_rx_desc); + + /* Round up to nearest 4K */ + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); + + if (!rxdr->desc) { + DPRINTK(PROBE, ERR, + "Memory allocation failed for the rx desc ring\n"); + vfree(rxdr->rx_buffer_info); + return -ENOMEM; + } + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->adapter = adapter; + + return 0; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgbe_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgbe_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
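Both ixgbe_setup_tx_resources() and ixgbe_setup_rx_resources() above round the descriptor area up to a 4 KiB multiple before pci_alloc_consistent(). A worked example with hypothetical ring sizes (the advanced descriptors used here are 16 bytes each):

static unsigned int ring_alloc_bytes(unsigned int count, unsigned int desc_len)
{
        return ALIGN(count * desc_len, 4096);
}
/* 512 descriptors: 512 * 16 = 8192 bytes, already a 4 KiB multiple
 *  80 descriptors:  80 * 16 = 1280 bytes, padded up to 4096        */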
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Tx Queue %u failed\n", i); + break; + } + } + + return err; +} + +/** + * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Rx Queue %u failed\n", i); + break; + } + } + + return err; +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || + (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } + + return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbe_open(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err; + u32 ctrl_ext; + u32 num_rx_queues = adapter->num_rx_queues; + + /* Let firmware know the driver has taken over */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); + +try_intr_reinit: + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + num_rx_queues = 1; + adapter->num_rx_queues = num_rx_queues; + } + + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbe_configure(adapter); + + err = ixgbe_request_irq(adapter, &num_rx_queues); + if (err) + goto err_req_irq; + + /* ixgbe_request might have reduced num_rx_queues */ + if (num_rx_queues < adapter->num_rx_queues) { + /* We didn't get MSI-X, so we need to release everything, + * set our Rx queue count to num_rx_queues, and redo the + * whole init process. 
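The retry this comment belongs to ties together the interrupt fallback in ixgbe_request_irq(): MSI-X with per-queue vectors is tried first, then MSI, then legacy shared INTx, with the Rx queue count forced to one whenever MSI-X is unavailable; ixgbe_open() then frees what it had already allocated, resets the hardware and jumps back to try_intr_reinit with the smaller count. Outline only, for orientation:

/*   ixgbe_setup_msix() succeeds -> one vector per queue, plus LSC
 *   else pci_enable_msi()       -> single vector, num_rx_queues = 1
 *   else                        -> legacy INTx (IRQF_SHARED), num_rx_queues = 1 */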
+ */ + ixgbe_free_irq(adapter); + if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + } + ixgbe_free_all_rx_resources(adapter); + ixgbe_free_all_tx_resources(adapter); + adapter->num_rx_queues = num_rx_queues; + + /* Reset the hardware, and start over. */ + ixgbe_reset(adapter); + + goto try_intr_reinit; + } + + err = ixgbe_up_complete(adapter); + if (err) + goto err_up; + + return 0; + +err_up: + ixgbe_free_irq(adapter); +err_req_irq: + ixgbe_free_all_rx_resources(adapter); +err_setup_rx: + ixgbe_free_all_tx_resources(adapter); +err_setup_tx: + ixgbe_reset(adapter); + + return err; +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int ixgbe_close(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 ctrl_ext; + + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); + + return 0; +} + +/** + * ixgbe_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 good_rx, missed_rx, bprc; + + adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC); + missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7)); + adapter->stats.gprc += (good_rx - missed_rx); + + adapter->stats.mpc[0] += missed_rx; + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + adapter->stats.bprc += bprc; + adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + adapter->stats.mprc -= bprc; + adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); + adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + + adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC); + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0)); + 
adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); + adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + + /* Fill out the OS statistics structure */ + adapter->net_stats.rx_packets = adapter->stats.gprc; + adapter->net_stats.tx_packets = adapter->stats.gptc; + adapter->net_stats.rx_bytes = adapter->stats.gorc; + adapter->net_stats.tx_bytes = adapter->stats.gotc; + adapter->net_stats.multicast = adapter->stats.mprc; + + /* Rx Errors */ + adapter->net_stats.rx_errors = adapter->stats.crcerrs + + adapter->stats.rlec; + adapter->net_stats.rx_dropped = 0; + adapter->net_stats.rx_length_errors = adapter->stats.rlec; + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; + adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0]; + +} + +/** + * ixgbe_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_watchdog(unsigned long data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev = adapter->netdev; + bool link_up; + u32 link_speed = 0; + + adapter->hw.phy.ops.check(&adapter->hw, &(link_speed), &link_up); + + if (link_up) { + if (!netif_carrier_ok(netdev)) { + u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); +#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) +#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) + DPRINTK(LINK, INFO, "NIC Link is Up %s, " + "Flow Control: %s\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gpbs" : "unknown speed")), + ((FLOW_RX && FLOW_TX) ? "RX/TX" : + (FLOW_RX ? "RX" : + (FLOW_TX ? "TX" : "None")))); + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } else { + /* Force detection of hung controller */ + adapter->detect_tx_hung = true; + } + } else { + if (netif_carrier_ok(netdev)) { + DPRINTK(LINK, INFO, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + + ixgbe_update_stats(adapter); + + /* Reset the timer */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ + (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) +#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ + +static int ixgbe_tso(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 mss_l4len_idx = 0, l4len; + *hdr_len = 0; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == ntohs(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + adapter->hw_tso_ctxt++; + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + adapter->hw_tso6_ctxt++; + } + + i = tx_ring->next_to_use; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + + /* VLAN MACLEN IPLEN */ + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + vlan_macip_lens |= + (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); + vlan_macip_lens |= ((skb_network_offset(skb)) << + IXGBE_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + vlan_macip_lens |= + (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += + (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + + if (skb->protocol == ntohs(ETH_P_IP)) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + + /* MSS L4LEN IDX */ + mss_l4len_idx |= + (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + unsigned int i; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL || + (tx_flags & IXGBE_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + vlan_macip_lens |= + (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); + vlan_macip_lens |= (skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT); + if (skb->ip_summed == CHECKSUM_PARTIAL) + vlan_macip_lens |= (skb_transport_header(skb) - + skb_network_header(skb)); + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + + type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if 
(skb->protocol == ntohs(ETH_P_IP)) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + + if (skb->sk->sk_protocol == IPPROTO_TCP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + context_desc->mss_l4len_idx = 0; + + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + adapter->hw_csum_tx_good++; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static int ixgbe_tx_map(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, unsigned int first) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int len = skb->len; + unsigned int offset = 0, size, count = 0, i; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int f; + + len -= skb->data_len; + + i = tx_ring->next_to_use; + + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = pci_map_single(adapter->pdev, + skb->data + offset, + size, PCI_DMA_TODEVICE); + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + offset, + size, PCI_DMA_TODEVICE); + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + } + if (i == 0) + i = tx_ring->count - 1; + else + i = i - 1; + tx_ring->tx_buffer_info[i].skb = skb; + tx_ring->tx_buffer_info[first].next_to_watch = i; + + return count; +} + +static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int tx_flags, int count, u32 paylen, u8 hdr_len) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 olinfo_status = 0, cmd_type_len = 0; + unsigned int i; + u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; + + cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; + + cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + if (tx_flags & IXGBE_TX_FLAGS_TSO) { + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + if (tx_flags & IXGBE_TX_FLAGS_IPV4) + olinfo_status |= IXGBE_TXD_POPTS_IXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | tx_buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + + /* + * Force 
memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); +} + +static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; + unsigned int len = skb->len; + unsigned int first; + unsigned int tx_flags = 0; + unsigned long flags = 0; + u8 hdr_len; + int tso; + unsigned int mss = 0; + int count = 0; + unsigned int f; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + len -= skb->data_len; + + tx_ring = adapter->tx_ring; + + if (skb->len <= 0) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + mss = skb_shinfo(skb)->gso_size; + + if (mss) + count++; + else if (skb->ip_summed == CHECKSUM_PARTIAL) + count++; + + count += TXD_USE_COUNT(len); + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + spin_lock_irqsave(&tx_ring->tx_lock, flags); + if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) { + adapter->tx_busy++; + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= IXGBE_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT); + } + + if (skb->protocol == ntohs(ETH_P_IP)) + tx_flags |= IXGBE_TX_FLAGS_IPV4; + first = tx_ring->next_to_use; + tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IXGBE_TX_FLAGS_TSO; + else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IXGBE_TX_FLAGS_CSUM; + + ixgbe_tx_queue(adapter, tx_ring, tx_flags, + ixgbe_tx_map(adapter, tx_ring, skb, first), + skb->len, hdr_len); + + netdev->trans_start = jiffies; + + spin_lock_irqsave(&tx_ring->tx_lock, flags); + /* Make sure there is space in the ring for the next send. */ + if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED) + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + + return NETDEV_TX_OK; +} + +/** + * ixgbe_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ **/ +static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbe_netpoll(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + adapter->flags |= IXGBE_FLAG_IN_NETPOLL; + ixgbe_intr(adapter->pdev->irq, netdev); + adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; + struct ixgbe_hw *hw; + const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; + unsigned long mmio_start, mmio_len; + static int cards_found; + int i, err, pci_using_dac; + u16 link_status, link_speed, link_width; + u32 part_num; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && + !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_regions(pdev, ixgbe_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + hw->hw_addr = ioremap(mmio_start, mmio_len); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = 1; i <= 5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + } + + netdev->open = &ixgbe_open; + netdev->stop = &ixgbe_close; + 
netdev->hard_start_xmit = &ixgbe_xmit_frame; + netdev->get_stats = &ixgbe_get_stats; + netdev->set_multicast_list = &ixgbe_set_multi; + netdev->set_mac_address = &ixgbe_set_mac; + netdev->change_mtu = &ixgbe_change_mtu; + ixgbe_set_ethtool_ops(netdev); + netdev->tx_timeout = &ixgbe_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64); + netdev->vlan_rx_register = ixgbe_vlan_rx_register; + netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; + netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = ixgbe_netpoll; +#endif + strcpy(netdev->name, pci_name(pdev)); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Setup hw api */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + + err = ii->get_invariants(hw); + if (err) + goto err_hw_init; + + /* setup the private structure */ + err = ixgbe_sw_init(adapter); + if (err) + goto err_sw_init; + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_TSO; + + netdev->features |= NETIF_F_TSO6; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + + /* make sure the EEPROM is good */ + if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { + dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + + if (ixgbe_validate_mac_addr(netdev->dev_addr)) { + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &ixgbe_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->reset_task, ixgbe_reset_task); + + /* initialize default flow control settings */ + hw->fc.original_type = ixgbe_fc_full; + hw->fc.type = ixgbe_fc_full; + hw->fc.high_water = IXGBE_DEFAULT_FCRTH; + hw->fc.low_water = IXGBE_DEFAULT_FCRTL; + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + + /* Interrupt Throttle Rate */ + adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); + adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); + + /* print bus type/speed/width info */ + pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); + link_speed = link_status & IXGBE_PCI_LINK_SPEED; + link_width = link_status & IXGBE_PCI_LINK_WIDTH; + dev_info(&pdev->dev, "(PCI Express:%s:%s) " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : + (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : + "Unknown"), + ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : + (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : + (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : + (link_width == IXGBE_PCI_LINK_WIDTH_1) ? 
"Width x1" : + "Unknown"), + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + ixgbe_read_part_num(hw, &part_num); + dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, + (part_num >> 8), (part_num & 0xff)); + + /* reset the hardware with the new settings */ + ixgbe_start_hw(hw); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + + dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; + return 0; + +err_register: +err_hw_init: +err_sw_init: +err_eeprom: + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit ixgbe_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + set_bit(__IXGBE_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev->priv; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + ixgbe_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev->priv; + + if (pci_enable_device(pdev)) { + DPRINTK(PROBE, ERR, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + ixgbe_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ixgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
+ */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev->priv; + + if (netif_running(netdev)) { + if (ixgbe_up(adapter)) { + DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + +} + +static struct pci_error_handlers ixgbe_err_handler = { + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = __devexit_p(ixgbe_remove), +#ifdef CONFIG_PM + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, +#endif + .shutdown = ixgbe_shutdown, + .err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ + int ret; + printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, + ixgbe_driver_string, ixgbe_driver_version); + + printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); + + ret = pci_register_driver(&ixgbe_driver); + return ret; +} +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbe_exit_module(void) +{ + pci_unregister_driver(&ixgbe_driver); +} +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c new file mode 100644 index 000000000000..8002931ae823 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -0,0 +1,494 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); +static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); + +/** + * ixgbe_identify_phy - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 phy_addr; + + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_validate_phy_addr(hw, phy_addr)) { + hw->phy.addr = phy_addr; + ixgbe_get_phy_id(hw); + hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); + status = 0; + break; + } + } + return status; +} + +/** + * ixgbe_validate_phy_addr - Determines phy address is valid + * @hw: pointer to hardware structure + * + **/ +static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + + hw->phy.addr = phy_addr; + ixgbe_read_phy_reg(hw, + IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + return valid; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); + + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + + return phy_type; +} + +/** + * ixgbe_reset_phy - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + /* + * Perform soft PHY reset to the PHY_XS. 
+ * This will cause a soft reset to the PHY + */ + return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); +} + +/** + * ixgbe_read_phy_reg - Reads a value from a specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 command; + u32 i; + u32 timeout = 10; + u32 data; + s32 status = 0; + u16 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + status = IXGBE_ERR_PHY; + } + + if (status == 0) { + /* + * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, + "PHY read command didn't complete\n"); + status = IXGBE_ERR_PHY; + } else { + /* + * Read operation is complete. 
Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + } + } + + ixgbe_release_swfw_sync(hw, gssr); + } + return status; +} + +/** + * ixgbe_write_phy_reg - Writes a value to specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + u32 i; + u32 timeout = 10; + s32 status = 0; + u16 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + break; + } + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + status = IXGBE_ERR_PHY; + + if (status == 0) { + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { + hw_dbg(hw, "PHY write command did not " + "complete.\n"); + break; + } + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + status = IXGBE_ERR_PHY; + } + + ixgbe_release_swfw_sync(hw, gssr); + } + + return status; +} + +/** + * ixgbe_setup_tnx_phy_link - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_NOT_IMPLEMENTED; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_speed_selection_register = 0x10; + u16 autoneg_restart_mask = 0x0200; + u16 autoneg_complete_mask = 0x0020; + u16 autoneg_reg = 0; + + /* + * Set advertisement settings in PHY based on autoneg_advertised + * settings. If autoneg_advertised = 0, then advertise default values + * txn devices cannot be "forced" to a autoneg 10G and fail. But can + * for a 1G. 
+ */ + ixgbe_read_phy_reg(hw, + autoneg_speed_selection_register, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ + else + autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ + + ixgbe_write_phy_reg(hw, + autoneg_speed_selection_register, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + + /* Restart PHY autonegotiation and wait for completion */ + ixgbe_read_phy_reg(hw, + IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg |= autoneg_restart_mask; + + ixgbe_write_phy_reg(hw, + IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= autoneg_complete_mask; + if (autoneg_reg == autoneg_complete_mask) { + status = 0; + break; + } + } + + if (time_out == max_time_out) + status = IXGBE_ERR_LINK_SETUP; + + return status; +} + +/** + * ixgbe_check_tnx_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, + bool *link_up) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } else { + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + } + } + + return status; +} + +/** + * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + **/ +s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + /* + * Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + /* Setup link based on the new speed settings */ + ixgbe_setup_tnx_phy_link(hw); + + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h new file mode 100644 index 000000000000..199e8f670f3a --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -0,0 +1,50 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_init_shared_code_phy(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); + +/* PHY specific */ +s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); +s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, + bool autoneg_wait_to_complete); + +#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h new file mode 100644 index 000000000000..fdcde16a2a99 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -0,0 +1,1332 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +#include <linux/types.h> + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EERD 0x10014 +#define IXGBE_FLA 0x1001C +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C +#define IXGBE_GRC 0x10200 + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 
0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL 0x11068 +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ +#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) +#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) +#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) +#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) +#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) +#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) +#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) + /* array of 16 (0x02100-0x0213C) */ +#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) + /* array of 16 (0x02200-0x0223C) */ +#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) + /* Multicast Table Array - 128 entries */ +#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ +#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ +#define IXGBE_PSRTYPE 0x05480 + /* 0x5480-0x54BC Packet split receive type */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) + /* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) + /*array of 4096 4-bit vlan vmdq indicies */ +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) + /* there are 16 of these (0-15) */ +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) + /* there are 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ + 
+/* Music registers */ +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ +#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 
0x15800 /* 0x15800 - 0x15EFC */ + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS 0x10150 +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_FWSM 0x10148 +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW0 0x02F10 +#define IXGBE_RIC_DW1 0x02F14 +#define IXGBE_RIC_DW2 0x02F18 +#define IXGBE_RIC_DW3 0x02F1C +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TIC_DW0 0x07F10 +#define IXGBE_TIC_DW1 0x07F14 +#define IXGBE_TIC_DW2 0x07F18 +#define IXGBE_TIC_DW3 0x07F1C +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define 
IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_ATLASCTL 0x04800 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ + +/* FACTPS */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ +#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */ +#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define 
IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define QT2022_PHY_ID 0x0043A400 + +/* General purpose Interrupt Enable */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 
0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* bit 31, XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Rx Flow control enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority flow control ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK (\ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt RX (A.K.A. 
Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP4 0x00000001 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000002 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00000008 /* SDP5 IO direction */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define 
IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 +#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ + +/* GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* Checksum and EEPROM pointers */ +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 + +/* EEPROM Commands - 
SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enbale latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */ + +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif + +#ifndef IXGBE_EERD_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read to complete */ +#define IXGBE_EERD_ATTEMPTS 100000 +#endif + +/* PCI Bus Info */ +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 + +/* Filters */ +#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
write-back flushing */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enbale */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define 
IXGBE_RXDADV_HBO 0x00800000 +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
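As an editorial aside, not part of the patch: a hedged sketch of how a receive path might consume the RSS fields defined above. It assumes the union ixgbe_adv_rx_desc write-back layout declared a little further below, omits endianness conversion for brevity, and uses a purely illustrative helper name.

	/* Hypothetical helper: recover the RSS type and hash from a completed
	 * advanced receive descriptor (write-back format). */
	static inline u32 example_rxd_rss_hash(const union ixgbe_adv_rx_desc *rx_desc,
	                                       u16 *rss_type)
	{
		*rss_type = rx_desc->wb.lower.lo_dword.pkt_info &
			    IXGBE_RXDADV_RSSTYPE_MASK;
		if (*rss_type == IXGBE_RXDADV_RSSTYPE_NONE)
			return 0;	/* no hash was computed for this packet */
		return rx_desc->wb.lower.hi_dword.rss;
	}
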
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + u16 vlan; + } fields; + } upper; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + u16 length; /* Length of data DMAed into data buffer */ + u16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + u16 vlan; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + u16 pkt_info; /* RSS type, Packet type */ + u16 hdr_info; /* Split Header, header len */ + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ +#define 
IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Link speed */ +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 + + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_tn, + ixgbe_phy_qt, + ixgbe_phy_xaui +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_copper, + ixgbe_media_type_backplane +}; + +/* Flow Control Settings */ +enum ixgbe_fc_type { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mc_addr_in_rar_count; + u32 mta_in_use; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water; /* Flow Control High-water */ + u32 low_water; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; 
/* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum ixgbe_fc_type type; /* Type of flow control */ + enum ixgbe_fc_type original_type; +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 rqsmr[16]; + u64 tqsmr[8]; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; +}; + +/* forward declaration */ +struct ixgbe_hw; + +struct ixgbe_mac_operations { + s32 (*reset)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); +}; + +struct ixgbe_phy_operations { + s32 (*setup)(struct ixgbe_hw *); + s32 (*check)(struct ixgbe_hw *, u32 *, bool *); + s32 (*setup_speed)(struct ixgbe_hw *, u32, bool, bool); + s32 (*get_settings)(struct ixgbe_hw *, u32 *, bool *); +}; + +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + s32 mc_filter_type; + u32 num_rx_queues; + u32 num_tx_queues; + u32 num_rx_addrs; + u32 link_attach_type; + u32 link_mode_select; + bool link_settings_loaded; +}; + + +struct ixgbe_eeprom_info { + enum ixgbe_eeprom_type type; + u16 word_size; + u16 address_bits; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + + enum ixgbe_phy_type type; + u32 addr; + u32 id; + u32 revision; + enum ixgbe_media_type media_type; + u32 autoneg_advertised; + bool autoneg_wait_to_complete; +}; + +struct ixgbe_info { + enum ixgbe_mac_type mac; + s32 (*get_invariants)(struct ixgbe_hw *); + struct ixgbe_mac_operations *mac_ops; + struct ixgbe_phy_operations *phy_ops; +}; + +struct ixgbe_hw { + u8 __iomem *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_eeprom_info eeprom; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; +}; + +/* Error Codes */ +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index d3f4235c585d..b02a981c87a8 100644 --- a/drivers/net/ixp2000/enp2611.c +++ b/drivers/net/ixp2000/enp2611.c @@ -210,7 +210,6 @@ 
static int __init enp2611_init_module(void) return -ENOMEM; } - SET_MODULE_OWNER(nds[i]); nds[i]->get_stats = enp2611_get_stats; pm3386_init_port(i); pm3386_get_mac(i, nds[i]->dev_addr); diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index d9ce1aef148a..6c0dd49149d0 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -74,9 +74,9 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) } -static int ixpdev_rx(struct net_device *dev, int *budget) +static int ixpdev_rx(struct net_device *dev, int processed, int budget) { - while (*budget > 0) { + while (processed < budget) { struct ixpdev_rx_desc *desc; struct sk_buff *skb; void *buf; @@ -122,29 +122,34 @@ static int ixpdev_rx(struct net_device *dev, int *budget) err: ixp2000_reg_write(RING_RX_PENDING, _desc); - dev->quota--; - (*budget)--; + processed++; } - return 1; + return processed; } /* dev always points to nds[0]. */ -static int ixpdev_poll(struct net_device *dev, int *budget) +static int ixpdev_poll(struct napi_struct *napi, int budget) { + struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi); + struct net_device *dev = ip->dev; + int rx; + /* @@@ Have to stop polling when nds[0] is administratively * downed while we are polling. */ + rx = 0; do { ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); - if (ixpdev_rx(dev, budget)) - return 1; + rx = ixpdev_rx(dev, rx, budget); + if (rx >= budget) + break; } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); - return 0; + return rx; } static void ixpdev_tx_complete(void) @@ -199,9 +204,12 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) * Any of the eight receive units signaled RX? 
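To summarise the NAPI shape the ixpdev hunks above convert to, here is a minimal sketch. my_priv, my_rx and my_enable_rx_irq are hypothetical stand-ins; the napi_* and netif_rx_* calls are the ones the patch itself uses (the 2.6.24-era API, where completion still takes the net_device).

	#include <linux/netdevice.h>

	struct my_priv {
		struct net_device *dev;
		struct napi_struct napi;
	};

	static int my_rx(struct net_device *dev, int budget);	/* hypothetical RX loop */
	static void my_enable_rx_irq(struct my_priv *priv);	/* hypothetical re-arm */

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int work_done = my_rx(priv->dev, budget);

		if (work_done < budget) {
			netif_rx_complete(priv->dev, napi);	/* done polling */
			my_enable_rx_irq(priv);
		}
		return work_done;
	}

	/* Registration in the probe path:
	 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
	 * Scheduling from the interrupt handler once RX work is pending:
	 *	if (napi_schedule_prep(&priv->napi))
	 *		__netif_rx_schedule(dev, &priv->napi);
	 */
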
*/ if (status & 0x00ff) { + struct net_device *dev = nds[0]; + struct ixpdev_priv *ip = netdev_priv(dev); + ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); - if (likely(__netif_rx_schedule_prep(nds[0]))) { - __netif_rx_schedule(nds[0]); + if (likely(napi_schedule_prep(&ip->napi))) { + __netif_rx_schedule(dev, &ip->napi); } else { printk(KERN_CRIT "ixp2000: irq while polling!!\n"); } @@ -232,11 +240,13 @@ static int ixpdev_open(struct net_device *dev) struct ixpdev_priv *ip = netdev_priv(dev); int err; + napi_enable(&ip->napi); if (!nds_open++) { err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt, IRQF_SHARED, "ixp2000_eth", nds); if (err) { nds_open--; + napi_disable(&ip->napi); return err; } @@ -254,6 +264,7 @@ static int ixpdev_close(struct net_device *dev) struct ixpdev_priv *ip = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&ip->napi); set_port_admin_status(ip->channel, 0); if (!--nds_open) { @@ -274,7 +285,6 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv) return NULL; dev->hard_start_xmit = ixpdev_xmit; - dev->poll = ixpdev_poll; dev->open = ixpdev_open; dev->stop = ixpdev_close; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -282,9 +292,10 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv) #endif dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - dev->weight = 64; ip = netdev_priv(dev); + ip->dev = dev; + netif_napi_add(dev, &ip->napi, ixpdev_poll, 64); ip->channel = channel; ip->tx_queue_entries = 0; diff --git a/drivers/net/ixp2000/ixpdev.h b/drivers/net/ixp2000/ixpdev.h index bd686cb63058..391ece623243 100644 --- a/drivers/net/ixp2000/ixpdev.h +++ b/drivers/net/ixp2000/ixpdev.h @@ -14,6 +14,8 @@ struct ixpdev_priv { + struct net_device *dev; + struct napi_struct napi; int channel; int tx_queue_entries; }; diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index 75f6f441e876..d3825c8ee994 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -45,7 +45,6 @@ #include <asm/jazzdma.h> static char jazz_sonic_string[] = "jazzsonic"; -static struct platform_device *jazz_sonic_device; #define SONIC_MEM_SIZE 0x100 @@ -70,14 +69,6 @@ static unsigned int sonic_debug = 1; #endif /* - * Base address and interrupt of the SONIC controller on JAZZ boards - */ -static struct { - unsigned int port; - unsigned int irq; -} sonic_portlist[] = { {JAZZ_ETHERNET_BASE, JAZZ_ETHERNET_IRQ}, {0, 0}}; - -/* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. * So we check for known Silicon Revision IDs instead. @@ -215,13 +206,13 @@ static int __init jazz_sonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; + struct resource *res; int err = 0; int i; + DECLARE_MAC_BUF(mac); - /* - * Don't probe if we're not running on a Jazz board. - */ - if (mips_machgroup != MACH_GROUP_JAZZ) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) return -ENODEV; dev = alloc_etherdev(sizeof(struct sonic_local)); @@ -231,37 +222,20 @@ static int __init jazz_sonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); - SET_MODULE_OWNER(dev); netdev_boot_setup_check(dev); - if (dev->base_addr >= KSEG0) { /* Check a single specified location. */ - err = sonic_probe1(dev); - } else if (dev->base_addr != 0) { /* Don't probe at all. 
*/ - err = -ENXIO; - } else { - for (i = 0; sonic_portlist[i].port; i++) { - dev->base_addr = sonic_portlist[i].port; - dev->irq = sonic_portlist[i].irq; - if (sonic_probe1(dev) == 0) - break; - } - if (!sonic_portlist[i].port) - err = -ENODEV; - } + dev->base_addr = res->start; + dev->irq = platform_get_irq(pdev, 0); + err = sonic_probe1(dev); if (err) goto out; err = register_netdev(dev); if (err) goto out1; - printk("%s: MAC ", dev->name); - for (i = 0; i < 6; i++) { - printk("%2.2x", dev->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk(" IRQ %d\n", dev->irq); + printk("%s: MAC %s IRQ %d\n", + dev->name, print_mac(mac, dev->dev_addr), dev->irq); return 0; @@ -303,38 +277,12 @@ static struct platform_driver jazz_sonic_driver = { static int __init jazz_sonic_init_module(void) { - int err; - - if ((err = platform_driver_register(&jazz_sonic_driver))) { - printk(KERN_ERR "Driver registration failed\n"); - return err; - } - - jazz_sonic_device = platform_device_alloc(jazz_sonic_string, 0); - if (!jazz_sonic_device) - goto out_unregister; - - if (platform_device_add(jazz_sonic_device)) { - platform_device_put(jazz_sonic_device); - jazz_sonic_device = NULL; - } - - return 0; - -out_unregister: - platform_driver_unregister(&jazz_sonic_driver); - - return -ENOMEM; + return platform_driver_register(&jazz_sonic_driver); } static void __exit jazz_sonic_cleanup_module(void) { platform_driver_unregister(&jazz_sonic_driver); - - if (jazz_sonic_device) { - platform_device_unregister(jazz_sonic_device); - jazz_sonic_device = NULL; - } } module_init(jazz_sonic_init_module); diff --git a/drivers/net/lance.c b/drivers/net/lance.c index a2f37e52b928..977ed3401bb3 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -466,6 +466,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int unsigned long flags; int err = -ENOMEM; void __iomem *bios; + DECLARE_MAC_BUF(mac); /* First we look for special cases. Check for HP's on-board ethernet by looking for 'HP' in the BIOS. @@ -521,23 +522,22 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int /* We can't allocate dev->priv from alloc_etherdev() because it must a ISA DMA-able region. */ - SET_MODULE_OWNER(dev); chipname = chip_table[lance_version].name; - printk("%s: %s at %#3x,", dev->name, chipname, ioaddr); + printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); /* There is a 16 byte station address PROM at the base address. The first six bytes are the station address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + printk("%s", print_mac(mac, dev->dev_addr)); dev->base_addr = ioaddr; /* Make certain the data structures used by the LANCE are aligned and DMAble. */ - lp = kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); + lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); if(lp==NULL) return -ENODEV; if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); - memset(lp, 0, sizeof(*lp)); dev->priv = lp; lp->name = chipname; lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c new file mode 100644 index 000000000000..abce2ee8430a --- /dev/null +++ b/drivers/net/lguest_net.c @@ -0,0 +1,555 @@ +/*D:500 + * The Guest network driver. + * + * This is very simple a virtual network driver, and our last Guest driver. + * The only trick is that it can talk directly to multiple other recipients + * (ie. other Guests on the same network). 
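Stepping back to the MAC-printing conversions in the jazzsonic and lance hunks above (the same change recurs in several later drivers): the per-byte printk loops are replaced by the DECLARE_MAC_BUF/print_mac pair. A minimal, illustrative use of that idiom, assuming only a generic net_device called dev:

	{
		DECLARE_MAC_BUF(mac);	/* stack buffer for the formatted address */

		/* print_mac() formats dev_addr into 'mac' and returns it */
		printk(KERN_INFO "%s: MAC %s\n",
		       dev->name, print_mac(mac, dev->dev_addr));
	}
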
It can also be used with only the + * Host on the network. + :*/ + +/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +//#define DEBUG +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/mm_types.h> +#include <linux/io.h> +#include <linux/lguest_bus.h> + +#define SHARED_SIZE PAGE_SIZE +#define MAX_LANS 4 +#define NUM_SKBS 8 + +/*M:011 Network code master Jeff Garzik points out numerous shortcomings in + * this driver if it aspires to greatness. + * + * Firstly, it doesn't use "NAPI": the networking's New API, and is poorer for + * it. As he says "NAPI means system-wide load leveling, across multiple + * network interfaces. Lack of NAPI can mean competition at higher loads." + * + * He also points out that we don't implement set_mac_address, so users cannot + * change the devices hardware address. When I asked why one would want to: + * "Bonding, and situations where you /do/ want the MAC address to "leak" out + * of the host onto the wider net." + * + * Finally, he would like module unloading: "It is not unrealistic to think of + * [un|re|]loading the net support module in an lguest guest. And, adding + * module support makes the programmer more responsible, because they now have + * to learn to clean up after themselves. Any driver that cannot clean up + * after itself is an incomplete driver in my book." + :*/ + +/*D:530 The "struct lguestnet_info" contains all the information we need to + * know about the network device. */ +struct lguestnet_info +{ + /* The mapped device page(s) (an array of "struct lguest_net"). */ + struct lguest_net *peer; + /* The physical address of the device page(s) */ + unsigned long peer_phys; + /* The size of the device page(s). */ + unsigned long mapsize; + + /* The lguest_device I come from */ + struct lguest_device *lgdev; + + /* My peerid (ie. my slot in the array). */ + unsigned int me; + + /* Receive queue: the network packets waiting to be filled. */ + struct sk_buff *skb[NUM_SKBS]; + struct lguest_dma dma[NUM_SKBS]; +}; +/*:*/ + +/* How many bytes left in this page. */ +static unsigned int rest_of_page(void *data) +{ + return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); +} + +/*D:570 Each peer (ie. Guest or Host) on the network binds their receive + * buffers to a different key: we simply use the physical address of the + * device's memory page plus the peer number. The Host insists that all keys + * be a multiple of 4, so we multiply the peer number by 4. */ +static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum) +{ + return info->peer_phys + 4 * peernum; +} + +/* This is the routine which sets up a "struct lguest_dma" to point to a + * network packet, similar to req_to_dma() in lguest_blk.c. 
The structure of a + * "struct sk_buff" has grown complex over the years: it consists of a "head" + * linear section pointed to by "skb->data", and possibly an array of + * "fragments" in the case of a non-linear packet. + * + * Our receive buffers don't use fragments at all but outgoing skbs might, so + * we handle it. */ +static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen, + struct lguest_dma *dma) +{ + unsigned int i, seg; + + /* First, we put the linear region into the "struct lguest_dma". Each + * entry can't go over a page boundary, so even though all our packets + * are 1514 bytes or less, we might need to use two entries here: */ + for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) { + dma->addr[seg] = virt_to_phys(skb->data + i); + dma->len[seg] = min((unsigned)(headlen - i), + rest_of_page(skb->data + i)); + } + + /* Now we handle the fragments: at least they're guaranteed not to go + * over a page. skb_shinfo(skb) returns a pointer to the structure + * which tells us about the number of fragments and the fragment + * array. */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) { + const skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */ + if (seg == LGUEST_MAX_DMA_SECTIONS) { + /* We will end up sending a truncated packet should + * this ever happen. Plus, a cool log message! */ + printk("Woah dude! Megapacket!\n"); + break; + } + dma->addr[seg] = page_to_phys(f->page) + f->page_offset; + dma->len[seg] = f->size; + } + + /* If after all that we didn't use the entire "struct lguest_dma" + * array, we terminate it with a 0 length. */ + if (seg < LGUEST_MAX_DMA_SECTIONS) + dma->len[seg] = 0; +} + +/* + * Packet transmission. + * + * Our packet transmission is a little unusual. A real network card would just + * send out the packet and leave the receivers to decide if they're interested. + * Instead, we look through the network device memory page and see if any of + * the ethernet addresses match the packet destination, and if so we send it to + * that Guest. + * + * This is made a little more complicated in two cases. The first case is + * broadcast packets: for that we send the packet to all Guests on the network, + * one at a time. The second case is "promiscuous" mode, where a Guest wants + * to see all the packets on the network. We need a way for the Guest to tell + * us it wants to see all packets, so it sets the "multicast" bit on its + * published MAC address, which is never valid in a real ethernet address. + */ +#define PROMISC_BIT 0x01 + +/* This is the callback which is summoned whenever the network device's + * multicast or promiscuous state changes. If the card is in promiscuous mode, + * we advertise that in our ethernet address in the device's memory. We do the + * same if Linux wants any or all multicast traffic. */ +static void lguestnet_set_multicast(struct net_device *dev) +{ + struct lguestnet_info *info = netdev_priv(dev); + + if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count) + info->peer[info->me].mac[0] |= PROMISC_BIT; + else + info->peer[info->me].mac[0] &= ~PROMISC_BIT; +} + +/* A simple test function to see if a peer wants to see all packets.*/ +static int promisc(struct lguestnet_info *info, unsigned int peer) +{ + return info->peer[peer].mac[0] & PROMISC_BIT; +} + +/* Another simple function to see if a peer's advertised ethernet address + * matches a packet's destination ethernet address. 
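A concrete (purely illustrative) reading of the promiscuous-bit convention described above: a Guest that normally publishes the locally assigned address 02:00:05:00:00:00 flips the group/multicast bit of the first byte while promiscuous and publishes 03:00:05:00:00:00 instead. Since no genuine unicast source address may carry that bit, the other peers can treat it as the "send me everything" flag, which is exactly what promisc() and mac_eq() below test for.
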
*/ +static int mac_eq(const unsigned char mac[ETH_ALEN], + struct lguestnet_info *info, unsigned int peer) +{ + /* Ignore multicast bit, which peer turns on to mean promisc. */ + if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0]) + return 0; + return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0; +} + +/* This is the function which actually sends a packet once we've decided a + * peer wants it: */ +static void transfer_packet(struct net_device *dev, + struct sk_buff *skb, + unsigned int peernum) +{ + struct lguestnet_info *info = netdev_priv(dev); + struct lguest_dma dma; + + /* We use our handy "struct lguest_dma" packing function to prepare + * the skb for sending. */ + skb_to_dma(skb, skb_headlen(skb), &dma); + pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len); + + /* This is the actual send call which copies the packet. */ + lguest_send_dma(peer_key(info, peernum), &dma); + + /* Check that the entire packet was transmitted. If not, it could mean + * that the other Guest registered a short receive buffer, but this + * driver should never do that. More likely, the peer is dead. */ + if (dma.used_len != skb->len) { + dev->stats.tx_carrier_errors++; + pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n", + peernum, dma.used_len, skb->len, + (void *)dma.addr[0], dma.len[0]); + } else { + /* On success we update the stats. */ + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + } +} + +/* Another helper function to tell is if a slot in the device memory is unused. + * Since we always set the Local Assignment bit in the ethernet address, the + * first byte can never be 0. */ +static int unused_peer(const struct lguest_net peer[], unsigned int num) +{ + return peer[num].mac[0] == 0; +} + +/* Finally, here is the routine which handles an outgoing packet. It's called + * "start_xmit" for traditional reasons. */ +static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int i; + int broadcast; + struct lguestnet_info *info = netdev_priv(dev); + /* Extract the destination ethernet address from the packet. */ + const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; + DECLARE_MAC_BUF(mac); + + pr_debug("%s: xmit %s\n", dev->name, print_mac(mac, dest)); + + /* If it's a multicast packet, we broadcast to everyone. That's not + * very efficient, but there are very few applications which actually + * use multicast, which is a shame really. + * + * As etherdevice.h points out: "By definition the broadcast address is + * also a multicast address." So we don't have to test for broadcast + * packets separately. */ + broadcast = is_multicast_ether_addr(dest); + + /* Look through all the published ethernet addresses to see if we + * should send this packet. */ + for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) { + /* We don't send to ourselves (we actually can't SEND_DMA to + * ourselves anyway), and don't send to unused slots.*/ + if (i == info->me || unused_peer(info->peer, i)) + continue; + + /* If it's broadcast we send it. If they want every packet we + * send it. If the destination matches their address we send + * it. Otherwise we go to the next peer. */ + if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i)) + continue; + + pr_debug("lguestnet %s: sending from %i to %i\n", + dev->name, info->me, i); + /* Our routine which actually does the transfer. */ + transfer_packet(dev, skb, i); + } + + /* An xmit routine is expected to dispose of the packet, so we do. 
*/ + dev_kfree_skb(skb); + + /* As per kernel convention, 0 means success. This is why I love + * networking: even if we never sent to anyone, that's still + * success! */ + return 0; +} + +/*D:560 + * Packet receiving. + * + * First, here's a helper routine which fills one of our array of receive + * buffers: */ +static int fill_slot(struct net_device *dev, unsigned int slot) +{ + struct lguestnet_info *info = netdev_priv(dev); + + /* We can receive ETH_DATA_LEN (1500) byte packets, plus a standard + * ethernet header of ETH_HLEN (14) bytes. */ + info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN); + if (!info->skb[slot]) { + printk("%s: could not fill slot %i\n", dev->name, slot); + return -ENOMEM; + } + + /* skb_to_dma() is a helper which sets up the "struct lguest_dma" to + * point to the data in the skb: we also use it for sending out a + * packet. */ + skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]); + + /* This is a Write Memory Barrier: it ensures that the entry in the + * receive buffer array is written *before* we set the "used_len" entry + * to 0. If the Host were looking at the receive buffer array from a + * different CPU, it could potentially see "used_len = 0" and not see + * the updated receive buffer information. This would be a horribly + * nasty bug, so make sure the compiler and CPU know this has to happen + * first. */ + wmb(); + /* Writing 0 to "used_len" tells the Host it can use this receive + * buffer now. */ + info->dma[slot].used_len = 0; + return 0; +} + +/* This is the actual receive routine. When we receive an interrupt from the + * Host to tell us a packet has been delivered, we arrive here: */ +static irqreturn_t lguestnet_rcv(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lguestnet_info *info = netdev_priv(dev); + unsigned int i, done = 0; + + /* Look through our entire receive array for an entry which has data + * in it. */ + for (i = 0; i < ARRAY_SIZE(info->dma); i++) { + unsigned int length; + struct sk_buff *skb; + + length = info->dma[i].used_len; + if (length == 0) + continue; + + /* We've found one! Remember the skb (we grabbed the length + * above), and immediately refill the slot we've taken it + * from. */ + done++; + skb = info->skb[i]; + fill_slot(dev, i); + + /* This shouldn't happen: micropackets could be sent by a + * badly-behaved Guest on the network, but the Host will never + * stuff more data in the buffer than the buffer length. */ + if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) { + pr_debug(KERN_WARNING "%s: unbelievable skb len: %i\n", + dev->name, length); + dev_kfree_skb(skb); + continue; + } + + /* skb_put(), what a great function! I've ranted about this + * function before (http://lkml.org/lkml/1999/9/26/24). You + * call it after you've added data to the end of an skb (in + * this case, it was the Host which wrote the data). */ + skb_put(skb, length); + + /* The ethernet header contains a protocol field: we use the + * standard helper to extract it, and place the result in + * skb->protocol. The helper also sets up skb->pkt_type and + * eats up the ethernet header from the front of the packet. */ + skb->protocol = eth_type_trans(skb, dev); + + /* If this device doesn't need checksums for sending, we also + * don't need to check the packets when they come in. 
*/ + if (dev->features & NETIF_F_NO_CSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* As a last resort for debugging the driver or the lguest I/O + * subsystem, you can uncomment the "#define DEBUG" at the top + * of this file, which turns all the pr_debug() into printk() + * and floods the logs. */ + pr_debug("Receiving skb proto 0x%04x len %i type %i\n", + ntohs(skb->protocol), skb->len, skb->pkt_type); + + /* Update the packet and byte counts (visible from ifconfig, + * and good for debugging). */ + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + /* Hand our fresh network packet into the stack's "network + * interface receive" routine. That will free the packet + * itself when it's finished. */ + netif_rx(skb); + } + + /* If we found any packets, we assume the interrupt was for us. */ + return done ? IRQ_HANDLED : IRQ_NONE; +} + +/*D:550 This is where we start: when the device is brought up by dhcpd or + * ifconfig. At this point we advertise our MAC address to the rest of the + * network, and register receive buffers ready for incoming packets. */ +static int lguestnet_open(struct net_device *dev) +{ + int i; + struct lguestnet_info *info = netdev_priv(dev); + + /* Copy our MAC address into the device page, so others on the network + * can find us. */ + memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN); + + /* We might already be in promisc mode (dev->flags & IFF_PROMISC). Our + * set_multicast callback handles this already, so we call it now. */ + lguestnet_set_multicast(dev); + + /* Allocate packets and put them into our "struct lguest_dma" array. + * If we fail to allocate all the packets we could still limp along, + * but it's a sign of real stress so we should probably give up now. */ + for (i = 0; i < ARRAY_SIZE(info->dma); i++) { + if (fill_slot(dev, i) != 0) + goto cleanup; + } + + /* Finally we tell the Host where our array of "struct lguest_dma" + * receive buffers is, binding it to the key corresponding to the + * device's physical memory plus our peerid. */ + if (lguest_bind_dma(peer_key(info,info->me), info->dma, + NUM_SKBS, lgdev_irq(info->lgdev)) != 0) + goto cleanup; + return 0; + +cleanup: + while (--i >= 0) + dev_kfree_skb(info->skb[i]); + return -ENOMEM; +} +/*:*/ + +/* The close routine is called when the device is no longer in use: we clean up + * elegantly. */ +static int lguestnet_close(struct net_device *dev) +{ + unsigned int i; + struct lguestnet_info *info = netdev_priv(dev); + + /* Clear all trace of our existence out of the device memory by setting + * the slot which held our MAC address to 0 (unused). */ + memset(&info->peer[info->me], 0, sizeof(info->peer[info->me])); + + /* Unregister our array of receive buffers */ + lguest_unbind_dma(peer_key(info, info->me), info->dma); + for (i = 0; i < ARRAY_SIZE(info->dma); i++) + dev_kfree_skb(info->skb[i]); + return 0; +} + +/*D:510 The network device probe function is basically a standard ethernet + * device setup. It reads the "struct lguest_device_desc" and sets the "struct + * net_device". Oh, the line-by-line excitement! Let's skip over it. 
:*/ +static int lguestnet_probe(struct lguest_device *lgdev) +{ + int err, irqf = IRQF_SHARED; + struct net_device *dev; + struct lguestnet_info *info; + struct lguest_device_desc *desc = &lguest_devices[lgdev->index]; + + pr_debug("lguest_net: probing for device %i\n", lgdev->index); + + dev = alloc_etherdev(sizeof(struct lguestnet_info)); + if (!dev) + return -ENOMEM; + + /* Ethernet defaults with some changes */ + ether_setup(dev); + dev->set_mac_address = NULL; + + dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */ + dev->dev_addr[1] = 0x00; + memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2); + dev->dev_addr[4] = 0x00; + dev->dev_addr[5] = 0x00; + + dev->open = lguestnet_open; + dev->stop = lguestnet_close; + dev->hard_start_xmit = lguestnet_start_xmit; + + /* We don't actually support multicast yet, but turning on/off + * promisc also calls dev->set_multicast_list. */ + dev->set_multicast_list = lguestnet_set_multicast; + SET_NETDEV_DEV(dev, &lgdev->dev); + + /* The network code complains if you have "scatter-gather" capability + * if you don't also handle checksums (it seem that would be + * "illogical"). So we use a lie of omission and don't tell it that we + * can handle scattered packets unless we also don't want checksums, + * even though to us they're completely independent. */ + if (desc->features & LGUEST_NET_F_NOCSUM) + dev->features = NETIF_F_SG|NETIF_F_NO_CSUM; + + info = netdev_priv(dev); + info->mapsize = PAGE_SIZE * desc->num_pages; + info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT); + info->lgdev = lgdev; + info->peer = lguest_map(info->peer_phys, desc->num_pages); + if (!info->peer) { + err = -ENOMEM; + goto free; + } + + /* This stores our peerid (upper bits reserved for future). */ + info->me = (desc->features & (info->mapsize-1)); + + err = register_netdev(dev); + if (err) { + pr_debug("lguestnet: registering device failed\n"); + goto unmap; + } + + if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS) + irqf |= IRQF_SAMPLE_RANDOM; + if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet", + dev) != 0) { + pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev)); + goto unregister; + } + + pr_debug("lguestnet: registered device %s\n", dev->name); + /* Finally, we put the "struct net_device" in the generic "struct + * lguest_device"s private pointer. Again, it's not necessary, but + * makes sure the cool kernel kids don't tease us. */ + lgdev->private = dev; + return 0; + +unregister: + unregister_netdev(dev); +unmap: + lguest_unmap(info->peer); +free: + free_netdev(dev); + return err; +} + +static struct lguest_driver lguestnet_drv = { + .name = "lguestnet", + .owner = THIS_MODULE, + .device_type = LGUEST_DEVICE_T_NET, + .probe = lguestnet_probe, +}; + +static __init int lguestnet_init(void) +{ + return register_lguest_driver(&lguestnet_drv); +} +module_init(lguestnet_init); + +MODULE_DESCRIPTION("Lguest network driver"); +MODULE_LICENSE("GPL"); + +/*D:580 + * This is the last of the Drivers, and with this we have covered the many and + * wonderous and fine (and boring) details of the Guest. + * + * "make Launcher" beckons, where we answer questions like "Where do Guests + * come from?", and "What do you do when someone asks for optimization?" 
+ */ diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 5884f5bd04a4..ffaa14f2cd01 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -322,7 +322,6 @@ struct i596_private { struct i596_cmd *cmd_head; int cmd_backlog; u32 last_cmd; - struct net_device_stats stats; int next_tx_cmd; int options; spinlock_t lock; /* serialize access to chip */ @@ -352,7 +351,6 @@ static int i596_open(struct net_device *dev); static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); -static struct net_device_stats *i596_get_stats(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void i596_tx_timeout (struct net_device *dev); static void print_eth(unsigned char *buf, char *str); @@ -725,7 +723,7 @@ memory_squeeze: printk(KERN_ERR "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { if (!rx_in_place) { /* 16 byte align the data fields */ @@ -742,28 +740,28 @@ memory_squeeze: skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } else { DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", dev->name, rfd->stat)); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rfd->stat & SWAP16(0x0100)) - lp->stats.collisions++; + dev->stats.collisions++; if (rfd->stat & SWAP16(0x8000)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rfd->stat & SWAP16(0x0001)) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (rfd->stat & SWAP16(0x0002)) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (rfd->stat & SWAP16(0x0004)) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rfd->stat & SWAP16(0x0008)) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rfd->stat & SWAP16(0x0010)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* Clear the buffer descriptor count and EOF + F flags */ @@ -821,8 +819,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private dev_kfree_skb(skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; ptr->v_next = NULL; ptr->b_next = I596_NULL; @@ -951,10 +949,10 @@ static void i596_tx_timeout (struct net_device *dev) "%s: transmit timed out, status resetting.\n", dev->name)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adaptor */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); /* Shutdown and restart */ i596_reset (dev, lp); @@ -964,7 +962,7 @@ static void i596_tx_timeout (struct net_device *dev) lp->dma->scb.command = SWAP16(CUC_START | RX_START); DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); ca (dev); - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } dev->trans_start = jiffies; @@ -999,7 +997,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: xmit ring full, dropping packet.\n", dev->name)); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } else { @@ -1025,8 +1023,8 @@ static int i596_start_xmit(struct 
sk_buff *skb, struct net_device *dev) DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); i596_add_cmd(dev, &tx_cmd->cmd); - lp->stats.tx_packets++; - lp->stats.tx_bytes += length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; } netif_start_queue(dev); @@ -1036,15 +1034,12 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) static void print_eth(unsigned char *add, char *str) { - int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); - printk(KERN_DEBUG "i596 0x%p, ", add); - for (i = 0; i < 6; i++) - printk(" %02X", add[i + 6]); - printk(" -->"); - for (i = 0; i < 6; i++) - printk(" %02X", add[i]); - printk(" %02X%02X, %s\n", add[12], add[13], str); + printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n", + add, print_mac(mac, add + 6), print_mac(mac2, add), + add[12], add[13], str); } static int __devinit i82596_probe(struct net_device *dev) @@ -1076,7 +1071,6 @@ static int __devinit i82596_probe(struct net_device *dev) dev->open = i596_open; dev->stop = i596_close; dev->hard_start_xmit = i596_start_xmit; - dev->get_stats = i596_get_stats; dev->set_multicast_list = set_multicast_list; dev->tx_timeout = i596_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1197,17 +1191,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) DEB(DEB_TXADDR, print_eth(skb->data, "tx-done")); } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (ptr->status & SWAP16(0x0020)) - lp->stats.collisions++; + dev->stats.collisions++; if (!(ptr->status & SWAP16(0x0040))) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (ptr->status & SWAP16(0x0400)) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (ptr->status & SWAP16(0x0800)) - lp->stats.collisions++; + dev->stats.collisions++; if (ptr->status & SWAP16(0x1000)) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } dma_unmap_single(dev->dev.parent, tx_cmd->dma_addr, @@ -1292,8 +1286,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); ack_cmd |= RX_START; - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; rebuild_rx_bufs(dev); } } @@ -1346,13 +1340,6 @@ static int i596_close(struct net_device *dev) return 0; } -static struct net_device_stats *i596_get_stats(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* * Set or clear the multicast filter for this adaptor. */ @@ -1362,6 +1349,7 @@ static void set_multicast_list(struct net_device *dev) struct i596_private *lp = netdev_priv(dev); struct i596_dma *dma = lp->dma; int config = 0, cnt; + DECLARE_MAC_BUF(mac); DEB(DEB_MULTI, printk(KERN_DEBUG @@ -1425,8 +1413,8 @@ static void set_multicast_list(struct net_device *dev) if (i596_debug > 1) DEB(DEB_MULTI, printk(KERN_DEBUG - "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, cp[0], cp[1], cp[2], cp[3], cp[4], cp[5])); + "%s: Adding address %s\n", + dev->name, print_mac(mac, cp))); } DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); i596_add_cmd(dev, &cmd->cmd); diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index 5c86e737f954..c429a5002dd6 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c @@ -143,6 +143,52 @@ static void __NS8390_init(struct net_device *dev, int startp); * annoying the transmit function is called bh atomic. 
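The bulk of the lib82596 hunks above are one recurring conversion: the driver-private struct net_device_stats copy and the get_stats hook that returned it go away, and the driver bumps the counters embedded in struct net_device directly (the core's default get_stats already hands back &dev->stats). A minimal sketch of the resulting receive-side accounting, with a hypothetical example_rx_done():

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Count a received frame against the device's built-in statistics;
 * no driver-private net_device_stats and no get_stats method needed. */
static void example_rx_done(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}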
That places * restrictions on the user context callers as disable_irq won't save * them. + * + * Additional explanation of problems with locking by Alan Cox: + * + * "The author (me) didn't use spin_lock_irqsave because the slowness of the + * card means that approach caused horrible problems like losing serial data + * at 38400 baud on some chips. Rememeber many 8390 nics on PCI were ISA + * chips with FPGA front ends. + * + * Ok the logic behind the 8390 is very simple: + * + * Things to know + * - IRQ delivery is asynchronous to the PCI bus + * - Blocking the local CPU IRQ via spin locks was too slow + * - The chip has register windows needing locking work + * + * So the path was once (I say once as people appear to have changed it + * in the mean time and it now looks rather bogus if the changes to use + * disable_irq_nosync_irqsave are disabling the local IRQ) + * + * + * Take the page lock + * Mask the IRQ on chip + * Disable the IRQ (but not mask locally- someone seems to have + * broken this with the lock validator stuff) + * [This must be _nosync as the page lock may otherwise + * deadlock us] + * Drop the page lock and turn IRQs back on + * + * At this point an existing IRQ may still be running but we can't + * get a new one + * + * Take the lock (so we know the IRQ has terminated) but don't mask + * the IRQs on the processor + * Set irqlock [for debug] + * + * Transmit (slow as ****) + * + * re-enable the IRQ + * + * + * We have to use disable_irq because otherwise you will get delayed + * interrupts on the APIC bus deadlocking the transmit path. + * + * Quite hairy but the chip simply wasn't designed for SMP and you can't + * even ACK an interrupt without risking corrupting other parallel + * activities on the chip." [lkml, 25 Jul 2007] */ @@ -219,15 +265,6 @@ static void ei_tx_timeout(struct net_device *dev) int txsr, isr, tickssofar = jiffies - dev->trans_start; unsigned long flags; -#if defined(CONFIG_M32R) && defined(CONFIG_SMP) - unsigned long icucr; - - local_irq_save(flags); - icucr = inl(M32R_ICU_CR1_PORTL); - icucr |= M32R_ICUCR_ISMOD11; - outl(icucr, M32R_ICU_CR1_PORTL); - local_irq_restore(flags); -#endif ei_local->stat.tx_errors++; spin_lock_irqsave(&ei_local->page_lock, flags); diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 0a08d0c4e7b4..b36989097883 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c @@ -111,8 +111,6 @@ static int __init do_lne390_probe(struct net_device *dev) int mem_start = dev->mem_start; int ret; - SET_MODULE_OWNER(dev); - if (ioaddr > 0x1ff) { /* Check a single specified location. */ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -171,6 +169,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) { int i, revision, ret; unsigned long eisa_id; + DECLARE_MAC_BUF(mac); if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; @@ -202,10 +201,12 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) } #endif - printk("lne390.c: LNE390%X in EISA slot %d, address", 0xa+revision, ioaddr/0x1000); for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i))); - printk(".\nlne390.c: "); + dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); + printk("lne390.c: LNE390%X in EISA slot %d, address %s.\n", + 0xa+revision, ioaddr/0x1000, print_mac(mac, dev->dev_addr)); + + printk("lne390.c: "); /* Snarf the interrupt now. 
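The 8390 transmit sequence Alan describes above amounts to two short critical sections: one to mask the chip and disable the IRQ line, and one, with host interrupts enabled again, to own the register window while feeding the slow chip. A rough sketch of that shape (example_* fields are hypothetical stand-ins for lib8390's real ei_local state; the real implementation is lib8390's transmit path):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct example_priv {
	spinlock_t page_lock;
	int irqlock;
};

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *ei = netdev_priv(dev);
	unsigned long flags;

	/* 1: briefly hold the page lock to mask the chip's own interrupt
	 *    source and disable (not just mask) the IRQ line; _nosync
	 *    avoids deadlocking on page_lock if a handler is running. */
	spin_lock_irqsave(&ei->page_lock, flags);
	/* ... write the chip's interrupt-mask register here ... */
	disable_irq_nosync(dev->irq);
	spin_unlock_irqrestore(&ei->page_lock, flags);

	/* 2: re-take the lock; once we own it any in-flight handler has
	 *    finished and no new one can arrive.  Host IRQs stay enabled
	 *    while we do the slow programmed-I/O copy to the adapter. */
	spin_lock(&ei->page_lock);
	ei->irqlock = 1;
	/* ... copy skb->data into the adapter, trigger the transmit ... */
	ei->irqlock = 0;
	spin_unlock(&ei->page_lock);

	enable_irq(dev->irq);
	dev_kfree_skb(skb);
	return 0;
}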
CFG file has them all listed as `edge' with share=NO */ if (dev->irq == 0) { diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 6ba6ed2b480a..be25aa33971c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -57,12 +57,12 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/percpu.h> +#include <net/net_namespace.h> struct pcpu_lstats { unsigned long packets; unsigned long bytes; }; -static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats); #define LOOPBACK_OVERHEAD (128 + MAX_HEADER + 16 + 16) @@ -134,7 +134,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) */ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) { - struct pcpu_lstats *lb_stats; + struct pcpu_lstats *pcpu_lstats, *lb_stats; skb_orphan(skb); @@ -154,8 +154,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) #endif dev->last_rx = jiffies; - /* it's OK to use __get_cpu_var() because BHs are off */ - lb_stats = &__get_cpu_var(pcpu_lstats); + /* it's OK to use per_cpu_ptr() because BHs are off */ + pcpu_lstats = netdev_priv(dev); + lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); lb_stats->bytes += skb->len; lb_stats->packets++; @@ -166,15 +167,17 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) static struct net_device_stats *get_stats(struct net_device *dev) { + const struct pcpu_lstats *pcpu_lstats; struct net_device_stats *stats = &dev->stats; unsigned long bytes = 0; unsigned long packets = 0; int i; + pcpu_lstats = netdev_priv(dev); for_each_possible_cpu(i) { const struct pcpu_lstats *lb_stats; - lb_stats = &per_cpu(pcpu_lstats, i); + lb_stats = per_cpu_ptr(pcpu_lstats, i); bytes += lb_stats->bytes; packets += lb_stats->packets; } @@ -192,46 +195,107 @@ static u32 always_on(struct net_device *dev) static const struct ethtool_ops loopback_ethtool_ops = { .get_link = always_on, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_tx_csum = always_on, .get_sg = always_on, .get_rx_csum = always_on, }; +static int loopback_dev_init(struct net_device *dev) +{ + struct pcpu_lstats *lstats; + + lstats = alloc_percpu(struct pcpu_lstats); + if (!lstats) + return -ENOMEM; + + dev->priv = lstats; + return 0; +} + +static void loopback_dev_free(struct net_device *dev) +{ + struct pcpu_lstats *lstats = netdev_priv(dev); + + free_percpu(lstats); + free_netdev(dev); +} + /* - * The loopback device is special. There is only one instance and - * it is statically allocated. Don't do this for other devices. + * The loopback device is special. There is only one instance + * per network namespace. 
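The loopback statistics above move from a single static DEFINE_PER_CPU variable to a per-device area from alloc_percpu(): the transmit path increments only the local CPU's slot (safe because BHs are off), and get_stats() folds every possible CPU into the totals. A minimal sketch of the same pattern for a hypothetical counter pair:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

struct example_lstats {
	unsigned long packets;
	unsigned long bytes;
};

/* hot path: caller has BHs off, so smp_processor_id() is stable and
 * each CPU only ever touches its own slot -- no locking needed */
static void example_count(struct example_lstats *stats, unsigned int len)
{
	struct example_lstats *s = per_cpu_ptr(stats, smp_processor_id());

	s->packets++;
	s->bytes += len;
}

/* slow path: fold every possible CPU's slot into one total */
static unsigned long example_total_packets(struct example_lstats *stats)
{
	unsigned long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += per_cpu_ptr(stats, i)->packets;
	return sum;
}

The area itself would come from alloc_percpu(struct example_lstats) at device init and be released with free_percpu(), which is exactly the job loopback_dev_init()/loopback_dev_free() take on above.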
*/ -struct net_device loopback_dev = { - .name = "lo", - .get_stats = &get_stats, - .mtu = (16 * 1024) + 20 + 20 + 12, - .hard_start_xmit = loopback_xmit, - .hard_header = eth_header, - .hard_header_cache = eth_header_cache, - .header_cache_update = eth_header_cache_update, - .hard_header_len = ETH_HLEN, /* 14 */ - .addr_len = ETH_ALEN, /* 6 */ - .tx_queue_len = 0, - .type = ARPHRD_LOOPBACK, /* 0x0001*/ - .rebuild_header = eth_rebuild_header, - .flags = IFF_LOOPBACK, - .features = NETIF_F_SG | NETIF_F_FRAGLIST +static void loopback_setup(struct net_device *dev) +{ + dev->get_stats = &get_stats; + dev->mtu = (16 * 1024) + 20 + 20 + 12; + dev->hard_start_xmit = loopback_xmit; + dev->hard_header_len = ETH_HLEN; /* 14 */ + dev->addr_len = ETH_ALEN; /* 6 */ + dev->tx_queue_len = 0; + dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ + dev->flags = IFF_LOOPBACK; + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST #ifdef LOOPBACK_TSO - | NETIF_F_TSO + | NETIF_F_TSO #endif - | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA - | NETIF_F_LLTX, - .ethtool_ops = &loopback_ethtool_ops, -}; + | NETIF_F_NO_CSUM + | NETIF_F_HIGHDMA + | NETIF_F_LLTX + | NETIF_F_NETNS_LOCAL, + dev->ethtool_ops = &loopback_ethtool_ops; + dev->header_ops = ð_header_ops; + dev->init = loopback_dev_init; + dev->destructor = loopback_dev_free; +} /* Setup and register the loopback device. */ -static int __init loopback_init(void) +static __net_init int loopback_net_init(struct net *net) { - return register_netdev(&loopback_dev); + struct net_device *dev; + int err; + + err = -ENOMEM; + dev = alloc_netdev(0, "lo", loopback_setup); + if (!dev) + goto out; + + dev->nd_net = net; + err = register_netdev(dev); + if (err) + goto out_free_netdev; + + err = 0; + net->loopback_dev = dev; + +out: + if (err) + panic("loopback: Failed to register netdevice: %d\n", err); + return err; + +out_free_netdev: + free_netdev(dev); + goto out; +} + +static __net_exit void loopback_net_exit(struct net *net) +{ + struct net_device *dev = net->loopback_dev; + + unregister_netdev(dev); +} + +static struct pernet_operations __net_initdata loopback_net_ops = { + .init = loopback_net_init, + .exit = loopback_net_exit, }; -module_init(loopback_init); +static int __init loopback_init(void) +{ + return register_pernet_device(&loopback_net_ops); +} -EXPORT_SYMBOL(loopback_dev); +/* Loopback is special. It should be initialized before any other network + * device and network subsystem. 
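register_pernet_device() is what turns the old single static loopback into one instance per namespace: the .init hook runs for each struct net as it comes into existence (including init_net at boot) and .exit runs at namespace teardown. A minimal sketch of that registration pattern for a hypothetical per-namespace object (example_* names are placeholders):

#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	/* allocate or register whatever this namespace needs */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* undo example_net_init() for this namespace */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_device(&example_net_ops);
}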
+ */ +fs_initcall(loopback_init); diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 5fc18da1873d..c5095ecd8b11 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c @@ -350,7 +350,6 @@ struct i596_private { /* aligned to a 16-byte boundary */ struct i596_cmd *cmd_head; int cmd_backlog; unsigned long last_cmd; - struct net_device_stats stats; spinlock_t cmd_lock; }; @@ -381,7 +380,6 @@ static int i596_open(struct net_device *dev); static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); -static struct net_device_stats *i596_get_stats(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void print_eth(char *); static void set_multicast_list(struct net_device *dev); @@ -515,8 +513,6 @@ CLEAR_INT(void) { outb(0, IOADDR+8); } -#define SIZE(x) (sizeof(x)/sizeof((x)[0])) - #if 0 /* selftest or dump */ static void @@ -532,7 +528,7 @@ i596_port_do(struct net_device *dev, int portcmd, char *cmdname) { mdelay(30); /* random, unmotivated */ printk("lp486e i82596 %s result:\n", cmdname); - for (m = SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) + for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) ; for (i = 0; i < m; i++) { printk(" %04x", lp->dump.dump[i]); @@ -672,7 +668,7 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, if (skb == NULL) { printk ("%s: i596_rx Memory squeeze, " "dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return 1; } @@ -681,27 +677,27 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } else { #if 0 printk("Frame reception error status %04x\n", rfd->stat); #endif - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rfd->stat & RFD_COLLISION) - lp->stats.collisions++; + dev->stats.collisions++; if (rfd->stat & RFD_SHORT_FRAME_ERR) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rfd->stat & RFD_DMA_ERR) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (rfd->stat & RFD_NOBUFS_ERR) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (rfd->stat & RFD_ALIGN_ERR) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rfd->stat & RFD_CRC_ERR) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rfd->stat & RFD_LENGTH_ERR) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } rfd->stat = rfd->count = 0; return 0; @@ -757,8 +753,8 @@ i596_cleanup_cmd(struct net_device *dev) { dev_kfree_skb_any(tx_cmd_tbd->skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; cmd->pa_next = I596_NULL; kfree((unsigned char *)tx_cmd); @@ -869,7 +865,6 @@ static int i596_open(struct net_device *dev) } static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { - struct i596_private *lp = dev->priv; struct tx_cmd *tx_cmd; short length; @@ -886,7 +881,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); if (tx_cmd == NULL) { printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb (skb); } else { struct i596_tbd *tx_cmd_tbd; @@ 
-909,7 +904,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); - lp->stats.tx_packets++; + dev->stats.tx_packets++; } return 0; @@ -922,10 +917,10 @@ i596_tx_timeout (struct net_device *dev) { /* Transmitter timeout, serious problems. */ printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adaptor */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { printk ("Resetting board.\n"); /* Shutdown and restart */ @@ -935,7 +930,7 @@ i596_tx_timeout (struct net_device *dev) { printk ("Kicking board.\n"); lp->scb.command = (CUC_START | RX_START); CA(); - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } netif_wake_queue(dev); } @@ -1023,7 +1018,6 @@ static int __init lp486e_probe(struct net_device *dev) { dev->open = &i596_open; dev->stop = &i596_close; dev->hard_start_xmit = &i596_start_xmit; - dev->get_stats = &i596_get_stats; dev->set_multicast_list = &set_multicast_list; dev->watchdog_timeo = 5*HZ; dev->tx_timeout = i596_tx_timeout; @@ -1080,20 +1074,20 @@ i596_handle_CU_completion(struct net_device *dev, if (i596_debug) print_eth(pa_to_va(tx_cmd_tbd->pa_data)); } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (i596_debug) printk("transmission failure:%04x\n", cmd->status); if (cmd->status & 0x0020) - lp->stats.collisions++; + dev->stats.collisions++; if (!(cmd->status & 0x0040)) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (cmd->status & 0x0400) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (cmd->status & 0x0800) - lp->stats.collisions++; + dev->stats.collisions++; if (cmd->status & 0x1000) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } dev_kfree_skb_irq(tx_cmd_tbd->skb); @@ -1244,12 +1238,6 @@ static int i596_close(struct net_device *dev) { return 0; } -static struct net_device_stats * i596_get_stats(struct net_device *dev) { - struct i596_private *lp = dev->priv; - - return &lp->stats; -} - /* * Set or clear the multicast filter for this adaptor. */ diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index 90b0c3ed4bb6..9e700749bb31 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c @@ -313,8 +313,6 @@ struct net_device * __init mac8390_probe(int unit) if (unit >= 0) sprintf(dev->name, "eth%d", unit); - SET_MODULE_OWNER(dev); - while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) { /* Have we seen it already? */ if (slots & (1<<ndev->board->slot)) diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 26a3b45a4a34..30854f094965 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -181,6 +181,7 @@ struct net_device * __init mac89x0_probe(int unit) unsigned long ioaddr; unsigned short sig; int err = -ENODEV; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) @@ -191,8 +192,6 @@ struct net_device * __init mac89x0_probe(int unit) netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); - if (once_is_enough) goto out; once_is_enough = 1; @@ -274,13 +273,11 @@ struct net_device * __init mac89x0_probe(int unit) } dev->irq = SLOT2IRQ(slot); - printk(" IRQ %d ADDR ", dev->irq); - /* print the ethernet address. */ - for (i = 0; i < ETH_ALEN; i++) - printk("%2.2x%s", dev->dev_addr[i], - ((i < ETH_ALEN-1) ? 
":" : "")); - printk("\n"); + /* print the IRQ and ethernet address. */ + + printk(" IRQ %d ADDR %s\n", + dev->irq, print_mac(mac, dev->dev_addr)); dev->open = net_open; dev->stop = net_close; @@ -608,7 +605,7 @@ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)"); MODULE_LICENSE("GPL"); -int +int __init init_module(void) { net_debug = debug; diff --git a/drivers/net/macb.c b/drivers/net/macb.c index a4bb0264180a..047ea7be4850 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -470,47 +470,41 @@ static int macb_rx(struct macb *bp, int budget) return received; } -static int macb_poll(struct net_device *dev, int *budget) +static int macb_poll(struct napi_struct *napi, int budget) { - struct macb *bp = netdev_priv(dev); - int orig_budget, work_done, retval = 0; + struct macb *bp = container_of(napi, struct macb, napi); + struct net_device *dev = bp->dev; + int work_done; u32 status; status = macb_readl(bp, RSR); macb_writel(bp, RSR, status); + work_done = 0; if (!status) { /* * This may happen if an interrupt was pending before * this function was called last time, and no packets * have been received since. */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); goto out; } dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", - (unsigned long)status, *budget); + (unsigned long)status, budget); if (!(status & MACB_BIT(REC))) { dev_warn(&bp->pdev->dev, "No RX buffers complete, status = %02lx\n", (unsigned long)status); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); goto out; } - orig_budget = *budget; - if (orig_budget > dev->quota) - orig_budget = dev->quota; - - work_done = macb_rx(bp, orig_budget); - if (work_done < orig_budget) { - netif_rx_complete(dev); - retval = 0; - } else { - retval = 1; - } + work_done = macb_rx(bp, budget); + if (work_done < budget) + netif_rx_complete(dev, napi); /* * We've done what we can to clean the buffers. Make sure we @@ -521,7 +515,7 @@ out: /* TODO: Handle errors */ - return retval; + return work_done; } static irqreturn_t macb_interrupt(int irq, void *dev_id) @@ -545,7 +539,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_RX_INT_FLAGS) { - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &bp->napi)) { /* * There's no point taking any more interrupts * until we have processed the buffers @@ -553,7 +547,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) macb_writel(bp, IDR, MACB_RX_INT_FLAGS); dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n"); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &bp->napi); } } @@ -937,6 +931,8 @@ static int macb_open(struct net_device *dev) return err; } + napi_enable(&bp->napi); + macb_init_rings(bp); macb_init_hw(bp); @@ -954,6 +950,7 @@ static int macb_close(struct net_device *dev) unsigned long flags; netif_stop_queue(dev); + napi_disable(&bp->napi); if (bp->phy_dev) phy_stop(bp->phy_dev); @@ -1074,6 +1071,7 @@ static int __devinit macb_probe(struct platform_device *pdev) unsigned long pclk_hz; u32 config; int err = -ENXIO; + DECLARE_MAC_BUF(mac); regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { @@ -1088,7 +1086,6 @@ static int __devinit macb_probe(struct platform_device *pdev) goto err_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* TODO: Actually, we have some interesting features... 
*/ @@ -1146,8 +1143,7 @@ static int __devinit macb_probe(struct platform_device *pdev) dev->get_stats = macb_get_stats; dev->set_multicast_list = macb_set_rx_mode; dev->do_ioctl = macb_ioctl; - dev->poll = macb_poll; - dev->weight = 64; + netif_napi_add(dev, &bp->napi, macb_poll, 64); dev->ethtool_ops = &macb_ethtool_ops; dev->base_addr = regs->start; @@ -1195,10 +1191,9 @@ static int __devinit macb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " - "(%02x:%02x:%02x:%02x:%02x:%02x)\n", + "(%s)\n", dev->name, dev->base_addr, dev->irq, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + print_mac(mac, dev->dev_addr)); phydev = bp->phy_dev; printk(KERN_INFO "%s: attached PHY driver [%s] " diff --git a/drivers/net/macb.h b/drivers/net/macb.h index 4e3283ebd97c..57b85acf0d16 100644 --- a/drivers/net/macb.h +++ b/drivers/net/macb.h @@ -374,6 +374,7 @@ struct macb { struct clk *pclk; struct clk *hclk; struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; struct macb_stats hw_stats; diff --git a/drivers/net/mace.c b/drivers/net/mace.c index 52b9332810c5..95ebe72f320f 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c @@ -57,7 +57,6 @@ struct mace_data { unsigned char tx_fullup; unsigned char tx_active; unsigned char tx_bad_runt; - struct net_device_stats stats; struct timer_list tx_timeout; int timeout_active; int port_aaui; @@ -78,7 +77,6 @@ struct mace_data { static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *mace_stats(struct net_device *dev); static void mace_set_multicast(struct net_device *dev); static void mace_reset(struct net_device *dev); static int mace_set_address(struct net_device *dev, void *addr); @@ -103,6 +101,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i struct mace_data *mp; const unsigned char *addr; int j, rev, rc = -EBUSY; + DECLARE_MAC_BUF(mac); if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", @@ -143,7 +142,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i rc = -ENOMEM; goto err_release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); mp = dev->priv; @@ -189,7 +187,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; - memset(&mp->stats, 0, sizeof(mp->stats)); memset((char *) mp->tx_cmds, 0, (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); init_timer(&mp->tx_timeout); @@ -214,7 +211,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i dev->open = mace_open; dev->stop = mace_close; dev->hard_start_xmit = mace_xmit_start; - dev->get_stats = mace_stats; dev->set_multicast_list = mace_set_multicast; dev->set_mac_address = mace_set_address; @@ -245,11 +241,9 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i goto err_free_rx_irq; } - printk(KERN_INFO "%s: MACE at", dev->name); - for (j = 0; j < 6; ++j) { - printk("%c%.2x", (j? 
':': ' '), dev->dev_addr[j]); - } - printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff); + printk(KERN_INFO "%s: MACE at %s, chip revision %d.%d\n", + dev->name, print_mac(mac, dev->dev_addr), + mp->chipid >> 8, mp->chipid & 0xff); return 0; @@ -585,13 +579,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *mace_stats(struct net_device *dev) -{ - struct mace_data *p = (struct mace_data *) dev->priv; - - return &p->stats; -} - static void mace_set_multicast(struct net_device *dev) { struct mace_data *mp = (struct mace_data *) dev->priv; @@ -645,19 +632,19 @@ static void mace_set_multicast(struct net_device *dev) spin_unlock_irqrestore(&mp->lock, flags); } -static void mace_handle_misc_intrs(struct mace_data *mp, int intr) +static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) { volatile struct mace __iomem *mb = mp->mace; static int mace_babbles, mace_jabbers; if (intr & MPCO) - mp->stats.rx_missed_errors += 256; - mp->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ + dev->stats.rx_missed_errors += 256; + dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ if (intr & RNTPCO) - mp->stats.rx_length_errors += 256; - mp->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ + dev->stats.rx_length_errors += 256; + dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ if (intr & CERR) - ++mp->stats.tx_heartbeat_errors; + ++dev->stats.tx_heartbeat_errors; if (intr & BABBLE) if (mace_babbles++ < 4) printk(KERN_DEBUG "mace: babbling transmitter\n"); @@ -681,7 +668,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) spin_lock_irqsave(&mp->lock, flags); intr = in_8(&mb->ir); /* read interrupt register */ in_8(&mb->xmtrc); /* get retries */ - mace_handle_misc_intrs(mp, intr); + mace_handle_misc_intrs(mp, intr, dev); i = mp->tx_empty; while (in_8(&mb->pr) & XMTSV) { @@ -694,7 +681,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) */ intr = in_8(&mb->ir); if (intr != 0) - mace_handle_misc_intrs(mp, intr); + mace_handle_misc_intrs(mp, intr, dev); if (mp->tx_bad_runt) { fs = in_8(&mb->xmtfs); mp->tx_bad_runt = 0; @@ -768,14 +755,14 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) } /* Update stats */ if (fs & (UFLO|LCOL|LCAR|RTRY)) { - ++mp->stats.tx_errors; + ++dev->stats.tx_errors; if (fs & LCAR) - ++mp->stats.tx_carrier_errors; + ++dev->stats.tx_carrier_errors; if (fs & (UFLO|LCOL|RTRY)) - ++mp->stats.tx_aborted_errors; + ++dev->stats.tx_aborted_errors; } else { - mp->stats.tx_bytes += mp->tx_bufs[i]->len; - ++mp->stats.tx_packets; + dev->stats.tx_bytes += mp->tx_bufs[i]->len; + ++dev->stats.tx_packets; } dev_kfree_skb_irq(mp->tx_bufs[i]); --mp->tx_active; @@ -829,7 +816,7 @@ static void mace_tx_timeout(unsigned long data) goto out; /* update various counters */ - mace_handle_misc_intrs(mp, in_8(&mb->ir)); + mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; @@ -849,7 +836,7 @@ static void mace_tx_timeout(unsigned long data) /* fix up the transmit side */ i = mp->tx_empty; mp->tx_active = 0; - ++mp->stats.tx_errors; + ++dev->stats.tx_errors; if (mp->tx_bad_runt) { mp->tx_bad_runt = 0; } else if (i != mp->tx_fill) { @@ -917,18 +904,18 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) /* got a packet, have a look at it */ skb = mp->rx_bufs[i]; if (skb == 0) { - ++mp->stats.rx_dropped; + ++dev->stats.rx_dropped; } else if (nb > 8) 
{ data = skb->data; frame_status = (data[nb-3] << 8) + data[nb-4]; if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) { - ++mp->stats.rx_errors; + ++dev->stats.rx_errors; if (frame_status & RS_OFLO) - ++mp->stats.rx_over_errors; + ++dev->stats.rx_over_errors; if (frame_status & RS_FRAMERR) - ++mp->stats.rx_frame_errors; + ++dev->stats.rx_frame_errors; if (frame_status & RS_FCSERR) - ++mp->stats.rx_crc_errors; + ++dev->stats.rx_crc_errors; } else { /* Mace feature AUTO_STRIP_RCV is on by default, dropping the * FCS on frames with 802.3 headers. This means that Ethernet @@ -940,15 +927,15 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) nb -= 8; skb_put(skb, nb); skb->protocol = eth_type_trans(skb, dev); - mp->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += skb->len; netif_rx(skb); dev->last_rx = jiffies; mp->rx_bufs[i] = NULL; - ++mp->stats.rx_packets; + ++dev->stats.rx_packets; } } else { - ++mp->stats.rx_errors; - ++mp->stats.rx_length_errors; + ++dev->stats.rx_errors; + ++dev->stats.rx_length_errors; } /* advance to next */ diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 9a343b965975..6589239b79ee 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -65,7 +65,6 @@ struct mace_data { unsigned char *rx_ring; dma_addr_t rx_ring_phys; int dma_intr; - struct net_device_stats stats; int rx_slot, rx_tail; int tx_slot, tx_sloti, tx_count; int chipid; @@ -92,7 +91,6 @@ struct mace_frame { static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *mace_stats(struct net_device *dev); static void mace_set_multicast(struct net_device *dev); static int mace_set_address(struct net_device *dev, void *addr); static void mace_reset(struct net_device *dev); @@ -196,6 +194,7 @@ static int __devinit mace_probe(struct platform_device *pdev) unsigned char checksum = 0; static int found = 0; int err; + DECLARE_MAC_BUF(mac); if (found || macintosh_config->ether_type != MAC_ETHER_MACE) return -ENODEV; @@ -210,7 +209,6 @@ static int __devinit mace_probe(struct platform_device *pdev) mp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); - SET_MODULE_OWNER(dev); dev->base_addr = (u32)MACE_BASE; mp->mace = (volatile struct mace *) MACE_BASE; @@ -243,20 +241,16 @@ static int __devinit mace_probe(struct platform_device *pdev) return -ENODEV; } - memset(&mp->stats, 0, sizeof(mp->stats)); - dev->open = mace_open; dev->stop = mace_close; dev->hard_start_xmit = mace_xmit_start; dev->tx_timeout = mace_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - dev->get_stats = mace_stats; dev->set_multicast_list = mace_set_multicast; dev->set_mac_address = mace_set_address; - printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]); - for (j = 1 ; j < 6 ; j++) printk(":%.2X", dev->dev_addr[j]); - printk("\n"); + printk(KERN_INFO "%s: 68K MACE, hardware address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); err = register_netdev(dev); if (!err) @@ -473,8 +467,8 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) mp->tx_count--; local_irq_restore(flags); - mp->stats.tx_packets++; - mp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; /* We need to copy into our xmit buffer to take care of alignment and caching issues */ skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); @@ -493,12 +487,6 @@ static int mace_xmit_start(struct sk_buff *skb, 
struct net_device *dev) return NETDEV_TX_OK; } -static struct net_device_stats *mace_stats(struct net_device *dev) -{ - struct mace_data *mp = netdev_priv(dev); - return &mp->stats; -} - static void mace_set_multicast(struct net_device *dev) { struct mace_data *mp = netdev_priv(dev); @@ -556,13 +544,13 @@ static void mace_handle_misc_intrs(struct mace_data *mp, int intr) static int mace_babbles, mace_jabbers; if (intr & MPCO) - mp->stats.rx_missed_errors += 256; - mp->stats.rx_missed_errors += mb->mpc; /* reading clears it */ + dev->stats.rx_missed_errors += 256; + dev->stats.rx_missed_errors += mb->mpc; /* reading clears it */ if (intr & RNTPCO) - mp->stats.rx_length_errors += 256; - mp->stats.rx_length_errors += mb->rntpc; /* reading clears it */ + dev->stats.rx_length_errors += 256; + dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */ if (intr & CERR) - ++mp->stats.tx_heartbeat_errors; + ++dev->stats.tx_heartbeat_errors; if (intr & BABBLE) if (mace_babbles++ < 4) printk(KERN_DEBUG "macmace: babbling transmitter\n"); @@ -601,14 +589,14 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) } /* Update stats */ if (fs & (UFLO|LCOL|LCAR|RTRY)) { - ++mp->stats.tx_errors; + ++dev->stats.tx_errors; if (fs & LCAR) - ++mp->stats.tx_carrier_errors; + ++dev->stats.tx_carrier_errors; else if (fs & (UFLO|LCOL|RTRY)) { - ++mp->stats.tx_aborted_errors; + ++dev->stats.tx_aborted_errors; if (mb->xmtfs & UFLO) { printk(KERN_ERR "%s: DMA underrun.\n", dev->name); - mp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; mace_txdma_reset(dev); } } @@ -662,23 +650,23 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) unsigned int frame_status = mf->rcvsts; if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) { - mp->stats.rx_errors++; + dev->stats.rx_errors++; if (frame_status & RS_OFLO) { printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name); - mp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } if (frame_status & RS_CLSN) - mp->stats.collisions++; + dev->stats.collisions++; if (frame_status & RS_FRAMERR) - mp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (frame_status & RS_FCSERR) - mp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else { unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 ); skb = dev_alloc_skb(frame_length + 2); if (!skb) { - mp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_reserve(skb, 2); @@ -687,8 +675,8 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - mp->stats.rx_packets++; - mp->stats.rx_bytes += frame_length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += frame_length; } } diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index e9ecdbf352ae..b267161418ea 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -223,6 +223,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) struct sonic_local *lp = netdev_priv(dev); const int prom_addr = ONBOARD_SONIC_PROM_BASE; int i; + DECLARE_MAC_BUF(mac); /* On NuBus boards we can sometimes look in the ROM resources. No such luck for comm-slot/onboard. 
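The address printing in macmace/macsonic (and most drivers in this series) now goes through one helper pair: DECLARE_MAC_BUF() reserves an 18-byte stack buffer and print_mac() formats the six octets into it as a colon-separated string, so a single %s replaces six %02x (or %2.2x) arguments. A minimal sketch of the pattern (example_show_hwaddr() is hypothetical):

#include <linux/kernel.h>
#include <linux/if_ether.h>	/* DECLARE_MAC_BUF(), print_mac() */
#include <linux/netdevice.h>

static void example_show_hwaddr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);

	printk(KERN_INFO "%s: hardware address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}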
*/ @@ -266,13 +267,8 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) dev->dev_addr[1] = val >> 8; dev->dev_addr[0] = val & 0xff; - printk(KERN_INFO "HW Address from CAM 15: "); - for (i = 0; i < 6; i++) { - printk("%2.2x", dev->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk("\n"); + printk(KERN_INFO "HW Address from CAM 15: %s\n", + print_mac(mac, dev->dev_addr)); } else return 0; if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && @@ -567,7 +563,7 @@ static int __init mac_sonic_probe(struct platform_device *pdev) struct net_device *dev; struct sonic_local *lp; int err; - int i; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct sonic_local)); if (!dev) @@ -576,7 +572,6 @@ static int __init mac_sonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); - SET_MODULE_OWNER(dev); /* This will catch fatal stuff like -ENOMEM as well as success */ err = mac_onboard_sonic_probe(dev); @@ -592,13 +587,8 @@ found: if (err) goto out; - printk("%s: MAC ", dev->name); - for (i = 0; i < 6; i++) { - printk("%2.2x", dev->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk(" IRQ %d\n", dev->irq); + printk("%s: MAC %s IRQ %d\n", + dev->name, print_mac(mac, dev->dev_addr), dev->irq); return 0; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index dc74d006e01f..b7c81c874f7a 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -164,16 +164,25 @@ static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - return lowerdev->hard_header(skb, lowerdev, type, daddr, - saddr ? : dev->dev_addr, len); + return dev_hard_header(skb, lowerdev, type, daddr, + saddr ? 
: dev->dev_addr, len); } +static const struct header_ops macvlan_hard_header_ops = { + .create = macvlan_hard_header, + .rebuild = eth_rebuild_header, + .parse = eth_header_parse, + .rebuild = eth_rebuild_header, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; + static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -282,10 +291,6 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev) static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_rx_csum = macvlan_ethtool_get_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_tso = ethtool_op_get_tso, - .get_ufo = ethtool_op_get_ufo, - .get_sg = ethtool_op_get_sg, .get_drvinfo = macvlan_ethtool_get_drvinfo, }; @@ -299,9 +304,9 @@ static void macvlan_setup(struct net_device *dev) dev->change_mtu = macvlan_change_mtu; dev->change_rx_flags = macvlan_change_rx_flags; dev->set_multicast_list = macvlan_set_multicast_list; - dev->hard_header = macvlan_hard_header; dev->hard_start_xmit = macvlan_hard_start_xmit; dev->destructor = free_netdev; + dev->header_ops = &macvlan_hard_header_ops, dev->ethtool_ops = &macvlan_ethtool_ops; dev->tx_queue_len = 0; } @@ -376,7 +381,7 @@ static int macvlan_newlink(struct net_device *dev, if (!tb[IFLA_LINK]) return -EINVAL; - lowerdev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK])); + lowerdev = __dev_get_by_index(dev->nd_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 92b403bf38b0..e25dbab67363 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -66,7 +66,6 @@ module_param(timeout, int, 0); * packets in and out, so there is place for a packet */ struct meth_private { - struct net_device_stats stats; /* in-memory copy of MAC Control register */ unsigned long mac_ctrl; /* in-memory copy of DMA Control register */ @@ -96,11 +95,11 @@ char o2meth_eaddr[8]={0,0,0,0,0,0,0,0}; static inline void load_eaddr(struct net_device *dev) { int i; - DPRINTK("Loading MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", - (int)o2meth_eaddr[0]&0xFF,(int)o2meth_eaddr[1]&0xFF,(int)o2meth_eaddr[2]&0xFF, - (int)o2meth_eaddr[3]&0xFF,(int)o2meth_eaddr[4]&0xFF,(int)o2meth_eaddr[5]&0xFF); + DECLARE_MAC_BUF(mac); + for (i = 0; i < 6; i++) dev->dev_addr[i] = o2meth_eaddr[i]; + DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); mace->eth.mac_addr = (*(unsigned long*)o2meth_eaddr) >> 16; } @@ -401,15 +400,15 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2lx.\n", dev->name, priv->rx_write, priv->rx_ring[priv->rx_write]->status.raw); - priv->stats.rx_errors++; - priv->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; skb = priv->rx_skbs[priv->rx_write]; } else { - skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC | GFP_DMA); + skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); if (!skb) { /* Ouch! No memory! 
Drop packet on the floor */ DPRINTK("No mem: dropping packet\n"); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; skb = priv->rx_skbs[priv->rx_write]; } else { struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; @@ -421,13 +420,13 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) priv->rx_skbs[priv->rx_write] = skb; skb_c->protocol = eth_type_trans(skb_c, dev); dev->last_rx = jiffies; - priv->stats.rx_packets++; - priv->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; netif_rx(skb_c); } } } else { - priv->stats.rx_errors++; + dev->stats.rx_errors++; skb=priv->rx_skbs[priv->rx_write]; #if MFE_DEBUG>0 printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); @@ -490,10 +489,10 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) #endif if (status & METH_TX_ST_DONE) { if (status & METH_TX_ST_SUCCESS){ - priv->stats.tx_packets++; - priv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; } else { - priv->stats.tx_errors++; + dev->stats.tx_errors++; #if MFE_DEBUG>=1 DPRINTK("TX error: status=%016lx <",status); if(status & METH_TX_ST_SUCCESS) @@ -734,7 +733,7 @@ static void meth_tx_timeout(struct net_device *dev) /* Try to reset the interface. */ meth_reset(dev); - priv->stats.tx_errors++; + dev->stats.tx_errors++; /* Clear all rings */ meth_free_tx_ring(priv); @@ -773,12 +772,6 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) /* * Return statistics to the caller */ -static struct net_device_stats *meth_stats(struct net_device *dev) -{ - struct meth_private *priv = netdev_priv(dev); - return &priv->stats; -} - /* * The init function. */ @@ -796,7 +789,6 @@ static int __init meth_probe(struct platform_device *pdev) dev->stop = meth_release; dev->hard_start_xmit = meth_tx; dev->do_ioctl = meth_ioctl; - dev->get_stats = meth_stats; #ifdef HAVE_TX_TIMEOUT dev->tx_timeout = meth_tx_timeout; dev->watchdog_timeo = timeout; diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index 9853c74f6bbf..d593175ab6f0 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c @@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/netdevice.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/mips-boards/simint.h> @@ -22,10 +21,6 @@ #define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) -struct mipsnet_priv { - struct net_device_stats stats; -}; - static char mipsnet_string[] = "mipsnet"; /* @@ -50,7 +45,6 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, { int count_to_go = skb->len; char *buf_ptr = skb->data; - struct mipsnet_priv *mp = netdev_priv(dev); pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", dev->name, __FUNCTION__, skb->len); @@ -64,8 +58,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); } - mp->stats.tx_packets++; - mp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; return skb->len; } @@ -88,10 +82,9 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) { struct sk_buff *skb; size_t len = count; - struct mipsnet_priv *mp = netdev_priv(dev); if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { - mp->stats.rx_dropped++; + dev->stats.rx_dropped++; return -ENOMEM; } @@ -106,8 +99,8 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t 
count) dev->name, __FUNCTION__); netif_rx(skb); - mp->stats.rx_packets++; - mp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; return count; } @@ -204,13 +197,6 @@ static int mipsnet_close(struct net_device *dev) return 0; } -static struct net_device_stats *mipsnet_get_stats(struct net_device *dev) -{ - struct mipsnet_priv *mp = netdev_priv(dev); - - return &mp->stats; -} - static void mipsnet_set_mclist(struct net_device *dev) { // we don't do anything @@ -222,7 +208,7 @@ static int __init mipsnet_probe(struct device *dev) struct net_device *netdev; int err; - netdev = alloc_etherdev(sizeof(struct mipsnet_priv)); + netdev = alloc_etherdev(0); if (!netdev) { err = -ENOMEM; goto out; @@ -233,7 +219,6 @@ static int __init mipsnet_probe(struct device *dev) netdev->open = mipsnet_open; netdev->stop = mipsnet_close; netdev->hard_start_xmit = mipsnet_xmit; - netdev->get_stats = mipsnet_get_stats; netdev->set_multicast_list = mipsnet_set_mclist; /* diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c index 1bb088aeaf71..6b32ec94b3a8 100644 --- a/drivers/net/mlx4/catas.c +++ b/drivers/net/mlx4/catas.c @@ -30,41 +30,133 @@ * SOFTWARE. */ +#include <linux/workqueue.h> + #include "mlx4.h" -void mlx4_handle_catas_err(struct mlx4_dev *dev) +enum { + MLX4_CATAS_POLL_INTERVAL = 5 * HZ, +}; + +static DEFINE_SPINLOCK(catas_lock); + +static LIST_HEAD(catas_list); +static struct workqueue_struct *catas_wq; +static struct work_struct catas_work; + +static int internal_err_reset = 1; +module_param(internal_err_reset, int, 0644); +MODULE_PARM_DESC(internal_err_reset, + "Reset device on internal errors if non-zero (default 1)"); + +static void dump_err_buf(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; - mlx4_err(dev, "Catastrophic error detected:\n"); + mlx4_err(dev, "Internal error detected:\n"); for (i = 0; i < priv->fw.catas_size; ++i) mlx4_err(dev, " buf[%02x]: %08x\n", i, swab32(readl(priv->catas_err.map + i))); +} - mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); +static void poll_catas(unsigned long dev_ptr) +{ + struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (readl(priv->catas_err.map)) { + dump_err_buf(dev); + + mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); + + if (internal_err_reset) { + spin_lock(&catas_lock); + list_add(&priv->catas_err.list, &catas_list); + spin_unlock(&catas_lock); + + queue_work(catas_wq, &catas_work); + } + } else + mod_timer(&priv->catas_err.timer, + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); } -void mlx4_map_catas_buf(struct mlx4_dev *dev) +static void catas_reset(struct work_struct *work) +{ + struct mlx4_priv *priv, *tmppriv; + struct mlx4_dev *dev; + + LIST_HEAD(tlist); + int ret; + + spin_lock_irq(&catas_lock); + list_splice_init(&catas_list, &tlist); + spin_unlock_irq(&catas_lock); + + list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { + ret = mlx4_restart_one(priv->dev.pdev); + dev = &priv->dev; + if (ret) + mlx4_err(dev, "Reset failed (%d)\n", ret); + else + mlx4_dbg(dev, "Reset succeeded\n"); + } +} + +void mlx4_start_catas_poll(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); unsigned long addr; + INIT_LIST_HEAD(&priv->catas_err.list); + init_timer(&priv->catas_err.timer); + priv->catas_err.map = NULL; + addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + priv->fw.catas_offset; priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); - if 
(!priv->catas_err.map) - mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n", + if (!priv->catas_err.map) { + mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", addr); + return; + } + priv->catas_err.timer.data = (unsigned long) dev; + priv->catas_err.timer.function = poll_catas; + priv->catas_err.timer.expires = + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); + add_timer(&priv->catas_err.timer); } -void mlx4_unmap_catas_buf(struct mlx4_dev *dev) +void mlx4_stop_catas_poll(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + del_timer_sync(&priv->catas_err.timer); + if (priv->catas_err.map) iounmap(priv->catas_err.map); + + spin_lock_irq(&catas_lock); + list_del(&priv->catas_err.list); + spin_unlock_irq(&catas_lock); +} + +int __init mlx4_catas_init(void) +{ + INIT_WORK(&catas_work, catas_reset); + + catas_wq = create_singlethread_workqueue("mlx4_err"); + if (!catas_wq) + return -ENOMEM; + + return 0; +} + +void mlx4_catas_cleanup(void) +{ + destroy_workqueue(catas_wq); } diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c index c1f81a993f5d..db49051b97b1 100644 --- a/drivers/net/mlx4/cmd.c +++ b/drivers/net/mlx4/cmd.c @@ -95,7 +95,7 @@ enum { }; enum { - GO_BIT_TIMEOUT = 10000 + GO_BIT_TIMEOUT_MSECS = 10000 }; struct mlx4_cmd_context { @@ -155,7 +155,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, end = jiffies; if (event) - end += HZ * 10; + end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); while (cmd_pending(dev)) { if (time_after_eq(jiffies, end)) @@ -184,6 +184,13 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, (event ? (1 << HCR_E_BIT) : 0) | (op_modifier << HCR_OPMOD_SHIFT) | op), hcr + 6); + + /* + * Make sure that our HCR writes don't get mixed in with + * writes from another CPU starting a FW command. 
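The internal-error rework above splits the handling by context: poll_catas() runs from a timer, so it only reads the mapped error buffer and either re-arms itself or puts the device on a list and kicks a single-threaded workqueue; catas_reset() then does the sleepy work (mlx4_restart_one()) from process context. A minimal sketch of that defer-from-timer pattern, all example_* names hypothetical:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_reset_work;
static struct timer_list example_timer;

static int example_hw_has_error(void)
{
	return 0;	/* hypothetical: peek at the mapped error buffer */
}

static void example_reset(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, re-probe the device */
}

static void example_poll(unsigned long data)
{
	if (example_hw_has_error())
		queue_work(example_wq, &example_reset_work);
	else
		mod_timer(&example_timer, round_jiffies(jiffies + 5 * HZ));
}

static int example_setup(void)
{
	example_wq = create_singlethread_workqueue("example_err");
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_reset_work, example_reset);
	setup_timer(&example_timer, example_poll, 0);
	mod_timer(&example_timer, round_jiffies(jiffies + 5 * HZ));
	return 0;
}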
+ */ + mmiowb(); + cmd->toggle = cmd->toggle ^ 1; ret = 0; @@ -246,8 +253,6 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) context->result = mlx4_status_to_errno(status); context->out_param = out_param; - context->token += priv->cmd.token_mask + 1; - complete(&context->done); } @@ -264,6 +269,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, spin_lock(&cmd->context_lock); BUG_ON(cmd->free_head < 0); context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; cmd->free_head = context->next; spin_unlock(&cmd->context_lock); diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index 39253d0c1590..d4441fee3d80 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c @@ -231,7 +231,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) } EXPORT_SYMBOL_GPL(mlx4_cq_free); -int __devinit mlx4_init_cq_table(struct mlx4_dev *dev) +int mlx4_init_cq_table(struct mlx4_dev *dev) { struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; int err; diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 27a82cecd693..9c36c2034030 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -89,14 +89,12 @@ struct mlx4_eq_context { (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ - (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ (1ull << MLX4_EVENT_TYPE_CMD)) -#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) struct mlx4_eqe { u8 reserved1; @@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); return IRQ_RETVAL(work); @@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) return IRQ_HANDLED; } -static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr) -{ - mlx4_handle_catas_err(dev_ptr); - - /* MSI-X vectors always belong to us */ - return IRQ_HANDLED; -} - static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, int eq_num) { @@ -310,8 +300,7 @@ static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, MLX4_CMD_TIME_CLASS_A); } -static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, - struct mlx4_eq *eq) +static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); int index; @@ -333,8 +322,8 @@ static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); } -static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent, - u8 intr, struct mlx4_eq *eq) +static int mlx4_create_eq(struct mlx4_dev *dev, int nent, + u8 intr, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_mailbox *mailbox; @@ -490,14 +479,12 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) if (eq_table->have_irq) free_irq(dev->pdev->irq, dev); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) if (eq_table->eq[i].have_irq) free_irq(eq_table->eq[i].irq, eq_table->eq + i); - if (eq_table->eq[MLX4_EQ_CATAS].have_irq) - 
free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev); } -static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev) +static int mlx4_map_clr_int(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -518,7 +505,7 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev) iounmap(priv->clr_base); } -int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) +int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) { struct mlx4_priv *priv = mlx4_priv(dev); int ret; @@ -560,7 +547,7 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev) __free_page(priv->eq_table.icm_page); } -int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) +int mlx4_init_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; @@ -598,32 +585,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) if (dev->flags & MLX4_FLAG_MSI_X) { static const char *eq_name[] = { [MLX4_EQ_COMP] = DRV_NAME " (comp)", - [MLX4_EQ_ASYNC] = DRV_NAME " (async)", - [MLX4_EQ_CATAS] = DRV_NAME " (catas)" + [MLX4_EQ_ASYNC] = DRV_NAME " (async)" }; - err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS, - &priv->eq_table.eq[MLX4_EQ_CATAS]); - if (err) - goto err_out_async; - - for (i = 0; i < MLX4_EQ_CATAS; ++i) { + for (i = 0; i < MLX4_NUM_EQ; ++i) { err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0, eq_name[i], priv->eq_table.eq + i); if (err) - goto err_out_catas; + goto err_out_async; priv->eq_table.eq[i].have_irq = 1; } - err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq, - mlx4_catas_interrupt, 0, - eq_name[MLX4_EQ_CATAS], dev); - if (err) - goto err_out_catas; - - priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1; } else { err = request_irq(dev->pdev->irq, mlx4_interrupt, IRQF_SHARED, DRV_NAME, dev); @@ -639,22 +613,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) eq_set_ci(&priv->eq_table.eq[i], 1); - if (dev->flags & MLX4_FLAG_MSI_X) { - err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0, - priv->eq_table.eq[MLX4_EQ_CATAS].eqn); - if (err) - mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n", - priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err); - } - return 0; -err_out_catas: - mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); - err_out_async: mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); @@ -675,19 +638,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i; - if (dev->flags & MLX4_FLAG_MSI_X) - mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1, - priv->eq_table.eq[MLX4_EQ_CATAS].eqn); - mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); mlx4_free_irqs(dev); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); - if (dev->flags & MLX4_FLAG_MSI_X) - mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); mlx4_unmap_clr_int(dev); diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index c45cbe43a0c4..6471d33afb7d 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -76,7 +76,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) [ 0] = "RC transport", [ 1] = "UC transport", [ 2] = "UD transport", - [ 3] = "SRC transport", + [ 3] = "XRC transport", [ 4] = "reliable multicast", [ 5] = "FCoIB support", [ 6] = "SRQ support", diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index b7a4aa8476fb..4b3c109d5eae 100644 --- 
a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c @@ -34,6 +34,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/mm.h> +#include <linux/scatterlist.h> #include <linux/mlx4/cmd.h> @@ -50,19 +51,41 @@ enum { MLX4_TABLE_CHUNK_SIZE = 1 << 18 }; -void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm) +static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) { - struct mlx4_icm_chunk *chunk, *tmp; int i; - list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { - if (chunk->nsg > 0) - pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, - PCI_DMA_BIDIRECTIONAL); + if (chunk->nsg > 0) + pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + for (i = 0; i < chunk->npages; ++i) + __free_pages(chunk->mem[i].page, + get_order(chunk->mem[i].length)); +} + +static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) +{ + int i; + + for (i = 0; i < chunk->npages; ++i) + dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, + lowmem_page_address(chunk->mem[i].page), + sg_dma_address(&chunk->mem[i])); +} + +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) +{ + struct mlx4_icm_chunk *chunk, *tmp; - for (i = 0; i < chunk->npages; ++i) - __free_pages(chunk->mem[i].page, - get_order(chunk->mem[i].length)); + if (!icm) + return; + + list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { + if (coherent) + mlx4_free_icm_coherent(dev, chunk); + else + mlx4_free_icm_pages(dev, chunk); kfree(chunk); } @@ -70,16 +93,45 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm) kfree(icm); } +static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) +{ + mem->page = alloc_pages(gfp_mask, order); + if (!mem->page) + return -ENOMEM; + + mem->length = PAGE_SIZE << order; + mem->offset = 0; + return 0; +} + +static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, + int order, gfp_t gfp_mask) +{ + void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, + &sg_dma_address(mem), gfp_mask); + if (!buf) + return -ENOMEM; + + sg_set_buf(mem, buf, PAGE_SIZE << order); + BUG_ON(mem->offset); + sg_dma_len(mem) = PAGE_SIZE << order; + return 0; +} + struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, - gfp_t gfp_mask) + gfp_t gfp_mask, int coherent) { struct mlx4_icm *icm; struct mlx4_icm_chunk *chunk = NULL; int cur_order; + int ret; + + /* We use sg_set_buf for coherent allocs, which assumes low memory */ + BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); if (!icm) - return icm; + return NULL; icm->refcount = 0; INIT_LIST_HEAD(&icm->chunk_list); @@ -101,12 +153,20 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, while (1 << cur_order > npages) --cur_order; - chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order); - if (chunk->mem[chunk->npages].page) { - chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order; - chunk->mem[chunk->npages].offset = 0; + if (coherent) + ret = mlx4_alloc_icm_coherent(&dev->pdev->dev, + &chunk->mem[chunk->npages], + cur_order, gfp_mask); + else + ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], + cur_order, gfp_mask); + + if (!ret) { + ++chunk->npages; - if (++chunk->npages == MLX4_ICM_CHUNK_LEN) { + if (coherent) + ++chunk->nsg; + else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); @@ 
-125,7 +185,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, } } - if (chunk) { + if (!coherent && chunk) { chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, chunk->npages, PCI_DMA_BIDIRECTIONAL); @@ -137,7 +197,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, return icm; fail: - mlx4_free_icm(dev, icm); + mlx4_free_icm(dev, icm, coherent); return NULL; } @@ -202,7 +262,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | - __GFP_NOWARN); + __GFP_NOWARN, table->coherent); if (!table->icm[i]) { ret = -ENOMEM; goto out; @@ -210,7 +270,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) if (mlx4_MAP_ICM(dev, table->icm[i], table->virt + (u64) i * MLX4_TABLE_CHUNK_SIZE)) { - mlx4_free_icm(dev, table->icm[i]); + mlx4_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; ret = -ENOMEM; goto out; @@ -234,16 +294,16 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) if (--table->icm[i]->refcount == 0) { mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); - mlx4_free_icm(dev, table->icm[i]); + mlx4_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; } mutex_unlock(&table->mutex); } -void *mlx4_table_find(struct mlx4_icm_table *table, int obj) +void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle) { - int idx, offset, i; + int idx, offset, dma_offset, i; struct mlx4_icm_chunk *chunk; struct mlx4_icm *icm; struct page *page = NULL; @@ -253,15 +313,26 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj) mutex_lock(&table->mutex); - idx = obj & (table->num_obj - 1); - icm = table->icm[idx / (MLX4_TABLE_CHUNK_SIZE / table->obj_size)]; - offset = idx % (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + idx = (obj & (table->num_obj - 1)) * table->obj_size; + icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE]; + dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE; if (!icm) goto out; list_for_each_entry(chunk, &icm->chunk_list, list) { for (i = 0; i < chunk->npages; ++i) { + if (dma_handle && dma_offset >= 0) { + if (sg_dma_len(&chunk->mem[i]) > dma_offset) + *dma_handle = sg_dma_address(&chunk->mem[i]) + + dma_offset; + dma_offset -= sg_dma_len(&chunk->mem[i]); + } + /* + * DMA mapping can merge pages but not split them, + * so if we found the page, dma_handle has already + * been assigned to. + */ if (chunk->mem[i].length > offset) { page = chunk->mem[i].page; goto out; @@ -309,7 +380,7 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, u64 virt, int obj_size, int nobj, int reserved, - int use_lowmem) + int use_lowmem, int use_coherent) { int obj_per_chunk; int num_icm; @@ -327,6 +398,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table->num_obj = nobj; table->obj_size = obj_size; table->lowmem = use_lowmem; + table->coherent = use_coherent; mutex_init(&table->mutex); for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { @@ -336,11 +408,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | - __GFP_NOWARN); + __GFP_NOWARN, use_coherent); if (!table->icm[i]) goto err; if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) { - mlx4_free_icm(dev, table->icm[i]); + mlx4_free_icm(dev, table->icm[i], use_coherent); table->icm[i] = NULL; goto err; } @@ -359,7 +431,7 @@ err: if (table->icm[i]) { mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE, MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); - mlx4_free_icm(dev, table->icm[i]); + mlx4_free_icm(dev, table->icm[i], use_coherent); } return -ENOMEM; @@ -373,7 +445,7 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) if (table->icm[i]) { mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); - mlx4_free_icm(dev, table->icm[i]); + mlx4_free_icm(dev, table->icm[i], table->coherent); } kfree(table->icm); diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h index bea223d879a5..6c44edf35847 100644 --- a/drivers/net/mlx4/icm.h +++ b/drivers/net/mlx4/icm.h @@ -67,8 +67,9 @@ struct mlx4_icm_iter { struct mlx4_dev; -struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask); -void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm); +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, + gfp_t gfp_mask, int coherent); +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); @@ -78,11 +79,11 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, int start, int end); int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, u64 virt, int obj_size, int nobj, int reserved, - int use_lowmem); + int use_lowmem, int use_coherent); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); -void *mlx4_table_find(struct mlx4_icm_table *table, int obj); +void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, int start, int end); void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c index 9ae951bf6aa6..be5d9e90ccf2 100644 --- a/drivers/net/mlx4/intf.c +++ b/drivers/net/mlx4/intf.c @@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev) mlx4_add_device(intf, priv); mutex_unlock(&intf_mutex); + mlx4_start_catas_poll(dev); return 0; } @@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index a4f2e0475a71..e029b8afbd37 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -61,7 +61,7 @@ MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); #ifdef CONFIG_PCI_MSI -static int msi_x; +static int msi_x = 1; module_param(msi_x, int, 0444); MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); @@ -78,14 +78,14 @@ static const char mlx4_version[] __devinitdata = static struct mlx4_profile default_profile = { 
.num_qp = 1 << 16, .num_srq = 1 << 16, - .rdmarc_per_qp = 4, + .rdmarc_per_qp = 1 << 4, .num_cq = 1 << 16, .num_mcg = 1 << 13, .num_mpt = 1 << 17, .num_mtt = 1 << 20, }; -static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; int i; @@ -149,7 +149,8 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; - dev->caps.reserved_mtts = dev_cap->reserved_mtts; + dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, + MLX4_MTT_ENTRY_PER_SEG); dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; @@ -168,7 +169,7 @@ static int __devinit mlx4_load_fw(struct mlx4_dev *dev) int err; priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, - GFP_HIGHUSER | __GFP_NOWARN); + GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.fw_icm) { mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); return -ENOMEM; @@ -192,7 +193,7 @@ err_unmap_fa: mlx4_UNMAP_FA(dev); err_free: - mlx4_free_icm(dev, priv->fw.fw_icm); + mlx4_free_icm(dev, priv->fw.fw_icm, 0); return err; } @@ -207,7 +208,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, ((u64) (MLX4_CMPT_TYPE_QP * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, dev->caps.num_qps, - dev->caps.reserved_qps, 0); + dev->caps.reserved_qps, 0, 0); if (err) goto err; @@ -216,7 +217,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, ((u64) (MLX4_CMPT_TYPE_SRQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, dev->caps.num_srqs, - dev->caps.reserved_srqs, 0); + dev->caps.reserved_srqs, 0, 0); if (err) goto err_qp; @@ -225,7 +226,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, ((u64) (MLX4_CMPT_TYPE_CQ * cmpt_entry_sz) << MLX4_CMPT_SHIFT), cmpt_entry_sz, dev->caps.num_cqs, - dev->caps.reserved_cqs, 0); + dev->caps.reserved_cqs, 0, 0); if (err) goto err_srq; @@ -236,7 +237,7 @@ static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, cmpt_entry_sz, roundup_pow_of_two(MLX4_NUM_EQ + dev->caps.reserved_eqs), - MLX4_NUM_EQ + dev->caps.reserved_eqs, 0); + MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0); if (err) goto err_cq; @@ -255,10 +256,8 @@ err: return err; } -static int __devinit mlx4_init_icm(struct mlx4_dev *dev, - struct mlx4_dev_cap *dev_cap, - struct mlx4_init_hca_param *init_hca, - u64 icm_size) +static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca, u64 icm_size) { struct mlx4_priv *priv = mlx4_priv(dev); u64 aux_pages; @@ -275,7 +274,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, (unsigned long long) aux_pages << 2); priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, - GFP_HIGHUSER | __GFP_NOWARN); + GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.aux_icm) { mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); return -ENOMEM; @@ -299,11 +298,22 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, goto err_unmap_cmpt; } + /* + * Reserved MTT entries must be aligned up to a cacheline + * boundary, since the FW will write to them, while the driver + * writes to all other MTT entries. 
(The variable + * dev->caps.mtt_entry_sz below is really the MTT segment + * size, not the raw entry size) + */ + dev->caps.reserved_mtts = + ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, + dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; + err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, init_hca->mtt_base, dev->caps.mtt_entry_sz, dev->caps.num_mtt_segs, - dev->caps.reserved_mtts, 1); + dev->caps.reserved_mtts, 1, 0); if (err) { mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); goto err_unmap_eq; @@ -313,7 +323,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->dmpt_base, dev_cap->dmpt_entry_sz, dev->caps.num_mpts, - dev->caps.reserved_mrws, 1); + dev->caps.reserved_mrws, 1, 1); if (err) { mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); goto err_unmap_mtt; @@ -323,7 +333,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->qpc_base, dev_cap->qpc_entry_sz, dev->caps.num_qps, - dev->caps.reserved_qps, 0); + dev->caps.reserved_qps, 0, 0); if (err) { mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); goto err_unmap_dmpt; @@ -333,7 +343,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->auxc_base, dev_cap->aux_entry_sz, dev->caps.num_qps, - dev->caps.reserved_qps, 0); + dev->caps.reserved_qps, 0, 0); if (err) { mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); goto err_unmap_qp; @@ -343,7 +353,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->altc_base, dev_cap->altc_entry_sz, dev->caps.num_qps, - dev->caps.reserved_qps, 0); + dev->caps.reserved_qps, 0, 0); if (err) { mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); goto err_unmap_auxc; @@ -353,7 +363,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->rdmarc_base, dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, dev->caps.num_qps, - dev->caps.reserved_qps, 0); + dev->caps.reserved_qps, 0, 0); if (err) { mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); goto err_unmap_altc; @@ -363,7 +373,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->cqc_base, dev_cap->cqc_entry_sz, dev->caps.num_cqs, - dev->caps.reserved_cqs, 0); + dev->caps.reserved_cqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); goto err_unmap_rdmarc; @@ -373,7 +383,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->srqc_base, dev_cap->srq_entry_sz, dev->caps.num_srqs, - dev->caps.reserved_srqs, 0); + dev->caps.reserved_srqs, 0, 0); if (err) { mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); goto err_unmap_cq; @@ -388,7 +398,7 @@ static int __devinit mlx4_init_icm(struct mlx4_dev *dev, init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, dev->caps.num_mgms + dev->caps.num_amgms, dev->caps.num_mgms + dev->caps.num_amgms, - 0); + 0, 0); if (err) { mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); goto err_unmap_srq; @@ -433,7 +443,7 @@ err_unmap_aux: mlx4_UNMAP_ICM_AUX(dev); err_free_aux: - mlx4_free_icm(dev, priv->fw.aux_icm); + mlx4_free_icm(dev, priv->fw.aux_icm, 0); return err; } @@ -458,7 +468,7 @@ static void mlx4_free_icms(struct mlx4_dev *dev) mlx4_unmap_eq_icm(dev); mlx4_UNMAP_ICM_AUX(dev); - mlx4_free_icm(dev, priv->fw.aux_icm); + mlx4_free_icm(dev, priv->fw.aux_icm, 0); } static void mlx4_close_hca(struct mlx4_dev *dev) @@ -466,10 +476,10 @@ static void mlx4_close_hca(struct mlx4_dev *dev) mlx4_CLOSE_HCA(dev, 0); mlx4_free_icms(dev); mlx4_UNMAP_FA(dev); - 
mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm); + mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); } -static int __devinit mlx4_init_hca(struct mlx4_dev *dev) +static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_adapter adapter; @@ -524,8 +534,8 @@ static int __devinit mlx4_init_hca(struct mlx4_dev *dev) } priv->eq_table.inta_pin = adapter.inta_pin; - priv->rev_id = adapter.revision_id; - memcpy(priv->board_id, adapter.board_id, sizeof priv->board_id); + dev->rev_id = adapter.revision_id; + memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); return 0; @@ -537,12 +547,12 @@ err_free_icm: err_stop_fw: mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, priv->fw.fw_icm); + mlx4_free_icm(dev, priv->fw.fw_icm, 0); return err; } -static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) +static int mlx4_setup_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; @@ -583,13 +593,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) goto err_pd_table_free; } - mlx4_map_catas_buf(dev); - err = mlx4_init_eq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "event queue table, aborting.\n"); - goto err_catas_buf; + goto err_mr_table_free; } err = mlx4_cmd_use_events(dev); @@ -601,13 +609,17 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) err = mlx4_NOP(dev); if (err) { - mlx4_err(dev, "NOP command failed to generate interrupt " - "(IRQ %d), aborting.\n", - priv->eq_table.eq[MLX4_EQ_ASYNC].irq); - if (dev->flags & MLX4_FLAG_MSI_X) - mlx4_err(dev, "Try again with MSI-X disabled.\n"); - else + if (dev->flags & MLX4_FLAG_MSI_X) { + mlx4_warn(dev, "NOP command failed to generate MSI-X " + "interrupt IRQ %d).\n", + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); + mlx4_warn(dev, "Trying again without MSI-X.\n"); + } else { + mlx4_err(dev, "NOP command failed to generate interrupt " + "(IRQ %d), aborting.\n", + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); + } goto err_cmd_poll; } @@ -659,8 +671,7 @@ err_cmd_poll: err_eq_table_free: mlx4_cleanup_eq_table(dev); -err_catas_buf: - mlx4_unmap_catas_buf(dev); +err_mr_table_free: mlx4_cleanup_mr_table(dev); err_pd_table_free: @@ -708,19 +719,12 @@ no_msi: priv->eq_table.eq[i].irq = dev->pdev->irq; } -static int __devinit mlx4_init_one(struct pci_dev *pdev, - const struct pci_device_id *id) +static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { - static int mlx4_version_printed; struct mlx4_priv *priv; struct mlx4_dev *dev; int err; - if (!mlx4_version_printed) { - printk(KERN_INFO "%s", mlx4_version); - ++mlx4_version_printed; - } - printk(KERN_INFO PFX "Initializing %s\n", pci_name(pdev)); @@ -806,8 +810,6 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev, goto err_free_dev; } - mlx4_enable_msi_x(dev); - if (mlx4_cmd_init(dev)) { mlx4_err(dev, "Failed to init command interface, aborting.\n"); goto err_free_dev; @@ -817,7 +819,15 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev, if (err) goto err_cmd; + mlx4_enable_msi_x(dev); + err = mlx4_setup_hca(dev); + if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { + dev->flags &= ~MLX4_FLAG_MSI_X; + pci_disable_msix(pdev); + err = mlx4_setup_hca(dev); + } + if (err) goto err_close; @@ -836,23 +846,20 @@ err_cleanup: mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); - - mlx4_unmap_catas_buf(dev); - mlx4_cleanup_mr_table(dev); mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev); 
err_close: + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + mlx4_close_hca(dev); err_cmd: mlx4_cmd_cleanup(dev); err_free_dev: - if (dev->flags & MLX4_FLAG_MSI_X) - pci_disable_msix(pdev); - kfree(priv); err_release_bar2: @@ -867,7 +874,20 @@ err_disable_pdev: return err; } -static void __devexit mlx4_remove_one(struct pci_dev *pdev) +static int __devinit mlx4_init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static int mlx4_version_printed; + + if (!mlx4_version_printed) { + printk(KERN_INFO "%s", mlx4_version); + ++mlx4_version_printed; + } + + return mlx4_init_one(pdev, id); +} + +static void mlx4_remove_one(struct pci_dev *pdev) { struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); @@ -885,9 +905,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev) mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); - - mlx4_unmap_catas_buf(dev); - mlx4_cleanup_mr_table(dev); mlx4_cleanup_pd_table(dev); @@ -908,6 +925,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev) } } +int mlx4_restart_one(struct pci_dev *pdev) +{ + mlx4_remove_one(pdev); + return __mlx4_init_one(pdev, NULL); +} + static struct pci_device_id mlx4_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ @@ -930,6 +953,10 @@ static int __init mlx4_init(void) { int ret; + ret = mlx4_catas_init(); + if (ret) + return ret; + ret = pci_register_driver(&mlx4_driver); return ret < 0 ? ret : 0; } @@ -937,6 +964,7 @@ static int __init mlx4_init(void) static void __exit mlx4_cleanup(void) { pci_unregister_driver(&mlx4_driver); + mlx4_catas_cleanup(); } module_init(mlx4_init); diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index 672024a0ee71..a99e7729d333 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c @@ -359,7 +359,7 @@ out: } EXPORT_SYMBOL_GPL(mlx4_multicast_detach); -int __devinit mlx4_init_mcg_table(struct mlx4_dev *dev) +int mlx4_init_mcg_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int err; diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index d9c91a71fc87..53a1cdddfc13 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -39,6 +39,7 @@ #include <linux/mutex.h> #include <linux/radix-tree.h> +#include <linux/timer.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -55,11 +56,7 @@ enum { }; enum { - MLX4_BOARD_ID_LEN = 64 -}; - -enum { - MLX4_MGM_ENTRY_SIZE = 0x40, + MLX4_MGM_ENTRY_SIZE = 0x100, MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2), MLX4_MTT_ENTRY_PER_SEG = 8 }; @@ -67,7 +64,6 @@ enum { enum { MLX4_EQ_ASYNC, MLX4_EQ_COMP, - MLX4_EQ_CATAS, MLX4_NUM_EQ }; @@ -133,6 +129,7 @@ struct mlx4_icm_table { int num_obj; int obj_size; int lowmem; + int coherent; struct mutex mutex; struct mlx4_icm **icm; }; @@ -248,7 +245,8 @@ struct mlx4_mcg_table { struct mlx4_catas_err { u32 __iomem *map; - int size; + struct timer_list timer; + struct list_head list; }; struct mlx4_priv { @@ -276,9 +274,6 @@ struct mlx4_priv { struct mlx4_uar driver_uar; void __iomem *kar; - - u32 rev_id; - char board_id[MLX4_BOARD_ID_LEN]; }; static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) @@ -311,9 +306,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev); void mlx4_cleanup_srq_table(struct mlx4_dev *dev); void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); -void mlx4_map_catas_buf(struct mlx4_dev *dev); -void 
mlx4_unmap_catas_buf(struct mlx4_dev *dev); - +void mlx4_start_catas_poll(struct mlx4_dev *dev); +void mlx4_stop_catas_poll(struct mlx4_dev *dev); +int mlx4_catas_init(void); +void mlx4_catas_cleanup(void); +int mlx4_restart_one(struct pci_dev *pdev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index d0808fa3ec82..0c05a10bae3b 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -68,6 +68,9 @@ struct mlx4_mpt_entry { #define MLX4_MTT_FLAG_PRESENT 1 +#define MLX4_MPT_STATUS_SW 0xF0 +#define MLX4_MPT_STATUS_HW 0x00 + static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) { int o; @@ -255,10 +258,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int err; index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); - if (index == -1) { - err = -ENOMEM; - goto err; - } + if (index == -1) + return -ENOMEM; mr->iova = iova; mr->size = size; @@ -269,15 +270,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); if (err) - goto err_index; - - return 0; - -err_index: - mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); -err: - kfree(mr); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); @@ -358,58 +352,57 @@ err_table: } EXPORT_SYMBOL_GPL(mlx4_mr_enable); -static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, - int num_mtt) +static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) { - return mlx4_cmd(dev, mailbox->dma, num_mtt, 0, MLX4_CMD_WRITE_MTT, - MLX4_CMD_TIME_CLASS_B); + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 *mtts; + dma_addr_t dma_handle; + int i; + int s = start_index * sizeof (u64); + + /* All MTTs must fit in the same page */ + if (start_index / (PAGE_SIZE / sizeof (u64)) != + (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) + return -EINVAL; + + if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1)) + return -EINVAL; + + mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + + s / dev->caps.mtt_entry_sz, &dma_handle); + if (!mtts) + return -ENOMEM; + + for (i = 0; i < npages; ++i) + mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); + + dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE); + + return 0; } int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) { - struct mlx4_cmd_mailbox *mailbox; - __be64 *mtt_entry; - int i; - int err = 0; + int chunk; + int err; if (mtt->order < 0) return -EINVAL; - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - mtt_entry = mailbox->buf; - while (npages > 0) { - mtt_entry[0] = cpu_to_be64(mlx4_mtt_addr(dev, mtt) + start_index * 8); - mtt_entry[1] = 0; - - for (i = 0; i < npages && i < MLX4_MAILBOX_SIZE / 8 - 2; ++i) - mtt_entry[i + 2] = cpu_to_be64(page_list[i] | - MLX4_MTT_FLAG_PRESENT); - - /* - * If we have an odd number of entries to write, add - * one more dummy entry for firmware efficiency. 
- */ - if (i & 1) - mtt_entry[i + 2] = 0; - - err = mlx4_WRITE_MTT(dev, mailbox, (i + 1) & ~1); + chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); + err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); if (err) - goto out; + return err; - npages -= i; - start_index += i; - page_list += i; + npages -= chunk; + start_index += chunk; + page_list += chunk; } -out: - mlx4_free_cmd_mailbox(dev, mailbox); - - return err; + return 0; } EXPORT_SYMBOL_GPL(mlx4_write_mtt); @@ -437,7 +430,7 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, } EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); -int __devinit mlx4_init_mr_table(struct mlx4_dev *dev) +int mlx4_init_mr_table(struct mlx4_dev *dev) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; int err; @@ -453,7 +446,7 @@ int __devinit mlx4_init_mr_table(struct mlx4_dev *dev) goto err_buddy; if (dev->caps.reserved_mtts) { - if (mlx4_alloc_mtt_range(dev, ilog2(dev->caps.reserved_mtts)) == -1) { + if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) { mlx4_warn(dev, "MTT table of order %d is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; @@ -479,3 +472,165 @@ void mlx4_cleanup_mr_table(struct mlx4_dev *dev) mlx4_buddy_cleanup(&mr_table->mtt_buddy); mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); } + +static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova) +{ + int i, page_mask; + + if (npages > fmr->max_pages) + return -EINVAL; + + page_mask = (1 << fmr->page_shift) - 1; + + /* We are getting page lists, so va must be page aligned. */ + if (iova & page_mask) + return -EINVAL; + + /* Trust the user not to pass misaligned data in page_list */ + if (0) + for (i = 0; i < npages; ++i) { + if (page_list[i] & ~page_mask) + return -EINVAL; + } + + if (fmr->maps >= fmr->max_maps) + return -EINVAL; + + return 0; +} + +int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova, u32 *lkey, u32 *rkey) +{ + u32 key; + int i, err; + + err = mlx4_check_fmr(fmr, page_list, npages, iova); + if (err) + return err; + + ++fmr->maps; + + key = key_to_hw_index(fmr->mr.key); + key += dev->caps.num_mpts; + *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; + + /* Make sure MPT status is visible before writing MTT entries */ + wmb(); + + for (i = 0; i < npages; ++i) + fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); + + dma_sync_single(&dev->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); + + fmr->mpt->key = cpu_to_be32(key); + fmr->mpt->lkey = cpu_to_be32(key); + fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); + fmr->mpt->start = cpu_to_be64(iova); + + /* Make MTT entries are visible before setting MPT status */ + wmb(); + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; + + /* Make sure MPT status is visible before consumer can use FMR */ + wmb(); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); + +int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 mtt_seg; + int err = -ENOMEM; + + if (page_shift < 12 || page_shift >= 32) + return -EINVAL; + + /* All MTTs must fit in the same page */ + if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) + return -EINVAL; + + fmr->page_shift = page_shift; + fmr->max_pages = max_pages; + fmr->max_maps = max_maps; + fmr->maps = 0; + + err = mlx4_mr_alloc(dev, pd, 0, 0, access, 
max_pages, + page_shift, &fmr->mr); + if (err) + return err; + + mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; + + fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, + fmr->mr.mtt.first_seg, + &fmr->dma_handle); + if (!fmr->mtts) { + err = -ENOMEM; + goto err_free; + } + + fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, + key_to_hw_index(fmr->mr.key), NULL); + if (!fmr->mpt) { + err = -ENOMEM; + goto err_free; + } + + return 0; + +err_free: + mlx4_mr_free(dev, &fmr->mr); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); + +int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + return mlx4_mr_enable(dev, &fmr->mr); +} +EXPORT_SYMBOL_GPL(mlx4_fmr_enable); + +void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + u32 *lkey, u32 *rkey) +{ + u32 key; + + if (!fmr->maps) + return; + + key = key_to_hw_index(fmr->mr.key); + key &= dev->caps.num_mpts - 1; + *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); + + fmr->maps = 0; + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); + +int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + if (fmr->maps) + return -EBUSY; + + fmr->mr.enabled = 0; + mlx4_mr_free(dev, &fmr->mr); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_free); + +int mlx4_SYNC_TPT(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000); +} +EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c index 23dea1ee7750..3a93c5f0f7ab 100644 --- a/drivers/net/mlx4/pd.c +++ b/drivers/net/mlx4/pd.c @@ -57,7 +57,7 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) } EXPORT_SYMBOL_GPL(mlx4_pd_free); -int __devinit mlx4_init_pd_table(struct mlx4_dev *dev) +int mlx4_init_pd_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index 19b48c71cf7f..cc4b1be18219 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c @@ -240,7 +240,8 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); - mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); + if (qp->qpn < dev->caps.sqp_start + 8) + mlx4_bitmap_free(&qp_table->bitmap, qp->qpn); } EXPORT_SYMBOL_GPL(mlx4_qp_free); @@ -250,7 +251,7 @@ static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) MLX4_CMD_TIME_CLASS_B); } -int __devinit mlx4_init_qp_table(struct mlx4_dev *dev) +int mlx4_init_qp_table(struct mlx4_dev *dev) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; int err; diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c index e4dfd4b11a4a..e199715fabd0 100644 --- a/drivers/net/mlx4/reset.c +++ b/drivers/net/mlx4/reset.c @@ -119,6 +119,9 @@ int mlx4_reset(struct mlx4_dev *dev) writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET); iounmap(reset); + /* Docs say to wait one second before accessing device */ + msleep(1000); + end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; do { if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index b061c86d6839..d23f46d692ef 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c @@ -227,7 +227,7 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn); if (err) goto err_out; - *limit_watermark = srq_context->limit_watermark; + *limit_watermark = be16_to_cpu(srq_context->limit_watermark); err_out: 
mlx4_free_cmd_mailbox(dev, mailbox); @@ -235,7 +235,7 @@ err_out: } EXPORT_SYMBOL_GPL(mlx4_srq_query); -int __devinit mlx4_init_srq_table(struct mlx4_dev *dev) +int mlx4_init_srq_table(struct mlx4_dev *dev) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; int err; diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 1799eee88db7..b33d21f4efff 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -63,10 +63,9 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); static int mv643xx_eth_open(struct net_device *); static int mv643xx_eth_stop(struct net_device *); static int mv643xx_eth_change_mtu(struct net_device *, int); -static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); static void eth_port_init_mac_tables(unsigned int eth_port_num); #ifdef MV643XX_NAPI -static int mv643xx_poll(struct net_device *dev, int *budget); +static int mv643xx_poll(struct napi_struct *napi, int budget); #endif static int ethernet_phy_get(unsigned int eth_port_num); static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); @@ -341,7 +340,7 @@ int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) if (cmd_sts & ETH_ERROR_SUMMARY) { printk("%s: Error in TX\n", dev->name); - mp->stats.tx_errors++; + dev->stats.tx_errors++; } spin_unlock_irqrestore(&mp->lock, flags); @@ -388,7 +387,7 @@ static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) { struct mv643xx_private *mp = netdev_priv(dev); - struct net_device_stats *stats = &mp->stats; + struct net_device_stats *stats = &dev->stats; unsigned int received_packets = 0; struct sk_buff *skb; struct pkt_info pkt_info; @@ -534,7 +533,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) } /* PHY status changed */ - if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { + if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) { struct ethtool_cmd cmd; if (mii_link_ok(&mp->mii)) { @@ -562,7 +561,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) /* wait for previous write to complete */ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &mp->napi); } #else if (eth_int_cause & ETH_INT_CAUSE_RX) @@ -785,6 +784,7 @@ static int mv643xx_eth_open(struct net_device *dev) unsigned int port_num = mp->port_num; unsigned int size; int err; + DECLARE_MAC_BUF(mac); /* Clear any pending ethernet port interrupts */ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); @@ -880,6 +880,10 @@ static int mv643xx_eth_open(struct net_device *dev) mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ +#ifdef MV643XX_NAPI + napi_enable(&mp->napi); +#endif + eth_port_start(dev); /* Interrupt Coalescing */ @@ -982,7 +986,7 @@ static int mv643xx_eth_stop(struct net_device *dev) mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); #ifdef MV643XX_NAPI - netif_poll_disable(dev); + napi_disable(&mp->napi); #endif netif_carrier_off(dev); netif_stop_queue(dev); @@ -992,10 +996,6 @@ static int mv643xx_eth_stop(struct net_device *dev) mv643xx_eth_free_tx_rings(dev); mv643xx_eth_free_rx_rings(dev); -#ifdef MV643XX_NAPI - netif_poll_enable(dev); -#endif - free_irq(dev->irq, dev); return 0; @@ -1007,11 +1007,12 @@ static int mv643xx_eth_stop(struct net_device *dev) * * This function is used in case of NAPI */ -static int mv643xx_poll(struct net_device *dev, int *budget) +static int mv643xx_poll(struct napi_struct 
*napi, int budget) { - struct mv643xx_private *mp = netdev_priv(dev); - int done = 1, orig_budget, work_done; + struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi); + struct net_device *dev = mp->dev; unsigned int port_num = mp->port_num; + int work_done; #ifdef MV643XX_TX_FAST_REFILL if (++mp->tx_clean_threshold > 5) { @@ -1020,27 +1021,20 @@ static int mv643xx_poll(struct net_device *dev, int *budget) } #endif + work_done = 0; if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) - != (u32) mp->rx_used_desc_q) { - orig_budget = *budget; - if (orig_budget > dev->quota) - orig_budget = dev->quota; - work_done = mv643xx_eth_receive_queue(dev, orig_budget); - *budget -= work_done; - dev->quota -= work_done; - if (work_done >= orig_budget) - done = 0; - } + != (u32) mp->rx_used_desc_q) + work_done = mv643xx_eth_receive_queue(dev, budget); - if (done) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); } - return done ? 0 : 1; + return work_done; } #endif @@ -1198,7 +1192,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mv643xx_private *mp = netdev_priv(dev); - struct net_device_stats *stats = &mp->stats; + struct net_device_stats *stats = &dev->stats; unsigned long flags; BUG_ON(netif_queue_stopped(dev)); @@ -1222,7 +1216,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&mp->lock, flags); eth_tx_submit_descs_for_skb(mp, skb); - stats->tx_bytes = skb->len; + stats->tx_bytes += skb->len; stats->tx_packets++; dev->trans_start = jiffies; @@ -1234,23 +1228,6 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; /* success */ } -/* - * mv643xx_eth_get_stats - * - * Returns a pointer to the interface statistics. 
- * - * Input : dev - a pointer to the required interface - * - * Output : a pointer to the interface's statistics - */ - -static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) -{ - struct mv643xx_private *mp = netdev_priv(dev); - - return &mp->stats; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void mv643xx_netpoll(struct net_device *netdev) { @@ -1333,6 +1310,10 @@ static int mv643xx_eth_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); mp = netdev_priv(dev); + mp->dev = dev; +#ifdef MV643XX_NAPI + netif_napi_add(dev, &mp->napi, mv643xx_poll, 64); +#endif res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); BUG_ON(!res); @@ -1341,23 +1322,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->open = mv643xx_eth_open; dev->stop = mv643xx_eth_stop; dev->hard_start_xmit = mv643xx_eth_start_xmit; - dev->get_stats = mv643xx_eth_get_stats; dev->set_mac_address = mv643xx_eth_set_mac_address; dev->set_multicast_list = mv643xx_eth_set_rx_mode; /* No need to Tx Timeout */ dev->tx_timeout = mv643xx_eth_tx_timeout; -#ifdef MV643XX_NAPI - dev->poll = mv643xx_poll; - dev->weight = 64; -#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = mv643xx_netpoll; #endif dev->watchdog_timeo = 2 * HZ; - dev->tx_queue_len = mp->tx_ring_size; dev->base_addr = 0; dev->change_mtu = mv643xx_eth_change_mtu; dev->do_ioctl = mv643xx_eth_do_ioctl; @@ -1432,7 +1407,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mv643xx_eth_update_pscr(dev, &cmd); mv643xx_set_settings(dev, &cmd); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_netdev(dev); if (err) @@ -1440,8 +1414,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) p = dev->dev_addr; printk(KERN_NOTICE - "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]); + "%s: port %d with MAC address %s\n", + dev->name, port_num, print_mac(mac, p)); if (dev->features & NETIF_F_SG) printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name); @@ -2688,8 +2662,7 @@ static const struct mv643xx_stats mv643xx_gstrings_stats[] = { { "late_collision", MV643XX_STAT(mib_counters.late_collision) }, }; -#define MV643XX_STATS_LEN \ - sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) +#define MV643XX_STATS_LEN ARRAY_SIZE(mv643xx_gstrings_stats) static void mv643xx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) @@ -2701,9 +2674,14 @@ static void mv643xx_get_drvinfo(struct net_device *netdev, drvinfo->n_stats = MV643XX_STATS_LEN; } -static int mv643xx_get_stats_count(struct net_device *netdev) +static int mv643xx_get_sset_count(struct net_device *netdev, int sset) { - return MV643XX_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + return MV643XX_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void mv643xx_get_ethtool_stats(struct net_device *netdev, @@ -2763,13 +2741,9 @@ static const struct ethtool_ops mv643xx_ethtool_ops = { .set_settings = mv643xx_set_settings, .get_drvinfo = mv643xx_get_drvinfo, .get_link = mv643xx_eth_get_link, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_stats_count = mv643xx_get_stats_count, .get_ethtool_stats = mv643xx_get_ethtool_stats, .get_strings = mv643xx_get_strings, - .get_stats_count = mv643xx_get_stats_count, - .get_ethtool_stats = mv643xx_get_ethtool_stats, .nway_reset = mv643xx_eth_nway_restart, }; diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index 82f8c0cbfb64..be669eb23788 
100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h @@ -64,7 +64,9 @@ #define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8) #define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR) #define ETH_INT_CAUSE_PHY 0x00010000 -#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY) +#define ETH_INT_CAUSE_STATE 0x00100000 +#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \ + ETH_INT_CAUSE_STATE) #define ETH_INT_MASK_ALL 0x00000000 #define ETH_INT_MASK_ALL_EXT 0x00000000 @@ -318,6 +320,8 @@ struct mv643xx_private { struct work_struct tx_timeout_task; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; struct mv643xx_mib_counters mib_counters; spinlock_t lock; diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c index e246d00bba6d..86c9c06433cb 100644 --- a/drivers/net/mvme147.c +++ b/drivers/net/mvme147.c @@ -67,6 +67,7 @@ struct net_device * __init mvme147lance_probe(int unit) u_long *addr; u_long address; int err; + DECLARE_MAC_BUF(mac); if (!MACH_IS_MVME147 || called) return ERR_PTR(-ENODEV); @@ -79,8 +80,6 @@ struct net_device * __init mvme147lance_probe(int unit) if (unit >= 0) sprintf(dev->name, "eth%d", unit); - SET_MODULE_OWNER(dev); - /* Fill the dev fields */ dev->base_addr = (unsigned long)MVME147_LANCE_BASE; dev->open = &m147lance_open; @@ -103,12 +102,10 @@ struct net_device * __init mvme147lance_probe(int unit) address=address>>8; dev->dev_addr[3]=address&0xff; - printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, dev->base_addr, MVME147_LANCE_IRQ, - dev->dev_addr[0], - dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5]); + printk("%s: MVME147 at 0x%08lx, irq %d, " + "Hardware Address %s\n", + dev->name, dev->base_addr, MVME147_LANCE_IRQ, + print_mac(mac, dev->dev_addr)); lp = (struct m147lance_private *)dev->priv; lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */ diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index deca65330b0f..e8afa101433e 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -48,6 +48,7 @@ #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> +#include <linux/inet_lro.h> #include <linux/ip.h> #include <linux/inet.h> #include <linux/in.h> @@ -62,6 +63,8 @@ #include <linux/io.h> #include <linux/log2.h> #include <net/checksum.h> +#include <net/ip.h> +#include <net/tcp.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/processor.h> @@ -72,7 +75,7 @@ #include "myri10ge_mcp.h" #include "myri10ge_mcp_gen_header.h" -#define MYRI10GE_VERSION_STR "1.3.1-1.248" +#define MYRI10GE_VERSION_STR "1.3.2-1.269" MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); MODULE_AUTHOR("Maintainer: help@myri.com"); @@ -89,6 +92,8 @@ MODULE_LICENSE("Dual BSD/GPL"); #define MYRI10GE_EEPROM_STRINGS_SIZE 256 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2) +#define MYRI10GE_MAX_LRO_DESCRIPTORS 8 +#define MYRI10GE_LRO_MAX_PKTS 64 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff) #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff @@ -151,6 +156,8 @@ struct myri10ge_rx_done { dma_addr_t bus; int cnt; int idx; + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; }; struct myri10ge_priv { @@ -163,6 +170,7 @@ struct myri10ge_priv { int small_bytes; int big_bytes; struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; u8 
__iomem *sram; int sram_size; @@ -191,6 +199,7 @@ struct myri10ge_priv { struct timer_list watchdog_timer; int watchdog_tx_done; int watchdog_tx_req; + int watchdog_pause; int watchdog_resets; int tx_linearized; int pause; @@ -276,6 +285,14 @@ static int myri10ge_debug = -1; /* defaults above */ module_param(myri10ge_debug, int, 0); MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); +static int myri10ge_lro = 1; +module_param(myri10ge_lro, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); + +static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; +module_param(myri10ge_lro_max_pkts, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); + static int myri10ge_fill_thresh = 256; module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); @@ -1019,6 +1036,15 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, remainder -= MYRI10GE_ALLOC_SIZE; } + if (mgp->csum_flag && myri10ge_lro) { + rx_frags[0].page_offset += MXGEFW_PAD; + rx_frags[0].size -= MXGEFW_PAD; + len -= MXGEFW_PAD; + lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, + len, len, (void *)(unsigned long)csum, csum); + return 1; + } + hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN; /* allocate an skb to attach the page(s) to. */ @@ -1099,7 +1125,7 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) } } -static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) +static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) { struct myri10ge_rx_done *rx_done = &mgp->rx_done; unsigned long rx_bytes = 0; @@ -1108,10 +1134,11 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) int idx = rx_done->idx; int cnt = rx_done->cnt; + int work_done = 0; u16 length; __wsum checksum; - while (rx_done->entry[idx].length != 0 && *limit != 0) { + while (rx_done->entry[idx].length != 0 && work_done++ < budget) { length = ntohs(rx_done->entry[idx].length); rx_done->entry[idx].length = 0; checksum = csum_unfold(rx_done->entry[idx].checksum); @@ -1127,16 +1154,15 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) rx_bytes += rx_ok * (unsigned long)length; cnt++; idx = cnt & (myri10ge_max_intr_slots - 1); - - /* limit potential for livelock by only handling a - * limited number of frames. 
*/ - (*limit)--; } rx_done->idx = idx; rx_done->cnt = cnt; mgp->stats.rx_packets += rx_packets; mgp->stats.rx_bytes += rx_bytes; + if (myri10ge_lro) + lro_flush_all(&rx_done->lro_mgr); + /* restock receive rings if needed */ if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, @@ -1144,6 +1170,7 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); + return work_done; } static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) @@ -1188,26 +1215,21 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) } } -static int myri10ge_poll(struct net_device *netdev, int *budget) +static int myri10ge_poll(struct napi_struct *napi, int budget) { - struct myri10ge_priv *mgp = netdev_priv(netdev); + struct myri10ge_priv *mgp = container_of(napi, struct myri10ge_priv, napi); + struct net_device *netdev = mgp->dev; struct myri10ge_rx_done *rx_done = &mgp->rx_done; - int limit, orig_limit, work_done; + int work_done; /* process as many rx events as NAPI will allow */ - limit = min(*budget, netdev->quota); - orig_limit = limit; - myri10ge_clean_rx_done(mgp, &limit); - work_done = orig_limit - limit; - *budget -= work_done; - netdev->quota -= work_done; + work_done = myri10ge_clean_rx_done(mgp, budget); if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); put_be32(htonl(3), mgp->irq_claim); - return 0; } - return 1; + return work_done; } static irqreturn_t myri10ge_intr(int irq, void *arg) @@ -1225,7 +1247,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* low bit indicates receives are present, so schedule * napi poll handler */ if (stats->valid & 1) - netif_rx_schedule(mgp->dev); + netif_rx_schedule(mgp->dev, &mgp->napi); if (!mgp->msi_enabled) { put_be32(0, mgp->irq_deassert); @@ -1378,7 +1400,8 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", "dropped_unicast_filtered", "dropped_multicast_filtered", "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", - "dropped_no_big_buffer" + "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", + "LRO avg aggr", "LRO no_desc" }; #define MYRI10GE_NET_STATS_LEN 21 @@ -1395,9 +1418,14 @@ myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) } } -static int myri10ge_get_stats_count(struct net_device *netdev) +static int myri10ge_get_sset_count(struct net_device *netdev, int sset) { - return MYRI10GE_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + return MYRI10GE_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void @@ -1444,6 +1472,14 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); + data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; + data[i++] = mgp->rx_done.lro_mgr.stats.flushed; + if (mgp->rx_done.lro_mgr.stats.flushed) + data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / + mgp->rx_done.lro_mgr.stats.flushed; + else + data[i++] = 0; + data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; } static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) @@ -1468,15 +1504,12 @@ static 
const struct ethtool_ops myri10ge_ethtool_ops = { .get_ringparam = myri10ge_get_ringparam, .get_rx_csum = myri10ge_get_rx_csum, .set_rx_csum = myri10ge_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_link = ethtool_op_get_link, .get_strings = myri10ge_get_strings, - .get_stats_count = myri10ge_get_stats_count, + .get_sset_count = myri10ge_get_sset_count, .get_ethtool_stats = myri10ge_get_ethtool_stats, .set_msglevel = myri10ge_set_msglevel, .get_msglevel = myri10ge_get_msglevel @@ -1717,10 +1750,69 @@ static void myri10ge_free_irq(struct myri10ge_priv *mgp) pci_disable_msi(pdev); } +static int +myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, + void **ip_hdr, void **tcpudp_hdr, + u64 * hdr_flags, void *priv) +{ + struct ethhdr *eh; + struct vlan_ethhdr *veh; + struct iphdr *iph; + u8 *va = page_address(frag->page) + frag->page_offset; + unsigned long ll_hlen; + __wsum csum = (__wsum) (unsigned long)priv; + + /* find the mac header, aborting if not IPv4 */ + + eh = (struct ethhdr *)va; + *mac_hdr = eh; + ll_hlen = ETH_HLEN; + if (eh->h_proto != htons(ETH_P_IP)) { + if (eh->h_proto == htons(ETH_P_8021Q)) { + veh = (struct vlan_ethhdr *)va; + if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -1; + + ll_hlen += VLAN_HLEN; + + /* + * HW checksum starts ETH_HLEN bytes into + * frame, so we must subtract off the VLAN + * header's checksum before csum can be used + */ + csum = csum_sub(csum, csum_partial(va + ETH_HLEN, + VLAN_HLEN, 0)); + } else { + return -1; + } + } + *hdr_flags = LRO_IPV4; + + iph = (struct iphdr *)(va + ll_hlen); + *ip_hdr = iph; + if (iph->protocol != IPPROTO_TCP) + return -1; + *hdr_flags |= LRO_TCP; + *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); + + /* verify the IP checksum */ + if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl))) + return -1; + + /* verify the checksum */ + if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr, + ntohs(iph->tot_len) - (iph->ihl << 2), + IPPROTO_TCP, csum))) + return -1; + + return 0; +} + static int myri10ge_open(struct net_device *dev) { struct myri10ge_priv *mgp; struct myri10ge_cmd cmd; + struct net_lro_mgr *lro_mgr; int status, big_pow2; mgp = netdev_priv(dev); @@ -1852,7 +1944,19 @@ static int myri10ge_open(struct net_device *dev) mgp->link_state = htonl(~0U); mgp->rdma_tags_available = 15; - netif_poll_enable(mgp->dev); /* must happen prior to any irq */ + lro_mgr = &mgp->rx_done.lro_mgr; + lro_mgr->dev = dev; + lro_mgr->features = LRO_F_NAPI; + lro_mgr->ip_summed = CHECKSUM_COMPLETE; + lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; + lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; + lro_mgr->lro_arr = mgp->rx_done.lro_desc; + lro_mgr->get_frag_header = myri10ge_get_frag_header; + lro_mgr->max_aggr = myri10ge_lro_max_pkts; + if (lro_mgr->max_aggr > MAX_SKB_FRAGS) + lro_mgr->max_aggr = MAX_SKB_FRAGS; + + napi_enable(&mgp->napi); /* must happen prior to any irq */ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); if (status) { @@ -1896,7 +2000,7 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - netif_poll_disable(mgp->dev); + napi_disable(&mgp->napi); netif_carrier_off(dev); netif_stop_queue(dev); old_down_cnt = mgp->down_cnt; @@ -2296,6 +2400,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) struct 
dev_mc_list *mc_list; __be32 data[2] = { 0, 0 }; int err; + DECLARE_MAC_BUF(mac); mgp = netdev_priv(dev); /* can be called from atomic contexts, @@ -2343,14 +2448,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev) printk(KERN_ERR "myri10ge: %s: Failed " "MXGEFW_JOIN_MULTICAST_GROUP, error status:" "%d\t", dev->name, err); - printk(KERN_ERR "MAC %02x:%02x:%02x:%02x:%02x:%02x\n", - ((unsigned char *)&mc_list->dmi_addr)[0], - ((unsigned char *)&mc_list->dmi_addr)[1], - ((unsigned char *)&mc_list->dmi_addr)[2], - ((unsigned char *)&mc_list->dmi_addr)[3], - ((unsigned char *)&mc_list->dmi_addr)[4], - ((unsigned char *)&mc_list->dmi_addr)[5] - ); + printk(KERN_ERR "MAC %s\n", + print_mac(mac, mc_list->dmi_addr)); goto abort; } } @@ -2513,26 +2612,20 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) { struct pci_dev *pdev = mgp->pdev; struct device *dev = &pdev->dev; - int cap, status; - u16 val; + int status; mgp->tx.boundary = 4096; /* * Verify the max read request size was set to 4KB * before trying the test with 4KB. */ - cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (cap < 64) { - dev_err(dev, "Bad PCI_CAP_ID_EXP location %d\n", cap); - goto abort; - } - status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val); - if (status != 0) { + status = pcie_get_readrq(pdev); + if (status < 0) { dev_err(dev, "Couldn't read max read req size: %d\n", status); goto abort; } - if ((val & (5 << 12)) != (5 << 12)) { - dev_warn(dev, "Max Read Request size != 4096 (0x%x)\n", val); + if (status != 4096) { + dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); mgp->tx.boundary = 2048; } /* @@ -2800,6 +2893,7 @@ static void myri10ge_watchdog(struct work_struct *work) static void myri10ge_watchdog_timer(unsigned long arg) { struct myri10ge_priv *mgp; + u32 rx_pause_cnt; mgp = (struct myri10ge_priv *)arg; @@ -2816,19 +2910,28 @@ static void myri10ge_watchdog_timer(unsigned long arg) myri10ge_fill_thresh) mgp->rx_big.watchdog_needed = 0; } + rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause); if (mgp->tx.req != mgp->tx.done && mgp->tx.done == mgp->watchdog_tx_done && - mgp->watchdog_tx_req != mgp->watchdog_tx_done) + mgp->watchdog_tx_req != mgp->watchdog_tx_done) { /* nic seems like it might be stuck.. 
*/ - schedule_work(&mgp->watchdog_work); - else - /* rearm timer */ - mod_timer(&mgp->watchdog_timer, - jiffies + myri10ge_watchdog_timeout * HZ); - + if (rx_pause_cnt != mgp->watchdog_pause) { + if (net_ratelimit()) + printk(KERN_WARNING "myri10ge %s:" + "TX paused, check link partner\n", + mgp->dev->name); + } else { + schedule_work(&mgp->watchdog_work); + return; + } + } + /* rearm timer */ + mod_timer(&mgp->watchdog_timer, + jiffies + myri10ge_watchdog_timeout * HZ); mgp->watchdog_tx_done = mgp->tx.done; mgp->watchdog_tx_req = mgp->tx.req; + mgp->watchdog_pause = rx_pause_cnt; } static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -2839,9 +2942,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) size_t bytes; int i; int status = -ENXIO; - int cap; int dac_enabled; - u16 val; netdev = alloc_etherdev(sizeof(*mgp)); if (netdev == NULL) { @@ -2852,8 +2953,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); mgp = netdev_priv(netdev); - memset(mgp, 0, sizeof(*mgp)); mgp->dev = netdev; + netif_napi_add(netdev, &mgp->napi, + myri10ge_poll, myri10ge_napi_weight); mgp->pdev = pdev; mgp->csum_flag = MXGEFW_FLAGS_CKSUM; mgp->pause = myri10ge_flow_control; @@ -2873,19 +2975,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) = pci_find_capability(pdev, PCI_CAP_ID_VNDR); /* Set our max read request to 4KB */ - cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (cap < 64) { - dev_err(&pdev->dev, "Bad PCI_CAP_ID_EXP location %d\n", cap); - goto abort_with_netdev; - } - status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val); - if (status != 0) { - dev_err(&pdev->dev, "Error %d reading PCI_EXP_DEVCTL\n", - status); - goto abort_with_netdev; - } - val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12); - status = pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, val); + status = pcie_set_readrq(pdev, 4096); if (status != 0) { dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", status); @@ -2990,8 +3080,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; if (dac_enabled) netdev->features |= NETIF_F_HIGHDMA; - netdev->poll = myri10ge_poll; - netdev->weight = myri10ge_napi_weight; /* make sure we can get an irq, and that MSI can be * setup (if available). 
Also ensure netdev->irq @@ -3103,9 +3191,12 @@ static void myri10ge_remove(struct pci_dev *pdev) } #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 +#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 static struct pci_device_id myri10ge_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, + {PCI_DEVICE + (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, {0}, }; diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 13444da93273..8d29319cc5cb 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -311,12 +311,12 @@ static void myri_is_not_so_happy(struct myri_eth *mp) #ifdef DEBUG_HEADER static void dump_ehdr(struct ethhdr *ehdr) { - printk("ehdr[h_dst(%02x:%02x:%02x:%02x:%02x:%02x)" - "h_source(%02x:%02x:%02x:%02x:%02x:%02x)h_proto(%04x)]\n", - ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2], - ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[4], - ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2], - ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[4], + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + printk("ehdr[h_dst(%s)" + "h_source(%s)" + "h_proto(%04x)]\n", + print_mac(mac, ehdr->h_dest), print_mac(mac2, ehdr->h_source), ehdr->h_proto); } @@ -325,13 +325,7 @@ static void dump_ehdr_and_myripad(unsigned char *stuff) struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2); printk("pad[%02x:%02x]", stuff[0], stuff[1]); - printk("ehdr[h_dst(%02x:%02x:%02x:%02x:%02x:%02x)" - "h_source(%02x:%02x:%02x:%02x:%02x:%02x)h_proto(%04x)]\n", - ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2], - ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[4], - ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2], - ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[4], - ehdr->h_proto); + dump_ehdr(ehdr); } #endif @@ -353,7 +347,7 @@ static void myri_tx(struct myri_eth *mp, struct net_device *dev) sbus_unmap_single(mp->myri_sdev, dma_addr, skb->len, SBUS_DMA_TODEVICE); dev_kfree_skb(skb); mp->tx_skbs[entry] = NULL; - mp->enet_stats.tx_packets++; + dev->stats.tx_packets++; entry = NEXT_TX(entry); } mp->tx_old = entry; @@ -434,20 +428,20 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE); if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { DRX(("ERROR[")); - mp->enet_stats.rx_errors++; + dev->stats.rx_errors++; if (len < (ETH_HLEN + MYRI_PAD_LEN)) { DRX(("BAD_LENGTH] ")); - mp->enet_stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else { DRX(("NO_PADDING] ")); - mp->enet_stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; } /* Return it to the LANAI. 
*/ drop_it: drops++; DRX(("DROP ")); - mp->enet_stats.rx_dropped++; + dev->stats.rx_dropped++; sbus_dma_sync_single_for_device(mp->myri_sdev, sbus_readl(&rxd->myri_scatters[0].addr), RX_ALLOC_SIZE, @@ -527,8 +521,8 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) netif_rx(skb); dev->last_rx = jiffies; - mp->enet_stats.rx_packets++; - mp->enet_stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next: DRX(("NEXT\n")); entry = NEXT_RX(entry); @@ -596,7 +590,7 @@ static void myri_tx_timeout(struct net_device *dev) printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); - mp->enet_stats.tx_errors++; + dev->stats.tx_errors++; myri_init(mp, 0); netif_wake_queue(dev); } @@ -682,8 +676,9 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ -static int myri_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int myri_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN); @@ -765,18 +760,18 @@ static int myri_rebuild_header(struct sk_buff *skb) return 0; } -int myri_header_cache(struct neighbour *neigh, struct hh_cache *hh) +static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { unsigned short type = hh->hh_type; unsigned char *pad; struct ethhdr *eth; - struct net_device *dev = neigh->dev; + const struct net_device *dev = neigh->dev; pad = ((unsigned char *) hh->hh_data) + HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN); eth = (struct ethhdr *) (pad + MYRI_PAD_LEN); - if (type == __constant_htons(ETH_P_802_3)) + if (type == htons(ETH_P_802_3)) return -1; /* Refill MyriNet padding identifiers, this is just being anal. */ @@ -792,7 +787,9 @@ int myri_header_cache(struct neighbour *neigh, struct hh_cache *hh) /* Called by Address Resolution module to notify changes in address. 
*/ -void myri_header_cache_update(struct hh_cache *hh, struct net_device *dev, unsigned char * haddr) +void myri_header_cache_update(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char * haddr) { memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), haddr, dev->addr_len); @@ -806,9 +803,6 @@ static int myri_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static struct net_device_stats *myri_get_stats(struct net_device *dev) -{ return &(((struct myri_eth *)dev->priv)->enet_stats); } - static void myri_set_multicast(struct net_device *dev) { /* Do nothing, all MyriCOM nodes transmit multicast frames @@ -890,6 +884,13 @@ static void dump_eeprom(struct myri_eth *mp) } #endif +static const struct header_ops myri_header_ops = { + .create = myri_header, + .rebuild = myri_rebuild_header, + .cache = myri_header_cache, + .cache_update = myri_header_cache_update, +}; + static int __devinit myri_ether_init(struct sbus_dev *sdev) { static int num; @@ -898,6 +899,7 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) struct myri_eth *mp; unsigned char prop_buf[32]; int i; + DECLARE_MAC_BUF(mac); DET(("myri_ether_init(%p,%d):\n", sdev, num)); dev = alloc_etherdev(sizeof(struct myri_eth)); @@ -908,7 +910,6 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) if (version_printed++ == 0) printk(version); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); mp = (struct myri_eth *) dev->priv; @@ -1061,7 +1062,6 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) dev->hard_start_xmit = &myri_start_xmit; dev->tx_timeout = &myri_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &myri_get_stats; dev->set_multicast_list = &myri_set_multicast; dev->irq = sdev->irqs[0]; @@ -1075,11 +1075,9 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) dev->mtu = MYRINET_MTU; dev->change_mtu = myri_change_mtu; - dev->hard_header = myri_header; - dev->rebuild_header = myri_rebuild_header; + dev->header_ops = &myri_header_ops; + dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN); - dev->hard_header_cache = myri_header_cache; - dev->header_cache_update= myri_header_cache_update; /* Load code onto the LANai. */ DET(("Loading LANAI firmware\n")); @@ -1094,12 +1092,8 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) num++; - printk("%s: MyriCOM MyriNET Ethernet ", dev->name); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + printk("%s: MyriCOM MyriNET Ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h index 2f69ef7cdccb..5d93fcc95d55 100644 --- a/drivers/net/myri_sbus.h +++ b/drivers/net/myri_sbus.h @@ -280,7 +280,6 @@ struct myri_eth { void __iomem *lregs; /* Quick ptr to LANAI regs. */ struct sk_buff *rx_skbs[RX_RING_SIZE+1];/* RX skb's */ struct sk_buff *tx_skbs[TX_RING_SIZE]; /* TX skb's */ - struct net_device_stats enet_stats; /* Interface stats. */ /* These are less frequently accessed. */ void __iomem *regs; /* MyriCOM register space. 
*/ diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 6bb48ba80964..527f9dcc7f69 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -108,7 +108,7 @@ static int full_duplex[MAX_UNITS]; #define TX_TIMEOUT (2*HZ) #define NATSEMI_HW_TIMEOUT 400 -#define NATSEMI_TIMER_FREQ 3*HZ +#define NATSEMI_TIMER_FREQ 5*HZ #define NATSEMI_PG0_NREGS 64 #define NATSEMI_RFDR_NREGS 8 #define NATSEMI_PG1_NREGS 4 @@ -560,6 +560,8 @@ struct netdev_private { /* address of a sent-in-place packet/buffer, for later free() */ struct sk_buff *tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_dma[TX_RING_SIZE]; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; /* Media monitoring timer */ struct timer_list timer; @@ -636,7 +638,7 @@ static void init_registers(struct net_device *dev); static int start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t intr_handler(int irq, void *dev_instance); static void netdev_error(struct net_device *dev, int intr_status); -static int natsemi_poll(struct net_device *dev, int *budget); +static int natsemi_poll(struct napi_struct *napi, int budget); static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); static void netdev_tx_done(struct net_device *dev); static int natsemi_change_mtu(struct net_device *dev, int new_mtu); @@ -803,6 +805,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, const int pcibar = 1; /* PCI base address register */ int prev_eedata; u32 tmp; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -835,7 +838,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, dev = alloc_etherdev(sizeof (struct netdev_private)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); i = pci_request_regions(pdev, DRV_NAME); @@ -861,6 +863,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, dev->irq = irq; np = netdev_priv(dev); + netif_napi_add(dev, &np->napi, natsemi_poll, 64); np->pci_dev = pdev; pci_set_drvdata(pdev, dev); @@ -931,8 +934,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, dev->do_ioctl = &netdev_ioctl; dev->tx_timeout = &tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - dev->poll = natsemi_poll; - dev->weight = 64; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &natsemi_poll_controller; @@ -958,12 +959,10 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, goto err_create_file; if (netif_msg_drv(np)) { - printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ", - dev->name, natsemi_pci_info[chip_idx].name, iostart, - pci_name(np->pci_dev)); - for (i = 0; i < ETH_ALEN-1; i++) - printk("%02x:", dev->dev_addr[i]); - printk("%02x, IRQ %d", dev->dev_addr[i], irq); + printk(KERN_INFO "natsemi %s: %s at %#08lx " + "(%s), %s, IRQ %d", + dev->name, natsemi_pci_info[chip_idx].name, iostart, + pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq); if (dev->if_port == PORT_TP) printk(", port TP.\n"); else if (np->ignore_phy) @@ -1554,6 +1553,8 @@ static int netdev_open(struct net_device *dev) free_irq(dev->irq, dev); return i; } + napi_enable(&np->napi); + init_ring(dev); spin_lock_irq(&np->lock); init_registers(dev); @@ -1614,7 +1615,7 @@ static void do_cable_magic(struct net_device *dev) * (these values all come from National) */ if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { - struct netdev_private *np = netdev_priv(dev); + np = netdev_priv(dev); /* the bug has been triggered - fix the coefficient */ 
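/*
 * A minimal sketch (not part of the patch) of the MAC-address printing
 * helper that the natsemi probe above -- and most other hunks in this
 * series -- switch to: a DECLARE_MAC_BUF() stack buffer plus print_mac()
 * from the 2.6.24-era <linux/if_ether.h> replaces the open-coded
 * "%02x:%02x:..." format strings.  The function name below is a
 * placeholder, not an identifier from the diff.
 */
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

static void example_show_hwaddr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);	/* char mac[18]: room for "xx:xx:xx:xx:xx:xx" + NUL */

	printk(KERN_INFO "%s: MAC %s\n", dev->name,
	       print_mac(mac, dev->dev_addr));
}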
writew(TSTDAT_FIXED, ioaddr + TSTDAT); @@ -1797,7 +1798,7 @@ static void netdev_timer(unsigned long data) struct net_device *dev = (struct net_device *)data; struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); - int next_tick = 5*HZ; + int next_tick = NATSEMI_TIMER_FREQ; if (netif_msg_timer(np)) { /* DO NOT read the IntrStatus register, @@ -2200,10 +2201,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &np->napi)) { /* Disable interrupts and register for poll */ natsemi_irq_disable(dev); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &np->napi); } else printk(KERN_WARNING "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", @@ -2216,12 +2217,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) /* This is the NAPI poll routine. As well as the standard RX handling * it also handles all other interrupts that the chip might raise. */ -static int natsemi_poll(struct net_device *dev, int *budget) +static int natsemi_poll(struct napi_struct *napi, int budget) { - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = container_of(napi, struct netdev_private, napi); + struct net_device *dev = np->dev; void __iomem * ioaddr = ns_ioaddr(dev); - - int work_to_do = min(*budget, dev->quota); int work_done = 0; do { @@ -2236,7 +2236,7 @@ static int natsemi_poll(struct net_device *dev, int *budget) if (np->intr_status & (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | IntrRxErr | IntrRxOverrun)) { - netdev_rx(dev, &work_done, work_to_do); + netdev_rx(dev, &work_done, budget); } if (np->intr_status & @@ -2250,16 +2250,13 @@ static int natsemi_poll(struct net_device *dev, int *budget) if (np->intr_status & IntrAbnormalSummary) netdev_error(dev, np->intr_status); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done >= work_to_do) - return 1; + if (work_done >= budget) + return work_done; np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); /* Reenable interrupts providing nothing is trying to shut * the chip down. */ @@ -2268,7 +2265,7 @@ static int natsemi_poll(struct net_device *dev, int *budget) natsemi_irq_enable(dev); spin_unlock(&np->lock); - return 0; + return work_done; } /* This routine is logically part of the interrupt handler, but separated @@ -2438,13 +2435,16 @@ static void netdev_error(struct net_device *dev, int intr_status) dev->name); } np->stats.rx_fifo_errors++; + np->stats.rx_errors++; } /* Hmmmmm, it's not clear how to recover from PCI faults. 
*/ if (intr_status & IntrPCIErr) { printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, intr_status & IntrPCIErr); np->stats.tx_fifo_errors++; + np->stats.tx_errors++; np->stats.rx_fifo_errors++; + np->stats.rx_errors++; } spin_unlock(&np->lock); } @@ -2502,8 +2502,8 @@ static void __set_rx_mode(struct net_device *dev) memset(mc_filter, 0, sizeof(mc_filter)); for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { - int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; - mc_filter[i/8] |= (1 << (i & 0x07)); + int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; + mc_filter[b/8] |= (1 << (b & 0x07)); } rx_mode = RxFilterEnable | AcceptBroadcast | AcceptMulticast | AcceptMyPhys; @@ -3155,6 +3155,8 @@ static int netdev_close(struct net_device *dev) dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); + napi_disable(&np->napi); + /* * FIXME: what if someone tries to close a device * that is suspended? @@ -3250,7 +3252,7 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev) * disable_irq() to enforce synchronization. * * natsemi_poll: checks before reenabling interrupts. suspend * sets hands_off, disables interrupts and then waits with - * netif_poll_disable(). + * napi_disable(). * * Interrupts must be disabled, otherwise hands_off can cause irq storms. */ @@ -3276,7 +3278,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) spin_unlock_irq(&np->lock); enable_irq(dev->irq); - netif_poll_disable(dev); + napi_disable(&np->napi); /* Update the error counts. */ __get_stats(dev); @@ -3317,6 +3319,8 @@ static int natsemi_resume (struct pci_dev *pdev) pci_enable_device(pdev); /* pci_power_on(pdev); */ + napi_enable(&np->napi); + natsemi_reset(dev); init_ring(dev); disable_irq(dev->irq); @@ -3330,7 +3334,6 @@ static int natsemi_resume (struct pci_dev *pdev) mod_timer(&np->timer, jiffies + 1*HZ); } netif_device_attach(dev); - netif_poll_enable(dev); out: rtnl_unlock(); return 0; diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 38fd525f0f13..368f2560856d 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c @@ -149,8 +149,6 @@ static int __init do_ne_probe(struct net_device *dev) { unsigned int base_addr = dev->base_addr; - SET_MODULE_OWNER(dev); - /* First check any supplied i/o locations. User knows best. <cough> */ if (base_addr > 0x1ff) /* Check a single specified location. 
*/ return ne_probe1(dev, base_addr); @@ -206,6 +204,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) static unsigned version_printed; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned char bus_width; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -259,7 +258,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -298,12 +297,11 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; - for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; - } + printk(" %s\n", print_mac(mac, dev->dev_addr)); - printk("\n%s: %s found at %#x, using IRQ %d.\n", + printk("%s: %s found at %#x, using IRQ %d.\n", dev->name, name, ioaddr, dev->irq); ei_status.name = name; diff --git a/drivers/net/ne.c b/drivers/net/ne.c index c9f74bf5f491..874d291cbaed 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -191,8 +191,6 @@ static int __init do_ne_probe(struct net_device *dev) int orig_irq = dev->irq; #endif - SET_MODULE_OWNER(dev); - /* First check any supplied i/o locations. User knows best. <cough> */ if (base_addr > 0x1ff) /* Check a single specified location. */ return ne_probe1(dev, base_addr); @@ -293,6 +291,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) int neX000, ctron, copam, bad_card; int reg0, ret; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -377,7 +376,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -505,16 +504,14 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) for (i = 0 ; i < ETHER_ADDR_LEN ; i++) { dev->dev_addr[i] = SA_prom[i] = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); - printk(" %2.2x", SA_prom[i]); } #else for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); dev->dev_addr[i] = SA_prom[i]; } #endif - printk("\n"); + printk("%s\n", print_mac(mac, dev->dev_addr)); ei_status.name = name; ei_status.tx_start_page = start_page; diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c index 089b5bb702fc..f4cd8c7e81ba 100644 --- a/drivers/net/ne2.c +++ b/drivers/net/ne2.c @@ -251,8 +251,6 @@ static int __init do_ne2_probe(struct net_device *dev) int i; int adapter_found = 0; - SET_MODULE_OWNER(dev); - /* Do not check any supplied i/o locations. 
POS registers usually don't fail :) */ @@ -304,6 +302,7 @@ out: static int ne2_procinfo(char *buf, int slot, struct net_device *dev) { int len=0; + DECLARE_MAC_BUF(mac); len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); len += sprintf(buf+len, "Driver written by Wim Dumon "); @@ -314,12 +313,7 @@ static int ne2_procinfo(char *buf, int slot, struct net_device *dev) len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); len += sprintf(buf+len, "IRQ : %d\n", dev->irq); - -#define HW_ADDR(i) dev->dev_addr[i] - len += sprintf(buf+len, "HW addr : %x:%x:%x:%x:%x:%x\n", - HW_ADDR(0), HW_ADDR(1), HW_ADDR(2), - HW_ADDR(3), HW_ADDR(4), HW_ADDR(5) ); -#undef HW_ADDR + len += sprintf(buf+len, "HW addr : %s\n", print_mac(mac, dev->dev_addr)); return len; } @@ -332,6 +326,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) const char *name = "NE/2"; int start_page, stop_page; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (ei_debug && version_printed++ == 0) printk(version); @@ -432,7 +427,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, base_addr + program_seq[i].offset); @@ -471,12 +466,12 @@ static int __init ne2_probe1(struct net_device *dev, int slot) dev->base_addr = base_addr; - for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; - } - printk("\n%s: %s found at %#x, using IRQ %d.\n", + printk(" %s\n", print_mac(mac, dev->dev_addr)); + + printk("%s: %s found at %#x, using IRQ %d.\n", dev->name, name, base_addr, dev->irq); mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev); diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index cfdeaf7aa163..b569c90da4ba 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -212,6 +212,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, static unsigned int fnd_cnt; long ioaddr; int flags = pci_clone_list[chip_idx].flags; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -265,7 +266,6 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, dev_err(&pdev->dev, "cannot allocate ethernet device\n"); goto err_out_free_res; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Reset card. Who knows what dain-bramaged state it was left in. */ @@ -308,7 +308,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, {0x00, EN0_RSARHI}, {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -366,12 +366,12 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, if (i) goto err_out_free_netdev; - printk("%s: %s found at %#lx, IRQ %d, ", - dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq); - for(i = 0; i < 6; i++) { - printk("%2.2X%s", SA_prom[i], i == 5 ? 
".\n": ":"); + for(i = 0; i < 6; i++) dev->dev_addr[i] = SA_prom[i]; - } + printk("%s: %s found at %#lx, IRQ %d, %s.\n", + dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, + print_mac(mac, dev->dev_addr)); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return 0; @@ -636,9 +636,6 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev, static const struct ethtool_ops ne2k_pci_ethtool_ops = { .get_drvinfo = ne2k_pci_get_drvinfo, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_perm_addr = ethtool_op_get_perm_addr, }; static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c index 1a6fed76d4cc..425043a88db9 100644 --- a/drivers/net/ne3210.c +++ b/drivers/net/ne3210.c @@ -99,6 +99,7 @@ static int __init ne3210_eisa_probe (struct device *device) int i, retval, port_index; struct eisa_device *edev = to_eisa_device (device); struct net_device *dev; + DECLARE_MAC_BUF(mac); /* Allocate dev->priv and fill in 8390 specific dev fields. */ if (!(dev = alloc_ei_netdev ())) { @@ -106,7 +107,6 @@ static int __init ne3210_eisa_probe (struct device *device) return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, device); device->driver_data = dev; ioaddr = edev->base_addr; @@ -128,17 +128,15 @@ static int __init ne3210_eisa_probe (struct device *device) inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); #endif - port_index = inb(ioaddr + NE3210_CFG2) >> 6; - printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr:", - edev->slot, ifmap[port_index]); for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i))); - + dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); + printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %s.\n", + edev->slot, ifmap[port_index], print_mac(mac, dev->dev_addr)); /* Snarf the interrupt now. 
CFG file has them all listed as `edge' with share=NO */ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; - printk(".\nne3210.c: using IRQ %d, ", dev->irq); + printk("ne3210.c: using IRQ %d, ", dev->irq); retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 69233f6aa05c..5ffbb8891647 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -35,95 +35,780 @@ ****************************************************************/ #include <linux/mm.h> -#include <linux/tty.h> #include <linux/init.h> #include <linux/module.h> #include <linux/console.h> -#include <linux/tty_driver.h> #include <linux/moduleparam.h> #include <linux/string.h> -#include <linux/sysrq.h> -#include <linux/smp.h> #include <linux/netpoll.h> +#include <linux/inet.h> +#include <linux/configfs.h> MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>"); MODULE_DESCRIPTION("Console driver for network interfaces"); MODULE_LICENSE("GPL"); -static char config[256]; -module_param_string(netconsole, config, 256, 0); +#define MAX_PARAM_LENGTH 256 +#define MAX_PRINT_CHUNK 1000 + +static char config[MAX_PARAM_LENGTH]; +module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]\n"); -static struct netpoll np = { - .name = "netconsole", - .dev_name = "eth0", - .local_port = 6665, - .remote_port = 6666, - .remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, +#ifndef MODULE +static int __init option_setup(char *opt) +{ + strlcpy(config, opt, MAX_PARAM_LENGTH); + return 1; +} +__setup("netconsole=", option_setup); +#endif /* MODULE */ + +/* Linked list of all configured targets */ +static LIST_HEAD(target_list); + +/* This needs to be a spinlock because write_msg() cannot sleep */ +static DEFINE_SPINLOCK(target_list_lock); + +/** + * struct netconsole_target - Represents a configured netconsole target. + * @list: Links this target into the target_list. + * @item: Links us into the configfs subsystem hierarchy. + * @enabled: On / off knob to enable / disable target. + * Visible from userspace (read-write). + * We maintain a strict 1:1 correspondence between this and + * whether the corresponding netpoll is active or inactive. + * Also, other parameters of a target may be modified at + * runtime only when it is disabled (enabled == 0). + * @np: The netpoll structure for this target. 
+ * Contains the other userspace visible parameters: + * dev_name (read-write) + * local_port (read-write) + * remote_port (read-write) + * local_ip (read-write) + * remote_ip (read-write) + * local_mac (read-only) + * remote_mac (read-write) + */ +struct netconsole_target { + struct list_head list; +#ifdef CONFIG_NETCONSOLE_DYNAMIC + struct config_item item; +#endif + int enabled; + struct netpoll np; }; -static int configured = 0; -#define MAX_PRINT_CHUNK 1000 +#ifdef CONFIG_NETCONSOLE_DYNAMIC -static void write_msg(struct console *con, const char *msg, unsigned int len) +static struct configfs_subsystem netconsole_subsys; + +static int __init dynamic_netconsole_init(void) { - int frag, left; - unsigned long flags; + config_group_init(&netconsole_subsys.su_group); + mutex_init(&netconsole_subsys.su_mutex); + return configfs_register_subsystem(&netconsole_subsys); +} - if (!np.dev) - return; +static void __exit dynamic_netconsole_exit(void) +{ + configfs_unregister_subsystem(&netconsole_subsys); +} + +/* + * Targets that were created by parsing the boot/module option string + * do not exist in the configfs hierarchy (and have NULL names) and will + * never go away, so make these a no-op for them. + */ +static void netconsole_target_get(struct netconsole_target *nt) +{ + if (config_item_name(&nt->item)) + config_item_get(&nt->item); +} + +static void netconsole_target_put(struct netconsole_target *nt) +{ + if (config_item_name(&nt->item)) + config_item_put(&nt->item); +} + +#else /* !CONFIG_NETCONSOLE_DYNAMIC */ + +static int __init dynamic_netconsole_init(void) +{ + return 0; +} - local_irq_save(flags); +static void __exit dynamic_netconsole_exit(void) +{ +} - for(left = len; left; ) { - frag = min(left, MAX_PRINT_CHUNK); - netpoll_send_udp(&np, msg, frag); - msg += frag; - left -= frag; +/* + * No danger of targets going away from under us when dynamic + * reconfigurability is off. + */ +static void netconsole_target_get(struct netconsole_target *nt) +{ +} + +static void netconsole_target_put(struct netconsole_target *nt) +{ +} + +#endif /* CONFIG_NETCONSOLE_DYNAMIC */ + +/* Allocate new target (from boot/module param) and setup netpoll for it */ +static struct netconsole_target *alloc_param_target(char *target_config) +{ + int err = -ENOMEM; + struct netconsole_target *nt; + + /* + * Allocate and initialize with defaults. + * Note that these targets get their config_item fields zeroed-out. 
+ */ + nt = kzalloc(sizeof(*nt), GFP_KERNEL); + if (!nt) { + printk(KERN_ERR "netconsole: failed to allocate memory\n"); + goto fail; } - local_irq_restore(flags); + nt->np.name = "netconsole"; + strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); + nt->np.local_port = 6665; + nt->np.remote_port = 6666; + memset(nt->np.remote_mac, 0xff, ETH_ALEN); + + /* Parse parameters and setup netpoll */ + err = netpoll_parse_options(&nt->np, target_config); + if (err) + goto fail; + + err = netpoll_setup(&nt->np); + if (err) + goto fail; + + nt->enabled = 1; + + return nt; + +fail: + kfree(nt); + return ERR_PTR(err); } -static struct console netconsole = { - .name = "netcon", - .flags = CON_ENABLED | CON_PRINTBUFFER, - .write = write_msg +/* Cleanup netpoll for given target (from boot/module param) and free it */ +static void free_param_target(struct netconsole_target *nt) +{ + netpoll_cleanup(&nt->np); + kfree(nt); +} + +#ifdef CONFIG_NETCONSOLE_DYNAMIC + +/* + * Our subsystem hierarchy is: + * + * /sys/kernel/config/netconsole/ + * | + * <target>/ + * | enabled + * | dev_name + * | local_port + * | remote_port + * | local_ip + * | remote_ip + * | local_mac + * | remote_mac + * | + * <target>/... + */ + +struct netconsole_target_attr { + struct configfs_attribute attr; + ssize_t (*show)(struct netconsole_target *nt, + char *buf); + ssize_t (*store)(struct netconsole_target *nt, + const char *buf, + size_t count); }; -static int option_setup(char *opt) +static struct netconsole_target *to_target(struct config_item *item) { - configured = !netpoll_parse_options(&np, opt); - return 1; + return item ? + container_of(item, struct netconsole_target, item) : + NULL; } -__setup("netconsole=", option_setup); +/* + * Wrapper over simple_strtol (base 10) with sanity and range checking. + * We return (signed) long only because we may want to return errors. + * Do not use this to convert numbers that are allowed to be negative. + */ +static long strtol10_check_range(const char *cp, long min, long max) +{ + long ret; + char *p = (char *) cp; + + WARN_ON(min < 0); + WARN_ON(max < min); + + ret = simple_strtol(p, &p, 10); + + if (*p && (*p != '\n')) { + printk(KERN_ERR "netconsole: invalid input\n"); + return -EINVAL; + } + if ((ret < min) || (ret > max)) { + printk(KERN_ERR "netconsole: input %ld must be between " + "%ld and %ld\n", ret, min, max); + return -EINVAL; + } + + return ret; +} + +/* + * Attribute operations for netconsole_target. 
+ */ + +static ssize_t show_enabled(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", nt->enabled); +} + +static ssize_t show_dev_name(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", nt->np.dev_name); +} + +static ssize_t show_local_port(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", nt->np.local_port); +} + +static ssize_t show_remote_port(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", nt->np.remote_port); +} + +static ssize_t show_local_ip(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", + HIPQUAD(nt->np.local_ip)); +} + +static ssize_t show_remote_ip(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", + HIPQUAD(nt->np.remote_ip)); +} + +static ssize_t show_local_mac(struct netconsole_target *nt, char *buf) +{ + DECLARE_MAC_BUF(mac); + return snprintf(buf, PAGE_SIZE, "%s\n", + print_mac(mac, nt->np.local_mac)); +} + +static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf) +{ + DECLARE_MAC_BUF(mac); + return snprintf(buf, PAGE_SIZE, "%s\n", + print_mac(mac, nt->np.remote_mac)); +} -static int init_netconsole(void) +/* + * This one is special -- targets created through the configfs interface + * are not enabled (and the corresponding netpoll activated) by default. + * The user is expected to set the desired parameters first (which + * would enable him to dynamically add new netpoll targets for new + * network interfaces as and when they come up). + */ +static ssize_t store_enabled(struct netconsole_target *nt, + const char *buf, + size_t count) { int err; + long enabled; + + enabled = strtol10_check_range(buf, 0, 1); + if (enabled < 0) + return enabled; + + if (enabled) { /* 1 */ + + /* + * Skip netpoll_parse_options() -- all the attributes are + * already configured via configfs. Just print them out. 
+ */ + netpoll_print_options(&nt->np); + + err = netpoll_setup(&nt->np); + if (err) + return err; + + printk(KERN_INFO "netconsole: network logging started\n"); + + } else { /* 0 */ + netpoll_cleanup(&nt->np); + } + + nt->enabled = enabled; + + return strnlen(buf, count); +} + +static ssize_t store_dev_name(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + size_t len; + + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + strlcpy(nt->np.dev_name, buf, IFNAMSIZ); + + /* Get rid of possible trailing newline from echo(1) */ + len = strnlen(nt->np.dev_name, IFNAMSIZ); + if (nt->np.dev_name[len - 1] == '\n') + nt->np.dev_name[len - 1] = '\0'; + + return strnlen(buf, count); +} + +static ssize_t store_local_port(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + long local_port; +#define __U16_MAX ((__u16) ~0U) + + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + local_port = strtol10_check_range(buf, 0, __U16_MAX); + if (local_port < 0) + return local_port; + + nt->np.local_port = local_port; + + return strnlen(buf, count); +} + +static ssize_t store_remote_port(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + long remote_port; +#define __U16_MAX ((__u16) ~0U) + + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + remote_port = strtol10_check_range(buf, 0, __U16_MAX); + if (remote_port < 0) + return remote_port; + + nt->np.remote_port = remote_port; + + return strnlen(buf, count); +} + +static ssize_t store_local_ip(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + nt->np.local_ip = ntohl(in_aton(buf)); + + return strnlen(buf, count); +} + +static ssize_t store_remote_ip(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + nt->np.remote_ip = ntohl(in_aton(buf)); + + return strnlen(buf, count); +} + +static ssize_t store_remote_mac(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + u8 remote_mac[ETH_ALEN]; + char *p = (char *) buf; + int i; - if(strlen(config)) - option_setup(config); + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } - if(!configured) { - printk("netconsole: not configured, aborting\n"); - return 0; + for (i = 0; i < ETH_ALEN - 1; i++) { + remote_mac[i] = simple_strtoul(p, &p, 16); + if (*p != ':') + goto invalid; + p++; } + remote_mac[ETH_ALEN - 1] = simple_strtoul(p, &p, 16); + if (*p && (*p != '\n')) + goto invalid; + + memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); + + return strnlen(buf, count); + +invalid: + printk(KERN_ERR "netconsole: invalid input\n"); + return -EINVAL; +} + +/* + * Attribute definitions for netconsole_target. 
+ */ + +#define NETCONSOLE_TARGET_ATTR_RO(_name) \ +static struct netconsole_target_attr netconsole_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO, show_##_name, NULL) - err = netpoll_setup(&np); +#define NETCONSOLE_TARGET_ATTR_RW(_name) \ +static struct netconsole_target_attr netconsole_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, show_##_name, store_##_name) + +NETCONSOLE_TARGET_ATTR_RW(enabled); +NETCONSOLE_TARGET_ATTR_RW(dev_name); +NETCONSOLE_TARGET_ATTR_RW(local_port); +NETCONSOLE_TARGET_ATTR_RW(remote_port); +NETCONSOLE_TARGET_ATTR_RW(local_ip); +NETCONSOLE_TARGET_ATTR_RW(remote_ip); +NETCONSOLE_TARGET_ATTR_RO(local_mac); +NETCONSOLE_TARGET_ATTR_RW(remote_mac); + +static struct configfs_attribute *netconsole_target_attrs[] = { + &netconsole_target_enabled.attr, + &netconsole_target_dev_name.attr, + &netconsole_target_local_port.attr, + &netconsole_target_remote_port.attr, + &netconsole_target_local_ip.attr, + &netconsole_target_remote_ip.attr, + &netconsole_target_local_mac.attr, + &netconsole_target_remote_mac.attr, + NULL, +}; + +/* + * Item operations and type for netconsole_target. + */ + +static void netconsole_target_release(struct config_item *item) +{ + kfree(to_target(item)); +} + +static ssize_t netconsole_target_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *buf) +{ + ssize_t ret = -EINVAL; + struct netconsole_target *nt = to_target(item); + struct netconsole_target_attr *na = + container_of(attr, struct netconsole_target_attr, attr); + + if (na->show) + ret = na->show(nt, buf); + + return ret; +} + +static ssize_t netconsole_target_attr_store(struct config_item *item, + struct configfs_attribute *attr, + const char *buf, + size_t count) +{ + ssize_t ret = -EINVAL; + struct netconsole_target *nt = to_target(item); + struct netconsole_target_attr *na = + container_of(attr, struct netconsole_target_attr, attr); + + if (na->store) + ret = na->store(nt, buf, count); + + return ret; +} + +static struct configfs_item_operations netconsole_target_item_ops = { + .release = netconsole_target_release, + .show_attribute = netconsole_target_attr_show, + .store_attribute = netconsole_target_attr_store, +}; + +static struct config_item_type netconsole_target_type = { + .ct_attrs = netconsole_target_attrs, + .ct_item_ops = &netconsole_target_item_ops, + .ct_owner = THIS_MODULE, +}; + +/* + * Group operations and type for netconsole_subsys. + */ + +static struct config_item *make_netconsole_target(struct config_group *group, + const char *name) +{ + unsigned long flags; + struct netconsole_target *nt; + + /* + * Allocate and initialize with defaults. + * Target is disabled at creation (enabled == 0). 
+ */ + nt = kzalloc(sizeof(*nt), GFP_KERNEL); + if (!nt) { + printk(KERN_ERR "netconsole: failed to allocate memory\n"); + return NULL; + } + + nt->np.name = "netconsole"; + strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); + nt->np.local_port = 6665; + nt->np.remote_port = 6666; + memset(nt->np.remote_mac, 0xff, ETH_ALEN); + + /* Initialize the config_item member */ + config_item_init_type_name(&nt->item, name, &netconsole_target_type); + + /* Adding, but it is disabled */ + spin_lock_irqsave(&target_list_lock, flags); + list_add(&nt->list, &target_list); + spin_unlock_irqrestore(&target_list_lock, flags); + + return &nt->item; +} + +static void drop_netconsole_target(struct config_group *group, + struct config_item *item) +{ + unsigned long flags; + struct netconsole_target *nt = to_target(item); + + spin_lock_irqsave(&target_list_lock, flags); + list_del(&nt->list); + spin_unlock_irqrestore(&target_list_lock, flags); + + /* + * The target may have never been enabled, or was manually disabled + * before being removed so netpoll may have already been cleaned up. + */ + if (nt->enabled) + netpoll_cleanup(&nt->np); + + config_item_put(&nt->item); +} + +static struct configfs_group_operations netconsole_subsys_group_ops = { + .make_item = make_netconsole_target, + .drop_item = drop_netconsole_target, +}; + +static struct config_item_type netconsole_subsys_type = { + .ct_group_ops = &netconsole_subsys_group_ops, + .ct_owner = THIS_MODULE, +}; + +/* The netconsole configfs subsystem */ +static struct configfs_subsystem netconsole_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "netconsole", + .ci_type = &netconsole_subsys_type, + }, + }, +}; + +#endif /* CONFIG_NETCONSOLE_DYNAMIC */ + +/* Handle network interface device notifications */ +static int netconsole_netdev_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + unsigned long flags; + struct netconsole_target *nt; + struct net_device *dev = ptr; + + if (!(event == NETDEV_CHANGEADDR || event == NETDEV_CHANGENAME)) + goto done; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + netconsole_target_get(nt); + if (nt->np.dev == dev) { + switch (event) { + case NETDEV_CHANGEADDR: + memcpy(nt->np.local_mac, dev->dev_addr, ETH_ALEN); + break; + + case NETDEV_CHANGENAME: + strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); + break; + } + } + netconsole_target_put(nt); + } + spin_unlock_irqrestore(&target_list_lock, flags); + +done: + return NOTIFY_DONE; +} + +static struct notifier_block netconsole_netdev_notifier = { + .notifier_call = netconsole_netdev_event, +}; + +static void write_msg(struct console *con, const char *msg, unsigned int len) +{ + int frag, left; + unsigned long flags; + struct netconsole_target *nt; + const char *tmp; + + /* Avoid taking lock and disabling interrupts unnecessarily */ + if (list_empty(&target_list)) + return; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + netconsole_target_get(nt); + if (nt->enabled && netif_running(nt->np.dev)) { + /* + * We nest this inside the for-each-target loop above + * so that we're able to get as much logging out to + * at least one target if we die inside here, instead + * of unnecessarily keeping all targets in lock-step. 
+ */ + tmp = msg; + for (left = len; left;) { + frag = min(left, MAX_PRINT_CHUNK); + netpoll_send_udp(&nt->np, tmp, frag); + tmp += frag; + left -= frag; + } + } + netconsole_target_put(nt); + } + spin_unlock_irqrestore(&target_list_lock, flags); +} + +static struct console netconsole = { + .name = "netcon", + .flags = CON_ENABLED | CON_PRINTBUFFER, + .write = write_msg, +}; + +static int __init init_netconsole(void) +{ + int err; + struct netconsole_target *nt, *tmp; + unsigned long flags; + char *target_config; + char *input = config; + + if (strnlen(input, MAX_PARAM_LENGTH)) { + while ((target_config = strsep(&input, ";"))) { + nt = alloc_param_target(target_config); + if (IS_ERR(nt)) { + err = PTR_ERR(nt); + goto fail; + } + spin_lock_irqsave(&target_list_lock, flags); + list_add(&nt->list, &target_list); + spin_unlock_irqrestore(&target_list_lock, flags); + } + } + + err = register_netdevice_notifier(&netconsole_netdev_notifier); + if (err) + goto fail; + + err = dynamic_netconsole_init(); if (err) - return err; + goto undonotifier; register_console(&netconsole); printk(KERN_INFO "netconsole: network logging started\n"); - return 0; + + return err; + +undonotifier: + unregister_netdevice_notifier(&netconsole_netdev_notifier); + +fail: + printk(KERN_ERR "netconsole: cleaning up\n"); + + /* + * Remove all targets and destroy them (only targets created + * from the boot/module option exist here). Skipping the list + * lock is safe here, and netpoll_cleanup() will sleep. + */ + list_for_each_entry_safe(nt, tmp, &target_list, list) { + list_del(&nt->list); + free_param_target(nt); + } + + return err; } -static void cleanup_netconsole(void) +static void __exit cleanup_netconsole(void) { + struct netconsole_target *nt, *tmp; + unregister_console(&netconsole); - netpoll_cleanup(&np); + dynamic_netconsole_exit(); + unregister_netdevice_notifier(&netconsole_netdev_notifier); + + /* + * Targets created via configfs pin references on our module + * and would first be rmdir(2)'ed from userspace. We reach + * here only when they are already destroyed, and only those + * created from the boot/module option are left, so remove and + * destroy them. Skipping the list lock is safe here, and + * netpoll_cleanup() will sleep. 
+ */ + list_for_each_entry_safe(nt, tmp, &target_list, list) { + list_del(&nt->list); + free_param_target(nt); + } } module_init(init_netconsole); diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c index 2b8da0a54998..eb0aff787dfd 100644 --- a/drivers/net/netx-eth.c +++ b/drivers/net/netx-eth.c @@ -97,7 +97,6 @@ struct netx_eth_priv { void __iomem *sram_base, *xpec_base, *xmac_base; int id; - struct net_device_stats stats; struct mii_if_info mii; u32 msg_enable; struct xc *xc; @@ -129,8 +128,8 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) FIFO_PTR_FRAMELEN(len)); ndev->trans_start = jiffies; - priv->stats.tx_packets++; - priv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; netif_stop_queue(ndev); spin_unlock_irq(&priv->lock); @@ -156,7 +155,7 @@ static void netx_eth_receive(struct net_device *ndev) if (unlikely(skb == NULL)) { printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", ndev->name); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -170,8 +169,8 @@ static void netx_eth_receive(struct net_device *ndev) ndev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, ndev); netif_rx(skb); - priv->stats.rx_packets++; - priv->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } static irqreturn_t @@ -210,12 +209,6 @@ netx_eth_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct net_device_stats *netx_eth_query_statistics(struct net_device *ndev) -{ - struct netx_eth_priv *priv = netdev_priv(ndev); - return &priv->stats; -} - static int netx_eth_open(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); @@ -323,7 +316,6 @@ static int netx_eth_enable(struct net_device *ndev) ndev->hard_start_xmit = netx_eth_hard_start_xmit; ndev->tx_timeout = netx_eth_timeout; ndev->watchdog_timeo = msecs_to_jiffies(5000); - ndev->get_stats = netx_eth_query_statistics; ndev->set_multicast_list = netx_eth_set_multicast_list; priv->msg_enable = NETIF_MSG_LINK; @@ -390,7 +382,6 @@ static int netx_eth_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto exit; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 325269d8ae38..fbc2553275dc 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -880,6 +880,7 @@ struct netxen_adapter { struct netxen_adapter *master; struct net_device *netdev; struct pci_dev *pdev; + struct napi_struct napi; struct net_device_stats net_stats; unsigned char mac_addr[ETH_ALEN]; int mtu; @@ -918,7 +919,7 @@ struct netxen_adapter { u16 link_duplex; u16 state; u16 link_autoneg; - int rcsum; + int rx_csum; int status; spinlock_t stats_lock; @@ -1118,7 +1119,7 @@ static const struct netxen_brdinfo netxen_boards[] = { {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, }; -#define NUM_SUPPORTED_BOARDS (sizeof(netxen_boards)/sizeof(struct netxen_brdinfo)) +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) static inline void get_brd_port_by_type(u32 type, int *ports) { @@ -1179,8 +1180,7 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter) NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) printk(KERN_ERR "failed to read dma watchdog status\n"); - return ((netxen_get_dma_watchdog_enabled(ctrl) == 0) && - (netxen_get_dma_watchdog_disabled(ctrl) == 0)); + return (netxen_get_dma_watchdog_enabled(ctrl) == 0); } static inline int diff --git 
a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 0175f6c353f6..cfb847b0cae3 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -115,8 +115,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_stats = NETXEN_NIC_STATS_LEN; - drvinfo->testinfo_len = NETXEN_NIC_TEST_LEN; drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); } @@ -518,17 +516,17 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) ring->rx_jumbo_pending = 0; for (i = 0; i < MAX_RCV_CTX; ++i) { ring->rx_pending += adapter->recv_ctx[i]. - rcv_desc[RCV_DESC_NORMAL_CTXID].rcv_pending; + rcv_desc[RCV_DESC_NORMAL_CTXID].max_rx_desc_count; ring->rx_jumbo_pending += adapter->recv_ctx[i]. - rcv_desc[RCV_DESC_JUMBO_CTXID].rcv_pending; + rcv_desc[RCV_DESC_JUMBO_CTXID].max_rx_desc_count; } + ring->tx_pending = adapter->max_tx_desc_count; - ring->rx_max_pending = adapter->max_rx_desc_count; - ring->tx_max_pending = adapter->max_tx_desc_count; - ring->rx_jumbo_max_pending = adapter->max_jumbo_rx_desc_count; + ring->rx_max_pending = MAX_RCV_DESCRIPTORS; + ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; ring->rx_mini_max_pending = 0; ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; } static void @@ -672,9 +670,16 @@ static int netxen_nic_reg_test(struct net_device *dev) return 0; } -static int netxen_nic_diag_test_count(struct net_device *dev) +static int netxen_get_sset_count(struct net_device *dev, int sset) { - return NETXEN_NIC_TEST_LEN; + switch (sset) { + case ETH_SS_TEST: + return NETXEN_NIC_TEST_LEN; + case ETH_SS_STATS: + return NETXEN_NIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void @@ -709,11 +714,6 @@ netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) } } -static int netxen_nic_get_stats_count(struct net_device *dev) -{ - return NETXEN_NIC_STATS_LEN; -} - static void netxen_nic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) @@ -731,6 +731,19 @@ netxen_nic_get_ethtool_stats(struct net_device *dev, } } +static u32 netxen_nic_get_rx_csum(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + return adapter->rx_csum; +} + +static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + adapter->rx_csum = !!data; + return 0; +} + struct ethtool_ops netxen_nic_ethtool_ops = { .get_settings = netxen_nic_get_settings, .set_settings = netxen_nic_set_settings, @@ -744,16 +757,13 @@ struct ethtool_ops netxen_nic_ethtool_ops = { .get_ringparam = netxen_nic_get_ringparam, .get_pauseparam = netxen_nic_get_pauseparam, .set_pauseparam = netxen_nic_set_pauseparam, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, - .self_test_count = netxen_nic_diag_test_count, .self_test = netxen_nic_diag_test, .get_strings = netxen_nic_get_strings, - .get_stats_count = netxen_nic_get_stats_count, .get_ethtool_stats = netxen_nic_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_sset_count = netxen_get_sset_count, + .get_rx_csum = 
netxen_nic_get_rx_csum, + .set_rx_csum = netxen_nic_set_rx_csum, }; diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 3276866b17e2..d72f8f8fcb50 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h @@ -649,9 +649,11 @@ enum { #define PCIX_INT_VECTOR (0x10100) #define PCIX_INT_MASK (0x10104) -#define PCIX_MN_WINDOW (0x10200) +#define PCIX_MN_WINDOW_F0 (0x10200) +#define PCIX_MN_WINDOW(_f) (PCIX_MN_WINDOW_F0 + (0x20 * (_f))) #define PCIX_MS_WINDOW (0x10204) -#define PCIX_SN_WINDOW (0x10208) +#define PCIX_SN_WINDOW_F0 (0x10208) +#define PCIX_SN_WINDOW(_f) (PCIX_SN_WINDOW_F0 + (0x20 * (_f))) #define PCIX_CRB_WINDOW (0x10210) #define PCIX_CRB_WINDOW_F0 (0x10210) #define PCIX_CRB_WINDOW_F1 (0x10230) diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index aac15421bd1e..2c19b8df98fa 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -569,7 +569,7 @@ int netxen_is_flash_supported(struct netxen_adapter *adapter) /* if the flash size less than 4Mb, make huge war cry and die */ for (j = 1; j < 4; j++) { addr = j * NETXEN_NIC_WINDOW_MARGIN; - for (i = 0; i < (sizeof(locs) / sizeof(locs[0])); i++) { + for (i = 0; i < ARRAY_SIZE(locs); i++) { if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0 && netxen_rom_fast_read(adapter, (addr + locs[i]), &val02) == 0) { @@ -904,11 +904,11 @@ netxen_nic_pci_set_window(struct netxen_adapter *adapter, ddr_mn_window = window; writel(window, PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCIX_PH_REG - (PCIX_MN_WINDOW))); + (PCIX_MN_WINDOW(adapter->ahw.pci_func)))); /* MUST make sure window is set before we forge on... */ readl(PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCIX_PH_REG - (PCIX_MN_WINDOW))); + (PCIX_MN_WINDOW(adapter->ahw.pci_func)))); } addr -= (window * NETXEN_WINDOW_ONE); addr += NETXEN_PCI_DDR_NET; @@ -929,11 +929,11 @@ netxen_nic_pci_set_window(struct netxen_adapter *adapter, writel((window << 22), PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCIX_PH_REG - (PCIX_SN_WINDOW))); + (PCIX_SN_WINDOW(adapter->ahw.pci_func)))); /* MUST make sure window is set before we forge on... 
*/ readl(PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCIX_PH_REG - (PCIX_SN_WINDOW))); + (PCIX_SN_WINDOW(adapter->ahw.pci_func)))); } addr -= (window * 0x400000); addr += NETXEN_PCI_QDR_NET; diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 1811bcb8c380..37589265297e 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1118,10 +1118,13 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, skb = (struct sk_buff *)buffer->skb; - if (likely(netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) { + if (likely(adapter->rx_csum && + netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) { adapter->stats.csummed++; skb->ip_summed = CHECKSUM_UNNECESSARY; - } + } else + skb->ip_summed = CHECKSUM_NONE; + skb->dev = netdev; if (desc_ctx == RCV_DESC_LRO_CTXID) { /* True length was only available on the last pkt */ diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index b703ccfe040b..2a1d6d7ec351 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -39,14 +39,13 @@ #include "netxen_nic_phan_reg.h" #include <linux/dma-mapping.h> -#include <linux/vmalloc.h> #include <net/ip.h> MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); -char netxen_nic_driver_name[] = "netxen-nic"; +char netxen_nic_driver_name[] = "netxen_nic"; static char netxen_nic_driver_string[] = "NetXen Network Driver version " NETXEN_NIC_LINUX_VERSIONID; @@ -68,7 +67,7 @@ static void netxen_tx_timeout(struct net_device *netdev); static void netxen_tx_timeout_task(struct work_struct *work); static void netxen_watchdog(unsigned long); static int netxen_handle_int(struct netxen_adapter *, struct net_device *); -static int netxen_nic_poll(struct net_device *dev, int *budget); +static int netxen_nic_poll(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void netxen_nic_poll_controller(struct net_device *netdev); #endif @@ -286,6 +285,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int valid_mac = 0; u32 val; int pci_func_id = PCI_FUNC(pdev->devfn); + DECLARE_MAC_BUF(mac); printk(KERN_INFO "%s \n", netxen_nic_driver_string); @@ -326,16 +326,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_res; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev->priv; - memset(adapter, 0 , sizeof(struct netxen_adapter)); adapter->ahw.pdev = pdev; adapter->ahw.pci_func = pci_func_id; spin_lock_init(&adapter->tx_lock); - spin_lock_init(&adapter->lock); /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ @@ -403,12 +400,16 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->netdev = netdev; adapter->pdev = pdev; + netif_napi_add(netdev, &adapter->napi, + netxen_nic_poll, NETXEN_NETDEV_WEIGHT); + /* this will be read from FW later */ adapter->intr_scheme = -1; /* This will be reset for mezz cards */ adapter->portnum = pci_func_id; adapter->status &= ~NETXEN_NETDEV_STATUS; + adapter->rx_csum = 1; netdev->open = netxen_nic_open; netdev->stop = netxen_nic_close; @@ -423,8 +424,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netxen_nic_change_mtu(netdev, netdev->mtu); SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); - netdev->poll = netxen_nic_poll; - netdev->weight = NETXEN_NETDEV_WEIGHT; 
#ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = netxen_nic_poll_controller; #endif @@ -576,15 +575,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { - printk(KERN_ERR "%s: Bad MAC address " - "%02x:%02x:%02x:%02x:%02x:%02x.\n", - netxen_nic_driver_name, - netdev->dev_addr[0], - netdev->dev_addr[1], - netdev->dev_addr[2], - netdev->dev_addr[3], - netdev->dev_addr[4], - netdev->dev_addr[5]); + printk(KERN_ERR "%s: Bad MAC address %s.\n", + netxen_nic_driver_name, + print_mac(mac, netdev->dev_addr)); } else { if (adapter->macaddr_set) adapter->macaddr_set(adapter, @@ -747,9 +740,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) netxen_nic_disable_int(adapter); - if (adapter->irq) - free_irq(adapter->irq, adapter); - if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { init_firmware_done++; netxen_free_hw_resources(adapter); @@ -773,28 +763,22 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) } } - if (adapter->flags & NETXEN_NIC_MSI_ENABLED) - pci_disable_msi(pdev); - vfree(adapter->cmd_buf_arr); - pci_disable_device(pdev); - if (adapter->portnum == 0) { if (init_firmware_done) { - dma_watchdog_shutdown_request(adapter); - msleep(100); i = 100; - while ((dma_watchdog_shutdown_poll_result(adapter) != 1) && i) { - printk(KERN_INFO "dma_watchdog_shutdown_poll still in progress\n"); + do { + if (dma_watchdog_shutdown_request(adapter) == 1) + break; msleep(100); - i--; - } + if (dma_watchdog_shutdown_poll_result(adapter) == 1) + break; + } while (--i); - if (i == 0) { - printk(KERN_ERR "dma_watchdog_shutdown_request failed\n"); - return; - } + if (i == 0) + printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", + netdev->name); /* clear the register for future unloads/loads */ writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); @@ -803,11 +787,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) /* leave the hw in the same state as reboot */ writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); - if (netxen_pinit_from_rom(adapter, 0)) - return; + netxen_pinit_from_rom(adapter, 0); msleep(1); - if (netxen_load_firmware(adapter)) - return; + netxen_load_firmware(adapter); netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); } @@ -816,30 +798,36 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) printk(KERN_INFO "State: 0x%0x\n", readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); - dma_watchdog_shutdown_request(adapter); - msleep(100); i = 100; - while ((dma_watchdog_shutdown_poll_result(adapter) != 1) && i) { - printk(KERN_INFO "dma_watchdog_shutdown_poll still in progress\n"); + do { + if (dma_watchdog_shutdown_request(adapter) == 1) + break; msleep(100); - i--; - } + if (dma_watchdog_shutdown_poll_result(adapter) == 1) + break; + } while (--i); if (i) { netxen_free_adapter_offload(adapter); } else { - printk(KERN_ERR "failed to dma shutdown\n"); - return; + printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", + netdev->name); } - } + if (adapter->irq) + free_irq(adapter->irq, adapter); + + if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + pci_disable_msi(pdev); + iounmap(adapter->ahw.db_base); iounmap(adapter->ahw.pci_base0); iounmap(adapter->ahw.pci_base1); iounmap(adapter->ahw.pci_base2); pci_release_regions(pdev); + pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); @@ -891,19 +879,22 @@ static int netxen_nic_open(struct net_device *netdev) if 
(!adapter->driver_mismatch) mod_timer(&adapter->watchdog_timer, jiffies); + napi_enable(&adapter->napi); + netxen_nic_enable_int(adapter); /* Done here again so that even if phantom sw overwrote it, * we set it */ - if (adapter->macaddr_set) - adapter->macaddr_set(adapter, netdev->dev_addr); if (adapter->init_port && adapter->init_port(adapter, adapter->portnum) != 0) { del_timer_sync(&adapter->watchdog_timer); printk(KERN_ERR "%s: Failed to initialize port %d\n", netxen_nic_driver_name, adapter->portnum); + napi_disable(&adapter->napi); return -EIO; } + if (adapter->macaddr_set) + adapter->macaddr_set(adapter, netdev->dev_addr); netxen_nic_set_link_parameters(adapter); @@ -929,6 +920,9 @@ static int netxen_nic_close(struct net_device *netdev) netif_carrier_off(netdev); netif_stop_queue(netdev); + napi_disable(&adapter->napi); + + netxen_nic_disable_int(adapter); cmd_buff = adapter->cmd_buf_arr; for (i = 0; i < adapter->max_tx_desc_count; i++) { @@ -1226,15 +1220,12 @@ static void netxen_tx_timeout_task(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, tx_timeout_task); - unsigned long flags; printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", netxen_nic_driver_name, adapter->netdev->name); - spin_lock_irqsave(&adapter->lock, flags); netxen_nic_close(adapter->netdev); netxen_nic_open(adapter->netdev); - spin_unlock_irqrestore(&adapter->lock, flags); adapter->netdev->trans_start = jiffies; netif_wake_queue(adapter->netdev); } @@ -1243,34 +1234,18 @@ static int netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev) { u32 ret = 0; - u32 our_int = 0; DPRINTK(INFO, "Entered handle ISR\n"); adapter->stats.ints++; - if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { - our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); - /* not our interrupt */ - if ((our_int & (0x80 << adapter->portnum)) == 0) - return ret; - } - netxen_nic_disable_int(adapter); - if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { - /* claim interrupt */ - if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { - writel(our_int & ~((u32)(0x80 << adapter->portnum)), - NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); - } - } - if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) { - if (netif_rx_schedule_prep(netdev)) { + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { /* * Interrupts are already disabled. 
*/ - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } else { static unsigned int intcount = 0; if ((++intcount & 0xfff) == 0xfff) @@ -1298,6 +1273,7 @@ irqreturn_t netxen_intr(int irq, void *data) { struct netxen_adapter *adapter; struct net_device *netdev; + u32 our_int = 0; if (unlikely(!irq)) { return IRQ_NONE; /* Not our interrupt */ @@ -1305,21 +1281,35 @@ irqreturn_t netxen_intr(int irq, void *data) adapter = (struct netxen_adapter *)data; netdev = adapter->netdev; - /* process our status queue (for all 4 ports) */ + + if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { + our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); + /* not our interrupt */ + if ((our_int & (0x80 << adapter->portnum)) == 0) + return IRQ_NONE; + } + + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { + /* claim interrupt */ + if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { + writel(our_int & ~((u32)(0x80 << adapter->portnum)), + NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); + } + } + if (netif_running(netdev)) netxen_handle_int(adapter, netdev); return IRQ_HANDLED; } -static int netxen_nic_poll(struct net_device *netdev, int *budget) +static int netxen_nic_poll(struct napi_struct *napi, int budget) { - struct netxen_adapter *adapter = netdev_priv(netdev); - int work_to_do = min(*budget, netdev->quota); + struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi); + struct net_device *netdev = adapter->netdev; int done = 1; int ctx; - int this_work_done; - int work_done = 0; + int work_done; DPRINTK(INFO, "polling for %d descriptors\n", *budget); @@ -1337,16 +1327,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget) * packets are on one context, it gets only half of the quota, * and ends up not processing it. 
*/ - this_work_done = netxen_process_rcv_ring(adapter, ctx, - work_to_do / - MAX_RCV_CTX); - work_done += this_work_done; + work_done += netxen_process_rcv_ring(adapter, ctx, + budget / MAX_RCV_CTX); } - netdev->quota -= work_done; - *budget -= work_done; - - if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0) + if (work_done >= budget && netxen_nic_rx_has_work(adapter) != 0) done = 0; if (netxen_process_cmd_ring((unsigned long)adapter) == 0) @@ -1355,11 +1340,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget) DPRINTK(INFO, "new work_done: %d work_to_do: %d\n", work_done, work_to_do); if (done) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); netxen_nic_enable_int(adapter); } - return !done; + return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 05e0577a0e10..5b9e1b300fab 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c @@ -603,6 +603,7 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, int phy = physical_port[adapter->portnum]; unsigned char mac_addr[6]; int i; + DECLARE_MAC_BUF(mac); for (i = 0; i < 10; i++) { temp[0] = temp[1] = 0; @@ -627,15 +628,10 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, if (i == 10) { printk(KERN_ERR "%s: cannot set Mac addr for %s\n", netxen_nic_driver_name, adapter->netdev->name); - printk(KERN_ERR "MAC address set: " - "%02x:%02x:%02x:%02x:%02x:%02x.\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - printk(KERN_ERR "MAC address get: " - "%02x:%02x:%02x:%02x:%02x:%02x.\n", - mac_addr[0], - mac_addr[1], - mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); + printk(KERN_ERR "MAC address set: %s.\n", + print_mac(mac, addr)); + printk(KERN_ERR "MAC address get: %s.\n", + print_mac(mac, mac_addr)); } return 0; } diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index 3d5b4232f65f..14a768fbce2e 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c @@ -89,7 +89,6 @@ static unsigned int ports[] __initdata = /* Information that needs to be kept for each board. */ struct ni5010_local { - struct net_device_stats stats; int o_pkt_size; spinlock_t lock; }; @@ -103,7 +102,6 @@ static irqreturn_t ni5010_interrupt(int irq, void *dev_id); static void ni5010_rx(struct net_device *dev); static void ni5010_timeout(struct net_device *dev); static int ni5010_close(struct net_device *dev); -static struct net_device_stats *ni5010_get_stats(struct net_device *dev); static void ni5010_set_multicast_list(struct net_device *dev); static void reset_receiver(struct net_device *dev); @@ -135,8 +133,6 @@ struct net_device * __init ni5010_probe(int unit) PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name)); - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = ni5010_probe1(dev, io); } else if (io != 0) { /* Don't probe at all. 
*/ @@ -207,6 +203,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) unsigned int data = 0; int boguscount = 40; int err = -ENODEV; + DECLARE_MAC_BUF(mac); dev->base_addr = ioaddr; dev->irq = irq; @@ -272,8 +269,9 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) for (i=0; i<6; i++) { outw(i, IE_GP); - printk("%2.2x ", dev->dev_addr[i] = inb(IE_SAPROM)); + dev->dev_addr[i] = inb(IE_SAPROM); } + printk("%s ", print_mac(mac, dev->dev_addr)); PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); @@ -336,7 +334,6 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) dev->open = ni5010_open; dev->stop = ni5010_close; dev->hard_start_xmit = ni5010_send_packet; - dev->get_stats = ni5010_get_stats; dev->set_multicast_list = ni5010_set_multicast_list; dev->tx_timeout = ni5010_timeout; dev->watchdog_timeo = HZ/20; @@ -534,11 +531,11 @@ static void ni5010_rx(struct net_device *dev) if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) { PRINTK((KERN_INFO "%s: receive error.\n", dev->name)); - lp->stats.rx_errors++; - if (rcv_stat & RS_RUNT) lp->stats.rx_length_errors++; - if (rcv_stat & RS_ALIGN) lp->stats.rx_frame_errors++; - if (rcv_stat & RS_CRC_ERR) lp->stats.rx_crc_errors++; - if (rcv_stat & RS_OFLW) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++; + if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++; + if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++; + if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++; outb(0xff, EDLC_RCLR); /* Clear the interrupt */ return; } @@ -549,8 +546,8 @@ static void ni5010_rx(struct net_device *dev) if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) { PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n", dev->name, i_pkt_size)); - lp->stats.rx_errors++; - lp->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; return; } @@ -558,7 +555,7 @@ static void ni5010_rx(struct net_device *dev) skb = dev_alloc_skb(i_pkt_size + 3); if (skb == NULL) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -575,8 +572,8 @@ static void ni5010_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += i_pkt_size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += i_pkt_size; PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", dev->name, i_pkt_size)); @@ -604,14 +601,14 @@ static int process_xmt_interrupt(struct net_device *dev) /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */ outb(MM_EN_XMT | MM_MUX, IE_MMODE); outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */ - lp->stats.collisions++; + dev->stats.collisions++; return 1; } /* FIXME: handle other xmt error conditions */ - lp->stats.tx_packets++; - lp->stats.tx_bytes += lp->o_pkt_size; + dev->stats.tx_packets++; + dev->stats.tx_bytes += lp->o_pkt_size; netif_wake_queue(dev); PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n", @@ -640,24 +637,6 @@ static int ni5010_close(struct net_device *dev) } -/* Get the current statistics. This may be called with the card open or - closed. 
*/ -static struct net_device_stats *ni5010_get_stats(struct net_device *dev) -{ - struct ni5010_local *lp = netdev_priv(dev); - - PRINTK2((KERN_DEBUG "%s: entering ni5010_get_stats\n", dev->name)); - - if (NI5010_DEBUG) ni5010_show_registers(dev); - - /* cli(); */ - /* Update the statistics from the device registers. */ - /* We do this in the interrupt handler */ - /* sti(); */ - - return &lp->stats; -} - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list @@ -670,14 +649,10 @@ static void ni5010_set_multicast_list(struct net_device *dev) PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); - if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) { + if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { dev->flags |= IFF_PROMISC; outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); - } else if (dev->mc_list) { - /* Sorry, multicast not supported */ - PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name)); - outb(RMD_BROADCAST, EDLC_RMODE); } else { PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name)); outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */ diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 5e7999db2096..6b3384a24f07 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -382,8 +382,6 @@ struct net_device * __init ni52_probe(int unit) memend = dev->mem_end; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = ni52_probe1(dev, io); } else if (io > 0) { /* Don't probe at all. */ diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 4ef5fe345191..097685245112 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -550,7 +550,6 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) } dev->base_addr = ioaddr; - SET_MODULE_OWNER(dev); dev->open = ni65_open; dev->stop = ni65_close; dev->hard_start_xmit = ni65_send_packet; diff --git a/drivers/net/niu.c b/drivers/net/niu.c new file mode 100644 index 000000000000..43bfe7e6b6f5 --- /dev/null +++ b/drivers/net/niu.c @@ -0,0 +1,7939 @@ +/* niu.c: Neptune ethernet driver. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/mii.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/ipv6.h> +#include <linux/log2.h> +#include <linux/jiffies.h> +#include <linux/crc32.h> + +#include <linux/io.h> + +#ifdef CONFIG_SPARC64 +#include <linux/of_device.h> +#endif + +#include "niu.h" + +#define DRV_MODULE_NAME "niu" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "0.5" +#define DRV_MODULE_RELDATE "October 5, 2007" + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("NIU ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#ifndef DMA_44BIT_MASK +#define DMA_44BIT_MASK 0x00000fffffffffffULL +#endif + +#ifndef readq +static u64 readq(void __iomem *reg) +{ + return (((u64)readl(reg + 0x4UL) << 32) | + (u64)readl(reg)); +} + +static void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +static struct pci_device_id niu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, + {} +}; + +MODULE_DEVICE_TABLE(pci, niu_pci_tbl); + +#define NIU_TX_TIMEOUT (5 * HZ) + +#define nr64(reg) readq(np->regs + (reg)) +#define nw64(reg, val) writeq((val), np->regs + (reg)) + +#define nr64_mac(reg) readq(np->mac_regs + (reg)) +#define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) + +#define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) +#define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) + +#define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) +#define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) + +#define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) +#define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) + +#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int niu_debug; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "NIU debug level"); + +#define niudbg(TYPE, f, a...) \ +do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_DEBUG PFX f, ## a); \ +} while (0) + +#define niuinfo(TYPE, f, a...) \ +do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_INFO PFX f, ## a); \ +} while (0) + +#define niuwarn(TYPE, f, a...) 
\ +do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_WARNING PFX f, ## a); \ +} while (0) + +#define niu_lock_parent(np, flags) \ + spin_lock_irqsave(&np->parent->lock, flags) +#define niu_unlock_parent(np, flags) \ + spin_unlock_irqrestore(&np->parent->lock, flags) + +static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64_mac(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + + nw64_mac(reg, bits); + err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); + if (err) + dev_err(np->device, PFX "%s: bits (%llx) of register %s " + "would not clear, val[%llx]\n", + np->dev->name, (unsigned long long) bits, reg_name, + (unsigned long long) nr64_mac(reg)); + return err; +} + +#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64_ipp(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + u64 val; + + val = nr64_ipp(reg); + val |= bits; + nw64_ipp(reg, val); + + err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); + if (err) + dev_err(np->device, PFX "%s: bits (%llx) of register %s " + "would not clear, val[%llx]\n", + np->dev->name, (unsigned long long) bits, reg_name, + (unsigned long long) nr64_ipp(reg)); + return err; +} + +#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ +}) + +static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + + nw64(reg, bits); + err = __niu_wait_bits_clear(np, reg, bits, limit, delay); + if (err) + dev_err(np->device, PFX "%s: bits (%llx) of register %s " + "would not clear, val[%llx]\n", + np->dev->name, (unsigned long long) bits, reg_name, + (unsigned long long) nr64(reg)); + return err; +} + +#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) +{ + u64 val = (u64) lp->timer; + + if (on) + val |= LDG_IMGMT_ARM; + + nw64(LDG_IMGMT(lp->ldg_num), val); +} + +static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) +{ + unsigned long mask_reg, bits; + u64 val; + + if (ldn < 0 || ldn > 
LDN_MAX) + return -EINVAL; + + if (ldn < 64) { + mask_reg = LD_IM0(ldn); + bits = LD_IM0_MASK; + } else { + mask_reg = LD_IM1(ldn - 64); + bits = LD_IM1_MASK; + } + + val = nr64(mask_reg); + if (on) + val &= ~bits; + else + val |= bits; + nw64(mask_reg, val); + + return 0; +} + +static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) +{ + struct niu_parent *parent = np->parent; + int i; + + for (i = 0; i <= LDN_MAX; i++) { + int err; + + if (parent->ldg_map[i] != lp->ldg_num) + continue; + + err = niu_ldn_irq_enable(np, i, on); + if (err) + return err; + } + return 0; +} + +static int niu_enable_interrupts(struct niu *np, int on) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + int err; + + err = niu_enable_ldn_in_ldg(np, lp, on); + if (err) + return err; + } + for (i = 0; i < np->num_ldg; i++) + niu_ldg_rearm(np, &np->ldg[i], on); + + return 0; +} + +static u32 phy_encode(u32 type, int port) +{ + return (type << (port * 2)); +} + +static u32 phy_decode(u32 val, int port) +{ + return (val >> (port * 2)) & PORT_TYPE_MASK; +} + +static int mdio_wait(struct niu *np) +{ + int limit = 1000; + u64 val; + + while (--limit > 0) { + val = nr64(MIF_FRAME_OUTPUT); + if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) + return val & MIF_FRAME_OUTPUT_DATA; + + udelay(10); + } + + return -ENODEV; +} + +static int mdio_read(struct niu *np, int port, int dev, int reg) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); + err = mdio_wait(np); + if (err < 0) + return err; + + nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); + return mdio_wait(np); +} + +static int mdio_write(struct niu *np, int port, int dev, int reg, int data) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); + err = mdio_wait(np); + if (err < 0) + return err; + + nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); + err = mdio_wait(np); + if (err < 0) + return err; + + return 0; +} + +static int mii_read(struct niu *np, int port, int reg) +{ + nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); + return mdio_wait(np); +} + +static int mii_write(struct niu *np, int port, int reg, int data) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); + err = mdio_wait(np); + if (err < 0) + return err; + + return 0; +} + +static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TX_CFG_L(channel), + val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TX_CFG_H(channel), + val >> 16); + return err; +} + +static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_RX_CFG_L(channel), + val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_RX_CFG_H(channel), + val >> 16); + return err; +} + +/* Mode is always 10G fiber. 
*/ +static int serdes_init_niu(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u32 tx_cfg, rx_cfg; + unsigned long i; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_EQ_LP_ADAPTIVE); + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + int err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + int err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + return 0; +} + +static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_H(chan)); + if (err >= 0) + *val |= ((err & 0xffff) << 16); + err = 0; + } + return err; +} + +static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_L(chan)); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_H(chan)); + if (err >= 0) { + *val |= ((err & 0xffff) << 16); + err = 0; + } + } + return err; +} + +static int esr_read_reset(struct niu *np, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H); + if (err >= 0) { + *val |= ((err & 0xffff) << 16); + err = 0; + } + } + return err; +} + +static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_L(chan), val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_H(chan), (val >> 16)); + return err; +} + +static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_L(chan), val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_H(chan), (val >> 16)); + return err; +} + +static int esr_reset(struct niu *np) +{ + u32 reset; + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L, 0x0000); + if (err) + return err; + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H, 0xffff); + if (err) + return err; + udelay(200); + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L, 0xffff); + if (err) + return err; + udelay(200); + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H, 0x0000); + if (err) + return err; + udelay(200); + + err = esr_read_reset(np, &reset); + if (err) + return err; + if (reset != 0) { + dev_err(np->device, PFX "Port %u ESR_RESET " + "did not clear [%08x]\n", + np->port, reset); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_10g(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long ctrl_reg, test_cfg_reg, i; + u64 ctrl_val, test_cfg_val, sig, mask, val; + int err; + + switch (np->port) { 
+ case 0: + ctrl_reg = ENET_SERDES_0_CTRL_CFG; + test_cfg_reg = ENET_SERDES_0_TEST_CFG; + break; + case 1: + ctrl_reg = ENET_SERDES_1_CTRL_CFG; + test_cfg_reg = ENET_SERDES_1_TEST_CFG; + break; + + default: + return -EINVAL; + } + ctrl_val = (ENET_SERDES_CTRL_SDET_0 | + ENET_SERDES_CTRL_SDET_1 | + ENET_SERDES_CTRL_SDET_2 | + ENET_SERDES_CTRL_SDET_3 | + (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); + test_cfg_val = 0; + + if (lp->loopback_mode == LOOPBACK_PHY) { + test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_0_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_1_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_2_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_3_SHIFT)); + } + + nw64(ctrl_reg, ctrl_val); + nw64(test_cfg_reg, test_cfg_val); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + u32 rxtx_ctrl, glue0; + + err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); + if (err) + return err; + err = esr_read_glue0(np, i, &glue0); + if (err) + return err; + + rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); + rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | + (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); + + glue0 &= ~(ESR_GLUE_CTRL0_SRATE | + ESR_GLUE_CTRL0_THCNT | + ESR_GLUE_CTRL0_BLTIME); + glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | + (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | + (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | + (BLTIME_300_CYCLES << + ESR_GLUE_CTRL0_BLTIME_SHIFT)); + + err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); + if (err) + return err; + err = esr_write_glue0(np, i, glue0); + if (err) + return err; + } + + err = esr_reset(np); + if (err) + return err; + + sig = nr64(ESR_INT_SIGNALS); + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return -EINVAL; + } + + if ((sig & mask) != val) { + dev_err(np->device, PFX "Port %u signal bits [%08x] are not " + "[%08x]\n", np->port, (int) (sig & mask), (int) val); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_1g(struct niu *np) +{ + u64 val; + + val = nr64(ENET_SERDES_1_PLL_CFG); + val &= ~ENET_SERDES_PLL_FBDIV2; + switch (np->port) { + case 0: + val |= ENET_SERDES_PLL_HRATE0; + break; + case 1: + val |= ENET_SERDES_PLL_HRATE1; + break; + case 2: + val |= ENET_SERDES_PLL_HRATE2; + break; + case 3: + val |= ENET_SERDES_PLL_HRATE3; + break; + default: + return -EINVAL; + } + nw64(ENET_SERDES_1_PLL_CFG, val); + + return 0; +} + +static int bcm8704_reset(struct niu *np) +{ + int err, limit; + + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err < 0) + return err; + err |= BMCR_RESET; + err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + MII_BMCR, err); + if (err) + return err; + + limit = 1000; + while (--limit >= 0) { + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err < 0) + return err; + if (!(err & BMCR_RESET)) + 
break; + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u PHY will not reset " + "(bmcr=%04x)\n", np->port, (err & 0xffff)); + return -ENODEV; + } + return 0; +} + +/* When written, certain PHY registers need to be read back twice + * in order for the bits to settle properly. + */ +static int bcm8704_user_dev3_readback(struct niu *np, int reg) +{ + int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); + if (err < 0) + return err; + return 0; +} + +static int bcm8704_init_user_dev3(struct niu *np) +{ + int err; + + err = mdio_write(np, np->phy_addr, + BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, + (USER_CONTROL_OPTXRST_LVL | + USER_CONTROL_OPBIASFLT_LVL | + USER_CONTROL_OBTMPFLT_LVL | + USER_CONTROL_OPPRFLT_LVL | + USER_CONTROL_OPTXFLT_LVL | + USER_CONTROL_OPRXLOS_LVL | + USER_CONTROL_OPRXFLT_LVL | + USER_CONTROL_OPTXON_LVL | + (0x3f << USER_CONTROL_RES1_SHIFT))); + if (err) + return err; + + err = mdio_write(np, np->phy_addr, + BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, + (USER_PMD_TX_CTL_XFP_CLKEN | + (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | + (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | + USER_PMD_TX_CTL_TSCK_LPWREN)); + if (err) + return err; + + err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); + if (err) + return err; + err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); + if (err) + return err; + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL); + if (err < 0) + return err; + err &= ~USER_ODIG_CTRL_GPIOS; + err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); + err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL, err); + if (err) + return err; + + mdelay(1000); + + return 0; +} + +static int xcvr_init_10g(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 analog_stat0, tx_alarm_status; + int err; + u64 val; + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_LED_POLARITY; + val |= XMAC_CONFIG_FORCE_LED_ON; + nw64_mac(XMAC_CONFIG, val); + + /* XXX shared resource, lock parent XXX */ + val = nr64(MIF_CONFIG); + val |= MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + err = bcm8704_reset(np); + if (err) + return err; + + err = bcm8704_init_user_dev3(np); + if (err) + return err; + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + MII_BMCR); + if (err < 0) + return err; + err &= ~BMCR_LOOPBACK; + + if (lp->loopback_mode == LOOPBACK_MAC) + err |= BMCR_LOOPBACK; + + err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + MII_BMCR, err); + if (err) + return err; + +#if 1 + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + MII_STAT1000); + if (err < 0) + return err; + pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n", + np->port, err); + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); + if (err < 0) + return err; + pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n", + np->port, err); + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + MII_NWAYTEST); + if (err < 0) + return err; + pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n", + np->port, err); +#endif + + /* XXX dig this out it might not be so useful XXX */ + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_ANALOG_STATUS0); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_ANALOG_STATUS0); + if (err < 0) + return err; + analog_stat0 = err; + + err = 
mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_TX_ALARM_STATUS); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_TX_ALARM_STATUS); + if (err < 0) + return err; + tx_alarm_status = err; + + if (analog_stat0 != 0x03fc) { + if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { + pr_info(PFX "Port %u cable not connected " + "or bad cable.\n", np->port); + } else if (analog_stat0 == 0x639c) { + pr_info(PFX "Port %u optical module is bad " + "or missing.\n", np->port); + } + } + + return 0; +} + +static int mii_reset(struct niu *np) +{ + int limit, err; + + err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); + if (err) + return err; + + limit = 1000; + while (--limit >= 0) { + udelay(500); + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + if (!(err & BMCR_RESET)) + break; + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u MII would not reset, " + "bmcr[%04x]\n", np->port, err); + return -ENODEV; + } + + return 0; +} + +static int mii_init_common(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 bmcr, bmsr, adv, estat; + int err; + + err = mii_reset(np); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; + bmsr = err; + + estat = 0; + if (bmsr & BMSR_ESTATEN) { + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + return err; + estat = err; + } + + bmcr = 0; + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + if (lp->loopback_mode == LOOPBACK_MAC) { + bmcr |= BMCR_LOOPBACK; + if (lp->active_speed == SPEED_1000) + bmcr |= BMCR_SPEED1000; + if (lp->active_duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 aux; + + aux = (BCM5464R_AUX_CTL_EXT_LB | + BCM5464R_AUX_CTL_WRITE_1); + err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); + if (err) + return err; + } + + /* XXX configurable XXX */ + /* XXX for now don't advertise half-duplex or asym pause... 
XXX */ + adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; + if (bmsr & BMSR_10FULL) + adv |= ADVERTISE_10FULL; + if (bmsr & BMSR_100FULL) + adv |= ADVERTISE_100FULL; + err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); + if (err) + return err; + + if (bmsr & BMSR_ESTATEN) { + u16 ctrl1000 = 0; + + if (estat & ESTATUS_1000_TFULL) + ctrl1000 |= ADVERTISE_1000FULL; + err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); + if (err) + return err; + } + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; +#if 0 + pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n", + np->port, bmcr, bmsr); +#endif + + return 0; +} + +static int xcvr_init_1g(struct niu *np) +{ + u64 val; + + /* XXX shared resource, lock parent XXX */ + val = nr64(MIF_CONFIG); + val &= ~MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + return mii_init_common(np); +} + +static int niu_xcvr_init(struct niu *np) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->xcvr_init) + err = ops->xcvr_init(np); + + return err; +} + +static int niu_serdes_init(struct niu *np) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->serdes_init) + err = ops->serdes_init(np); + + return err; +} + +static void niu_init_xif(struct niu *); + +static int niu_link_status_common(struct niu *np, int link_up) +{ + struct niu_link_config *lp = &np->link_config; + struct net_device *dev = np->dev; + unsigned long flags; + + if (!netif_carrier_ok(dev) && link_up) { + niuinfo(LINK, "%s: Link is up at %s, %s duplex\n", + dev->name, + (lp->active_speed == SPEED_10000 ? + "10Gb/sec" : + (lp->active_speed == SPEED_1000 ? + "1Gb/sec" : + (lp->active_speed == SPEED_100 ? + "100Mbit/sec" : "10Mbit/sec"))), + (lp->active_duplex == DUPLEX_FULL ? 
+ "full" : "half")); + + spin_lock_irqsave(&np->lock, flags); + niu_init_xif(np); + spin_unlock_irqrestore(&np->lock, flags); + + netif_carrier_on(dev); + } else if (netif_carrier_ok(dev) && !link_up) { + niuwarn(LINK, "%s: Link is down\n", dev->name); + netif_carrier_off(dev); + } + + return 0; +} + +static int link_status_10g(struct niu *np, int *link_up_p) +{ + unsigned long flags; + int err, link_up; + + link_up = 0; + + spin_lock_irqsave(&np->lock, flags); + + err = -EINVAL; + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + goto out; + + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + BCM8704_PMD_RCV_SIGDET); + if (err < 0) + goto out; + if (!(err & PMD_RCV_SIGDET_GLOBAL)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + BCM8704_PCS_10G_R_STATUS); + if (err < 0) + goto out; + if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + BCM8704_PHYXS_XGXS_LANE_STAT); + if (err < 0) + goto out; + + if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | + PHYXS_XGXS_LANE_STAT_MAGIC | + PHYXS_XGXS_LANE_STAT_LANE3 | + PHYXS_XGXS_LANE_STAT_LANE2 | + PHYXS_XGXS_LANE_STAT_LANE1 | + PHYXS_XGXS_LANE_STAT_LANE0)) { + err = 0; + goto out; + } + + link_up = 1; + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + err = 0; + +out: + spin_unlock_irqrestore(&np->lock, flags); + + *link_up_p = link_up; + return err; +} + +static int link_status_1g(struct niu *np, int *link_up_p) +{ + u16 current_speed, bmsr; + unsigned long flags; + u8 current_duplex; + int err, link_up; + + link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + spin_lock_irqsave(&np->lock, flags); + + err = -EINVAL; + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + goto out; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + goto out; + + bmsr = err; + if (bmsr & BMSR_LSTATUS) { + u16 adv, lpa, common, estat; + + err = mii_read(np, np->phy_addr, MII_ADVERTISE); + if (err < 0) + goto out; + adv = err; + + err = mii_read(np, np->phy_addr, MII_LPA); + if (err < 0) + goto out; + lpa = err; + + common = adv & lpa; + + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + goto out; + estat = err; + + link_up = 1; + if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) { + current_speed = SPEED_1000; + if (estat & ESTATUS_1000_TFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } else { + if (common & ADVERTISE_100BASE4) { + current_speed = SPEED_100; + current_duplex = DUPLEX_HALF; + } else if (common & ADVERTISE_100FULL) { + current_speed = SPEED_100; + current_duplex = DUPLEX_FULL; + } else if (common & ADVERTISE_100HALF) { + current_speed = SPEED_100; + current_duplex = DUPLEX_HALF; + } else if (common & ADVERTISE_10FULL) { + current_speed = SPEED_10; + current_duplex = DUPLEX_FULL; + } else if (common & ADVERTISE_10HALF) { + current_speed = SPEED_10; + current_duplex = DUPLEX_HALF; + } else + link_up = 0; + } + } + err = 0; + +out: + spin_unlock_irqrestore(&np->lock, flags); + + *link_up_p = link_up; + return err; +} + +static int niu_link_status(struct niu *np, int *link_up_p) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->link_status) + err = ops->link_status(np, link_up_p); + + return err; +} + +static void niu_timer(unsigned long __opaque) +{ + struct niu *np = (struct niu *) __opaque; + unsigned long off; + int err, link_up; + + err = 
niu_link_status(np, &link_up); + if (!err) + niu_link_status_common(np, link_up); + + if (netif_carrier_ok(np->dev)) + off = 5 * HZ; + else + off = 1 * HZ; + np->timer.expires = jiffies + off; + + add_timer(&np->timer); +} + +static const struct niu_phy_ops phy_ops_10g_fiber_niu = { + .serdes_init = serdes_init_niu, + .xcvr_init = xcvr_init_10g, + .link_status = link_status_10g, +}; + +static const struct niu_phy_ops phy_ops_10g_fiber = { + .serdes_init = serdes_init_10g, + .xcvr_init = xcvr_init_10g, + .link_status = link_status_10g, +}; + +static const struct niu_phy_ops phy_ops_10g_copper = { + .serdes_init = serdes_init_10g, + .link_status = link_status_10g, /* XXX */ +}; + +static const struct niu_phy_ops phy_ops_1g_fiber = { + .serdes_init = serdes_init_1g, + .xcvr_init = xcvr_init_1g, + .link_status = link_status_1g, +}; + +static const struct niu_phy_ops phy_ops_1g_copper = { + .xcvr_init = xcvr_init_1g, + .link_status = link_status_1g, +}; + +struct niu_phy_template { + const struct niu_phy_ops *ops; + u32 phy_addr_base; +}; + +static const struct niu_phy_template phy_template_niu = { + .ops = &phy_ops_10g_fiber_niu, + .phy_addr_base = 16, +}; + +static const struct niu_phy_template phy_template_10g_fiber = { + .ops = &phy_ops_10g_fiber, + .phy_addr_base = 8, +}; + +static const struct niu_phy_template phy_template_10g_copper = { + .ops = &phy_ops_10g_copper, + .phy_addr_base = 10, +}; + +static const struct niu_phy_template phy_template_1g_fiber = { + .ops = &phy_ops_1g_fiber, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_1g_copper = { + .ops = &phy_ops_1g_copper, + .phy_addr_base = 0, +}; + +static int niu_determine_phy_disposition(struct niu *np) +{ + struct niu_parent *parent = np->parent; + u8 plat_type = parent->plat_type; + const struct niu_phy_template *tp; + u32 phy_addr_off = 0; + + if (plat_type == PLAT_TYPE_NIU) { + tp = &phy_template_niu; + phy_addr_off += np->port; + } else { + switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { + case 0: + /* 1G copper */ + tp = &phy_template_1g_copper; + if (plat_type == PLAT_TYPE_VF_P0) + phy_addr_off = 10; + else if (plat_type == PLAT_TYPE_VF_P1) + phy_addr_off = 26; + + phy_addr_off += (np->port ^ 0x3); + break; + + case NIU_FLAGS_10G: + /* 10G copper */ + tp = &phy_template_1g_copper; + break; + + case NIU_FLAGS_FIBER: + /* 1G fiber */ + tp = &phy_template_1g_fiber; + break; + + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + /* 10G fiber */ + tp = &phy_template_10g_fiber; + if (plat_type == PLAT_TYPE_VF_P0 || + plat_type == PLAT_TYPE_VF_P1) + phy_addr_off = 8; + phy_addr_off += np->port; + break; + + default: + return -EINVAL; + } + } + + np->phy_ops = tp->ops; + np->phy_addr = tp->phy_addr_base + phy_addr_off; + + return 0; +} + +static int niu_init_link(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int err, ignore; + + if (parent->plat_type == PLAT_TYPE_NIU) { + err = niu_xcvr_init(np); + if (err) + return err; + msleep(200); + } + err = niu_serdes_init(np); + if (err) + return err; + msleep(200); + err = niu_xcvr_init(np); + if (!err) + niu_link_status(np, &ignore); + return 0; +} + +static void niu_set_primary_mac(struct niu *np, unsigned char *addr) +{ + u16 reg0 = addr[4] << 8 | addr[5]; + u16 reg1 = addr[2] << 8 | addr[3]; + u16 reg2 = addr[0] << 8 | addr[1]; + + if (np->flags & NIU_FLAGS_XMAC) { + nw64_mac(XMAC_ADDR0, reg0); + nw64_mac(XMAC_ADDR1, reg1); + nw64_mac(XMAC_ADDR2, reg2); + } else { + nw64_mac(BMAC_ADDR0, reg0); + nw64_mac(BMAC_ADDR1, reg1); + 
nw64_mac(BMAC_ADDR2, reg2); + } +} + +static int niu_num_alt_addr(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return XMAC_NUM_ALT_ADDR; + else + return BMAC_NUM_ALT_ADDR; +} + +static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) +{ + u16 reg0 = addr[4] << 8 | addr[5]; + u16 reg1 = addr[2] << 8 | addr[3]; + u16 reg2 = addr[0] << 8 | addr[1]; + + if (index >= niu_num_alt_addr(np)) + return -EINVAL; + + if (np->flags & NIU_FLAGS_XMAC) { + nw64_mac(XMAC_ALT_ADDR0(index), reg0); + nw64_mac(XMAC_ALT_ADDR1(index), reg1); + nw64_mac(XMAC_ALT_ADDR2(index), reg2); + } else { + nw64_mac(BMAC_ALT_ADDR0(index), reg0); + nw64_mac(BMAC_ALT_ADDR1(index), reg1); + nw64_mac(BMAC_ALT_ADDR2(index), reg2); + } + + return 0; +} + +static int niu_enable_alt_mac(struct niu *np, int index, int on) +{ + unsigned long reg; + u64 val, mask; + + if (index >= niu_num_alt_addr(np)) + return -EINVAL; + + if (np->flags & NIU_FLAGS_XMAC) + reg = XMAC_ADDR_CMPEN; + else + reg = BMAC_ADDR_CMPEN; + + mask = 1 << index; + + val = nr64_mac(reg); + if (on) + val |= mask; + else + val &= ~mask; + nw64_mac(reg, val); + + return 0; +} + +static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, + int num, int mac_pref) +{ + u64 val = nr64_mac(reg); + val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); + val |= num; + if (mac_pref) + val |= HOST_INFO_MPR; + nw64_mac(reg, val); +} + +static int __set_rdc_table_num(struct niu *np, + int xmac_index, int bmac_index, + int rdc_table_num, int mac_pref) +{ + unsigned long reg; + + if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) + return -EINVAL; + if (np->flags & NIU_FLAGS_XMAC) + reg = XMAC_HOST_INFO(xmac_index); + else + reg = BMAC_HOST_INFO(bmac_index); + __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); + return 0; +} + +static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, + int mac_pref) +{ + return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); +} + +static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, + int mac_pref) +{ + return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); +} + +static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, + int table_num, int mac_pref) +{ + if (idx >= niu_num_alt_addr(np)) + return -EINVAL; + return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); +} + +static u64 vlan_entry_set_parity(u64 reg_val) +{ + u64 port01_mask; + u64 port23_mask; + + port01_mask = 0x00ff; + port23_mask = 0xff00; + + if (hweight64(reg_val & port01_mask) & 1) + reg_val |= ENET_VLAN_TBL_PARITY0; + else + reg_val &= ~ENET_VLAN_TBL_PARITY0; + + if (hweight64(reg_val & port23_mask) & 1) + reg_val |= ENET_VLAN_TBL_PARITY1; + else + reg_val &= ~ENET_VLAN_TBL_PARITY1; + + return reg_val; +} + +static void vlan_tbl_write(struct niu *np, unsigned long index, + int port, int vpr, int rdc_table) +{ + u64 reg_val = nr64(ENET_VLAN_TBL(index)); + + reg_val &= ~((ENET_VLAN_TBL_VPR | + ENET_VLAN_TBL_VLANRDCTBLN) << + ENET_VLAN_TBL_SHIFT(port)); + if (vpr) + reg_val |= (ENET_VLAN_TBL_VPR << + ENET_VLAN_TBL_SHIFT(port)); + reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); + + reg_val = vlan_entry_set_parity(reg_val); + + nw64(ENET_VLAN_TBL(index), reg_val); +} + +static void vlan_tbl_clear(struct niu *np) +{ + int i; + + for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) + nw64(ENET_VLAN_TBL(i), 0); +} + +static int tcam_wait_bit(struct niu *np, u64 bit) +{ + int limit = 1000; + + while (--limit > 0) { + if (nr64(TCAM_CTL) & bit) + break; + udelay(1); + } + if (limit < 0) + return 
-ENODEV; + + return 0; +} + +static int tcam_flush(struct niu *np, int index) +{ + nw64(TCAM_KEY_0, 0x00); + nw64(TCAM_KEY_MASK_0, 0xff); + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +#if 0 +static int tcam_read(struct niu *np, int index, + u64 *key, u64 *mask) +{ + int err; + + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); + err = tcam_wait_bit(np, TCAM_CTL_STAT); + if (!err) { + key[0] = nr64(TCAM_KEY_0); + key[1] = nr64(TCAM_KEY_1); + key[2] = nr64(TCAM_KEY_2); + key[3] = nr64(TCAM_KEY_3); + mask[0] = nr64(TCAM_KEY_MASK_0); + mask[1] = nr64(TCAM_KEY_MASK_1); + mask[2] = nr64(TCAM_KEY_MASK_2); + mask[3] = nr64(TCAM_KEY_MASK_3); + } + return err; +} +#endif + +static int tcam_write(struct niu *np, int index, + u64 *key, u64 *mask) +{ + nw64(TCAM_KEY_0, key[0]); + nw64(TCAM_KEY_1, key[1]); + nw64(TCAM_KEY_2, key[2]); + nw64(TCAM_KEY_3, key[3]); + nw64(TCAM_KEY_MASK_0, mask[0]); + nw64(TCAM_KEY_MASK_1, mask[1]); + nw64(TCAM_KEY_MASK_2, mask[2]); + nw64(TCAM_KEY_MASK_3, mask[3]); + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +#if 0 +static int tcam_assoc_read(struct niu *np, int index, u64 *data) +{ + int err; + + nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); + err = tcam_wait_bit(np, TCAM_CTL_STAT); + if (!err) + *data = nr64(TCAM_KEY_1); + + return err; +} +#endif + +static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) +{ + nw64(TCAM_KEY_1, assoc_data); + nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +static void tcam_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val &= ~FFLP_CFG_1_TCAM_DIS; + else + val |= FFLP_CFG_1_TCAM_DIS; + nw64(FFLP_CFG_1, val); +} + +static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) +{ + u64 val = nr64(FFLP_CFG_1); + + val &= ~(FFLP_CFG_1_FFLPINITDONE | + FFLP_CFG_1_CAMLAT | + FFLP_CFG_1_CAMRATIO); + val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); + val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); + nw64(FFLP_CFG_1, val); + + val = nr64(FFLP_CFG_1); + val |= FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); +} + +static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, + int on) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_ETHERTYPE1 || + class > CLASS_CODE_ETHERTYPE2) + return -EINVAL; + + reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); + val = nr64(reg); + if (on) + val |= L2_CLS_VLD; + else + val &= ~L2_CLS_VLD; + nw64(reg, val); + + return 0; +} + +#if 0 +static int tcam_user_eth_class_set(struct niu *np, unsigned long class, + u64 ether_type) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_ETHERTYPE1 || + class > CLASS_CODE_ETHERTYPE2 || + (ether_type & ~(u64)0xffff) != 0) + return -EINVAL; + + reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); + val = nr64(reg); + val &= ~L2_CLS_ETYPE; + val |= (ether_type << L2_CLS_ETYPE_SHIFT); + nw64(reg, val); + + return 0; +} +#endif + +static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, + int on) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_USER_PROG4) + return -EINVAL; + + reg = L3_CLS(class - CLASS_CODE_USER_PROG1); + val = nr64(reg); + if (on) + val |= L3_CLS_VALID; + else + val &= ~L3_CLS_VALID; + nw64(reg, val); + + return 0; +} + +#if 0 +static int tcam_user_ip_class_set(struct niu *np, unsigned long class, + int ipv6, u64 protocol_id, + u64 tos_mask, u64 
tos_val) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_USER_PROG4 || + (protocol_id & ~(u64)0xff) != 0 || + (tos_mask & ~(u64)0xff) != 0 || + (tos_val & ~(u64)0xff) != 0) + return -EINVAL; + + reg = L3_CLS(class - CLASS_CODE_USER_PROG1); + val = nr64(reg); + val &= ~(L3_CLS_IPVER | L3_CLS_PID | + L3_CLS_TOSMASK | L3_CLS_TOS); + if (ipv6) + val |= L3_CLS_IPVER; + val |= (protocol_id << L3_CLS_PID_SHIFT); + val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); + val |= (tos_val << L3_CLS_TOS_SHIFT); + nw64(reg, val); + + return 0; +} +#endif + +static int tcam_early_init(struct niu *np) +{ + unsigned long i; + int err; + + tcam_enable(np, 0); + tcam_set_lat_and_ratio(np, + DEFAULT_TCAM_LATENCY, + DEFAULT_TCAM_ACCESS_RATIO); + for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { + err = tcam_user_eth_class_enable(np, i, 0); + if (err) + return err; + } + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { + err = tcam_user_ip_class_enable(np, i, 0); + if (err) + return err; + } + + return 0; +} + +static int tcam_flush_all(struct niu *np) +{ + unsigned long i; + + for (i = 0; i < np->parent->tcam_num_entries; i++) { + int err = tcam_flush(np, i); + if (err) + return err; + } + return 0; +} + +static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) +{ + return ((u64)index | (num_entries == 1 ? + HASH_TBL_ADDR_AUTOINC : 0)); +} + +#if 0 +static int hash_read(struct niu *np, unsigned long partition, + unsigned long index, unsigned long num_entries, + u64 *data) +{ + u64 val = hash_addr_regval(index, num_entries); + unsigned long i; + + if (partition >= FCRAM_NUM_PARTITIONS || + index + num_entries > FCRAM_SIZE) + return -EINVAL; + + nw64(HASH_TBL_ADDR(partition), val); + for (i = 0; i < num_entries; i++) + data[i] = nr64(HASH_TBL_DATA(partition)); + + return 0; +} +#endif + +static int hash_write(struct niu *np, unsigned long partition, + unsigned long index, unsigned long num_entries, + u64 *data) +{ + u64 val = hash_addr_regval(index, num_entries); + unsigned long i; + + if (partition >= FCRAM_NUM_PARTITIONS || + index + (num_entries * 8) > FCRAM_SIZE) + return -EINVAL; + + nw64(HASH_TBL_ADDR(partition), val); + for (i = 0; i < num_entries; i++) + nw64(HASH_TBL_DATA(partition), data[i]); + + return 0; +} + +static void fflp_reset(struct niu *np) +{ + u64 val; + + nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); + udelay(10); + nw64(FFLP_CFG_1, 0); + + val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); +} + +static void fflp_set_timings(struct niu *np) +{ + u64 val = nr64(FFLP_CFG_1); + + val &= ~FFLP_CFG_1_FFLPINITDONE; + val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); + nw64(FFLP_CFG_1, val); + + val = nr64(FFLP_CFG_1); + val |= FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); + + val = nr64(FCRAM_REF_TMR); + val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); + val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); + val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); + nw64(FCRAM_REF_TMR, val); +} + +static int fflp_set_partition(struct niu *np, u64 partition, + u64 mask, u64 base, int enable) +{ + unsigned long reg; + u64 val; + + if (partition >= FCRAM_NUM_PARTITIONS || + (mask & ~(u64)0x1f) != 0 || + (base & ~(u64)0x1f) != 0) + return -EINVAL; + + reg = FLW_PRT_SEL(partition); + + val = nr64(reg); + val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); + val |= (mask << FLW_PRT_SEL_MASK_SHIFT); + val |= (base << 
FLW_PRT_SEL_BASE_SHIFT); + if (enable) + val |= FLW_PRT_SEL_EXT; + nw64(reg, val); + + return 0; +} + +static int fflp_disable_all_partitions(struct niu *np) +{ + unsigned long i; + + for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { + int err = fflp_set_partition(np, 0, 0, 0, 0); + if (err) + return err; + } + return 0; +} + +static void fflp_llcsnap_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val |= FFLP_CFG_1_LLCSNAP; + else + val &= ~FFLP_CFG_1_LLCSNAP; + nw64(FFLP_CFG_1, val); +} + +static void fflp_errors_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val &= ~FFLP_CFG_1_ERRORDIS; + else + val |= FFLP_CFG_1_ERRORDIS; + nw64(FFLP_CFG_1, val); +} + +static int fflp_hash_clear(struct niu *np) +{ + struct fcram_hash_ipv4 ent; + unsigned long i; + + /* IPV4 hash entry with valid bit clear, rest is don't care. */ + memset(&ent, 0, sizeof(ent)); + ent.header = HASH_HEADER_EXT; + + for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { + int err = hash_write(np, 0, i, 1, (u64 *) &ent); + if (err) + return err; + } + return 0; +} + +static int fflp_early_init(struct niu *np) +{ + struct niu_parent *parent; + unsigned long flags; + int err; + + niu_lock_parent(np, flags); + + parent = np->parent; + err = 0; + if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { + niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n", + np->port); + if (np->parent->plat_type != PLAT_TYPE_NIU) { + fflp_reset(np); + fflp_set_timings(np); + err = fflp_disable_all_partitions(np); + if (err) { + niudbg(PROBE, "fflp_disable_all_partitions " + "failed, err=%d\n", err); + goto out; + } + } + + err = tcam_early_init(np); + if (err) { + niudbg(PROBE, "tcam_early_init failed, err=%d\n", + err); + goto out; + } + fflp_llcsnap_enable(np, 1); + fflp_errors_enable(np, 0); + nw64(H1POLY, 0); + nw64(H2POLY, 0); + + err = tcam_flush_all(np); + if (err) { + niudbg(PROBE, "tcam_flush_all failed, err=%d\n", + err); + goto out; + } + if (np->parent->plat_type != PLAT_TYPE_NIU) { + err = fflp_hash_clear(np); + if (err) { + niudbg(PROBE, "fflp_hash_clear failed, " + "err=%d\n", err); + goto out; + } + } + + vlan_tbl_clear(np); + + niudbg(PROBE, "fflp_early_init: Success\n"); + parent->flags |= PARENT_FLGS_CLS_HWINIT; + } +out: + niu_unlock_parent(np, flags); + return err; +} + +static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) +{ + if (class_code < CLASS_CODE_USER_PROG1 || + class_code > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); + return 0; +} + +static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) +{ + if (class_code < CLASS_CODE_USER_PROG1 || + class_code > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); + return 0; +} + +static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, + u32 offset, u32 size) +{ + int i = skb_shinfo(skb)->nr_frags; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + frag->page = page; + frag->page_offset = offset; + frag->size = size; + + skb->len += size; + skb->data_len += size; + skb->truesize += size; + + skb_shinfo(skb)->nr_frags = i + 1; +} + +static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) +{ + a >>= PAGE_SHIFT; + a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); + + return (a & (MAX_RBR_RING_SIZE - 1)); +} + +static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, + struct page ***link) +{ + unsigned int h = niu_hash_rxaddr(rp, addr); + struct page 
*p, **pp; + + addr &= PAGE_MASK; + pp = &rp->rxhash[h]; + for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { + if (p->index == addr) { + *link = pp; + break; + } + } + + return p; +} + +static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) +{ + unsigned int h = niu_hash_rxaddr(rp, base); + + page->index = base; + page->mapping = (struct address_space *) rp->rxhash[h]; + rp->rxhash[h] = page; +} + +static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, + gfp_t mask, int start_index) +{ + struct page *page; + u64 addr; + int i; + + page = alloc_page(mask); + if (!page) + return -ENOMEM; + + addr = np->ops->map_page(np->device, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + niu_hash_page(rp, page, addr); + if (rp->rbr_blocks_per_page > 1) + atomic_add(rp->rbr_blocks_per_page - 1, + &compound_head(page)->_count); + + for (i = 0; i < rp->rbr_blocks_per_page; i++) { + __le32 *rbr = &rp->rbr[start_index + i]; + + *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); + addr += rp->rbr_block_size; + } + + return 0; +} + +static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) +{ + int index = rp->rbr_index; + + rp->rbr_pending++; + if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { + int err = niu_rbr_add_page(np, rp, mask, index); + + if (unlikely(err)) { + rp->rbr_pending--; + return; + } + + rp->rbr_index += rp->rbr_blocks_per_page; + BUG_ON(rp->rbr_index > rp->rbr_table_size); + if (rp->rbr_index == rp->rbr_table_size) + rp->rbr_index = 0; + + if (rp->rbr_pending >= rp->rbr_kick_thresh) { + nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); + rp->rbr_pending = 0; + } + } +} + +static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) +{ + unsigned int index = rp->rcr_index; + int num_rcr = 0; + + rp->rx_dropped++; + while (1) { + struct page *page, **link; + u64 addr, val; + u32 rcr_size; + + num_rcr++; + + val = le64_to_cpup(&rp->rcr[index]); + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << + RCR_ENTRY_PKT_BUF_ADDR_SHIFT; + page = niu_find_rxpage(rp, addr, &link); + + rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> + RCR_ENTRY_PKTBUFSZ_SHIFT]; + if ((page->index + PAGE_SIZE) - rcr_size == addr) { + *link = (struct page *) page->mapping; + np->ops->unmap_page(np->device, page->index, + PAGE_SIZE, DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + __free_page(page); + rp->rbr_refill_pending++; + } + + index = NEXT_RCR(rp, index); + if (!(val & RCR_ENTRY_MULTI)) + break; + + } + rp->rcr_index = index; + + return num_rcr; +} + +static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) +{ + unsigned int index = rp->rcr_index; + struct sk_buff *skb; + int len, num_rcr; + + skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); + if (unlikely(!skb)) + return niu_rx_pkt_ignore(np, rp); + + num_rcr = 0; + while (1) { + struct page *page, **link; + u32 rcr_size, append_size; + u64 addr, val, off; + + num_rcr++; + + val = le64_to_cpup(&rp->rcr[index]); + + len = (val & RCR_ENTRY_L2_LEN) >> + RCR_ENTRY_L2_LEN_SHIFT; + len -= ETH_FCS_LEN; + + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << + RCR_ENTRY_PKT_BUF_ADDR_SHIFT; + page = niu_find_rxpage(rp, addr, &link); + + rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> + RCR_ENTRY_PKTBUFSZ_SHIFT]; + + off = addr & ~PAGE_MASK; + append_size = rcr_size; + if (num_rcr == 1) { + int ptype; + + off += 2; + append_size -= 2; + + ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); + if ((ptype == RCR_PKT_TYPE_TCP || + ptype == RCR_PKT_TYPE_UDP) && + !(val & 
(RCR_ENTRY_NOPORT | + RCR_ENTRY_ERROR))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } + if (!(val & RCR_ENTRY_MULTI)) + append_size = len - skb->len; + + niu_rx_skb_append(skb, page, off, append_size); + if ((page->index + rp->rbr_block_size) - rcr_size == addr) { + *link = (struct page *) page->mapping; + np->ops->unmap_page(np->device, page->index, + PAGE_SIZE, DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + rp->rbr_refill_pending++; + } else + get_page(page); + + index = NEXT_RCR(rp, index); + if (!(val & RCR_ENTRY_MULTI)) + break; + + } + rp->rcr_index = index; + + skb_reserve(skb, NET_IP_ALIGN); + __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); + + rp->rx_packets++; + rp->rx_bytes += skb->len; + + skb->protocol = eth_type_trans(skb, np->dev); + netif_receive_skb(skb); + + return num_rcr; +} + +static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) +{ + int blocks_per_page = rp->rbr_blocks_per_page; + int err, index = rp->rbr_index; + + err = 0; + while (index < (rp->rbr_table_size - blocks_per_page)) { + err = niu_rbr_add_page(np, rp, mask, index); + if (err) + break; + + index += blocks_per_page; + } + + rp->rbr_index = index; + return err; +} + +static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) +{ + int i; + + for (i = 0; i < MAX_RBR_RING_SIZE; i++) { + struct page *page; + + page = rp->rxhash[i]; + while (page) { + struct page *next = (struct page *) page->mapping; + u64 base = page->index; + + np->ops->unmap_page(np->device, base, PAGE_SIZE, + DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + + __free_page(page); + + page = next; + } + } + + for (i = 0; i < rp->rbr_table_size; i++) + rp->rbr[i] = cpu_to_le32(0); + rp->rbr_index = 0; +} + +static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) +{ + struct tx_buff_info *tb = &rp->tx_buffs[idx]; + struct sk_buff *skb = tb->skb; + struct tx_pkt_hdr *tp; + u64 tx_flags; + int i, len; + + tp = (struct tx_pkt_hdr *) skb->data; + tx_flags = le64_to_cpup(&tp->flags); + + rp->tx_packets++; + rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - + ((tx_flags & TXHDR_PAD) / 2)); + + len = skb_headlen(skb); + np->ops->unmap_single(np->device, tb->mapping, + len, DMA_TO_DEVICE); + + if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) + rp->mark_pending--; + + tb->skb = NULL; + do { + idx = NEXT_TX(rp, idx); + len -= MAX_TX_DESC_LEN; + } while (len > 0); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + tb = &rp->tx_buffs[idx]; + BUG_ON(tb->skb != NULL); + np->ops->unmap_page(np->device, tb->mapping, + skb_shinfo(skb)->frags[i].size, + DMA_TO_DEVICE); + idx = NEXT_TX(rp, idx); + } + + dev_kfree_skb(skb); + + return idx; +} + +#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) + +static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) +{ + u16 pkt_cnt, tmp; + int cons; + u64 cs; + + cs = rp->tx_cs; + if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) + goto out; + + tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; + pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & + (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); + + rp->last_pkt_cnt = tmp; + + cons = rp->cons; + + niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", + np->dev->name, pkt_cnt, cons); + + while (pkt_cnt--) + cons = release_tx_packet(np, rp, cons); + + rp->cons = cons; + smp_mb(); + +out: + if (unlikely(netif_queue_stopped(np->dev) && + (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { + netif_tx_lock(np->dev); + if 
(netif_queue_stopped(np->dev) && + (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) + netif_wake_queue(np->dev); + netif_tx_unlock(np->dev); + } +} + +static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) +{ + int qlen, rcr_done = 0, work_done = 0; + struct rxdma_mailbox *mbox = rp->mbox; + u64 stat; + +#if 1 + stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); + qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; +#else + stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); + qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); +#endif + mbox->rx_dma_ctl_stat = 0; + mbox->rcrstat_a = 0; + + niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", + np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); + + rcr_done = work_done = 0; + qlen = min(qlen, budget); + while (work_done < qlen) { + rcr_done += niu_process_rx_pkt(np, rp); + work_done++; + } + + if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { + unsigned int i; + + for (i = 0; i < rp->rbr_refill_pending; i++) + niu_rbr_refill(np, rp, GFP_ATOMIC); + rp->rbr_refill_pending = 0; + } + + stat = (RX_DMA_CTL_STAT_MEX | + ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | + ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); + + nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); + + return work_done; +} + +static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) +{ + u64 v0 = lp->v0; + u32 tx_vec = (v0 >> 32); + u32 rx_vec = (v0 & 0xffffffff); + int i, work_done = 0; + + niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", + np->dev->name, (unsigned long long) v0); + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + if (tx_vec & (1 << rp->tx_channel)) + niu_tx_work(np, rp); + nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); + } + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + if (rx_vec & (1 << rp->rx_channel)) { + int this_work_done; + + this_work_done = niu_rx_work(np, rp, + budget); + + budget -= this_work_done; + work_done += this_work_done; + } + nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); + } + + return work_done; +} + +static int niu_poll(struct napi_struct *napi, int budget) +{ + struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); + struct niu *np = lp->np; + int work_done; + + work_done = niu_poll_core(np, lp, budget); + + if (work_done < budget) { + netif_rx_complete(np->dev, napi); + niu_ldg_rearm(np, lp, 1); + } + return work_done; +} + +static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, + u64 stat) +{ + dev_err(np->device, PFX "%s: RX channel %u errors ( ", + np->dev->name, rp->rx_channel); + + if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) + printk("RBR_TMOUT "); + if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) + printk("RSP_CNT "); + if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) + printk("BYTE_EN_BUS "); + if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) + printk("RSP_DAT "); + if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) + printk("RCR_ACK "); + if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) + printk("RCR_SHA_PAR "); + if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) + printk("RBR_PRE_PAR "); + if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) + printk("CONFIG "); + if (stat & RX_DMA_CTL_STAT_RCRINCON) + printk("RCRINCON "); + if (stat & RX_DMA_CTL_STAT_RCRFULL) + printk("RCRFULL "); + if (stat & RX_DMA_CTL_STAT_RBRFULL) + printk("RBRFULL "); + if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) + printk("RBRLOGPAGE "); + if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) + printk("CFIGLOGPAGE "); + if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) + 
printk("DC_FIDO "); + + printk(")\n"); +} + +static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) +{ + u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); + int err = 0; + + dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", + np->dev->name, rp->rx_channel, (unsigned long long) stat); + + niu_log_rxchan_errors(np, rp, stat); + + if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | + RX_DMA_CTL_STAT_PORT_FATAL)) + err = -EINVAL; + + nw64(RX_DMA_CTL_STAT(rp->rx_channel), + stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); + + return err; +} + +static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, + u64 cs) +{ + dev_err(np->device, PFX "%s: TX channel %u errors ( ", + np->dev->name, rp->tx_channel); + + if (cs & TX_CS_MBOX_ERR) + printk("MBOX "); + if (cs & TX_CS_PKT_SIZE_ERR) + printk("PKT_SIZE "); + if (cs & TX_CS_TX_RING_OFLOW) + printk("TX_RING_OFLOW "); + if (cs & TX_CS_PREF_BUF_PAR_ERR) + printk("PREF_BUF_PAR "); + if (cs & TX_CS_NACK_PREF) + printk("NACK_PREF "); + if (cs & TX_CS_NACK_PKT_RD) + printk("NACK_PKT_RD "); + if (cs & TX_CS_CONF_PART_ERR) + printk("CONF_PART "); + if (cs & TX_CS_PKT_PRT_ERR) + printk("PKT_PTR "); + + printk(")\n"); +} + +static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) +{ + u64 cs, logh, logl; + + cs = nr64(TX_CS(rp->tx_channel)); + logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); + logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); + + dev_err(np->device, PFX "%s: TX channel %u error, " + "cs[%llx] logh[%llx] logl[%llx]\n", + np->dev->name, rp->tx_channel, + (unsigned long long) cs, + (unsigned long long) logh, + (unsigned long long) logl); + + niu_log_txchan_errors(np, rp, cs); + + return -ENODEV; +} + +static int niu_mif_interrupt(struct niu *np) +{ + u64 mif_status = nr64(MIF_STATUS); + int phy_mdint = 0; + + if (np->flags & NIU_FLAGS_XMAC) { + u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); + + if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) + phy_mdint = 1; + } + + dev_err(np->device, PFX "%s: MIF interrupt, " + "stat[%llx] phy_mdint(%d)\n", + np->dev->name, (unsigned long long) mif_status, phy_mdint); + + return -ENODEV; +} + +static void niu_xmac_interrupt(struct niu *np) +{ + struct niu_xmac_stats *mp = &np->mac_stats.xmac; + u64 val; + + val = nr64_mac(XTXMAC_STATUS); + if (val & XTXMAC_STATUS_FRAME_CNT_EXP) + mp->tx_frames += TXMAC_FRM_CNT_COUNT; + if (val & XTXMAC_STATUS_BYTE_CNT_EXP) + mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; + if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) + mp->tx_fifo_errors++; + if (val & XTXMAC_STATUS_TXMAC_OFLOW) + mp->tx_overflow_errors++; + if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) + mp->tx_max_pkt_size_errors++; + if (val & XTXMAC_STATUS_TXMAC_UFLOW) + mp->tx_underflow_errors++; + + val = nr64_mac(XRXMAC_STATUS); + if (val & XRXMAC_STATUS_LCL_FLT_STATUS) + mp->rx_local_faults++; + if (val & XRXMAC_STATUS_RFLT_DET) + mp->rx_remote_faults++; + if (val & XRXMAC_STATUS_LFLT_CNT_EXP) + mp->rx_link_faults += LINK_FAULT_CNT_COUNT; + if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) + mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; + if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) + mp->rx_frags += RXMAC_FRAG_CNT_COUNT; + if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) + mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) + mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; + if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) + mp->rx_hist_cnt2 += 
RXMAC_HIST_CNT2_COUNT; + if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) + mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; + if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) + mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; + if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) + mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; + if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) + mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; + if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) + mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; + if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP) + mp->rx_octets += RXMAC_BT_CNT_COUNT; + if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) + mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; + if (val & XRXMAC_STATUS_LENERR_CNT_EXP) + mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; + if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) + mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; + if (val & XRXMAC_STATUS_RXUFLOW) + mp->rx_underflows++; + if (val & XRXMAC_STATUS_RXOFLOW) + mp->rx_overflows++; + + val = nr64_mac(XMAC_FC_STAT); + if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) + mp->pause_off_state++; + if (val & XMAC_FC_STAT_TX_MAC_PAUSE) + mp->pause_on_state++; + if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) + mp->pause_received++; +} + +static void niu_bmac_interrupt(struct niu *np) +{ + struct niu_bmac_stats *mp = &np->mac_stats.bmac; + u64 val; + + val = nr64_mac(BTXMAC_STATUS); + if (val & BTXMAC_STATUS_UNDERRUN) + mp->tx_underflow_errors++; + if (val & BTXMAC_STATUS_MAX_PKT_ERR) + mp->tx_max_pkt_size_errors++; + if (val & BTXMAC_STATUS_BYTE_CNT_EXP) + mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; + if (val & BTXMAC_STATUS_FRAME_CNT_EXP) + mp->tx_frames += BTXMAC_FRM_CNT_COUNT; + + val = nr64_mac(BRXMAC_STATUS); + if (val & BRXMAC_STATUS_OVERFLOW) + mp->rx_overflows++; + if (val & BRXMAC_STATUS_FRAME_CNT_EXP) + mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; + if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) + mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; + if (val & BRXMAC_STATUS_CRC_ERR_EXP) + mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; + if (val & BRXMAC_STATUS_LEN_ERR_EXP) + mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; + + val = nr64_mac(BMAC_CTRL_STATUS); + if (val & BMAC_CTRL_STATUS_NOPAUSE) + mp->pause_off_state++; + if (val & BMAC_CTRL_STATUS_PAUSE) + mp->pause_on_state++; + if (val & BMAC_CTRL_STATUS_PAUSE_RECV) + mp->pause_received++; +} + +static int niu_mac_interrupt(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_xmac_interrupt(np); + else + niu_bmac_interrupt(np); + + return 0; +} + +static void niu_log_device_error(struct niu *np, u64 stat) +{ + dev_err(np->device, PFX "%s: Core device errors ( ", + np->dev->name); + + if (stat & SYS_ERR_MASK_META2) + printk("META2 "); + if (stat & SYS_ERR_MASK_META1) + printk("META1 "); + if (stat & SYS_ERR_MASK_PEU) + printk("PEU "); + if (stat & SYS_ERR_MASK_TXC) + printk("TXC "); + if (stat & SYS_ERR_MASK_RDMC) + printk("RDMC "); + if (stat & SYS_ERR_MASK_TDMC) + printk("TDMC "); + if (stat & SYS_ERR_MASK_ZCP) + printk("ZCP "); + if (stat & SYS_ERR_MASK_FFLP) + printk("FFLP "); + if (stat & SYS_ERR_MASK_IPP) + printk("IPP "); + if (stat & SYS_ERR_MASK_MAC) + printk("MAC "); + if (stat & SYS_ERR_MASK_SMX) + printk("SMX "); + + printk(")\n"); +} + +static int niu_device_error(struct niu *np) +{ + u64 stat = nr64(SYS_ERR_STAT); + + dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", + np->dev->name, (unsigned long long) stat); + + niu_log_device_error(np, stat); + + return -ENODEV; +} + +static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp) +{ + u64 v0 = lp->v0; + u64 v1 = lp->v1; + u64 v2 = 
lp->v2; + int i, err = 0; + + if (v1 & 0x00000000ffffffffULL) { + u32 rx_vec = (v1 & 0xffffffff); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + if (rx_vec & (1 << rp->rx_channel)) { + int r = niu_rx_error(np, rp); + if (r) + err = r; + } + } + } + if (v1 & 0x7fffffff00000000ULL) { + u32 tx_vec = (v1 >> 32) & 0x7fffffff; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + if (tx_vec & (1 << rp->tx_channel)) { + int r = niu_tx_error(np, rp); + if (r) + err = r; + } + } + } + if ((v0 | v1) & 0x8000000000000000ULL) { + int r = niu_mif_interrupt(np); + if (r) + err = r; + } + if (v2) { + if (v2 & 0x01ef) { + int r = niu_mac_interrupt(np); + if (r) + err = r; + } + if (v2 & 0x0210) { + int r = niu_device_error(np); + if (r) + err = r; + } + } + + if (err) + niu_enable_interrupts(np, 0); + + return -EINVAL; +} + +static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, + int ldn) +{ + struct rxdma_mailbox *mbox = rp->mbox; + u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); + + stat_write = (RX_DMA_CTL_STAT_RCRTHRES | + RX_DMA_CTL_STAT_RCRTO); + nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); + + niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", + np->dev->name, (unsigned long long) stat); +} + +static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, + int ldn) +{ + rp->tx_cs = nr64(TX_CS(rp->tx_channel)); + + niudbg(INTR, "%s: txchan_intr cs[%llx]\n", + np->dev->name, (unsigned long long) rp->tx_cs); +} + +static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) +{ + struct niu_parent *parent = np->parent; + u32 rx_vec, tx_vec; + int i; + + tx_vec = (v0 >> 32); + rx_vec = (v0 & 0xffffffff); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + int ldn = LDN_RXDMA(rp->rx_channel); + + if (parent->ldg_map[ldn] != ldg) + continue; + + nw64(LD_IM0(ldn), LD_IM0_MASK); + if (rx_vec & (1 << rp->rx_channel)) + niu_rxchan_intr(np, rp, ldn); + } + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + int ldn = LDN_TXDMA(rp->tx_channel); + + if (parent->ldg_map[ldn] != ldg) + continue; + + nw64(LD_IM0(ldn), LD_IM0_MASK); + if (tx_vec & (1 << rp->tx_channel)) + niu_txchan_intr(np, rp, ldn); + } +} + +static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, + u64 v0, u64 v1, u64 v2) +{ + if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) { + lp->v0 = v0; + lp->v1 = v1; + lp->v2 = v2; + __niu_fastpath_interrupt(np, lp->ldg_num, v0); + __netif_rx_schedule(np->dev, &lp->napi); + } +} + +static irqreturn_t niu_interrupt(int irq, void *dev_id) +{ + struct niu_ldg *lp = dev_id; + struct niu *np = lp->np; + int ldg = lp->ldg_num; + unsigned long flags; + u64 v0, v1, v2; + + if (netif_msg_intr(np)) + printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", + lp, ldg); + + spin_lock_irqsave(&np->lock, flags); + + v0 = nr64(LDSV0(ldg)); + v1 = nr64(LDSV1(ldg)); + v2 = nr64(LDSV2(ldg)); + + if (netif_msg_intr(np)) + printk("v0[%llx] v1[%llx] v2[%llx]\n", + (unsigned long long) v0, + (unsigned long long) v1, + (unsigned long long) v2); + + if (unlikely(!v0 && !v1 && !v2)) { + spin_unlock_irqrestore(&np->lock, flags); + return IRQ_NONE; + } + + if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { + int err = niu_slowpath_interrupt(np, lp); + if (err) + goto out; + } + if (likely(v0 & ~((u64)1 << LDN_MIF))) + niu_schedule_napi(np, lp, v0, v1, v2); + else + niu_ldg_rearm(np, lp, 1); +out: + 
spin_unlock_irqrestore(&np->lock, flags); + + return IRQ_HANDLED; +} + +static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) +{ + if (rp->mbox) { + np->ops->free_coherent(np->device, + sizeof(struct rxdma_mailbox), + rp->mbox, rp->mbox_dma); + rp->mbox = NULL; + } + if (rp->rcr) { + np->ops->free_coherent(np->device, + MAX_RCR_RING_SIZE * sizeof(__le64), + rp->rcr, rp->rcr_dma); + rp->rcr = NULL; + rp->rcr_table_size = 0; + rp->rcr_index = 0; + } + if (rp->rbr) { + niu_rbr_free(np, rp); + + np->ops->free_coherent(np->device, + MAX_RBR_RING_SIZE * sizeof(__le32), + rp->rbr, rp->rbr_dma); + rp->rbr = NULL; + rp->rbr_table_size = 0; + rp->rbr_index = 0; + } + kfree(rp->rxhash); + rp->rxhash = NULL; +} + +static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) +{ + if (rp->mbox) { + np->ops->free_coherent(np->device, + sizeof(struct txdma_mailbox), + rp->mbox, rp->mbox_dma); + rp->mbox = NULL; + } + if (rp->descr) { + int i; + + for (i = 0; i < MAX_TX_RING_SIZE; i++) { + if (rp->tx_buffs[i].skb) + (void) release_tx_packet(np, rp, i); + } + + np->ops->free_coherent(np->device, + MAX_TX_RING_SIZE * sizeof(__le64), + rp->descr, rp->descr_dma); + rp->descr = NULL; + rp->pending = 0; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + } +} + +static void niu_free_channels(struct niu *np) +{ + int i; + + if (np->rx_rings) { + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_free_rx_ring_info(np, rp); + } + kfree(np->rx_rings); + np->rx_rings = NULL; + np->num_rx_rings = 0; + } + + if (np->tx_rings) { + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_free_tx_ring_info(np, rp); + } + kfree(np->tx_rings); + np->tx_rings = NULL; + np->num_tx_rings = 0; + } +} + +static int niu_alloc_rx_ring_info(struct niu *np, + struct rx_ring_info *rp) +{ + BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); + + rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), + GFP_KERNEL); + if (!rp->rxhash) + return -ENOMEM; + + rp->mbox = np->ops->alloc_coherent(np->device, + sizeof(struct rxdma_mailbox), + &rp->mbox_dma, GFP_KERNEL); + if (!rp->mbox) + return -ENOMEM; + if ((unsigned long)rp->mbox & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "RXDMA mailbox %p\n", np->dev->name, rp->mbox); + return -EINVAL; + } + + rp->rcr = np->ops->alloc_coherent(np->device, + MAX_RCR_RING_SIZE * sizeof(__le64), + &rp->rcr_dma, GFP_KERNEL); + if (!rp->rcr) + return -ENOMEM; + if ((unsigned long)rp->rcr & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "RXDMA RCR table %p\n", np->dev->name, rp->rcr); + return -EINVAL; + } + rp->rcr_table_size = MAX_RCR_RING_SIZE; + rp->rcr_index = 0; + + rp->rbr = np->ops->alloc_coherent(np->device, + MAX_RBR_RING_SIZE * sizeof(__le32), + &rp->rbr_dma, GFP_KERNEL); + if (!rp->rbr) + return -ENOMEM; + if ((unsigned long)rp->rbr & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "RXDMA RBR table %p\n", np->dev->name, rp->rbr); + return -EINVAL; + } + rp->rbr_table_size = MAX_RBR_RING_SIZE; + rp->rbr_index = 0; + rp->rbr_pending = 0; + + return 0; +} + +static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) +{ + int mtu = np->dev->mtu; + + /* These values are recommended by the HW designers for fair + * utilization of DRR amongst the rings. 
+ */ + rp->max_burst = mtu + 32; + if (rp->max_burst > 4096) + rp->max_burst = 4096; +} + +static int niu_alloc_tx_ring_info(struct niu *np, + struct tx_ring_info *rp) +{ + BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); + + rp->mbox = np->ops->alloc_coherent(np->device, + sizeof(struct txdma_mailbox), + &rp->mbox_dma, GFP_KERNEL); + if (!rp->mbox) + return -ENOMEM; + if ((unsigned long)rp->mbox & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "TXDMA mailbox %p\n", np->dev->name, rp->mbox); + return -EINVAL; + } + + rp->descr = np->ops->alloc_coherent(np->device, + MAX_TX_RING_SIZE * sizeof(__le64), + &rp->descr_dma, GFP_KERNEL); + if (!rp->descr) + return -ENOMEM; + if ((unsigned long)rp->descr & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "TXDMA descr table %p\n", np->dev->name, rp->descr); + return -EINVAL; + } + + rp->pending = MAX_TX_RING_SIZE; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + + /* XXX make these configurable... XXX */ + rp->mark_freq = rp->pending / 4; + + niu_set_max_burst(np, rp); + + return 0; +} + +static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) +{ + u16 bs; + + switch (PAGE_SIZE) { + case 4 * 1024: + case 8 * 1024: + case 16 * 1024: + case 32 * 1024: + rp->rbr_block_size = PAGE_SIZE; + rp->rbr_blocks_per_page = 1; + break; + + default: + if (PAGE_SIZE % (32 * 1024) == 0) + bs = 32 * 1024; + else if (PAGE_SIZE % (16 * 1024) == 0) + bs = 16 * 1024; + else if (PAGE_SIZE % (8 * 1024) == 0) + bs = 8 * 1024; + else if (PAGE_SIZE % (4 * 1024) == 0) + bs = 4 * 1024; + else + BUG(); + rp->rbr_block_size = bs; + rp->rbr_blocks_per_page = PAGE_SIZE / bs; + } + + rp->rbr_sizes[0] = 256; + rp->rbr_sizes[1] = 1024; + if (np->dev->mtu > ETH_DATA_LEN) { + switch (PAGE_SIZE) { + case 4 * 1024: + rp->rbr_sizes[2] = 4096; + break; + + default: + rp->rbr_sizes[2] = 8192; + break; + } + } else { + rp->rbr_sizes[2] = 2048; + } + rp->rbr_sizes[3] = rp->rbr_block_size; +} + +static int niu_alloc_channels(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int first_rx_channel, first_tx_channel; + int i, port, err; + + port = np->port; + first_rx_channel = first_tx_channel = 0; + for (i = 0; i < port; i++) { + first_rx_channel += parent->rxchan_per_port[i]; + first_tx_channel += parent->txchan_per_port[i]; + } + + np->num_rx_rings = parent->rxchan_per_port[port]; + np->num_tx_rings = parent->txchan_per_port[port]; + + np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), + GFP_KERNEL); + err = -ENOMEM; + if (!np->rx_rings) + goto out_err; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + rp->np = np; + rp->rx_channel = first_rx_channel + i; + + err = niu_alloc_rx_ring_info(np, rp); + if (err) + goto out_err; + + niu_size_rbr(np, rp); + + /* XXX better defaults, configurable, etc... 
XXX */ + rp->nonsyn_window = 64; + rp->nonsyn_threshold = rp->rcr_table_size - 64; + rp->syn_window = 64; + rp->syn_threshold = rp->rcr_table_size - 64; + rp->rcr_pkt_threshold = 16; + rp->rcr_timeout = 8; + rp->rbr_kick_thresh = RBR_REFILL_MIN; + if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) + rp->rbr_kick_thresh = rp->rbr_blocks_per_page; + + err = niu_rbr_fill(np, rp, GFP_KERNEL); + if (err) + return err; + } + + np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), + GFP_KERNEL); + err = -ENOMEM; + if (!np->tx_rings) + goto out_err; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + rp->np = np; + rp->tx_channel = first_tx_channel + i; + + err = niu_alloc_tx_ring_info(np, rp); + if (err) + goto out_err; + } + + return 0; + +out_err: + niu_free_channels(np); + return err; +} + +static int niu_tx_cs_sng_poll(struct niu *np, int channel) +{ + int limit = 1000; + + while (--limit > 0) { + u64 val = nr64(TX_CS(channel)); + if (val & TX_CS_SNG_STATE) + return 0; + } + return -ENODEV; +} + +static int niu_tx_channel_stop(struct niu *np, int channel) +{ + u64 val = nr64(TX_CS(channel)); + + val |= TX_CS_STOP_N_GO; + nw64(TX_CS(channel), val); + + return niu_tx_cs_sng_poll(np, channel); +} + +static int niu_tx_cs_reset_poll(struct niu *np, int channel) +{ + int limit = 1000; + + while (--limit > 0) { + u64 val = nr64(TX_CS(channel)); + if (!(val & TX_CS_RST)) + return 0; + } + return -ENODEV; +} + +static int niu_tx_channel_reset(struct niu *np, int channel) +{ + u64 val = nr64(TX_CS(channel)); + int err; + + val |= TX_CS_RST; + nw64(TX_CS(channel), val); + + err = niu_tx_cs_reset_poll(np, channel); + if (!err) + nw64(TX_RING_KICK(channel), 0); + + return err; +} + +static int niu_tx_channel_lpage_init(struct niu *np, int channel) +{ + u64 val; + + nw64(TX_LOG_MASK1(channel), 0); + nw64(TX_LOG_VAL1(channel), 0); + nw64(TX_LOG_MASK2(channel), 0); + nw64(TX_LOG_VAL2(channel), 0); + nw64(TX_LOG_PAGE_RELO1(channel), 0); + nw64(TX_LOG_PAGE_RELO2(channel), 0); + nw64(TX_LOG_PAGE_HDL(channel), 0); + + val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; + val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); + nw64(TX_LOG_PAGE_VLD(channel), val); + + /* XXX TXDMA 32bit mode? 
XXX */ + + return 0; +} + +static void niu_txc_enable_port(struct niu *np, int on) +{ + unsigned long flags; + u64 val, mask; + + niu_lock_parent(np, flags); + val = nr64(TXC_CONTROL); + mask = (u64)1 << np->port; + if (on) { + val |= TXC_CONTROL_ENABLE | mask; + } else { + val &= ~mask; + if ((val & ~TXC_CONTROL_ENABLE) == 0) + val &= ~TXC_CONTROL_ENABLE; + } + nw64(TXC_CONTROL, val); + niu_unlock_parent(np, flags); +} + +static void niu_txc_set_imask(struct niu *np, u64 imask) +{ + unsigned long flags; + u64 val; + + niu_lock_parent(np, flags); + val = nr64(TXC_INT_MASK); + val &= ~TXC_INT_MASK_VAL(np->port); + val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); + niu_unlock_parent(np, flags); +} + +static void niu_txc_port_dma_enable(struct niu *np, int on) +{ + u64 val = 0; + + if (on) { + int i; + + for (i = 0; i < np->num_tx_rings; i++) + val |= (1 << np->tx_rings[i].tx_channel); + } + nw64(TXC_PORT_DMA(np->port), val); +} + +static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + int err, channel = rp->tx_channel; + u64 val, ring_len; + + err = niu_tx_channel_stop(np, channel); + if (err) + return err; + + err = niu_tx_channel_reset(np, channel); + if (err) + return err; + + err = niu_tx_channel_lpage_init(np, channel); + if (err) + return err; + + nw64(TXC_DMA_MAX(channel), rp->max_burst); + nw64(TX_ENT_MSK(channel), 0); + + if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | + TX_RNG_CFIG_STADDR)) { + dev_err(np->device, PFX "%s: TX ring channel %d " + "DMA addr (%llx) is not aligned.\n", + np->dev->name, channel, + (unsigned long long) rp->descr_dma); + return -EINVAL; + } + + /* The length field in TX_RNG_CFIG is measured in 64-byte + * blocks. rp->pending is the number of TX descriptors in + * our ring, 8 bytes each, thus we divide by 8 bytes more + * to get the proper value the chip wants. 
+ */ + ring_len = (rp->pending / 8); + + val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | + rp->descr_dma); + nw64(TX_RNG_CFIG(channel), val); + + if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || + ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { + dev_err(np->device, PFX "%s: TX ring channel %d " + "MBOX addr (%llx) is has illegal bits.\n", + np->dev->name, channel, + (unsigned long long) rp->mbox_dma); + return -EINVAL; + } + nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); + nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); + + nw64(TX_CS(channel), 0); + + rp->last_pkt_cnt = 0; + + return 0; +} + +static void niu_init_rdc_groups(struct niu *np) +{ + struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; + int i, first_table_num = tp->first_table_num; + + for (i = 0; i < tp->num_tables; i++) { + struct rdc_table *tbl = &tp->tables[i]; + int this_table = first_table_num + i; + int slot; + + for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) + nw64(RDC_TBL(this_table, slot), + tbl->rxdma_channel[slot]); + } + + nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); +} + +static void niu_init_drr_weight(struct niu *np) +{ + int type = phy_decode(np->parent->port_phy, np->port); + u64 val; + + switch (type) { + case PORT_TYPE_10G: + val = PT_DRR_WEIGHT_DEFAULT_10G; + break; + + case PORT_TYPE_1G: + default: + val = PT_DRR_WEIGHT_DEFAULT_1G; + break; + } + nw64(PT_DRR_WT(np->port), val); +} + +static int niu_init_hostinfo(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int i, err, num_alt = niu_num_alt_addr(np); + int first_rdc_table = tp->first_table_num; + + err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + if (err) + return err; + + err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + if (err) + return err; + + for (i = 0; i < num_alt; i++) { + err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); + if (err) + return err; + } + + return 0; +} + +static int niu_rx_channel_reset(struct niu *np, int channel) +{ + return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), + RXDMA_CFIG1_RST, 1000, 10, + "RXDMA_CFIG1"); +} + +static int niu_rx_channel_lpage_init(struct niu *np, int channel) +{ + u64 val; + + nw64(RX_LOG_MASK1(channel), 0); + nw64(RX_LOG_VAL1(channel), 0); + nw64(RX_LOG_MASK2(channel), 0); + nw64(RX_LOG_VAL2(channel), 0); + nw64(RX_LOG_PAGE_RELO1(channel), 0); + nw64(RX_LOG_PAGE_RELO2(channel), 0); + nw64(RX_LOG_PAGE_HDL(channel), 0); + + val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; + val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); + nw64(RX_LOG_PAGE_VLD(channel), val); + + return 0; +} + +static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) +{ + u64 val; + + val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | + ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | + ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | + ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); + nw64(RDC_RED_PARA(rp->rx_channel), val); +} + +static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) +{ + u64 val = 0; + + switch (rp->rbr_block_size) { + case 4 * 1024: + val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 16 * 1024: + val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 32 * 1024: + val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + default: + return -EINVAL; + } + val |= 
RBR_CFIG_B_VLD2; + switch (rp->rbr_sizes[2]) { + case 2 * 1024: + val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 4 * 1024: + val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 16 * 1024: + val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD1; + switch (rp->rbr_sizes[1]) { + case 1 * 1024: + val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 2 * 1024: + val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 4 * 1024: + val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD0; + switch (rp->rbr_sizes[0]) { + case 256: + val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 512: + val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 1 * 1024: + val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 2 * 1024: + val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + + default: + return -EINVAL; + } + + *ret = val; + return 0; +} + +static int niu_enable_rx_channel(struct niu *np, int channel, int on) +{ + u64 val = nr64(RXDMA_CFIG1(channel)); + int limit; + + if (on) + val |= RXDMA_CFIG1_EN; + else + val &= ~RXDMA_CFIG1_EN; + nw64(RXDMA_CFIG1(channel), val); + + limit = 1000; + while (--limit > 0) { + if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) + break; + udelay(10); + } + if (limit <= 0) + return -ENODEV; + return 0; +} + +static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + int err, channel = rp->rx_channel; + u64 val; + + err = niu_rx_channel_reset(np, channel); + if (err) + return err; + + err = niu_rx_channel_lpage_init(np, channel); + if (err) + return err; + + niu_rx_channel_wred_init(np, rp); + + nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); + nw64(RX_DMA_CTL_STAT(channel), + (RX_DMA_CTL_STAT_MEX | + RX_DMA_CTL_STAT_RCRTHRES | + RX_DMA_CTL_STAT_RCRTO | + RX_DMA_CTL_STAT_RBR_EMPTY)); + nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); + nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); + nw64(RBR_CFIG_A(channel), + ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | + (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); + err = niu_compute_rbr_cfig_b(rp, &val); + if (err) + return err; + nw64(RBR_CFIG_B(channel), val); + nw64(RCRCFIG_A(channel), + ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | + (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); + nw64(RCRCFIG_B(channel), + ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | + RCRCFIG_B_ENTOUT | + ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); + + err = niu_enable_rx_channel(np, channel, 1); + if (err) + return err; + + nw64(RBR_KICK(channel), rp->rbr_index); + + val = nr64(RX_DMA_CTL_STAT(channel)); + val |= RX_DMA_CTL_STAT_RBR_EMPTY; + nw64(RX_DMA_CTL_STAT(channel), val); + + return 0; +} + +static int niu_init_rx_channels(struct niu *np) +{ + unsigned long flags; + u64 seed = jiffies_64; + int err, i; + + niu_lock_parent(np, flags); + nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); + nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); + niu_unlock_parent(np, flags); + + /* XXX RXDMA 32bit mode? 
XXX */ + + niu_init_rdc_groups(np); + niu_init_drr_weight(np); + + err = niu_init_hostinfo(np); + if (err) + return err; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + err = niu_init_one_rx_channel(np, rp); + if (err) + return err; + } + + return 0; +} + +static int niu_set_ip_frag_rule(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_classifier *cp = &np->clas; + struct niu_tcam_entry *tp; + int index, err; + + /* XXX fix this allocation scheme XXX */ + index = cp->tcam_index; + tp = &parent->tcam[index]; + + /* Note that the noport bit is the same in both ipv4 and + * ipv6 format TCAM entries. + */ + memset(tp, 0, sizeof(*tp)); + tp->key[1] = TCAM_V4KEY1_NOPORT; + tp->key_mask[1] = TCAM_V4KEY1_NOPORT; + tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | + ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); + err = tcam_write(np, index, tp->key, tp->key_mask); + if (err) + return err; + err = tcam_assoc_write(np, index, tp->assoc_data); + if (err) + return err; + + return 0; +} + +static int niu_init_classifier_hw(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_classifier *cp = &np->clas; + int i, err; + + nw64(H1POLY, cp->h1_init); + nw64(H2POLY, cp->h2_init); + + err = niu_init_hostinfo(np); + if (err) + return err; + + for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { + struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; + + vlan_tbl_write(np, i, np->port, + vp->vlan_pref, vp->rdc_num); + } + + for (i = 0; i < cp->num_alt_mac_mappings; i++) { + struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; + + err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, + ap->rdc_num, ap->mac_pref); + if (err) + return err; + } + + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { + int index = i - CLASS_CODE_USER_PROG1; + + err = niu_set_tcam_key(np, i, parent->tcam_key[index]); + if (err) + return err; + err = niu_set_flow_key(np, i, parent->flow_key[index]); + if (err) + return err; + } + + err = niu_set_ip_frag_rule(np); + if (err) + return err; + + tcam_enable(np, 1); + + return 0; +} + +static int niu_zcp_write(struct niu *np, int index, u64 *data) +{ + nw64(ZCP_RAM_DATA0, data[0]); + nw64(ZCP_RAM_DATA1, data[1]); + nw64(ZCP_RAM_DATA2, data[2]); + nw64(ZCP_RAM_DATA3, data[3]); + nw64(ZCP_RAM_DATA4, data[4]); + nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); + nw64(ZCP_RAM_ACC, + (ZCP_RAM_ACC_WRITE | + (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | + (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); + + return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); +} + +static int niu_zcp_read(struct niu *np, int index, u64 *data) +{ + int err; + + err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); + if (err) { + dev_err(np->device, PFX "%s: ZCP read busy won't clear, " + "ZCP_RAM_ACC[%llx]\n", np->dev->name, + (unsigned long long) nr64(ZCP_RAM_ACC)); + return err; + } + + nw64(ZCP_RAM_ACC, + (ZCP_RAM_ACC_READ | + (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | + (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); + + err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); + if (err) { + dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " + "ZCP_RAM_ACC[%llx]\n", np->dev->name, + (unsigned long long) nr64(ZCP_RAM_ACC)); + return err; + } + + data[0] = nr64(ZCP_RAM_DATA0); + data[1] = nr64(ZCP_RAM_DATA1); + data[2] = nr64(ZCP_RAM_DATA2); + data[3] = nr64(ZCP_RAM_DATA3); + data[4] = nr64(ZCP_RAM_DATA4); + + return 0; +} + +static void 
niu_zcp_cfifo_reset(struct niu *np) +{ + u64 val = nr64(RESET_CFIFO); + + val |= RESET_CFIFO_RST(np->port); + nw64(RESET_CFIFO, val); + udelay(10); + + val &= ~RESET_CFIFO_RST(np->port); + nw64(RESET_CFIFO, val); +} + +static int niu_init_zcp(struct niu *np) +{ + u64 data[5], rbuf[5]; + int i, max, err; + + if (np->parent->plat_type != PLAT_TYPE_NIU) { + if (np->port == 0 || np->port == 1) + max = ATLAS_P0_P1_CFIFO_ENTRIES; + else + max = ATLAS_P2_P3_CFIFO_ENTRIES; + } else + max = NIU_CFIFO_ENTRIES; + + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + data[4] = 0; + + for (i = 0; i < max; i++) { + err = niu_zcp_write(np, i, data); + if (err) + return err; + err = niu_zcp_read(np, i, rbuf); + if (err) + return err; + } + + niu_zcp_cfifo_reset(np); + nw64(CFIFO_ECC(np->port), 0); + nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); + (void) nr64(ZCP_INT_STAT); + nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); + + return 0; +} + +static void niu_ipp_write(struct niu *np, int index, u64 *data) +{ + u64 val = nr64_ipp(IPP_CFIG); + + nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); + nw64_ipp(IPP_DFIFO_WR_PTR, index); + nw64_ipp(IPP_DFIFO_WR0, data[0]); + nw64_ipp(IPP_DFIFO_WR1, data[1]); + nw64_ipp(IPP_DFIFO_WR2, data[2]); + nw64_ipp(IPP_DFIFO_WR3, data[3]); + nw64_ipp(IPP_DFIFO_WR4, data[4]); + nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); +} + +static void niu_ipp_read(struct niu *np, int index, u64 *data) +{ + nw64_ipp(IPP_DFIFO_RD_PTR, index); + data[0] = nr64_ipp(IPP_DFIFO_RD0); + data[1] = nr64_ipp(IPP_DFIFO_RD1); + data[2] = nr64_ipp(IPP_DFIFO_RD2); + data[3] = nr64_ipp(IPP_DFIFO_RD3); + data[4] = nr64_ipp(IPP_DFIFO_RD4); +} + +static int niu_ipp_reset(struct niu *np) +{ + return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, + 1000, 100, "IPP_CFIG"); +} + +static int niu_init_ipp(struct niu *np) +{ + u64 data[5], rbuf[5], val; + int i, max, err; + + if (np->parent->plat_type != PLAT_TYPE_NIU) { + if (np->port == 0 || np->port == 1) + max = ATLAS_P0_P1_DFIFO_ENTRIES; + else + max = ATLAS_P2_P3_DFIFO_ENTRIES; + } else + max = NIU_DFIFO_ENTRIES; + + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + data[4] = 0; + + for (i = 0; i < max; i++) { + niu_ipp_write(np, i, data); + niu_ipp_read(np, i, rbuf); + } + + (void) nr64_ipp(IPP_INT_STAT); + (void) nr64_ipp(IPP_INT_STAT); + + err = niu_ipp_reset(np); + if (err) + return err; + + (void) nr64_ipp(IPP_PKT_DIS); + (void) nr64_ipp(IPP_BAD_CS_CNT); + (void) nr64_ipp(IPP_ECC); + + (void) nr64_ipp(IPP_INT_STAT); + + nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); + + val = nr64_ipp(IPP_CFIG); + val &= ~IPP_CFIG_IP_MAX_PKT; + val |= (IPP_CFIG_IPP_ENABLE | + IPP_CFIG_DFIFO_ECC_EN | + IPP_CFIG_DROP_BAD_CRC | + IPP_CFIG_CKSUM_EN | + (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); + nw64_ipp(IPP_CFIG, val); + + return 0; +} + +static void niu_init_xif_xmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + val = nr64_mac(XMAC_CONFIG); + + if ((np->flags & NIU_FLAGS_10G) != 0 && + (np->flags & NIU_FLAGS_FIBER) != 0) { + if (netif_carrier_ok(np->dev)) { + val |= XMAC_CONFIG_LED_POLARITY; + val &= ~XMAC_CONFIG_FORCE_LED_ON; + } else { + val |= XMAC_CONFIG_FORCE_LED_ON; + val &= ~XMAC_CONFIG_LED_POLARITY; + } + } + + val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; + + val |= XMAC_CONFIG_TX_OUTPUT_EN; + + if (lp->loopback_mode == LOOPBACK_MAC) { + val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; + val |= XMAC_CONFIG_LOOPBACK; + } else { + val &= ~XMAC_CONFIG_LOOPBACK; + } + + if (np->flags & NIU_FLAGS_10G) { + val &= ~XMAC_CONFIG_LFS_DISABLE; + } else { + 
val |= XMAC_CONFIG_LFS_DISABLE; + if (!(np->flags & NIU_FLAGS_FIBER)) + val |= XMAC_CONFIG_1G_PCS_BYPASS; + else + val &= ~XMAC_CONFIG_1G_PCS_BYPASS; + } + + val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; + + if (lp->active_speed == SPEED_100) + val |= XMAC_CONFIG_SEL_CLK_25MHZ; + else + val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; + + nw64_mac(XMAC_CONFIG, val); + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_MODE_MASK; + if (np->flags & NIU_FLAGS_10G) { + val |= XMAC_CONFIG_MODE_XGMII; + } else { + if (lp->active_speed == SPEED_100) + val |= XMAC_CONFIG_MODE_MII; + else + val |= XMAC_CONFIG_MODE_GMII; + } + + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_init_xif_bmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; + + if (lp->loopback_mode == LOOPBACK_MAC) + val |= BMAC_XIF_CONFIG_MII_LOOPBACK; + else + val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; + + if (lp->active_speed == SPEED_1000) + val |= BMAC_XIF_CONFIG_GMII_MODE; + else + val &= ~BMAC_XIF_CONFIG_GMII_MODE; + + val &= ~(BMAC_XIF_CONFIG_LINK_LED | + BMAC_XIF_CONFIG_LED_POLARITY); + + if (!(np->flags & NIU_FLAGS_10G) && + !(np->flags & NIU_FLAGS_FIBER) && + lp->active_speed == SPEED_100) + val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; + else + val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; + + nw64_mac(BMAC_XIF_CONFIG, val); +} + +static void niu_init_xif(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_init_xif_xmac(np); + else + niu_init_xif_bmac(np); +} + +static void niu_pcs_mii_reset(struct niu *np) +{ + u64 val = nr64_pcs(PCS_MII_CTL); + val |= PCS_MII_CTL_RST; + nw64_pcs(PCS_MII_CTL, val); +} + +static void niu_xpcs_reset(struct niu *np) +{ + u64 val = nr64_xpcs(XPCS_CONTROL1); + val |= XPCS_CONTROL1_RESET; + nw64_xpcs(XPCS_CONTROL1, val); +} + +static int niu_init_pcs(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { + case NIU_FLAGS_FIBER: + /* 1G fiber */ + nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); + nw64_pcs(PCS_DPATH_MODE, 0); + niu_pcs_mii_reset(np); + break; + + case NIU_FLAGS_10G: + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + if (!(np->flags & NIU_FLAGS_XMAC)) + return -EINVAL; + + /* 10G copper or fiber */ + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; + nw64_mac(XMAC_CONFIG, val); + + niu_xpcs_reset(np); + + val = nr64_xpcs(XPCS_CONTROL1); + if (lp->loopback_mode == LOOPBACK_PHY) + val |= XPCS_CONTROL1_LOOPBACK; + else + val &= ~XPCS_CONTROL1_LOOPBACK; + nw64_xpcs(XPCS_CONTROL1, val); + + nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); + (void) nr64_xpcs(XPCS_SYMERR_CNT01); + (void) nr64_xpcs(XPCS_SYMERR_CNT23); + break; + + case 0: + /* 1G copper */ + nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); + niu_pcs_mii_reset(np); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int niu_reset_tx_xmac(struct niu *np) +{ + return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, + (XTXMAC_SW_RST_REG_RS | + XTXMAC_SW_RST_SOFT_RST), + 1000, 100, "XTXMAC_SW_RST"); +} + +static int niu_reset_tx_bmac(struct niu *np) +{ + int limit; + + nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u TX BMAC would not reset, " + "BTXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(BTXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_tx_mac(struct niu *np) +{ + if 
(np->flags & NIU_FLAGS_XMAC) + return niu_reset_tx_xmac(np); + else + return niu_reset_tx_bmac(np); +} + +static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) +{ + u64 val; + + val = nr64_mac(XMAC_MIN); + val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | + XMAC_MIN_RX_MIN_PKT_SIZE); + val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); + val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); + nw64_mac(XMAC_MIN, val); + + nw64_mac(XMAC_MAX, max); + + nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); + + val = nr64_mac(XMAC_IPG); + if (np->flags & NIU_FLAGS_10G) { + val &= ~XMAC_IPG_IPG_XGMII; + val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); + } else { + val &= ~XMAC_IPG_IPG_MII_GMII; + val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); + } + nw64_mac(XMAC_IPG, val); + + val = nr64_mac(XMAC_CONFIG); + val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | + XMAC_CONFIG_STRETCH_MODE | + XMAC_CONFIG_VAR_MIN_IPG_EN | + XMAC_CONFIG_TX_ENABLE); + nw64_mac(XMAC_CONFIG, val); + + nw64_mac(TXMAC_FRM_CNT, 0); + nw64_mac(TXMAC_BYTE_CNT, 0); +} + +static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) +{ + u64 val; + + nw64_mac(BMAC_MIN_FRAME, min); + nw64_mac(BMAC_MAX_FRAME, max); + + nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); + nw64_mac(BMAC_CTRL_TYPE, 0x8808); + nw64_mac(BMAC_PREAMBLE_SIZE, 7); + + val = nr64_mac(BTXMAC_CONFIG); + val &= ~(BTXMAC_CONFIG_FCS_DISABLE | + BTXMAC_CONFIG_ENABLE); + nw64_mac(BTXMAC_CONFIG, val); +} + +static void niu_init_tx_mac(struct niu *np) +{ + u64 min, max; + + min = 64; + if (np->dev->mtu > ETH_DATA_LEN) + max = 9216; + else + max = 1522; + + /* The XMAC_MIN register only accepts values for TX min which + * have the low 3 bits cleared. + */ + BUILD_BUG_ON(min & 0x7); + + if (np->flags & NIU_FLAGS_XMAC) + niu_init_tx_xmac(np, min, max); + else + niu_init_tx_bmac(np, min, max); +} + +static int niu_reset_rx_xmac(struct niu *np) +{ + int limit; + + nw64_mac(XRXMAC_SW_RST, + XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | + XRXMAC_SW_RST_SOFT_RST))) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u RX XMAC would not reset, " + "XRXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(XRXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_rx_bmac(struct niu *np) +{ + int limit; + + nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u RX BMAC would not reset, " + "BRXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(BRXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_rx_mac(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return niu_reset_rx_xmac(np); + else + return niu_reset_rx_bmac(np); +} + +static void niu_init_rx_xmac(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int first_rdc_table = tp->first_table_num; + unsigned long i; + u64 val; + + nw64_mac(XMAC_ADD_FILT0, 0); + nw64_mac(XMAC_ADD_FILT1, 0); + nw64_mac(XMAC_ADD_FILT2, 0); + nw64_mac(XMAC_ADD_FILT12_MASK, 0); + nw64_mac(XMAC_ADD_FILT00_MASK, 0); + for (i = 0; i < MAC_NUM_HASH; i++) + nw64_mac(XMAC_HASH_TBL(i), 0); + nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); + niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + + val = 
nr64_mac(XMAC_CONFIG); + val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | + XMAC_CONFIG_PROMISCUOUS | + XMAC_CONFIG_PROMISC_GROUP | + XMAC_CONFIG_ERR_CHK_DIS | + XMAC_CONFIG_RX_CRC_CHK_DIS | + XMAC_CONFIG_RESERVED_MULTICAST | + XMAC_CONFIG_RX_CODEV_CHK_DIS | + XMAC_CONFIG_ADDR_FILTER_EN | + XMAC_CONFIG_RCV_PAUSE_ENABLE | + XMAC_CONFIG_STRIP_CRC | + XMAC_CONFIG_PASS_FLOW_CTRL | + XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); + val |= (XMAC_CONFIG_HASH_FILTER_EN); + nw64_mac(XMAC_CONFIG, val); + + nw64_mac(RXMAC_BT_CNT, 0); + nw64_mac(RXMAC_BC_FRM_CNT, 0); + nw64_mac(RXMAC_MC_FRM_CNT, 0); + nw64_mac(RXMAC_FRAG_CNT, 0); + nw64_mac(RXMAC_HIST_CNT1, 0); + nw64_mac(RXMAC_HIST_CNT2, 0); + nw64_mac(RXMAC_HIST_CNT3, 0); + nw64_mac(RXMAC_HIST_CNT4, 0); + nw64_mac(RXMAC_HIST_CNT5, 0); + nw64_mac(RXMAC_HIST_CNT6, 0); + nw64_mac(RXMAC_HIST_CNT7, 0); + nw64_mac(RXMAC_MPSZER_CNT, 0); + nw64_mac(RXMAC_CRC_ER_CNT, 0); + nw64_mac(RXMAC_CD_VIO_CNT, 0); + nw64_mac(LINK_FAULT_CNT, 0); +} + +static void niu_init_rx_bmac(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int first_rdc_table = tp->first_table_num; + unsigned long i; + u64 val; + + nw64_mac(BMAC_ADD_FILT0, 0); + nw64_mac(BMAC_ADD_FILT1, 0); + nw64_mac(BMAC_ADD_FILT2, 0); + nw64_mac(BMAC_ADD_FILT12_MASK, 0); + nw64_mac(BMAC_ADD_FILT00_MASK, 0); + for (i = 0; i < MAC_NUM_HASH; i++) + nw64_mac(BMAC_HASH_TBL(i), 0); + niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); + + val = nr64_mac(BRXMAC_CONFIG); + val &= ~(BRXMAC_CONFIG_ENABLE | + BRXMAC_CONFIG_STRIP_PAD | + BRXMAC_CONFIG_STRIP_FCS | + BRXMAC_CONFIG_PROMISC | + BRXMAC_CONFIG_PROMISC_GRP | + BRXMAC_CONFIG_ADDR_FILT_EN | + BRXMAC_CONFIG_DISCARD_DIS); + val |= (BRXMAC_CONFIG_HASH_FILT_EN); + nw64_mac(BRXMAC_CONFIG, val); + + val = nr64_mac(BMAC_ADDR_CMPEN); + val |= BMAC_ADDR_CMPEN_EN0; + nw64_mac(BMAC_ADDR_CMPEN, val); +} + +static void niu_init_rx_mac(struct niu *np) +{ + niu_set_primary_mac(np, np->dev->dev_addr); + + if (np->flags & NIU_FLAGS_XMAC) + niu_init_rx_xmac(np); + else + niu_init_rx_bmac(np); +} + +static void niu_enable_tx_xmac(struct niu *np, int on) +{ + u64 val = nr64_mac(XMAC_CONFIG); + + if (on) + val |= XMAC_CONFIG_TX_ENABLE; + else + val &= ~XMAC_CONFIG_TX_ENABLE; + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_enable_tx_bmac(struct niu *np, int on) +{ + u64 val = nr64_mac(BTXMAC_CONFIG); + + if (on) + val |= BTXMAC_CONFIG_ENABLE; + else + val &= ~BTXMAC_CONFIG_ENABLE; + nw64_mac(BTXMAC_CONFIG, val); +} + +static void niu_enable_tx_mac(struct niu *np, int on) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_enable_tx_xmac(np, on); + else + niu_enable_tx_bmac(np, on); +} + +static void niu_enable_rx_xmac(struct niu *np, int on) +{ + u64 val = nr64_mac(XMAC_CONFIG); + + val &= ~(XMAC_CONFIG_HASH_FILTER_EN | + XMAC_CONFIG_PROMISCUOUS); + + if (np->flags & NIU_FLAGS_MCAST) + val |= XMAC_CONFIG_HASH_FILTER_EN; + if (np->flags & NIU_FLAGS_PROMISC) + val |= XMAC_CONFIG_PROMISCUOUS; + + if (on) + val |= XMAC_CONFIG_RX_MAC_ENABLE; + else + val &= ~XMAC_CONFIG_RX_MAC_ENABLE; + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_enable_rx_bmac(struct niu *np, int on) +{ + u64 val = nr64_mac(BRXMAC_CONFIG); + + val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | + BRXMAC_CONFIG_PROMISC); + + if (np->flags & NIU_FLAGS_MCAST) + val |= BRXMAC_CONFIG_HASH_FILT_EN; + if (np->flags & NIU_FLAGS_PROMISC) + val |= BRXMAC_CONFIG_PROMISC; + + if (on) + val |= 
BRXMAC_CONFIG_ENABLE; + else + val &= ~BRXMAC_CONFIG_ENABLE; + nw64_mac(BRXMAC_CONFIG, val); +} + +static void niu_enable_rx_mac(struct niu *np, int on) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_enable_rx_xmac(np, on); + else + niu_enable_rx_bmac(np, on); +} + +static int niu_init_mac(struct niu *np) +{ + int err; + + niu_init_xif(np); + err = niu_init_pcs(np); + if (err) + return err; + + err = niu_reset_tx_mac(np); + if (err) + return err; + niu_init_tx_mac(np); + err = niu_reset_rx_mac(np); + if (err) + return err; + niu_init_rx_mac(np); + + /* This looks hookey but the RX MAC reset we just did will + * undo some of the state we setup in niu_init_tx_mac() so we + * have to call it again. In particular, the RX MAC reset will + * set the XMAC_MAX register back to it's default value. + */ + niu_init_tx_mac(np); + niu_enable_tx_mac(np, 1); + + niu_enable_rx_mac(np, 1); + + return 0; +} + +static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + (void) niu_tx_channel_stop(np, rp->tx_channel); +} + +static void niu_stop_tx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_stop_one_tx_channel(np, rp); + } +} + +static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + (void) niu_tx_channel_reset(np, rp->tx_channel); +} + +static void niu_reset_tx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_reset_one_tx_channel(np, rp); + } +} + +static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + (void) niu_enable_rx_channel(np, rp->rx_channel, 0); +} + +static void niu_stop_rx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_stop_one_rx_channel(np, rp); + } +} + +static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + int channel = rp->rx_channel; + + (void) niu_rx_channel_reset(np, channel); + nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); + nw64(RX_DMA_CTL_STAT(channel), 0); + (void) niu_enable_rx_channel(np, channel, 0); +} + +static void niu_reset_rx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_reset_one_rx_channel(np, rp); + } +} + +static void niu_disable_ipp(struct niu *np) +{ + u64 rd, wr, val; + int limit; + + rd = nr64_ipp(IPP_DFIFO_RD_PTR); + wr = nr64_ipp(IPP_DFIFO_WR_PTR); + limit = 100; + while (--limit >= 0 && (rd != wr)) { + rd = nr64_ipp(IPP_DFIFO_RD_PTR); + wr = nr64_ipp(IPP_DFIFO_WR_PTR); + } + if (limit < 0 && + (rd != 0 && wr != 1)) { + dev_err(np->device, PFX "%s: IPP would not quiesce, " + "rd_ptr[%llx] wr_ptr[%llx]\n", + np->dev->name, + (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR), + (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR)); + } + + val = nr64_ipp(IPP_CFIG); + val &= ~(IPP_CFIG_IPP_ENABLE | + IPP_CFIG_DFIFO_ECC_EN | + IPP_CFIG_DROP_BAD_CRC | + IPP_CFIG_CKSUM_EN); + nw64_ipp(IPP_CFIG, val); + + (void) niu_ipp_reset(np); +} + +static int niu_init_hw(struct niu *np) +{ + int i, err; + + niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); + niu_txc_enable_port(np, 1); + niu_txc_port_dma_enable(np, 1); + niu_txc_set_imask(np, 0); + + niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + err = niu_init_one_tx_channel(np, 
rp); + if (err) + return err; + } + + niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); + err = niu_init_rx_channels(np); + if (err) + goto out_uninit_tx_channels; + + niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); + err = niu_init_classifier_hw(np); + if (err) + goto out_uninit_rx_channels; + + niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); + err = niu_init_zcp(np); + if (err) + goto out_uninit_rx_channels; + + niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); + err = niu_init_ipp(np); + if (err) + goto out_uninit_rx_channels; + + niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); + err = niu_init_mac(np); + if (err) + goto out_uninit_ipp; + + return 0; + +out_uninit_ipp: + niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); + niu_disable_ipp(np); + +out_uninit_rx_channels: + niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); + niu_stop_rx_channels(np); + niu_reset_rx_channels(np); + +out_uninit_tx_channels: + niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); + niu_stop_tx_channels(np); + niu_reset_tx_channels(np); + + return err; +} + +static void niu_stop_hw(struct niu *np) +{ + niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); + niu_enable_interrupts(np, 0); + + niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); + niu_enable_rx_mac(np, 0); + + niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); + niu_disable_ipp(np); + + niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); + niu_stop_tx_channels(np); + + niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); + niu_stop_rx_channels(np); + + niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); + niu_reset_tx_channels(np); + + niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); + niu_reset_rx_channels(np); +} + +static int niu_request_irq(struct niu *np) +{ + int i, j, err; + + err = 0; + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + err = request_irq(lp->irq, niu_interrupt, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, + np->dev->name, lp); + if (err) + goto out_free_irqs; + + } + + return 0; + +out_free_irqs: + for (j = 0; j < i; j++) { + struct niu_ldg *lp = &np->ldg[j]; + + free_irq(lp->irq, lp); + } + return err; +} + +static void niu_free_irq(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + free_irq(lp->irq, lp); + } +} + +static void niu_enable_napi(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) + napi_enable(&np->ldg[i].napi); +} + +static void niu_disable_napi(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) + napi_disable(&np->ldg[i].napi); +} + +static int niu_open(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + int err; + + netif_carrier_off(dev); + + err = niu_alloc_channels(np); + if (err) + goto out_err; + + err = niu_enable_interrupts(np, 0); + if (err) + goto out_free_channels; + + err = niu_request_irq(np); + if (err) + goto out_free_channels; + + niu_enable_napi(np); + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (err) { + niu_disable_napi(np); + goto out_free_irq; + } + + netif_start_queue(dev); + + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + + return 0; + +out_free_irq: + 
niu_free_irq(np); + +out_free_channels: + niu_free_channels(np); + +out_err: + return err; +} + +static void niu_full_shutdown(struct niu *np, struct net_device *dev) +{ + cancel_work_sync(&np->reset_task); + + niu_disable_napi(np); + netif_stop_queue(dev); + + del_timer_sync(&np->timer); + + spin_lock_irq(&np->lock); + + niu_stop_hw(np); + + spin_unlock_irq(&np->lock); +} + +static int niu_close(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + niu_full_shutdown(np, dev); + + niu_free_irq(np); + + niu_free_channels(np); + + return 0; +} + +static void niu_sync_xmac_stats(struct niu *np) +{ + struct niu_xmac_stats *mp = &np->mac_stats.xmac; + + mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); + mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); + + mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); + mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); + mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); + mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); + mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); + mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); + mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); + mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); + mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); + mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); + mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); + mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); + mp->rx_octets += nr64_mac(RXMAC_BT_CNT); + mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); + mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); + mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); +} + +static void niu_sync_bmac_stats(struct niu *np) +{ + struct niu_bmac_stats *mp = &np->mac_stats.bmac; + + mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); + mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); + + mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); + mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); + mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); + mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); +} + +static void niu_sync_mac_stats(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_sync_xmac_stats(np); + else + niu_sync_bmac_stats(np); +} + +static void niu_get_rx_stats(struct niu *np) +{ + unsigned long pkts, dropped, errors, bytes; + int i; + + pkts = dropped = errors = bytes = 0; + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + pkts += rp->rx_packets; + bytes += rp->rx_bytes; + dropped += rp->rx_dropped; + errors += rp->rx_errors; + } + np->net_stats.rx_packets = pkts; + np->net_stats.rx_bytes = bytes; + np->net_stats.rx_dropped = dropped; + np->net_stats.rx_errors = errors; +} + +static void niu_get_tx_stats(struct niu *np) +{ + unsigned long pkts, errors, bytes; + int i; + + pkts = errors = bytes = 0; + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + pkts += rp->tx_packets; + bytes += rp->tx_bytes; + errors += rp->tx_errors; + } + np->net_stats.tx_packets = pkts; + np->net_stats.tx_bytes = bytes; + np->net_stats.tx_errors = errors; +} + +static struct net_device_stats *niu_get_stats(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + niu_get_rx_stats(np); + niu_get_tx_stats(np); + + return &np->net_stats; +} + +static void niu_load_hash_xmac(struct niu *np, u16 *hash) +{ + int i; + + for (i = 0; i < 16; i++) + nw64_mac(XMAC_HASH_TBL(i), hash[i]); +} + +static void niu_load_hash_bmac(struct niu *np, u16 *hash) +{ + int i; + + for (i = 0; i < 16; i++) + nw64_mac(BMAC_HASH_TBL(i), hash[i]); +} + +static void niu_load_hash(struct niu *np, u16 
*hash) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_load_hash_xmac(np, hash); + else + niu_load_hash_bmac(np, hash); +} + +static void niu_set_rx_mode(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + int i, alt_cnt, err; + struct dev_addr_list *addr; + unsigned long flags; + u16 hash[16] = { 0, }; + + spin_lock_irqsave(&np->lock, flags); + niu_enable_rx_mac(np, 0); + + np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); + if (dev->flags & IFF_PROMISC) + np->flags |= NIU_FLAGS_PROMISC; + if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) + np->flags |= NIU_FLAGS_MCAST; + + alt_cnt = dev->uc_count; + if (alt_cnt > niu_num_alt_addr(np)) { + alt_cnt = 0; + np->flags |= NIU_FLAGS_PROMISC; + } + + if (alt_cnt) { + int index = 0; + + for (addr = dev->uc_list; addr; addr = addr->next) { + err = niu_set_alt_mac(np, index, + addr->da_addr); + if (err) + printk(KERN_WARNING PFX "%s: Error %d " + "adding alt mac %d\n", + dev->name, err, index); + err = niu_enable_alt_mac(np, index, 1); + if (err) + printk(KERN_WARNING PFX "%s: Error %d " + "enabling alt mac %d\n", + dev->name, err, index); + + index++; + } + } else { + for (i = 0; i < niu_num_alt_addr(np); i++) { + err = niu_enable_alt_mac(np, i, 0); + if (err) + printk(KERN_WARNING PFX "%s: Error %d " + "disabling alt mac %d\n", + dev->name, err, i); + } + } + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 16; i++) + hash[i] = 0xffff; + } else if (dev->mc_count > 0) { + for (addr = dev->mc_list; addr; addr = addr->next) { + u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); + + crc >>= 24; + hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); + } + } + + if (np->flags & NIU_FLAGS_MCAST) + niu_load_hash(np, hash); + + niu_enable_rx_mac(np, 1); + spin_unlock_irqrestore(&np->lock, flags); +} + +static int niu_set_mac_addr(struct net_device *dev, void *p) +{ + struct niu *np = netdev_priv(dev); + struct sockaddr *addr = p; + unsigned long flags; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) + return 0; + + spin_lock_irqsave(&np->lock, flags); + niu_enable_rx_mac(np, 0); + niu_set_primary_mac(np, dev->dev_addr); + niu_enable_rx_mac(np, 1); + spin_unlock_irqrestore(&np->lock, flags); + + return 0; +} + +static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return -EOPNOTSUPP; +} + +static void niu_netif_stop(struct niu *np) +{ + np->dev->trans_start = jiffies; /* prevent tx timeout */ + + niu_disable_napi(np); + + netif_tx_disable(np->dev); +} + +static void niu_netif_start(struct niu *np) +{ + /* NOTE: unconditional netif_wake_queue is only appropriate + * so long as all callers are assured to have free tx slots + * (such as after niu_init_hw). 
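niu_set_rx_mode() above condenses each multicast address into one bit of a 256-bit hash table spread over sixteen 16-bit registers: the top eight bits of a little-endian CRC-32 select the register (crc >> 4) and the bit within it (15 - (crc & 0xf)). A stand-alone sketch of that bucket math, not part of the patch; the local crc32_le() stands in for the kernel's ether_crc_le() and the multicast address is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

/* Bit-by-bit little-endian CRC-32: seed ~0, reflected polynomial
 * 0xedb88320, no final inversion, matching ether_crc_le(). */
static uint32_t crc32_le(int len, const uint8_t *data)
{
        uint32_t crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= data[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
}

int main(void)
{
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint16_t hash[16] = { 0 };
        uint32_t crc = crc32_le(6, mc) >> 24;       /* keep the top 8 bits */

        hash[crc >> 4] |= 1 << (15 - (crc & 0xf));  /* same bucket math as the driver */
        printf("register %u, bit %u, value 0x%04x\n",
               crc >> 4, 15 - (crc & 0xf), hash[crc >> 4]);
        return 0;
}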
+ */ + netif_wake_queue(np->dev); + + niu_enable_napi(np); + + niu_enable_interrupts(np, 1); +} + +static void niu_reset_task(struct work_struct *work) +{ + struct niu *np = container_of(work, struct niu, reset_task); + unsigned long flags; + int err; + + spin_lock_irqsave(&np->lock, flags); + if (!netif_running(np->dev)) { + spin_unlock_irqrestore(&np->lock, flags); + return; + } + + spin_unlock_irqrestore(&np->lock, flags); + + del_timer_sync(&np->timer); + + niu_netif_stop(np); + + spin_lock_irqsave(&np->lock, flags); + + niu_stop_hw(np); + + err = niu_init_hw(np); + if (!err) { + np->timer.expires = jiffies + HZ; + add_timer(&np->timer); + niu_netif_start(np); + } + + spin_unlock_irqrestore(&np->lock, flags); +} + +static void niu_tx_timeout(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", + dev->name); + + schedule_work(&np->reset_task); +} + +static void niu_set_txd(struct tx_ring_info *rp, int index, + u64 mapping, u64 len, u64 mark, + u64 n_frags) +{ + __le64 *desc = &rp->descr[index]; + + *desc = cpu_to_le64(mark | + (n_frags << TX_DESC_NUM_PTR_SHIFT) | + (len << TX_DESC_TR_LEN_SHIFT) | + (mapping & TX_DESC_SAD)); +} + +static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, + u64 pad_bytes, u64 len) +{ + u16 eth_proto, eth_proto_inner; + u64 csum_bits, l3off, ihl, ret; + u8 ip_proto; + int ipv6; + + eth_proto = be16_to_cpu(ehdr->h_proto); + eth_proto_inner = eth_proto; + if (eth_proto == ETH_P_8021Q) { + struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; + __be16 val = vp->h_vlan_encapsulated_proto; + + eth_proto_inner = be16_to_cpu(val); + } + + ipv6 = ihl = 0; + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + ihl = ip_hdr(skb)->ihl; + break; + case __constant_htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + ihl = (40 >> 2); + ipv6 = 1; + break; + default: + ip_proto = ihl = 0; + break; + } + + csum_bits = TXHDR_CSUM_NONE; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u64 start, stuff; + + csum_bits = (ip_proto == IPPROTO_TCP ? + TXHDR_CSUM_TCP : + (ip_proto == IPPROTO_UDP ? + TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); + + start = skb_transport_offset(skb) - + (pad_bytes + sizeof(struct tx_pkt_hdr)); + stuff = start + skb->csum_offset; + + csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; + csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; + } + + l3off = skb_network_offset(skb) - + (pad_bytes + sizeof(struct tx_pkt_hdr)); + + ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | + (len << TXHDR_LEN_SHIFT) | + ((l3off / 2) << TXHDR_L3START_SHIFT) | + (ihl << TXHDR_IHL_SHIFT) | + ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | + ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | + (ipv6 ? TXHDR_IP_VER : 0) | + csum_bits); + + return ret; +} + +static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb) +{ + return &np->tx_rings[0]; +} + +static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + unsigned long align, headroom; + struct tx_ring_info *rp; + struct tx_pkt_hdr *tp; + unsigned int len, nfg; + struct ethhdr *ehdr; + int prod, i, tlen; + u64 mapping, mrk; + + rp = tx_ring_select(np, skb); + + if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + dev_err(np->device, PFX "%s: BUG! 
Tx ring full when " + "queue awake!\n", dev->name); + rp->tx_errors++; + return NETDEV_TX_BUSY; + } + + if (skb->len < ETH_ZLEN) { + unsigned int pad_bytes = ETH_ZLEN - skb->len; + + if (skb_pad(skb, pad_bytes)) + goto out; + skb_put(skb, pad_bytes); + } + + len = sizeof(struct tx_pkt_hdr) + 15; + if (skb_headroom(skb) < len) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, len); + if (!skb_new) { + rp->tx_errors++; + goto out_drop; + } + kfree_skb(skb); + skb = skb_new; + } + + align = ((unsigned long) skb->data & (16 - 1)); + headroom = align + sizeof(struct tx_pkt_hdr); + + ehdr = (struct ethhdr *) skb->data; + tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); + + len = skb->len - sizeof(struct tx_pkt_hdr); + tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); + tp->resv = 0; + + len = skb_headlen(skb); + mapping = np->ops->map_single(np->device, skb->data, + len, DMA_TO_DEVICE); + + prod = rp->prod; + + rp->tx_buffs[prod].skb = skb; + rp->tx_buffs[prod].mapping = mapping; + + mrk = TX_DESC_SOP; + if (++rp->mark_counter == rp->mark_freq) { + rp->mark_counter = 0; + mrk |= TX_DESC_MARK; + rp->mark_pending++; + } + + tlen = len; + nfg = skb_shinfo(skb)->nr_frags; + while (tlen > 0) { + tlen -= MAX_TX_DESC_LEN; + nfg++; + } + + while (len > 0) { + unsigned int this_len = len; + + if (this_len > MAX_TX_DESC_LEN) + this_len = MAX_TX_DESC_LEN; + + niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); + mrk = nfg = 0; + + prod = NEXT_TX(rp, prod); + mapping += this_len; + len -= this_len; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + mapping = np->ops->map_page(np->device, frag->page, + frag->page_offset, len, + DMA_TO_DEVICE); + + rp->tx_buffs[prod].skb = NULL; + rp->tx_buffs[prod].mapping = mapping; + + niu_set_txd(rp, prod, mapping, len, 0, 0); + + prod = NEXT_TX(rp, prod); + } + + if (prod < rp->prod) + rp->wrap_bit ^= TX_RING_KICK_WRAP; + rp->prod = prod; + + nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); + + if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { + netif_stop_queue(dev); + if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) + netif_wake_queue(dev); + } + + dev->trans_start = jiffies; + +out: + return NETDEV_TX_OK; + +out_drop: + rp->tx_errors++; + kfree_skb(skb); + goto out; +} + +static int niu_change_mtu(struct net_device *dev, int new_mtu) +{ + struct niu *np = netdev_priv(dev); + int err, orig_jumbo, new_jumbo; + + if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) + return -EINVAL; + + orig_jumbo = (dev->mtu > ETH_DATA_LEN); + new_jumbo = (new_mtu > ETH_DATA_LEN); + + dev->mtu = new_mtu; + + if (!netif_running(dev) || + (orig_jumbo == new_jumbo)) + return 0; + + niu_full_shutdown(np, dev); + + niu_free_channels(np); + + niu_enable_napi(np); + + err = niu_alloc_channels(np); + if (err) + return err; + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (!err) { + netif_start_queue(dev); + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + } + + return err; +} + +static void niu_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct niu *np = netdev_priv(dev); + struct niu_vpd *vpd = &np->vpd; + + 
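In niu_start_xmit() above, the linear part of the packet is carved into descriptor-sized pieces; only the first descriptor carries the SOP/mark flags and the total pointer count, and the remaining pieces carry zeros. A stand-alone sketch of the carving loop, not part of the patch; MAX_DESC_LEN and the 9000-byte length are illustrative values, not the driver's MAX_TX_DESC_LEN.

#include <stdio.h>

#define MAX_DESC_LEN 4096               /* illustrative; not the driver's constant */

int main(void)
{
        unsigned int len = 9000, offset = 0, first = 1;

        while (len > 0) {
                unsigned int this_len = (len > MAX_DESC_LEN) ? MAX_DESC_LEN : len;

                /* only the first piece carries SOP/mark and the pointer count */
                printf("descriptor: off=%u len=%u flags=%s\n",
                       offset, this_len, first ? "SOP" : "-");
                first = 0;
                offset += this_len;
                len -= this_len;
        }
        return 0;
}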
strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + sprintf(info->fw_version, "%d.%d", + vpd->fcode_major, vpd->fcode_minor); + if (np->parent->plat_type != PLAT_TYPE_NIU) + strcpy(info->bus_info, pci_name(np->pdev)); +} + +static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct niu *np = netdev_priv(dev); + struct niu_link_config *lp; + + lp = &np->link_config; + + memset(cmd, 0, sizeof(*cmd)); + cmd->phy_address = np->phy_addr; + cmd->supported = lp->supported; + cmd->advertising = lp->advertising; + cmd->autoneg = lp->autoneg; + cmd->speed = lp->active_speed; + cmd->duplex = lp->active_duplex; + + return 0; +} + +static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + return -EINVAL; +} + +static u32 niu_get_msglevel(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + return np->msg_enable; +} + +static void niu_set_msglevel(struct net_device *dev, u32 value) +{ + struct niu *np = netdev_priv(dev); + np->msg_enable = value; +} + +static int niu_get_eeprom_len(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + return np->eeprom_len; +} + +static int niu_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct niu *np = netdev_priv(dev); + u32 offset, len, val; + + offset = eeprom->offset; + len = eeprom->len; + + if (offset + len < offset) + return -EINVAL; + if (offset >= np->eeprom_len) + return -EINVAL; + if (offset + len > np->eeprom_len) + len = eeprom->len = np->eeprom_len - offset; + + if (offset & 3) { + u32 b_offset, b_count; + + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) + b_count = len; + + val = nr64(ESPC_NCR((offset - b_offset) / 4)); + memcpy(data, ((char *)&val) + b_offset, b_count); + data += b_count; + len -= b_count; + offset += b_count; + } + while (len >= 4) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, 4); + data += 4; + len -= 4; + offset += 4; + } + if (len) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, len); + } + return 0; +} + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_xmac_stat_keys[] = { + { "tx_frames" }, + { "tx_bytes" }, + { "tx_fifo_errors" }, + { "tx_overflow_errors" }, + { "tx_max_pkt_size_errors" }, + { "tx_underflow_errors" }, + { "rx_local_faults" }, + { "rx_remote_faults" }, + { "rx_link_faults" }, + { "rx_align_errors" }, + { "rx_frags" }, + { "rx_mcasts" }, + { "rx_bcasts" }, + { "rx_hist_cnt1" }, + { "rx_hist_cnt2" }, + { "rx_hist_cnt3" }, + { "rx_hist_cnt4" }, + { "rx_hist_cnt5" }, + { "rx_hist_cnt6" }, + { "rx_hist_cnt7" }, + { "rx_octets" }, + { "rx_code_violations" }, + { "rx_len_errors" }, + { "rx_crc_errors" }, + { "rx_underflows" }, + { "rx_overflows" }, + { "pause_off_state" }, + { "pause_on_state" }, + { "pause_received" }, +}; + +#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_bmac_stat_keys[] = { + { "tx_underflow_errors" }, + { "tx_max_pkt_size_errors" }, + { "tx_bytes" }, + { "tx_frames" }, + { "rx_overflows" }, + { "rx_frames" }, + { "rx_align_errors" }, + { "rx_crc_errors" }, + { "rx_len_errors" }, + { "pause_off_state" }, + { "pause_on_state" }, + { "pause_received" }, +}; + +#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_rxchan_stat_keys[] = { + { "rx_channel" }, + { "rx_packets" }, + { "rx_bytes" }, + { "rx_dropped" }, + { "rx_errors" }, +}; 
+ +#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_txchan_stat_keys[] = { + { "tx_channel" }, + { "tx_packets" }, + { "tx_bytes" }, + { "tx_errors" }, +}; + +#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) + +static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct niu *np = netdev_priv(dev); + int i; + + if (stringset != ETH_SS_STATS) + return; + + if (np->flags & NIU_FLAGS_XMAC) { + memcpy(data, niu_xmac_stat_keys, + sizeof(niu_xmac_stat_keys)); + data += sizeof(niu_xmac_stat_keys); + } else { + memcpy(data, niu_bmac_stat_keys, + sizeof(niu_bmac_stat_keys)); + data += sizeof(niu_bmac_stat_keys); + } + for (i = 0; i < np->num_rx_rings; i++) { + memcpy(data, niu_rxchan_stat_keys, + sizeof(niu_rxchan_stat_keys)); + data += sizeof(niu_rxchan_stat_keys); + } + for (i = 0; i < np->num_tx_rings; i++) { + memcpy(data, niu_txchan_stat_keys, + sizeof(niu_txchan_stat_keys)); + data += sizeof(niu_txchan_stat_keys); + } +} + +static int niu_get_stats_count(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + return ((np->flags & NIU_FLAGS_XMAC ? + NUM_XMAC_STAT_KEYS : + NUM_BMAC_STAT_KEYS) + + (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + + (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); +} + +static void niu_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct niu *np = netdev_priv(dev); + int i; + + niu_sync_mac_stats(np); + if (np->flags & NIU_FLAGS_XMAC) { + memcpy(data, &np->mac_stats.xmac, + sizeof(struct niu_xmac_stats)); + data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); + } else { + memcpy(data, &np->mac_stats.bmac, + sizeof(struct niu_bmac_stats)); + data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); + } + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + data[0] = rp->rx_channel; + data[1] = rp->rx_packets; + data[2] = rp->rx_bytes; + data[3] = rp->rx_dropped; + data[4] = rp->rx_errors; + data += 5; + } + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + data[0] = rp->tx_channel; + data[1] = rp->tx_packets; + data[2] = rp->tx_bytes; + data[3] = rp->tx_errors; + data += 4; + } +} + +static u64 niu_led_state_save(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return nr64_mac(XMAC_CONFIG); + else + return nr64_mac(BMAC_XIF_CONFIG); +} + +static void niu_led_state_restore(struct niu *np, u64 val) +{ + if (np->flags & NIU_FLAGS_XMAC) + nw64_mac(XMAC_CONFIG, val); + else + nw64_mac(BMAC_XIF_CONFIG, val); +} + +static void niu_force_led(struct niu *np, int on) +{ + u64 val, reg, bit; + + if (np->flags & NIU_FLAGS_XMAC) { + reg = XMAC_CONFIG; + bit = XMAC_CONFIG_FORCE_LED_ON; + } else { + reg = BMAC_XIF_CONFIG; + bit = BMAC_XIF_CONFIG_LINK_LED; + } + + val = nr64_mac(reg); + if (on) + val |= bit; + else + val &= ~bit; + nw64_mac(reg, val); +} + +static int niu_phys_id(struct net_device *dev, u32 data) +{ + struct niu *np = netdev_priv(dev); + u64 orig_led_state; + int i; + + if (!netif_running(dev)) + return -EAGAIN; + + if (data == 0) + data = 2; + + orig_led_state = niu_led_state_save(np); + for (i = 0; i < (data * 2); i++) { + int on = ((i % 2) == 0); + + niu_force_led(np, on); + + if (msleep_interruptible(500)) + break; + } + niu_led_state_restore(np, orig_led_state); + + return 0; +} + +static const struct ethtool_ops niu_ethtool_ops = { + .get_drvinfo = niu_get_drvinfo, + .get_link = ethtool_op_get_link, + 
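The ethtool statistics block above is laid out as the MAC keys (XMAC or BMAC) followed by five u64 counters per RX ring and four per TX ring, which is exactly what niu_get_stats_count() adds up. A worked example of that arithmetic, not part of the patch; the key count comes from the tables above, the ring counts are assumed.

#include <stdio.h>

int main(void)
{
        int num_xmac_keys = 29;                     /* entries in niu_xmac_stat_keys above */
        int num_rx_rings = 8, num_tx_rings = 8;     /* assumed ring counts */
        int count = num_xmac_keys + num_rx_rings * 5 + num_tx_rings * 4;

        printf("ethtool stat slots: %d\n", count);  /* 29 + 40 + 32 = 101 */
        return 0;
}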
.get_msglevel = niu_get_msglevel, + .set_msglevel = niu_set_msglevel, + .get_eeprom_len = niu_get_eeprom_len, + .get_eeprom = niu_get_eeprom, + .get_settings = niu_get_settings, + .set_settings = niu_set_settings, + .get_strings = niu_get_strings, + .get_stats_count = niu_get_stats_count, + .get_ethtool_stats = niu_get_ethtool_stats, + .phys_id = niu_phys_id, +}; + +static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, + int ldg, int ldn) +{ + if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) + return -EINVAL; + if (ldn < 0 || ldn > LDN_MAX) + return -EINVAL; + + parent->ldg_map[ldn] = ldg; + + if (np->parent->plat_type == PLAT_TYPE_NIU) { + /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by + * the firmware, and we're not supposed to change them. + * Validate the mapping, because if it's wrong we probably + * won't get any interrupts and that's painful to debug. + */ + if (nr64(LDG_NUM(ldn)) != ldg) { + dev_err(np->device, PFX "Port %u, mis-matched " + "LDG assignment " + "for ldn %d, should be %d is %llu\n", + np->port, ldn, ldg, + (unsigned long long) nr64(LDG_NUM(ldn))); + return -EINVAL; + } + } else + nw64(LDG_NUM(ldn), ldg); + + return 0; +} + +static int niu_set_ldg_timer_res(struct niu *np, int res) +{ + if (res < 0 || res > LDG_TIMER_RES_VAL) + return -EINVAL; + + + nw64(LDG_TIMER_RES, res); + + return 0; +} + +static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) +{ + if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || + (func < 0 || func > 3) || + (vector < 0 || vector > 0x1f)) + return -EINVAL; + + nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); + + return 0; +} + +static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) +{ + u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | + (addr << ESPC_PIO_STAT_ADDR_SHIFT)); + int limit; + + if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) + return -EINVAL; + + frame = frame_base; + nw64(ESPC_PIO_STAT, frame); + limit = 64; + do { + udelay(5); + frame = nr64(ESPC_PIO_STAT); + if (frame & ESPC_PIO_STAT_READ_END) + break; + } while (limit--); + if (!(frame & ESPC_PIO_STAT_READ_END)) { + dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", + (unsigned long long) frame); + return -ENODEV; + } + + frame = frame_base; + nw64(ESPC_PIO_STAT, frame); + limit = 64; + do { + udelay(5); + frame = nr64(ESPC_PIO_STAT); + if (frame & ESPC_PIO_STAT_READ_END) + break; + } while (limit--); + if (!(frame & ESPC_PIO_STAT_READ_END)) { + dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", + (unsigned long long) frame); + return -ENODEV; + } + + frame = nr64(ESPC_PIO_STAT); + return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; +} + +static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) +{ + int err = niu_pci_eeprom_read(np, off); + u16 val; + + if (err < 0) + return err; + val = (err << 8); + err = niu_pci_eeprom_read(np, off + 1); + if (err < 0) + return err; + val |= (err & 0xff); + + return val; +} + +static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) +{ + int err = niu_pci_eeprom_read(np, off); + u16 val; + + if (err < 0) + return err; + + val = (err & 0xff); + err = niu_pci_eeprom_read(np, off + 1); + if (err < 0) + return err; + + val |= (err & 0xff) << 8; + + return val; +} + +static int __devinit niu_pci_vpd_get_propname(struct niu *np, + u32 off, + char *namebuf, + int namebuf_len) +{ + int i; + + for (i = 0; i < namebuf_len; i++) { + int err = niu_pci_eeprom_read(np, off + i); + if (err < 0) + return err; + *namebuf++ = 
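niu_pci_eeprom_read16() and niu_pci_eeprom_read16_swp() above assemble a 16-bit value from two single-byte EEPROM reads, differing only in which byte lands in the high half; the VPD scanner uses the byte-swapped variant for little-endian length fields. A stand-alone sketch over an in-memory image, not part of the patch; the helper names are ours and the image stands in for the ESPC_PIO_STAT access.

#include <stdint.h>
#include <stdio.h>

static int eeprom_read_byte(const uint8_t *img, uint32_t off)
{
        return img[off];                        /* the driver reads ESPC_PIO_STAT instead */
}

static int read16(const uint8_t *img, uint32_t off)       /* first byte = high half */
{
        return (eeprom_read_byte(img, off) << 8) | eeprom_read_byte(img, off + 1);
}

static int read16_swp(const uint8_t *img, uint32_t off)   /* first byte = low half */
{
        return eeprom_read_byte(img, off) | (eeprom_read_byte(img, off + 1) << 8);
}

int main(void)
{
        const uint8_t img[2] = { 0x55, 0xaa };

        printf("%04x %04x\n", read16(img, 0), read16_swp(img, 0));  /* 55aa aa55 */
        return 0;
}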
err; + if (!err) + break; + } + if (i >= namebuf_len) + return -EINVAL; + + return i + 1; +} + +static void __devinit niu_vpd_parse_version(struct niu *np) +{ + struct niu_vpd *vpd = &np->vpd; + int len = strlen(vpd->version) + 1; + const char *s = vpd->version; + int i; + + for (i = 0; i < len - 5; i++) { + if (!strncmp(s + i, "FCode ", 5)) + break; + } + if (i >= len - 5) + return; + + s += i + 5; + sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); + + niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", + vpd->fcode_major, vpd->fcode_minor); + if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || + (vpd->fcode_major == NIU_VPD_MIN_MAJOR && + vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) + np->flags |= NIU_FLAGS_VPD_VALID; +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static int __devinit niu_pci_vpd_scan_props(struct niu *np, + u32 start, u32 end) +{ + unsigned int found_mask = 0; +#define FOUND_MASK_MODEL 0x00000001 +#define FOUND_MASK_BMODEL 0x00000002 +#define FOUND_MASK_VERS 0x00000004 +#define FOUND_MASK_MAC 0x00000008 +#define FOUND_MASK_NMAC 0x00000010 +#define FOUND_MASK_PHY 0x00000020 +#define FOUND_MASK_ALL 0x0000003f + + niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", + start, end); + while (start < end) { + int len, err, instance, type, prop_len; + char namebuf[64]; + u8 *prop_buf; + int max_len; + + if (found_mask == FOUND_MASK_ALL) { + niu_vpd_parse_version(np); + return 1; + } + + err = niu_pci_eeprom_read(np, start + 2); + if (err < 0) + return err; + len = err; + start += 3; + + instance = niu_pci_eeprom_read(np, start); + type = niu_pci_eeprom_read(np, start + 3); + prop_len = niu_pci_eeprom_read(np, start + 4); + err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); + if (err < 0) + return err; + + prop_buf = NULL; + max_len = 0; + if (!strcmp(namebuf, "model")) { + prop_buf = np->vpd.model; + max_len = NIU_VPD_MODEL_MAX; + found_mask |= FOUND_MASK_MODEL; + } else if (!strcmp(namebuf, "board-model")) { + prop_buf = np->vpd.board_model; + max_len = NIU_VPD_BD_MODEL_MAX; + found_mask |= FOUND_MASK_BMODEL; + } else if (!strcmp(namebuf, "version")) { + prop_buf = np->vpd.version; + max_len = NIU_VPD_VERSION_MAX; + found_mask |= FOUND_MASK_VERS; + } else if (!strcmp(namebuf, "local-mac-address")) { + prop_buf = np->vpd.local_mac; + max_len = ETH_ALEN; + found_mask |= FOUND_MASK_MAC; + } else if (!strcmp(namebuf, "num-mac-addresses")) { + prop_buf = &np->vpd.mac_num; + max_len = 1; + found_mask |= FOUND_MASK_NMAC; + } else if (!strcmp(namebuf, "phy-type")) { + prop_buf = np->vpd.phy_type; + max_len = NIU_VPD_PHY_TYPE_MAX; + found_mask |= FOUND_MASK_PHY; + } + + if (max_len && prop_len > max_len) { + dev_err(np->device, PFX "Property '%s' length (%d) is " + "too long.\n", namebuf, prop_len); + return -EINVAL; + } + + if (prop_buf) { + u32 off = start + 5 + err; + int i; + + niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " + "len[%d]\n", namebuf, prop_len); + for (i = 0; i < prop_len; i++) + *prop_buf++ = niu_pci_eeprom_read(np, off + i); + } + + start += len; + } + + return 0; +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) +{ + u32 offset; + int err; + + err = niu_pci_eeprom_read16_swp(np, start + 1); + if (err < 0) + return; + + offset = err + 3; + + while (start + offset < ESPC_EEPROM_SIZE) { + u32 here = start + offset; + u32 end; + + err = niu_pci_eeprom_read(np, here); + if (err != 0x90) + return; + + err = niu_pci_eeprom_read16_swp(np, here + 1); + if (err < 0) + return; + + here = start + offset + 3; + 
end = start + offset + err; + + offset += err; + + err = niu_pci_vpd_scan_props(np, here, end); + if (err < 0 || err == 1) + return; + } +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static u32 __devinit niu_pci_vpd_offset(struct niu *np) +{ + u32 start = 0, end = ESPC_EEPROM_SIZE, ret; + int err; + + while (start < end) { + ret = start; + + /* ROM header signature? */ + err = niu_pci_eeprom_read16(np, start + 0); + if (err != 0x55aa) + return 0; + + /* Apply offset to PCI data structure. */ + err = niu_pci_eeprom_read16(np, start + 23); + if (err < 0) + return 0; + start += err; + + /* Check for "PCIR" signature. */ + err = niu_pci_eeprom_read16(np, start + 0); + if (err != 0x5043) + return 0; + err = niu_pci_eeprom_read16(np, start + 2); + if (err != 0x4952) + return 0; + + /* Check for OBP image type. */ + err = niu_pci_eeprom_read(np, start + 20); + if (err < 0) + return 0; + if (err != 0x01) { + err = niu_pci_eeprom_read(np, ret + 2); + if (err < 0) + return 0; + + start = ret + (err * 512); + continue; + } + + err = niu_pci_eeprom_read16_swp(np, start + 8); + if (err < 0) + return err; + ret += err; + + err = niu_pci_eeprom_read(np, ret + 0); + if (err != 0x82) + return 0; + + return ret; + } + + return 0; +} + +static int __devinit niu_phy_type_prop_decode(struct niu *np, + const char *phy_prop) +{ + if (!strcmp(phy_prop, "mif")) { + /* 1G copper, MII */ + np->flags &= ~(NIU_FLAGS_FIBER | + NIU_FLAGS_10G); + np->mac_xcvr = MAC_XCVR_MII; + } else if (!strcmp(phy_prop, "xgf")) { + /* 10G fiber, XPCS */ + np->flags |= (NIU_FLAGS_10G | + NIU_FLAGS_FIBER); + np->mac_xcvr = MAC_XCVR_XPCS; + } else if (!strcmp(phy_prop, "pcs")) { + /* 1G fiber, PCS */ + np->flags &= ~NIU_FLAGS_10G; + np->flags |= NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_PCS; + } else if (!strcmp(phy_prop, "xgc")) { + /* 10G copper, XPCS */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_XPCS; + } else { + return -EINVAL; + } + return 0; +} + +static void __devinit niu_pci_vpd_validate(struct niu *np) +{ + struct net_device *dev = np->dev; + struct niu_vpd *vpd = &np->vpd; + u8 val8; + + if (!is_valid_ether_addr(&vpd->local_mac[0])) { + dev_err(np->device, PFX "VPD MAC invalid, " + "falling back to SPROM.\n"); + + np->flags &= ~NIU_FLAGS_VPD_VALID; + return; + } + + if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { + dev_err(np->device, PFX "Illegal phy string [%s].\n", + np->vpd.phy_type); + dev_err(np->device, PFX "Falling back to SPROM.\n"); + np->flags &= ~NIU_FLAGS_VPD_VALID; + return; + } + + memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); + + val8 = dev->perm_addr[5]; + dev->perm_addr[5] += np->port; + if (dev->perm_addr[5] < val8) + dev->perm_addr[4]++; + + memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); +} + +static int __devinit niu_pci_probe_sprom(struct niu *np) +{ + struct net_device *dev = np->dev; + int len, i; + u64 val, sum; + u8 val8; + + val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); + val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; + len = val / 4; + + np->eeprom_len = len; + + niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); + + sum = 0; + for (i = 0; i < len; i++) { + val = nr64(ESPC_NCR(i)); + sum += (val >> 0) & 0xff; + sum += (val >> 8) & 0xff; + sum += (val >> 16) & 0xff; + sum += (val >> 24) & 0xff; + } + niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); + if ((sum & 0xff) != 0xab) { + dev_err(np->device, PFX "Bad SPROM checksum " + "(%x, should be 0xab)\n", (int) (sum & 0xff)); + return -EINVAL; + } + + val = 
nr64(ESPC_PHY_TYPE); + switch (np->port) { + case 0: + val = (val & ESPC_PHY_TYPE_PORT0) >> + ESPC_PHY_TYPE_PORT0_SHIFT; + break; + case 1: + val = (val & ESPC_PHY_TYPE_PORT1) >> + ESPC_PHY_TYPE_PORT1_SHIFT; + break; + case 2: + val = (val & ESPC_PHY_TYPE_PORT2) >> + ESPC_PHY_TYPE_PORT2_SHIFT; + break; + case 3: + val = (val & ESPC_PHY_TYPE_PORT3) >> + ESPC_PHY_TYPE_PORT3_SHIFT; + break; + default: + dev_err(np->device, PFX "Bogus port number %u\n", + np->port); + return -EINVAL; + } + niudbg(PROBE, "SPROM: PHY type %llx\n", (unsigned long long) val); + + switch (val) { + case ESPC_PHY_TYPE_1G_COPPER: + /* 1G copper, MII */ + np->flags &= ~(NIU_FLAGS_FIBER | + NIU_FLAGS_10G); + np->mac_xcvr = MAC_XCVR_MII; + break; + + case ESPC_PHY_TYPE_1G_FIBER: + /* 1G fiber, PCS */ + np->flags &= ~NIU_FLAGS_10G; + np->flags |= NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_PCS; + break; + + case ESPC_PHY_TYPE_10G_COPPER: + /* 10G copper, XPCS */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_XPCS; + break; + + case ESPC_PHY_TYPE_10G_FIBER: + /* 10G fiber, XPCS */ + np->flags |= (NIU_FLAGS_10G | + NIU_FLAGS_FIBER); + np->mac_xcvr = MAC_XCVR_XPCS; + break; + + default: + dev_err(np->device, PFX "Bogus SPROM phy type %llu\n", + (unsigned long long) val); + return -EINVAL; + } + + val = nr64(ESPC_MAC_ADDR0); + niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", + (unsigned long long) val); + dev->perm_addr[0] = (val >> 0) & 0xff; + dev->perm_addr[1] = (val >> 8) & 0xff; + dev->perm_addr[2] = (val >> 16) & 0xff; + dev->perm_addr[3] = (val >> 24) & 0xff; + + val = nr64(ESPC_MAC_ADDR1); + niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", + (unsigned long long) val); + dev->perm_addr[4] = (val >> 0) & 0xff; + dev->perm_addr[5] = (val >> 8) & 0xff; + + if (!is_valid_ether_addr(&dev->perm_addr[0])) { + dev_err(np->device, PFX "SPROM MAC address invalid\n"); + dev_err(np->device, PFX "[ \n"); + for (i = 0; i < 6; i++) + printk("%02x ", dev->perm_addr[i]); + printk("]\n"); + return -EINVAL; + } + + val8 = dev->perm_addr[5]; + dev->perm_addr[5] += np->port; + if (dev->perm_addr[5] < val8) + dev->perm_addr[4]++; + + memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + + val = nr64(ESPC_MOD_STR_LEN); + niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", + (unsigned long long) val); + if (val > 8 * 4) + return -EINVAL; + + for (i = 0; i < val; i += 4) { + u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); + + np->vpd.model[i + 3] = (tmp >> 0) & 0xff; + np->vpd.model[i + 2] = (tmp >> 8) & 0xff; + np->vpd.model[i + 1] = (tmp >> 16) & 0xff; + np->vpd.model[i + 0] = (tmp >> 24) & 0xff; + } + np->vpd.model[val] = '\0'; + + val = nr64(ESPC_BD_MOD_STR_LEN); + niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", + (unsigned long long) val); + if (val > 4 * 4) + return -EINVAL; + + for (i = 0; i < val; i += 4) { + u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); + + np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; + np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; + np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; + np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; + } + np->vpd.board_model[val] = '\0'; + + np->vpd.mac_num = + nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; + niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", + np->vpd.mac_num); + + return 0; +} + +static int __devinit niu_get_and_validate_port(struct niu *np) +{ + struct niu_parent *parent = np->parent; + + if (np->port <= 1) + np->flags |= NIU_FLAGS_XMAC; + + if (!parent->num_ports) { + if (parent->plat_type == PLAT_TYPE_NIU) { + parent->num_ports = 2; + } else { + 
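Both niu_pci_vpd_validate() and niu_pci_probe_sprom() above derive a port's address from the base MAC by adding the port number to the last byte and carrying any overflow into the previous byte. A stand-alone sketch of that carry, not part of the patch; the helper name and example address are made up.

#include <stdint.h>
#include <stdio.h>

static void derive_port_mac(uint8_t mac[6], unsigned int port)
{
        uint8_t before = mac[5];

        mac[5] += port;
        if (mac[5] < before)    /* wrapped past 0xff */
                mac[4]++;
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x14, 0x4f, 0x6c, 0x11, 0xfe };    /* made-up base MAC */

        derive_port_mac(mac, 3);
        printf("...:%02x:%02x\n", mac[4], mac[5]);                  /* ...:12:01 */
        return 0;
}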
parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & + ESPC_NUM_PORTS_MACS_VAL; + + if (!parent->num_ports) + parent->num_ports = 4; + } + } + + niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n", + np->port, parent->num_ports); + if (np->port >= parent->num_ports) + return -ENODEV; + + return 0; +} + +static int __devinit phy_record(struct niu_parent *parent, + struct phy_probe_info *p, + int dev_id_1, int dev_id_2, u8 phy_port, + int type) +{ + u32 id = (dev_id_1 << 16) | dev_id_2; + u8 idx; + + if (dev_id_1 < 0 || dev_id_2 < 0) + return 0; + if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { + if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) + return 0; + } else { + if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) + return 0; + } + + pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", + parent->index, id, + (type == PHY_TYPE_PMA_PMD ? + "PMA/PMD" : + (type == PHY_TYPE_PCS ? + "PCS" : "MII")), + phy_port); + + if (p->cur[type] >= NIU_MAX_PORTS) { + printk(KERN_ERR PFX "Too many PHY ports.\n"); + return -EINVAL; + } + idx = p->cur[type]; + p->phy_id[type][idx] = id; + p->phy_port[type][idx] = phy_port; + p->cur[type] = idx + 1; + return 0; +} + +static int __devinit port_has_10g(struct phy_probe_info *p, int port) +{ + int i; + + for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { + if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) + return 1; + } + for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { + if (p->phy_port[PHY_TYPE_PCS][i] == port) + return 1; + } + + return 0; +} + +static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) +{ + int port, cnt; + + cnt = 0; + *lowest = 32; + for (port = 8; port < 32; port++) { + if (port_has_10g(p, port)) { + if (!cnt) + *lowest = port; + cnt++; + } + } + + return cnt; +} + +static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) +{ + *lowest = 32; + if (p->cur[PHY_TYPE_MII]) + *lowest = p->phy_port[PHY_TYPE_MII][0]; + + return p->cur[PHY_TYPE_MII]; +} + +static void __devinit niu_n2_divide_channels(struct niu_parent *parent) +{ + int num_ports = parent->num_ports; + int i; + + for (i = 0; i < num_ports; i++) { + parent->rxchan_per_port[i] = (16 / num_ports); + parent->txchan_per_port[i] = (16 / num_ports); + + pr_info(PFX "niu%d: Port %u [%u RX chans] " + "[%u TX chans]\n", + parent->index, i, + parent->rxchan_per_port[i], + parent->txchan_per_port[i]); + } +} + +static void __devinit niu_divide_channels(struct niu_parent *parent, + int num_10g, int num_1g) +{ + int num_ports = parent->num_ports; + int rx_chans_per_10g, rx_chans_per_1g; + int tx_chans_per_10g, tx_chans_per_1g; + int i, tot_rx, tot_tx; + + if (!num_10g || !num_1g) { + rx_chans_per_10g = rx_chans_per_1g = + (NIU_NUM_RXCHAN / num_ports); + tx_chans_per_10g = tx_chans_per_1g = + (NIU_NUM_TXCHAN / num_ports); + } else { + rx_chans_per_1g = NIU_NUM_RXCHAN / 8; + rx_chans_per_10g = (NIU_NUM_RXCHAN - + (rx_chans_per_1g * num_1g)) / + num_10g; + + tx_chans_per_1g = NIU_NUM_TXCHAN / 6; + tx_chans_per_10g = (NIU_NUM_TXCHAN - + (tx_chans_per_1g * num_1g)) / + num_10g; + } + + tot_rx = tot_tx = 0; + for (i = 0; i < num_ports; i++) { + int type = phy_decode(parent->port_phy, i); + + if (type == PORT_TYPE_10G) { + parent->rxchan_per_port[i] = rx_chans_per_10g; + parent->txchan_per_port[i] = tx_chans_per_10g; + } else { + parent->rxchan_per_port[i] = rx_chans_per_1g; + parent->txchan_per_port[i] = tx_chans_per_1g; + } + pr_info(PFX "niu%d: Port %u [%u RX chans] " + "[%u TX chans]\n", + parent->index, i, + parent->rxchan_per_port[i], + 
parent->txchan_per_port[i]); + tot_rx += parent->rxchan_per_port[i]; + tot_tx += parent->txchan_per_port[i]; + } + + if (tot_rx > NIU_NUM_RXCHAN) { + printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " + "resetting to one per port.\n", + parent->index, tot_rx); + for (i = 0; i < num_ports; i++) + parent->rxchan_per_port[i] = 1; + } + if (tot_tx > NIU_NUM_TXCHAN) { + printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " + "resetting to one per port.\n", + parent->index, tot_tx); + for (i = 0; i < num_ports; i++) + parent->txchan_per_port[i] = 1; + } + if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { + printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " + "RX[%d] TX[%d]\n", + parent->index, tot_rx, tot_tx); + } +} + +static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, + int num_10g, int num_1g) +{ + int i, num_ports = parent->num_ports; + int rdc_group, rdc_groups_per_port; + int rdc_channel_base; + + rdc_group = 0; + rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; + + rdc_channel_base = 0; + + for (i = 0; i < num_ports; i++) { + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; + int grp, num_channels = parent->rxchan_per_port[i]; + int this_channel_offset; + + tp->first_table_num = rdc_group; + tp->num_tables = rdc_groups_per_port; + this_channel_offset = 0; + for (grp = 0; grp < tp->num_tables; grp++) { + struct rdc_table *rt = &tp->tables[grp]; + int slot; + + pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", + parent->index, i, tp->first_table_num + grp); + for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { + rt->rxdma_channel[slot] = + rdc_channel_base + this_channel_offset; + + printk("%d ", rt->rxdma_channel[slot]); + + if (++this_channel_offset == num_channels) + this_channel_offset = 0; + } + printk("]\n"); + } + + parent->rdc_default[i] = rdc_channel_base; + + rdc_channel_base += num_channels; + rdc_group += rdc_groups_per_port; + } +} + +static int __devinit fill_phy_probe_info(struct niu *np, + struct niu_parent *parent, + struct phy_probe_info *info) +{ + unsigned long flags; + int port, err; + + memset(info, 0, sizeof(*info)); + + /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
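niu_divide_channels() above gives every 1G port a fixed small share of the RX/TX DMA channels and splits whatever remains evenly among the 10G ports, then falls back to one channel per port if the totals overflow. A worked example of that split, not part of the patch; the channel totals (16 RX, 24 TX) and the port mix are assumptions for illustration rather than values taken from the patch text.

#include <stdio.h>

int main(void)
{
        int num_rxchan = 16, num_txchan = 24;   /* assumed channel totals */
        int num_10g = 2, num_1g = 2;            /* example port mix */

        int rx_per_1g  = num_rxchan / 8;                                /* 2 */
        int rx_per_10g = (num_rxchan - rx_per_1g * num_1g) / num_10g;   /* 6 */
        int tx_per_1g  = num_txchan / 6;                                /* 4 */
        int tx_per_10g = (num_txchan - tx_per_1g * num_1g) / num_10g;   /* 8 */

        printf("10G port: %d RX / %d TX, 1G port: %d RX / %d TX\n",
               rx_per_10g, tx_per_10g, rx_per_1g, tx_per_1g);
        return 0;
}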
*/ + niu_lock_parent(np, flags); + err = 0; + for (port = 8; port < 32; port++) { + int dev_id_1, dev_id_2; + + dev_id_1 = mdio_read(np, port, + NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); + dev_id_2 = mdio_read(np, port, + NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_PMA_PMD); + if (err) + break; + dev_id_1 = mdio_read(np, port, + NIU_PCS_DEV_ADDR, MII_PHYSID1); + dev_id_2 = mdio_read(np, port, + NIU_PCS_DEV_ADDR, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_PCS); + if (err) + break; + dev_id_1 = mii_read(np, port, MII_PHYSID1); + dev_id_2 = mii_read(np, port, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_MII); + if (err) + break; + } + niu_unlock_parent(np, flags); + + return err; +} + +static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) +{ + struct phy_probe_info *info = &parent->phy_probe_info; + int lowest_10g, lowest_1g; + int num_10g, num_1g; + u32 val; + int err; + + err = fill_phy_probe_info(np, parent, info); + if (err) + return err; + + num_10g = count_10g_ports(info, &lowest_10g); + num_1g = count_1g_ports(info, &lowest_1g); + + switch ((num_10g << 4) | num_1g) { + case 0x24: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + /* fallthru */ + case 0x22: + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + case 0x20: + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1)); + break; + + case 0x10: + val = phy_encode(PORT_TYPE_10G, np->port); + break; + + case 0x14: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + /* fallthru */ + case 0x13: + if ((lowest_10g & 0x7) == 0) + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + else + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_10G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + case 0x04: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + default: + printk(KERN_ERR PFX "Unsupported port config " + "10G[%d] 1G[%d]\n", + num_10g, num_1g); + return -EINVAL; + } + + parent->port_phy = val; + + if (parent->plat_type == PLAT_TYPE_NIU) + niu_n2_divide_channels(parent); + else + niu_divide_channels(parent, num_10g, num_1g); + + niu_divide_rdc_groups(parent, num_10g, num_1g); + + return 0; + +unknown_vg_1g_port: + printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", + lowest_1g); + return -EINVAL; +} + +static int __devinit niu_probe_ports(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int err, i; + + niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n", + parent->port_phy); + + if (parent->port_phy == PORT_PHY_UNKNOWN) { + err = walk_phys(np, parent); + if (err) + return err; + + niu_set_ldg_timer_res(np, 2); + for (i = 0; i <= LDN_MAX; i++) + niu_ldn_irq_enable(np, i, 0); + } + + if (parent->port_phy == PORT_PHY_INVALID) + return 
-EINVAL; + + return 0; +} + +static int __devinit niu_classifier_swstate_init(struct niu *np) +{ + struct niu_classifier *cp = &np->clas; + + niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n", + np->parent->tcam_num_entries); + + cp->tcam_index = (u16) np->port; + cp->h1_init = 0xffffffff; + cp->h2_init = 0xffff; + + return fflp_early_init(np); +} + +static void __devinit niu_link_config_init(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + + lp->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full | + ADVERTISED_Autoneg); + lp->speed = lp->active_speed = SPEED_INVALID; + lp->duplex = lp->active_duplex = DUPLEX_INVALID; +#if 0 + lp->loopback_mode = LOOPBACK_MAC; + lp->active_speed = SPEED_10000; + lp->active_duplex = DUPLEX_FULL; +#else + lp->loopback_mode = LOOPBACK_DISABLED; +#endif +} + +static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) +{ + switch (np->port) { + case 0: + np->mac_regs = np->regs + XMAC_PORT0_OFF; + np->ipp_off = 0x00000; + np->pcs_off = 0x04000; + np->xpcs_off = 0x02000; + break; + + case 1: + np->mac_regs = np->regs + XMAC_PORT1_OFF; + np->ipp_off = 0x08000; + np->pcs_off = 0x0a000; + np->xpcs_off = 0x08000; + break; + + case 2: + np->mac_regs = np->regs + BMAC_PORT2_OFF; + np->ipp_off = 0x04000; + np->pcs_off = 0x0e000; + np->xpcs_off = ~0UL; + break; + + case 3: + np->mac_regs = np->regs + BMAC_PORT3_OFF; + np->ipp_off = 0x0c000; + np->pcs_off = 0x12000; + np->xpcs_off = ~0UL; + break; + + default: + dev_err(np->device, PFX "Port %u is invalid, cannot " + "compute MAC block offset.\n", np->port); + return -EINVAL; + } + + return 0; +} + +static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) +{ + struct msix_entry msi_vec[NIU_NUM_LDG]; + struct niu_parent *parent = np->parent; + struct pci_dev *pdev = np->pdev; + int i, num_irqs, err; + u8 first_ldg; + + first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; + for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) + ldg_num_map[i] = first_ldg + i; + + num_irqs = (parent->rxchan_per_port[np->port] + + parent->txchan_per_port[np->port] + + (np->port == 0 ? 
3 : 1)); + BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); + +retry: + for (i = 0; i < num_irqs; i++) { + msi_vec[i].vector = 0; + msi_vec[i].entry = i; + } + + err = pci_enable_msix(pdev, msi_vec, num_irqs); + if (err < 0) { + np->flags &= ~NIU_FLAGS_MSIX; + return; + } + if (err > 0) { + num_irqs = err; + goto retry; + } + + np->flags |= NIU_FLAGS_MSIX; + for (i = 0; i < num_irqs; i++) + np->ldg[i].irq = msi_vec[i].vector; + np->num_ldg = num_irqs; +} + +static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) +{ +#ifdef CONFIG_SPARC64 + struct of_device *op = np->op; + const u32 *int_prop; + int i; + + int_prop = of_get_property(op->node, "interrupts", NULL); + if (!int_prop) + return -ENODEV; + + for (i = 0; i < op->num_irqs; i++) { + ldg_num_map[i] = int_prop[i]; + np->ldg[i].irq = op->irqs[i]; + } + + np->num_ldg = op->num_irqs; + + return 0; +#else + return -EINVAL; +#endif +} + +static int __devinit niu_ldg_init(struct niu *np) +{ + struct niu_parent *parent = np->parent; + u8 ldg_num_map[NIU_NUM_LDG]; + int first_chan, num_chan; + int i, err, ldg_rotor; + u8 port; + + np->num_ldg = 1; + np->ldg[0].irq = np->dev->irq; + if (parent->plat_type == PLAT_TYPE_NIU) { + err = niu_n2_irq_init(np, ldg_num_map); + if (err) + return err; + } else + niu_try_msix(np, ldg_num_map); + + port = np->port; + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + netif_napi_add(np->dev, &lp->napi, niu_poll, 64); + + lp->np = np; + lp->ldg_num = ldg_num_map[i]; + lp->timer = 2; /* XXX */ + + /* On N2 NIU the firmware has setup the SID mappings so they go + * to the correct values that will route the LDG to the proper + * interrupt in the NCU interrupt table. + */ + if (np->parent->plat_type != PLAT_TYPE_NIU) { + err = niu_set_ldg_sid(np, lp->ldg_num, port, i); + if (err) + return err; + } + } + + /* We adopt the LDG assignment ordering used by the N2 NIU + * 'interrupt' properties because that simplifies a lot of + * things. 
This ordering is: + * + * MAC + * MIF (if port zero) + * SYSERR (if port zero) + * RX channels + * TX channels + */ + + ldg_rotor = 0; + + err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], + LDN_MAC(port)); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + if (port == 0) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_MIF); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_DEVICE_ERROR); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + } + + first_chan = 0; + for (i = 0; i < port; i++) + first_chan += parent->rxchan_per_port[port]; + num_chan = parent->rxchan_per_port[port]; + + for (i = first_chan; i < (first_chan + num_chan); i++) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_RXDMA(i)); + if (err) + return err; + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + } + + first_chan = 0; + for (i = 0; i < port; i++) + first_chan += parent->txchan_per_port[port]; + num_chan = parent->txchan_per_port[port]; + for (i = first_chan; i < (first_chan + num_chan); i++) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_TXDMA(i)); + if (err) + return err; + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + } + + return 0; +} + +static void __devexit niu_ldg_free(struct niu *np) +{ + if (np->flags & NIU_FLAGS_MSIX) + pci_disable_msix(np->pdev); +} + +static int __devinit niu_get_of_props(struct niu *np) +{ +#ifdef CONFIG_SPARC64 + struct net_device *dev = np->dev; + struct device_node *dp; + const char *phy_type; + const u8 *mac_addr; + int prop_len; + + if (np->parent->plat_type == PLAT_TYPE_NIU) + dp = np->op->node; + else + dp = pci_device_to_OF_node(np->pdev); + + phy_type = of_get_property(dp, "phy-type", &prop_len); + if (!phy_type) { + dev_err(np->device, PFX "%s: OF node lacks " + "phy-type property\n", + dp->full_name); + return -EINVAL; + } + + if (!strcmp(phy_type, "none")) + return -ENODEV; + + strcpy(np->vpd.phy_type, phy_type); + + if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { + dev_err(np->device, PFX "%s: Illegal phy string [%s].\n", + dp->full_name, np->vpd.phy_type); + return -EINVAL; + } + + mac_addr = of_get_property(dp, "local-mac-address", &prop_len); + if (!mac_addr) { + dev_err(np->device, PFX "%s: OF node lacks " + "local-mac-address property\n", + dp->full_name); + return -EINVAL; + } + if (prop_len != dev->addr_len) { + dev_err(np->device, PFX "%s: OF MAC address prop len (%d) " + "is wrong.\n", + dp->full_name, prop_len); + } + memcpy(dev->perm_addr, mac_addr, dev->addr_len); + if (!is_valid_ether_addr(&dev->perm_addr[0])) { + int i; + + dev_err(np->device, PFX "%s: OF MAC address is invalid\n", + dp->full_name); + dev_err(np->device, PFX "%s: [ \n", + dp->full_name); + for (i = 0; i < 6; i++) + printk("%02x ", dev->perm_addr[i]); + printk("]\n"); + return -EINVAL; + } + + memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + + return 0; +#else + return -EINVAL; +#endif +} + +static int __devinit niu_get_invariants(struct niu *np) +{ + int err, have_props; + u32 offset; + + err = niu_get_of_props(np); + if (err == -ENODEV) + return err; + + have_props = !err; + + err = niu_get_and_validate_port(np); + if (err) + return err; + + err = niu_init_mac_ipp_pcs_base(np); + if (err) + return err; + + if (!have_props) { + if (np->parent->plat_type == 
PLAT_TYPE_NIU) + return -EINVAL; + + nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); + offset = niu_pci_vpd_offset(np); + niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n", + offset); + if (offset) + niu_pci_vpd_fetch(np, offset); + nw64(ESPC_PIO_EN, 0); + + if (np->flags & NIU_FLAGS_VPD_VALID) + niu_pci_vpd_validate(np); + + if (!(np->flags & NIU_FLAGS_VPD_VALID)) { + err = niu_pci_probe_sprom(np); + if (err) + return err; + } + } + + err = niu_probe_ports(np); + if (err) + return err; + + niu_ldg_init(np); + + niu_classifier_swstate_init(np); + niu_link_config_init(np); + + err = niu_determine_phy_disposition(np); + if (!err) + err = niu_init_link(np); + + return err; +} + +static LIST_HEAD(niu_parent_list); +static DEFINE_MUTEX(niu_parent_lock); +static int niu_parent_index; + +static ssize_t show_port_phy(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = plat_dev->dev.platform_data; + u32 port_phy = p->port_phy; + char *orig_buf = buf; + int i; + + if (port_phy == PORT_PHY_UNKNOWN || + port_phy == PORT_PHY_INVALID) + return 0; + + for (i = 0; i < p->num_ports; i++) { + const char *type_str; + int type; + + type = phy_decode(port_phy, i); + if (type == PORT_TYPE_10G) + type_str = "10G"; + else + type_str = "1G"; + buf += sprintf(buf, + (i == 0) ? "%s" : " %s", + type_str); + } + buf += sprintf(buf, "\n"); + return buf - orig_buf; +} + +static ssize_t show_plat_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = plat_dev->dev.platform_data; + const char *type_str; + + switch (p->plat_type) { + case PLAT_TYPE_ATLAS: + type_str = "atlas"; + break; + case PLAT_TYPE_NIU: + type_str = "niu"; + break; + case PLAT_TYPE_VF_P0: + type_str = "vf_p0"; + break; + case PLAT_TYPE_VF_P1: + type_str = "vf_p1"; + break; + default: + type_str = "unknown"; + break; + } + + return sprintf(buf, "%s\n", type_str); +} + +static ssize_t __show_chan_per_port(struct device *dev, + struct device_attribute *attr, char *buf, + int rx) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = plat_dev->dev.platform_data; + char *orig_buf = buf; + u8 *arr; + int i; + + arr = (rx ? p->rxchan_per_port : p->txchan_per_port); + + for (i = 0; i < p->num_ports; i++) { + buf += sprintf(buf, + (i == 0) ? 
"%d" : " %d", + arr[i]); + } + buf += sprintf(buf, "\n"); + + return buf - orig_buf; +} + +static ssize_t show_rxchan_per_port(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return __show_chan_per_port(dev, attr, buf, 1); +} + +static ssize_t show_txchan_per_port(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return __show_chan_per_port(dev, attr, buf, 1); +} + +static ssize_t show_num_ports(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = plat_dev->dev.platform_data; + + return sprintf(buf, "%d\n", p->num_ports); +} + +static struct device_attribute niu_parent_attributes[] = { + __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), + __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), + __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), + __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), + __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), + {} +}; + +static struct niu_parent * __devinit niu_new_parent(struct niu *np, + union niu_parent_id *id, + u8 ptype) +{ + struct platform_device *plat_dev; + struct niu_parent *p; + int i; + + niudbg(PROBE, "niu_new_parent: Creating new parent.\n"); + + plat_dev = platform_device_register_simple("niu", niu_parent_index, + NULL, 0); + if (!plat_dev) + return NULL; + + for (i = 0; attr_name(niu_parent_attributes[i]); i++) { + int err = device_create_file(&plat_dev->dev, + &niu_parent_attributes[i]); + if (err) + goto fail_unregister; + } + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + goto fail_unregister; + + p->index = niu_parent_index++; + + plat_dev->dev.platform_data = p; + p->plat_dev = plat_dev; + + memcpy(&p->id, id, sizeof(*id)); + p->plat_type = ptype; + INIT_LIST_HEAD(&p->list); + atomic_set(&p->refcnt, 0); + list_add(&p->list, &niu_parent_list); + spin_lock_init(&p->lock); + + p->rxdma_clock_divider = 7500; + + p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; + if (p->plat_type == PLAT_TYPE_NIU) + p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; + + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { + int index = i - CLASS_CODE_USER_PROG1; + + p->tcam_key[index] = TCAM_KEY_TSEL; + p->flow_key[index] = (FLOW_KEY_IPSA | + FLOW_KEY_IPDA | + FLOW_KEY_PROTO | + (FLOW_KEY_L4_BYTE12 << + FLOW_KEY_L4_0_SHIFT) | + (FLOW_KEY_L4_BYTE12 << + FLOW_KEY_L4_1_SHIFT)); + } + + for (i = 0; i < LDN_MAX + 1; i++) + p->ldg_map[i] = LDG_INVALID; + + return p; + +fail_unregister: + platform_device_unregister(plat_dev); + return NULL; +} + +static struct niu_parent * __devinit niu_get_parent(struct niu *np, + union niu_parent_id *id, + u8 ptype) +{ + struct niu_parent *p, *tmp; + int port = np->port; + + niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n", + ptype, port); + + mutex_lock(&niu_parent_lock); + p = NULL; + list_for_each_entry(tmp, &niu_parent_list, list) { + if (!memcmp(id, &tmp->id, sizeof(*id))) { + p = tmp; + break; + } + } + if (!p) + p = niu_new_parent(np, id, ptype); + + if (p) { + char port_name[6]; + int err; + + sprintf(port_name, "port%d", port); + err = sysfs_create_link(&p->plat_dev->dev.kobj, + &np->device->kobj, + port_name); + if (!err) { + p->ports[port] = np; + atomic_inc(&p->refcnt); + } + } + mutex_unlock(&niu_parent_lock); + + return p; +} + +static void niu_put_parent(struct niu *np) +{ + struct niu_parent *p = np->parent; + u8 port = np->port; + char port_name[6]; + + BUG_ON(!p || p->ports[port] != np); + + niudbg(PROBE, "niu_put_parent: 
port[%u]\n", port); + + sprintf(port_name, "port%d", port); + + mutex_lock(&niu_parent_lock); + + sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); + + p->ports[port] = NULL; + np->parent = NULL; + + if (atomic_dec_and_test(&p->refcnt)) { + list_del(&p->list); + platform_device_unregister(p->plat_dev); + } + + mutex_unlock(&niu_parent_lock); +} + +static void *niu_pci_alloc_coherent(struct device *dev, size_t size, + u64 *handle, gfp_t flag) +{ + dma_addr_t dh; + void *ret; + + ret = dma_alloc_coherent(dev, size, &dh, flag); + if (ret) + *handle = dh; + return ret; +} + +static void niu_pci_free_coherent(struct device *dev, size_t size, + void *cpu_addr, u64 handle) +{ + dma_free_coherent(dev, size, cpu_addr, handle); +} + +static u64 niu_pci_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + return dma_map_page(dev, page, offset, size, direction); +} + +static void niu_pci_unmap_page(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction) +{ + return dma_unmap_page(dev, dma_address, size, direction); +} + +static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction) +{ + return dma_map_single(dev, cpu_addr, size, direction); +} + +static void niu_pci_unmap_single(struct device *dev, u64 dma_address, + size_t size, + enum dma_data_direction direction) +{ + dma_unmap_single(dev, dma_address, size, direction); +} + +static const struct niu_ops niu_pci_ops = { + .alloc_coherent = niu_pci_alloc_coherent, + .free_coherent = niu_pci_free_coherent, + .map_page = niu_pci_map_page, + .unmap_page = niu_pci_unmap_page, + .map_single = niu_pci_map_single, + .unmap_single = niu_pci_unmap_single, +}; + +static void __devinit niu_driver_version(void) +{ + static int niu_version_printed; + + if (niu_version_printed++ == 0) + pr_info("%s", version); +} + +static struct net_device * __devinit niu_alloc_and_init( + struct device *gen_dev, struct pci_dev *pdev, + struct of_device *op, const struct niu_ops *ops, + u8 port) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct niu)); + struct niu *np; + + if (!dev) { + dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n"); + return NULL; + } + + SET_NETDEV_DEV(dev, gen_dev); + + np = netdev_priv(dev); + np->dev = dev; + np->pdev = pdev; + np->op = op; + np->device = gen_dev; + np->ops = ops; + + np->msg_enable = niu_debug; + + spin_lock_init(&np->lock); + INIT_WORK(&np->reset_task, niu_reset_task); + + np->port = port; + + return dev; +} + +static void __devinit niu_assign_netdev_ops(struct net_device *dev) +{ + dev->open = niu_open; + dev->stop = niu_close; + dev->get_stats = niu_get_stats; + dev->set_multicast_list = niu_set_rx_mode; + dev->set_mac_address = niu_set_mac_addr; + dev->do_ioctl = niu_ioctl; + dev->tx_timeout = niu_tx_timeout; + dev->hard_start_xmit = niu_start_xmit; + dev->ethtool_ops = &niu_ethtool_ops; + dev->watchdog_timeo = NIU_TX_TIMEOUT; + dev->change_mtu = niu_change_mtu; +} + +static void __devinit niu_device_announce(struct niu *np) +{ + struct net_device *dev = np->dev; + int i; + + pr_info("%s: NIU Ethernet ", dev->name); + for (i = 0; i < 6; i++) + printk("%2.2x%c", dev->dev_addr[i], + i == 5 ? '\n' : ':'); + + pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", + dev->name, + (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), + (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), + (np->flags & NIU_FLAGS_FIBER ? 
"FIBER" : "COPPER"), + (np->mac_xcvr == MAC_XCVR_MII ? "MII" : + (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), + np->vpd.phy_type); +} + +static int __devinit niu_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned long niureg_base, niureg_len; + union niu_parent_id parent_id; + struct net_device *dev; + struct niu *np; + int err, pos; + u64 dma_mask; + u16 val16; + + niu_driver_version(); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, PFX "Cannot enable PCI device, " + "aborting.\n"); + return err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || + !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, PFX "Cannot find proper PCI device " + "base addresses, aborting.\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " + "aborting.\n"); + goto err_out_disable_pdev; + } + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pos <= 0) { + dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " + "aborting.\n"); + goto err_out_free_res; + } + + dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, + &niu_pci_ops, PCI_FUNC(pdev->devfn)); + if (!dev) { + err = -ENOMEM; + goto err_out_free_res; + } + np = netdev_priv(dev); + + memset(&parent_id, 0, sizeof(parent_id)); + parent_id.pci.domain = pci_domain_nr(pdev->bus); + parent_id.pci.bus = pdev->bus->number; + parent_id.pci.device = PCI_SLOT(pdev->devfn); + + np->parent = niu_get_parent(np, &parent_id, + PLAT_TYPE_ATLAS); + if (!np->parent) { + err = -ENOMEM; + goto err_out_free_dev; + } + + pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); + val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; + val16 |= (PCI_EXP_DEVCTL_CERE | + PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_URRE | + PCI_EXP_DEVCTL_RELAX_EN); + pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); + + dma_mask = DMA_44BIT_MASK; + err = pci_set_dma_mask(pdev, dma_mask); + if (!err) { + dev->features |= NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " + "DMA for consistent allocations, " + "aborting.\n"); + goto err_out_release_parent; + } + } + if (err || dma_mask == DMA_32BIT_MASK) { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, PFX "No usable DMA configuration, " + "aborting.\n"); + goto err_out_release_parent; + } + } + + dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); + + niureg_base = pci_resource_start(pdev, 0); + niureg_len = pci_resource_len(pdev, 0); + + np->regs = ioremap_nocache(niureg_base, niureg_len); + if (!np->regs) { + dev_err(&pdev->dev, PFX "Cannot map device registers, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_release_parent; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + dev->irq = pdev->irq; + + niu_assign_netdev_ops(dev); + + err = niu_get_invariants(np); + if (err) { + if (err != -ENODEV) + dev_err(&pdev->dev, PFX "Problem fetching invariants " + "of chip, aborting.\n"); + goto err_out_iounmap; + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_iounmap; + } + + pci_set_drvdata(pdev, dev); + + niu_device_announce(np); + + return 0; + +err_out_iounmap: + if (np->regs) { + iounmap(np->regs); + np->regs = NULL; + } + +err_out_release_parent: + niu_put_parent(np); + +err_out_free_dev: + 
free_netdev(dev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + return err; +} + +static void __devexit niu_pci_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct niu *np = netdev_priv(dev); + + unregister_netdev(dev); + if (np->regs) { + iounmap(np->regs); + np->regs = NULL; + } + + niu_ldg_free(np); + + niu_put_parent(np); + + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +static int niu_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct niu *np = netdev_priv(dev); + unsigned long flags; + + if (!netif_running(dev)) + return 0; + + flush_scheduled_work(); + niu_netif_stop(np); + + del_timer_sync(&np->timer); + + spin_lock_irqsave(&np->lock, flags); + niu_enable_interrupts(np, 0); + spin_unlock_irqrestore(&np->lock, flags); + + netif_device_detach(dev); + + spin_lock_irqsave(&np->lock, flags); + niu_stop_hw(np); + spin_unlock_irqrestore(&np->lock, flags); + + pci_save_state(pdev); + + return 0; +} + +static int niu_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct niu *np = netdev_priv(dev); + unsigned long flags; + int err; + + if (!netif_running(dev)) + return 0; + + pci_restore_state(pdev); + + netif_device_attach(dev); + + spin_lock_irqsave(&np->lock, flags); + + err = niu_init_hw(np); + if (!err) { + np->timer.expires = jiffies + HZ; + add_timer(&np->timer); + niu_netif_start(np); + } + + spin_unlock_irqrestore(&np->lock, flags); + + return err; +} + +static struct pci_driver niu_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = niu_pci_tbl, + .probe = niu_pci_init_one, + .remove = __devexit_p(niu_pci_remove_one), + .suspend = niu_suspend, + .resume = niu_resume, +}; + +#ifdef CONFIG_SPARC64 +static void *niu_phys_alloc_coherent(struct device *dev, size_t size, + u64 *dma_addr, gfp_t flag) +{ + unsigned long order = get_order(size); + unsigned long page = __get_free_pages(flag, order); + + if (page == 0UL) + return NULL; + memset((char *)page, 0, PAGE_SIZE << order); + *dma_addr = __pa(page); + + return (void *) page; +} + +static void niu_phys_free_coherent(struct device *dev, size_t size, + void *cpu_addr, u64 handle) +{ + unsigned long order = get_order(size); + + free_pages((unsigned long) cpu_addr, order); +} + +static u64 niu_phys_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + return page_to_phys(page) + offset; +} + +static void niu_phys_unmap_page(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction) +{ + /* Nothing to do. */ +} + +static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction) +{ + return __pa(cpu_addr); +} + +static void niu_phys_unmap_single(struct device *dev, u64 dma_address, + size_t size, + enum dma_data_direction direction) +{ + /* Nothing to do. 
*/ +} + +static const struct niu_ops niu_phys_ops = { + .alloc_coherent = niu_phys_alloc_coherent, + .free_coherent = niu_phys_free_coherent, + .map_page = niu_phys_map_page, + .unmap_page = niu_phys_unmap_page, + .map_single = niu_phys_map_single, + .unmap_single = niu_phys_unmap_single, +}; + +static unsigned long res_size(struct resource *r) +{ + return r->end - r->start + 1UL; +} + +static int __devinit niu_of_probe(struct of_device *op, + const struct of_device_id *match) +{ + union niu_parent_id parent_id; + struct net_device *dev; + struct niu *np; + const u32 *reg; + int err; + + niu_driver_version(); + + reg = of_get_property(op->node, "reg", NULL); + if (!reg) { + dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", + op->node->full_name); + return -ENODEV; + } + + dev = niu_alloc_and_init(&op->dev, NULL, op, + &niu_phys_ops, reg[0] & 0x1); + if (!dev) { + err = -ENOMEM; + goto err_out; + } + np = netdev_priv(dev); + + memset(&parent_id, 0, sizeof(parent_id)); + parent_id.of = of_get_parent(op->node); + + np->parent = niu_get_parent(np, &parent_id, + PLAT_TYPE_NIU); + if (!np->parent) { + err = -ENOMEM; + goto err_out_free_dev; + } + + dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); + + np->regs = of_ioremap(&op->resource[1], 0, + res_size(&op->resource[1]), + "niu regs"); + if (!np->regs) { + dev_err(&op->dev, PFX "Cannot map device registers, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_release_parent; + } + + np->vir_regs_1 = of_ioremap(&op->resource[2], 0, + res_size(&op->resource[2]), + "niu vregs-1"); + if (!np->vir_regs_1) { + dev_err(&op->dev, PFX "Cannot map device vir registers 1, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + np->vir_regs_2 = of_ioremap(&op->resource[3], 0, + res_size(&op->resource[3]), + "niu vregs-2"); + if (!np->vir_regs_2) { + dev_err(&op->dev, PFX "Cannot map device vir registers 2, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + niu_assign_netdev_ops(dev); + + err = niu_get_invariants(np); + if (err) { + if (err != -ENODEV) + dev_err(&op->dev, PFX "Problem fetching invariants " + "of chip, aborting.\n"); + goto err_out_iounmap; + } + + err = register_netdev(dev); + if (err) { + dev_err(&op->dev, PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_iounmap; + } + + dev_set_drvdata(&op->dev, dev); + + niu_device_announce(np); + + return 0; + +err_out_iounmap: + if (np->vir_regs_1) { + of_iounmap(&op->resource[2], np->vir_regs_1, + res_size(&op->resource[2])); + np->vir_regs_1 = NULL; + } + + if (np->vir_regs_2) { + of_iounmap(&op->resource[3], np->vir_regs_2, + res_size(&op->resource[3])); + np->vir_regs_2 = NULL; + } + + if (np->regs) { + of_iounmap(&op->resource[1], np->regs, + res_size(&op->resource[1])); + np->regs = NULL; + } + +err_out_release_parent: + niu_put_parent(np); + +err_out_free_dev: + free_netdev(dev); + +err_out: + return err; +} + +static int __devexit niu_of_remove(struct of_device *op) +{ + struct net_device *dev = dev_get_drvdata(&op->dev); + + if (dev) { + struct niu *np = netdev_priv(dev); + + unregister_netdev(dev); + + if (np->vir_regs_1) { + of_iounmap(&op->resource[2], np->vir_regs_1, + res_size(&op->resource[2])); + np->vir_regs_1 = NULL; + } + + if (np->vir_regs_2) { + of_iounmap(&op->resource[3], np->vir_regs_2, + res_size(&op->resource[3])); + np->vir_regs_2 = NULL; + } + + if (np->regs) { + of_iounmap(&op->resource[1], np->regs, + res_size(&op->resource[1])); + np->regs = NULL; + } + + niu_ldg_free(np); + + niu_put_parent(np); + + 
free_netdev(dev); + dev_set_drvdata(&op->dev, NULL); + } + return 0; +} + +static struct of_device_id niu_match[] = { + { + .name = "network", + .compatible = "SUNW,niusl", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, niu_match); + +static struct of_platform_driver niu_of_driver = { + .name = "niu", + .match_table = niu_match, + .probe = niu_of_probe, + .remove = __devexit_p(niu_of_remove), +}; + +#endif /* CONFIG_SPARC64 */ + +static int __init niu_init(void) +{ + int err = 0; + + BUILD_BUG_ON((PAGE_SIZE < 4 * 1024) || + ((PAGE_SIZE > 32 * 1024) && + ((PAGE_SIZE % (32 * 1024)) != 0 && + (PAGE_SIZE % (16 * 1024)) != 0 && + (PAGE_SIZE % (8 * 1024)) != 0 && + (PAGE_SIZE % (4 * 1024)) != 0))); + + niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); + +#ifdef CONFIG_SPARC64 + err = of_register_driver(&niu_of_driver, &of_bus_type); +#endif + + if (!err) { + err = pci_register_driver(&niu_pci_driver); +#ifdef CONFIG_SPARC64 + if (err) + of_unregister_driver(&niu_of_driver); +#endif + } + + return err; +} + +static void __exit niu_exit(void) +{ + pci_unregister_driver(&niu_pci_driver); +#ifdef CONFIG_SPARC64 + of_unregister_driver(&niu_of_driver); +#endif +} + +module_init(niu_init); +module_exit(niu_exit); diff --git a/drivers/net/niu.h b/drivers/net/niu.h new file mode 100644 index 000000000000..10e3f111b6d5 --- /dev/null +++ b/drivers/net/niu.h @@ -0,0 +1,3222 @@ +/* niu.h: Definitions for Neptune ethernet driver. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#ifndef _NIU_H +#define _NIU_H + +#define PIO 0x000000UL +#define FZC_PIO 0x080000UL +#define FZC_MAC 0x180000UL +#define FZC_IPP 0x280000UL +#define FFLP 0x300000UL +#define FZC_FFLP 0x380000UL +#define PIO_VADDR 0x400000UL +#define ZCP 0x500000UL +#define FZC_ZCP 0x580000UL +#define DMC 0x600000UL +#define FZC_DMC 0x680000UL +#define TXC 0x700000UL +#define FZC_TXC 0x780000UL +#define PIO_LDSV 0x800000UL +#define PIO_PIO_LDGIM 0x900000UL +#define PIO_IMASK0 0xa00000UL +#define PIO_IMASK1 0xb00000UL +#define FZC_PROM 0xc80000UL +#define FZC_PIM 0xd80000UL + +#define LDSV0(LDG) (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL) +#define LDSV1(LDG) (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL) +#define LDSV2(LDG) (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL) + +#define LDG_IMGMT(LDG) (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL) +#define LDG_IMGMT_ARM 0x0000000080000000ULL +#define LDG_IMGMT_TIMER 0x000000000000003fULL + +#define LD_IM0(IDX) (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL) +#define LD_IM0_MASK 0x0000000000000003ULL + +#define LD_IM1(IDX) (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL) +#define LD_IM1_MASK 0x0000000000000003ULL + +#define LDG_TIMER_RES (FZC_PIO + 0x00008UL) +#define LDG_TIMER_RES_VAL 0x00000000000fffffULL + +#define DIRTY_TID_CTL (FZC_PIO + 0x00010UL) +#define DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL +#define DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL +#define DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL +#define DIRTY_TID_CTL_DTIDENAB 0x0000000000000001ULL + +#define DIRTY_TID_STAT (FZC_PIO + 0x00018UL) +#define DIRTY_TID_STAT_NPWSTAT 0x0000000000003f00ULL +#define DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL + +#define RST_CTL (FZC_PIO + 0x00038UL) +#define RST_CTL_MAC_RST3 0x0000000000400000ULL +#define RST_CTL_MAC_RST2 0x0000000000200000ULL +#define RST_CTL_MAC_RST1 0x0000000000100000ULL +#define RST_CTL_MAC_RST0 0x0000000000080000ULL +#define RST_CTL_ACK_TO_EN 0x0000000000000800ULL +#define RST_CTL_ACK_TO_VAL 0x00000000000007feULL + +#define SMX_CFIG_DAT (FZC_PIO + 0x00040UL) +#define SMX_CFIG_DAT_RAS_DET 
0x0000000080000000ULL +#define SMX_CFIG_DAT_RAS_INJ 0x0000000040000000ULL +#define SMX_CFIG_DAT_XACT_TO 0x000000000fffffffULL + +#define SMX_INT_STAT (FZC_PIO + 0x00048UL) +#define SMX_INT_STAT_STAT 0x00000000ffffffffULL + +#define SMX_CTL (FZC_PIO + 0x00050UL) +#define SMX_CTL_CTL 0x00000000ffffffffULL + +#define SMX_DBG_VEC (FZC_PIO + 0x00058UL) +#define SMX_DBG_VEC_VEC 0x00000000ffffffffULL + +#define PIO_DBG_SEL (FZC_PIO + 0x00060UL) +#define PIO_DBG_SEL_SEL 0x000000000000003fULL + +#define PIO_TRAIN_VEC (FZC_PIO + 0x00068UL) +#define PIO_TRAIN_VEC_VEC 0x00000000ffffffffULL + +#define PIO_ARB_CTL (FZC_PIO + 0x00070UL) +#define PIO_ARB_CTL_CTL 0x00000000ffffffffULL + +#define PIO_ARB_DBG_VEC (FZC_PIO + 0x00078UL) +#define PIO_ARB_DBG_VEC_VEC 0x00000000ffffffffULL + +#define SYS_ERR_MASK (FZC_PIO + 0x00090UL) +#define SYS_ERR_MASK_META2 0x0000000000000400ULL +#define SYS_ERR_MASK_META1 0x0000000000000200ULL +#define SYS_ERR_MASK_PEU 0x0000000000000100ULL +#define SYS_ERR_MASK_TXC 0x0000000000000080ULL +#define SYS_ERR_MASK_RDMC 0x0000000000000040ULL +#define SYS_ERR_MASK_TDMC 0x0000000000000020ULL +#define SYS_ERR_MASK_ZCP 0x0000000000000010ULL +#define SYS_ERR_MASK_FFLP 0x0000000000000008ULL +#define SYS_ERR_MASK_IPP 0x0000000000000004ULL +#define SYS_ERR_MASK_MAC 0x0000000000000002ULL +#define SYS_ERR_MASK_SMX 0x0000000000000001ULL + +#define SYS_ERR_STAT (FZC_PIO + 0x00098UL) +#define SYS_ERR_STAT_META2 0x0000000000000400ULL +#define SYS_ERR_STAT_META1 0x0000000000000200ULL +#define SYS_ERR_STAT_PEU 0x0000000000000100ULL +#define SYS_ERR_STAT_TXC 0x0000000000000080ULL +#define SYS_ERR_STAT_RDMC 0x0000000000000040ULL +#define SYS_ERR_STAT_TDMC 0x0000000000000020ULL +#define SYS_ERR_STAT_ZCP 0x0000000000000010ULL +#define SYS_ERR_STAT_FFLP 0x0000000000000008ULL +#define SYS_ERR_STAT_IPP 0x0000000000000004ULL +#define SYS_ERR_STAT_MAC 0x0000000000000002ULL +#define SYS_ERR_STAT_SMX 0x0000000000000001ULL + +#define SID(LDG) (FZC_PIO + 0x10200UL + (LDG) * 8UL) +#define SID_FUNC 0x0000000000000060ULL +#define SID_FUNC_SHIFT 5 +#define SID_VECTOR 0x000000000000001fULL +#define SID_VECTOR_SHIFT 0 + +#define LDG_NUM(LDN) (FZC_PIO + 0x20000UL + (LDN) * 8UL) + +#define XMAC_PORT0_OFF (FZC_MAC + 0x000000) +#define XMAC_PORT1_OFF (FZC_MAC + 0x006000) +#define BMAC_PORT2_OFF (FZC_MAC + 0x00c000) +#define BMAC_PORT3_OFF (FZC_MAC + 0x010000) + +/* XMAC registers, offset from np->mac_regs */ + +#define XTXMAC_SW_RST 0x00000UL +#define XTXMAC_SW_RST_REG_RS 0x0000000000000002ULL +#define XTXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL + +#define XRXMAC_SW_RST 0x00008UL +#define XRXMAC_SW_RST_REG_RS 0x0000000000000002ULL +#define XRXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL + +#define XTXMAC_STATUS 0x00020UL +#define XTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL +#define XTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL +#define XTXMAC_STATUS_TXFIFO_XFR_ERR 0x0000000000000010ULL +#define XTXMAC_STATUS_TXMAC_OFLOW 0x0000000000000008ULL +#define XTXMAC_STATUS_MAX_PSIZE_ERR 0x0000000000000004ULL +#define XTXMAC_STATUS_TXMAC_UFLOW 0x0000000000000002ULL +#define XTXMAC_STATUS_FRAME_XMITED 0x0000000000000001ULL + +#define XRXMAC_STATUS 0x00028UL +#define XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL +#define XRXMAC_STATUS_LCL_FLT_STATUS 0x0000000000080000ULL +#define XRXMAC_STATUS_RFLT_DET 0x0000000000040000ULL +#define XRXMAC_STATUS_LFLT_CNT_EXP 0x0000000000020000ULL +#define XRXMAC_STATUS_PHY_MDINT 0x0000000000010000ULL +#define XRXMAC_STATUS_ALIGNERR_CNT_EXP 0x0000000000010000ULL +#define 
XRXMAC_STATUS_RXFRAG_CNT_EXP 0x0000000000008000ULL +#define XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL +#define XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL +#define XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL +#define XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL +#define XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL +#define XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL +#define XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL +#define XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL +#define XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL +#define XRXMAC_STATUS_CVIOLERR_CNT_EXP 0x0000000000000020ULL +#define XRXMAC_STATUS_LENERR_CNT_EXP 0x0000000000000010ULL +#define XRXMAC_STATUS_CRCERR_CNT_EXP 0x0000000000000008ULL +#define XRXMAC_STATUS_RXUFLOW 0x0000000000000004ULL +#define XRXMAC_STATUS_RXOFLOW 0x0000000000000002ULL +#define XRXMAC_STATUS_FRAME_RCVD 0x0000000000000001ULL + +#define XMAC_FC_STAT 0x00030UL +#define XMAC_FC_STAT_RX_RCV_PAUSE_TIME 0x00000000ffff0000ULL +#define XMAC_FC_STAT_TX_MAC_NPAUSE 0x0000000000000004ULL +#define XMAC_FC_STAT_TX_MAC_PAUSE 0x0000000000000002ULL +#define XMAC_FC_STAT_RX_MAC_RPAUSE 0x0000000000000001ULL + +#define XTXMAC_STAT_MSK 0x00040UL +#define XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL +#define XTXMAC_STAT_MSK_BYTE_CNT_EXP 0x0000000000000400ULL +#define XTXMAC_STAT_MSK_TXFIFO_XFR_ERR 0x0000000000000010ULL +#define XTXMAC_STAT_MSK_TXMAC_OFLOW 0x0000000000000008ULL +#define XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL +#define XTXMAC_STAT_MSK_TXMAC_UFLOW 0x0000000000000002ULL +#define XTXMAC_STAT_MSK_FRAME_XMITED 0x0000000000000001ULL + +#define XRXMAC_STAT_MSK 0x00048UL +#define XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK 0x0000000000080000ULL +#define XRXMAC_STAT_MSK_RFLT_DET 0x0000000000040000ULL +#define XRXMAC_STAT_MSK_LFLT_CNT_EXP 0x0000000000020000ULL +#define XRXMAC_STAT_MSK_PHY_MDINT 0x0000000000010000ULL +#define XRXMAC_STAT_MSK_RXFRAG_CNT_EXP 0x0000000000008000ULL +#define XRXMAC_STAT_MSK_RXMULTF_CNT_EXP 0x0000000000004000ULL +#define XRXMAC_STAT_MSK_RXBCAST_CNT_EXP 0x0000000000002000ULL +#define XRXMAC_STAT_MSK_RXHIST6_CNT_EXP 0x0000000000001000ULL +#define XRXMAC_STAT_MSK_RXHIST5_CNT_EXP 0x0000000000000800ULL +#define XRXMAC_STAT_MSK_RXHIST4_CNT_EXP 0x0000000000000400ULL +#define XRXMAC_STAT_MSK_RXHIST3_CNT_EXP 0x0000000000000200ULL +#define XRXMAC_STAT_MSK_RXHIST2_CNT_EXP 0x0000000000000100ULL +#define XRXMAC_STAT_MSK_RXHIST1_CNT_EXP 0x0000000000000080ULL +#define XRXMAC_STAT_MSK_RXOCTET_CNT_EXP 0x0000000000000040ULL +#define XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP 0x0000000000000020ULL +#define XRXMAC_STAT_MSK_LENERR_CNT_EXP 0x0000000000000010ULL +#define XRXMAC_STAT_MSK_CRCERR_CNT_EXP 0x0000000000000008ULL +#define XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP 0x0000000000000004ULL +#define XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP 0x0000000000000002ULL +#define XRXMAC_STAT_MSK_FRAME_RCVD 0x0000000000000001ULL + +#define XMAC_FC_MSK 0x00050UL +#define XMAC_FC_MSK_TX_MAC_NPAUSE 0x0000000000000004ULL +#define XMAC_FC_MSK_TX_MAC_PAUSE 0x0000000000000002ULL +#define XMAC_FC_MSK_RX_MAC_RPAUSE 0x0000000000000001ULL + +#define XMAC_CONFIG 0x00060UL +#define XMAC_CONFIG_SEL_CLK_25MHZ 0x0000000080000000ULL +#define XMAC_CONFIG_1G_PCS_BYPASS 0x0000000040000000ULL +#define XMAC_CONFIG_10G_XPCS_BYPASS 0x0000000020000000ULL +#define XMAC_CONFIG_MODE_MASK 0x0000000018000000ULL +#define XMAC_CONFIG_MODE_XGMII 0x0000000000000000ULL +#define XMAC_CONFIG_MODE_GMII 0x0000000008000000ULL +#define XMAC_CONFIG_MODE_MII 
0x0000000010000000ULL +#define XMAC_CONFIG_LFS_DISABLE 0x0000000004000000ULL +#define XMAC_CONFIG_LOOPBACK 0x0000000002000000ULL +#define XMAC_CONFIG_TX_OUTPUT_EN 0x0000000001000000ULL +#define XMAC_CONFIG_SEL_POR_CLK_SRC 0x0000000000800000ULL +#define XMAC_CONFIG_LED_POLARITY 0x0000000000400000ULL +#define XMAC_CONFIG_FORCE_LED_ON 0x0000000000200000ULL +#define XMAC_CONFIG_PASS_FLOW_CTRL 0x0000000000100000ULL +#define XMAC_CONFIG_RCV_PAUSE_ENABLE 0x0000000000080000ULL +#define XMAC_CONFIG_MAC2IPP_PKT_CNT_EN 0x0000000000040000ULL +#define XMAC_CONFIG_STRIP_CRC 0x0000000000020000ULL +#define XMAC_CONFIG_ADDR_FILTER_EN 0x0000000000010000ULL +#define XMAC_CONFIG_HASH_FILTER_EN 0x0000000000008000ULL +#define XMAC_CONFIG_RX_CODEV_CHK_DIS 0x0000000000004000ULL +#define XMAC_CONFIG_RESERVED_MULTICAST 0x0000000000002000ULL +#define XMAC_CONFIG_RX_CRC_CHK_DIS 0x0000000000001000ULL +#define XMAC_CONFIG_ERR_CHK_DIS 0x0000000000000800ULL +#define XMAC_CONFIG_PROMISC_GROUP 0x0000000000000400ULL +#define XMAC_CONFIG_PROMISCUOUS 0x0000000000000200ULL +#define XMAC_CONFIG_RX_MAC_ENABLE 0x0000000000000100ULL +#define XMAC_CONFIG_WARNING_MSG_EN 0x0000000000000080ULL +#define XMAC_CONFIG_ALWAYS_NO_CRC 0x0000000000000008ULL +#define XMAC_CONFIG_VAR_MIN_IPG_EN 0x0000000000000004ULL +#define XMAC_CONFIG_STRETCH_MODE 0x0000000000000002ULL +#define XMAC_CONFIG_TX_ENABLE 0x0000000000000001ULL + +#define XMAC_IPG 0x00080UL +#define XMAC_IPG_STRETCH_CONST 0x0000000000e00000ULL +#define XMAC_IPG_STRETCH_CONST_SHIFT 21 +#define XMAC_IPG_STRETCH_RATIO 0x00000000001f0000ULL +#define XMAC_IPG_STRETCH_RATIO_SHIFT 16 +#define XMAC_IPG_IPG_MII_GMII 0x000000000000ff00ULL +#define XMAC_IPG_IPG_MII_GMII_SHIFT 8 +#define XMAC_IPG_IPG_XGMII 0x0000000000000007ULL +#define XMAC_IPG_IPG_XGMII_SHIFT 0 + +#define IPG_12_15_XGMII 3 +#define IPG_16_19_XGMII 4 +#define IPG_20_23_XGMII 5 +#define IPG_12_MII_GMII 10 +#define IPG_13_MII_GMII 11 +#define IPG_14_MII_GMII 12 +#define IPG_15_MII_GMII 13 +#define IPG_16_MII_GMII 14 + +#define XMAC_MIN 0x00088UL +#define XMAC_MIN_RX_MIN_PKT_SIZE 0x000000003ff00000ULL +#define XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20 +#define XMAC_MIN_SLOT_TIME 0x000000000003fc00ULL +#define XMAC_MIN_SLOT_TIME_SHFT 10 +#define XMAC_MIN_TX_MIN_PKT_SIZE 0x00000000000003ffULL +#define XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0 + +#define XMAC_MAX 0x00090UL +#define XMAC_MAX_FRAME_SIZE 0x0000000000003fffULL +#define XMAC_MAX_FRAME_SIZE_SHFT 0 + +#define XMAC_ADDR0 0x000a0UL +#define XMAC_ADDR0_ADDR0 0x000000000000ffffULL + +#define XMAC_ADDR1 0x000a8UL +#define XMAC_ADDR1_ADDR1 0x000000000000ffffULL + +#define XMAC_ADDR2 0x000b0UL +#define XMAC_ADDR2_ADDR2 0x000000000000ffffULL + +#define XMAC_ADDR_CMPEN 0x00208UL +#define XMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL +#define XMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL +#define XMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL +#define XMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL +#define XMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL +#define XMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL +#define XMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL +#define XMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL +#define XMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL +#define XMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL +#define XMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL +#define XMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL +#define XMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL +#define XMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL +#define XMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL +#define XMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL + +#define 
XMAC_NUM_ALT_ADDR 16 + +#define XMAC_ALT_ADDR0(NUM) (0x00218UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL + +#define XMAC_ALT_ADDR1(NUM) (0x00220UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL + +#define XMAC_ALT_ADDR2(NUM) (0x00228UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL + +#define XMAC_ADD_FILT0 0x00818UL +#define XMAC_ADD_FILT0_FILT0 0x000000000000ffffULL + +#define XMAC_ADD_FILT1 0x00820UL +#define XMAC_ADD_FILT1_FILT1 0x000000000000ffffULL + +#define XMAC_ADD_FILT2 0x00828UL +#define XMAC_ADD_FILT2_FILT2 0x000000000000ffffULL + +#define XMAC_ADD_FILT12_MASK 0x00830UL +#define XMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL + +#define XMAC_ADD_FILT00_MASK 0x00838UL +#define XMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL + +#define XMAC_HASH_TBL(NUM) (0x00840UL + (NUM) * 0x8UL) +#define XMAC_HASH_TBL_VAL 0x000000000000ffffULL + +#define XMAC_NUM_HOST_INFO 20 + +#define XMAC_HOST_INFO(NUM) (0x00900UL + (NUM) * 0x8UL) + +#define XMAC_PA_DATA0 0x00b80UL +#define XMAC_PA_DATA0_VAL 0x00000000ffffffffULL + +#define XMAC_PA_DATA1 0x00b88UL +#define XMAC_PA_DATA1_VAL 0x00000000ffffffffULL + +#define XMAC_DEBUG_SEL 0x00b90UL +#define XMAC_DEBUG_SEL_XMAC 0x0000000000000078ULL +#define XMAC_DEBUG_SEL_MAC 0x0000000000000007ULL + +#define XMAC_TRAIN_VEC 0x00b98UL +#define XMAC_TRAIN_VEC_VAL 0x00000000ffffffffULL + +#define RXMAC_BT_CNT 0x00100UL +#define RXMAC_BT_CNT_COUNT 0x00000000ffffffffULL + +#define RXMAC_BC_FRM_CNT 0x00108UL +#define RXMAC_BC_FRM_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_MC_FRM_CNT 0x00110UL +#define RXMAC_MC_FRM_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_FRAG_CNT 0x00118UL +#define RXMAC_FRAG_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT1 0x00120UL +#define RXMAC_HIST_CNT1_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT2 0x00128UL +#define RXMAC_HIST_CNT2_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT3 0x00130UL +#define RXMAC_HIST_CNT3_COUNT 0x00000000000fffffULL + +#define RXMAC_HIST_CNT4 0x00138UL +#define RXMAC_HIST_CNT4_COUNT 0x000000000007ffffULL + +#define RXMAC_HIST_CNT5 0x00140UL +#define RXMAC_HIST_CNT5_COUNT 0x000000000003ffffULL + +#define RXMAC_HIST_CNT6 0x00148UL +#define RXMAC_HIST_CNT6_COUNT 0x000000000000ffffULL + +#define RXMAC_MPSZER_CNT 0x00150UL +#define RXMAC_MPSZER_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_CRC_ER_CNT 0x00158UL +#define RXMAC_CRC_ER_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_CD_VIO_CNT 0x00160UL +#define RXMAC_CD_VIO_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_ALIGN_ERR_CNT 0x00168UL +#define RXMAC_ALIGN_ERR_CNT_COUNT 0x00000000000000ffULL + +#define TXMAC_FRM_CNT 0x00170UL +#define TXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL + +#define TXMAC_BYTE_CNT 0x00178UL +#define TXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define LINK_FAULT_CNT 0x00180UL +#define LINK_FAULT_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_HIST_CNT7 0x00188UL +#define RXMAC_HIST_CNT7_COUNT 0x0000000007ffffffULL + +#define XMAC_SM_REG 0x001a8UL +#define XMAC_SM_REG_STATE 0x00000000ffffffffULL + +#define XMAC_INTER1 0x001b0UL +#define XMAC_INTERN1_SIGNALS1 0x00000000ffffffffULL + +#define XMAC_INTER2 0x001b8UL +#define XMAC_INTERN2_SIGNALS2 0x00000000ffffffffULL + +/* BMAC registers, offset from np->mac_regs */ + +#define BTXMAC_SW_RST 0x00000UL +#define BTXMAC_SW_RST_RESET 0x0000000000000001ULL + +#define BRXMAC_SW_RST 0x00008UL +#define BRXMAC_SW_RST_RESET 0x0000000000000001ULL + +#define BMAC_SEND_PAUSE 0x00010UL +#define 
BMAC_SEND_PAUSE_SEND 0x0000000000010000ULL +#define BMAC_SEND_PAUSE_TIME 0x000000000000ffffULL + +#define BTXMAC_STATUS 0x00020UL +#define BTXMAC_STATUS_XMIT 0x0000000000000001ULL +#define BTXMAC_STATUS_UNDERRUN 0x0000000000000002ULL +#define BTXMAC_STATUS_MAX_PKT_ERR 0x0000000000000004ULL +#define BTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL +#define BTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL + +#define BRXMAC_STATUS 0x00028UL +#define BRXMAC_STATUS_RX_PKT 0x0000000000000001ULL +#define BRXMAC_STATUS_OVERFLOW 0x0000000000000002ULL +#define BRXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000004ULL +#define BRXMAC_STATUS_ALIGN_ERR_EXP 0x0000000000000008ULL +#define BRXMAC_STATUS_CRC_ERR_EXP 0x0000000000000010ULL +#define BRXMAC_STATUS_LEN_ERR_EXP 0x0000000000000020ULL + +#define BMAC_CTRL_STATUS 0x00030UL +#define BMAC_CTRL_STATUS_PAUSE_RECV 0x0000000000000001ULL +#define BMAC_CTRL_STATUS_PAUSE 0x0000000000000002ULL +#define BMAC_CTRL_STATUS_NOPAUSE 0x0000000000000004ULL +#define BMAC_CTRL_STATUS_TIME 0x00000000ffff0000ULL +#define BMAC_CTRL_STATUS_TIME_SHIFT 16 + +#define BTXMAC_STATUS_MASK 0x00040UL +#define BRXMAC_STATUS_MASK 0x00048UL +#define BMAC_CTRL_STATUS_MASK 0x00050UL + +#define BTXMAC_CONFIG 0x00060UL +#define BTXMAC_CONFIG_ENABLE 0x0000000000000001ULL +#define BTXMAC_CONFIG_FCS_DISABLE 0x0000000000000002ULL + +#define BRXMAC_CONFIG 0x00068UL +#define BRXMAC_CONFIG_DISCARD_DIS 0x0000000000000080ULL +#define BRXMAC_CONFIG_ADDR_FILT_EN 0x0000000000000040ULL +#define BRXMAC_CONFIG_HASH_FILT_EN 0x0000000000000020ULL +#define BRXMAC_CONFIG_PROMISC_GRP 0x0000000000000010ULL +#define BRXMAC_CONFIG_PROMISC 0x0000000000000008ULL +#define BRXMAC_CONFIG_STRIP_FCS 0x0000000000000004ULL +#define BRXMAC_CONFIG_STRIP_PAD 0x0000000000000002ULL +#define BRXMAC_CONFIG_ENABLE 0x0000000000000001ULL + +#define BMAC_CTRL_CONFIG 0x00070UL +#define BMAC_CTRL_CONFIG_TX_PAUSE_EN 0x0000000000000001ULL +#define BMAC_CTRL_CONFIG_RX_PAUSE_EN 0x0000000000000002ULL +#define BMAC_CTRL_CONFIG_PASS_CTRL 0x0000000000000004ULL + +#define BMAC_XIF_CONFIG 0x00078UL +#define BMAC_XIF_CONFIG_TX_OUTPUT_EN 0x0000000000000001ULL +#define BMAC_XIF_CONFIG_MII_LOOPBACK 0x0000000000000002ULL +#define BMAC_XIF_CONFIG_GMII_MODE 0x0000000000000008ULL +#define BMAC_XIF_CONFIG_LINK_LED 0x0000000000000020ULL +#define BMAC_XIF_CONFIG_LED_POLARITY 0x0000000000000040ULL +#define BMAC_XIF_CONFIG_25MHZ_CLOCK 0x0000000000000080ULL + +#define BMAC_MIN_FRAME 0x000a0UL +#define BMAC_MIN_FRAME_VAL 0x00000000000003ffULL + +#define BMAC_MAX_FRAME 0x000a8UL +#define BMAC_MAX_FRAME_MAX_BURST 0x000000003fff0000ULL +#define BMAC_MAX_FRAME_MAX_BURST_SHIFT 16 +#define BMAC_MAX_FRAME_MAX_FRAME 0x0000000000003fffULL +#define BMAC_MAX_FRAME_MAX_FRAME_SHIFT 0 + +#define BMAC_PREAMBLE_SIZE 0x000b0UL +#define BMAC_PREAMBLE_SIZE_VAL 0x00000000000003ffULL + +#define BMAC_CTRL_TYPE 0x000c8UL + +#define BMAC_ADDR0 0x00100UL +#define BMAC_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_ADDR1 0x00108UL +#define BMAC_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_ADDR2 0x00110UL +#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_NUM_ALT_ADDR 7 + +#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_ALT_ADDR1(NUM) (0x00120UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_ALT_ADDR2(NUM) (0x00128UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_FC_ADDR0 0x00268UL +#define BMAC_FC_ADDR0_ADDR0 
0x000000000000ffffULL + +#define BMAC_FC_ADDR1 0x00270UL +#define BMAC_FC_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_FC_ADDR2 0x00278UL +#define BMAC_FC_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_ADD_FILT0 0x00298UL +#define BMAC_ADD_FILT0_FILT0 0x000000000000ffffULL + +#define BMAC_ADD_FILT1 0x002a0UL +#define BMAC_ADD_FILT1_FILT1 0x000000000000ffffULL + +#define BMAC_ADD_FILT2 0x002a8UL +#define BMAC_ADD_FILT2_FILT2 0x000000000000ffffULL + +#define BMAC_ADD_FILT12_MASK 0x002b0UL +#define BMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL + +#define BMAC_ADD_FILT00_MASK 0x002b8UL +#define BMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL + +#define BMAC_HASH_TBL(NUM) (0x002c0UL + (NUM) * 0x8UL) +#define BMAC_HASH_TBL_VAL 0x000000000000ffffULL + +#define BRXMAC_FRAME_CNT 0x00370 +#define BRXMAC_FRAME_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_MAX_LEN_ERR_CNT 0x00378 + +#define BRXMAC_ALIGN_ERR_CNT 0x00380 +#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_CRC_ERR_CNT 0x00388 +#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_CODE_VIOL_ERR_CNT 0x00390 +#define BRXMAC_CODE_VIOL_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BMAC_STATE_MACHINE 0x003a0 + +#define BMAC_ADDR_CMPEN 0x003f8UL +#define BMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL +#define BMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL +#define BMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL +#define BMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL +#define BMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL +#define BMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL +#define BMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL +#define BMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL +#define BMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL +#define BMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL +#define BMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL +#define BMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL +#define BMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL +#define BMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL +#define BMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL +#define BMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL + +#define BMAC_NUM_HOST_INFO 9 + +#define BMAC_HOST_INFO(NUM) (0x00400UL + (NUM) * 0x8UL) + +#define BTXMAC_BYTE_CNT 0x00448UL +#define BTXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define BTXMAC_FRM_CNT 0x00450UL +#define BTXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL + +#define BRXMAC_BYTE_CNT 0x00458UL +#define BRXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define HOST_INFO_MPR 0x0000000000000100ULL +#define HOST_INFO_MACRDCTBLN 0x0000000000000007ULL + +/* XPCS registers, offset from np->regs + np->xpcs_off */ + +#define XPCS_CONTROL1 (FZC_MAC + 0x00000UL) +#define XPCS_CONTROL1_RESET 0x0000000000008000ULL +#define XPCS_CONTROL1_LOOPBACK 0x0000000000004000ULL +#define XPCS_CONTROL1_SPEED_SELECT3 0x0000000000002000ULL +#define XPCS_CONTROL1_CSR_LOW_PWR 0x0000000000000800ULL +#define XPCS_CONTROL1_CSR_SPEED1 0x0000000000000040ULL +#define XPCS_CONTROL1_CSR_SPEED0 0x000000000000003cULL + +#define XPCS_STATUS1 (FZC_MAC + 0x00008UL) +#define XPCS_STATUS1_CSR_FAULT 0x0000000000000080ULL +#define XPCS_STATUS1_CSR_RXLNK_STAT 0x0000000000000004ULL +#define XPCS_STATUS1_CSR_LPWR_ABLE 0x0000000000000002ULL + +#define XPCS_DEVICE_IDENTIFIER (FZC_MAC + 0x00010UL) +#define XPCS_DEVICE_IDENTIFIER_VAL 0x00000000ffffffffULL + +#define XPCS_SPEED_ABILITY (FZC_MAC + 0x00018UL) +#define XPCS_SPEED_ABILITY_10GIG 0x0000000000000001ULL + +#define XPCS_DEV_IN_PKG (FZC_MAC + 0x00020UL) +#define XPCS_DEV_IN_PKG_CSR_VEND2 0x0000000080000000ULL +#define 
XPCS_DEV_IN_PKG_CSR_VEND1 0x0000000040000000ULL +#define XPCS_DEV_IN_PKG_DTE_XS 0x0000000000000020ULL +#define XPCS_DEV_IN_PKG_PHY_XS 0x0000000000000010ULL +#define XPCS_DEV_IN_PKG_PCS 0x0000000000000008ULL +#define XPCS_DEV_IN_PKG_WIS 0x0000000000000004ULL +#define XPCS_DEV_IN_PKG_PMD_PMA 0x0000000000000002ULL +#define XPCS_DEV_IN_PKG_CLS22 0x0000000000000001ULL + +#define XPCS_CONTROL2 (FZC_MAC + 0x00028UL) +#define XPCS_CONTROL2_CSR_PSC_SEL 0x0000000000000003ULL + +#define XPCS_STATUS2 (FZC_MAC + 0x00030UL) +#define XPCS_STATUS2_CSR_DEV_PRES 0x000000000000c000ULL +#define XPCS_STATUS2_CSR_TX_FAULT 0x0000000000000800ULL +#define XPCS_STATUS2_CSR_RCV_FAULT 0x0000000000000400ULL +#define XPCS_STATUS2_TEN_GBASE_W 0x0000000000000004ULL +#define XPCS_STATUS2_TEN_GBASE_X 0x0000000000000002ULL +#define XPCS_STATUS2_TEN_GBASE_R 0x0000000000000001ULL + +#define XPCS_PKG_ID (FZC_MAC + 0x00038UL) +#define XPCS_PKG_ID_VAL 0x00000000ffffffffULL + +#define XPCS_STATUS(IDX) (FZC_MAC + 0x00040UL) +#define XPCS_STATUS_CSR_LANE_ALIGN 0x0000000000001000ULL +#define XPCS_STATUS_CSR_PATTEST_CAP 0x0000000000000800ULL +#define XPCS_STATUS_CSR_LANE3_SYNC 0x0000000000000008ULL +#define XPCS_STATUS_CSR_LANE2_SYNC 0x0000000000000004ULL +#define XPCS_STATUS_CSR_LANE1_SYNC 0x0000000000000002ULL +#define XPCS_STATUS_CSR_LANE0_SYNC 0x0000000000000001ULL + +#define XPCS_TEST_CONTROL (FZC_MAC + 0x00048UL) +#define XPCS_TEST_CONTROL_TXTST_EN 0x0000000000000004ULL +#define XPCS_TEST_CONTROL_TPAT_SEL 0x0000000000000003ULL + +#define XPCS_CFG_VENDOR1 (FZC_MAC + 0x00050UL) +#define XPCS_CFG_VENDOR1_DBG_IOTST 0x0000000000000080ULL +#define XPCS_CFG_VENDOR1_DBG_SEL 0x0000000000000078ULL +#define XPCS_CFG_VENDOR1_BYPASS_DET 0x0000000000000004ULL +#define XPCS_CFG_VENDOR1_TXBUF_EN 0x0000000000000002ULL +#define XPCS_CFG_VENDOR1_XPCS_EN 0x0000000000000001ULL + +#define XPCS_DIAG_VENDOR2 (FZC_MAC + 0x00058UL) +#define XPCS_DIAG_VENDOR2_SSM_LANE3 0x0000000001e00000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE2 0x00000000001e0000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE1 0x000000000001e000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE0 0x0000000000001e00ULL +#define XPCS_DIAG_VENDOR2_EBUF_SM 0x00000000000001feULL +#define XPCS_DIAG_VENDOR2_RCV_SM 0x0000000000000001ULL + +#define XPCS_MASK1 (FZC_MAC + 0x00060UL) +#define XPCS_MASK1_FAULT_MASK 0x0000000000000080ULL +#define XPCS_MASK1_RXALIGN_STAT_MSK 0x0000000000000004ULL + +#define XPCS_PKT_COUNT (FZC_MAC + 0x00068UL) +#define XPCS_PKT_COUNT_TX 0x00000000ffff0000ULL +#define XPCS_PKT_COUNT_RX 0x000000000000ffffULL + +#define XPCS_TX_SM (FZC_MAC + 0x00070UL) +#define XPCS_TX_SM_VAL 0x000000000000000fULL + +#define XPCS_DESKEW_ERR_CNT (FZC_MAC + 0x00078UL) +#define XPCS_DESKEW_ERR_CNT_VAL 0x00000000000000ffULL + +#define XPCS_SYMERR_CNT01 (FZC_MAC + 0x00080UL) +#define XPCS_SYMERR_CNT01_LANE1 0x00000000ffff0000ULL +#define XPCS_SYMERR_CNT01_LANE0 0x000000000000ffffULL + +#define XPCS_SYMERR_CNT23 (FZC_MAC + 0x00088UL) +#define XPCS_SYMERR_CNT23_LANE3 0x00000000ffff0000ULL +#define XPCS_SYMERR_CNT23_LANE2 0x000000000000ffffULL + +#define XPCS_TRAINING_VECTOR (FZC_MAC + 0x00090UL) +#define XPCS_TRAINING_VECTOR_VAL 0x00000000ffffffffULL + +/* PCS registers, offset from np->regs + np->pcs_off */ + +#define PCS_MII_CTL (FZC_MAC + 0x00000UL) +#define PCS_MII_CTL_RST 0x0000000000008000ULL +#define PCS_MII_CTL_10_100_SPEED 0x0000000000002000ULL +#define PCS_MII_AUTONEG_EN 0x0000000000001000ULL +#define PCS_MII_PWR_DOWN 0x0000000000000800ULL +#define PCS_MII_ISOLATE 0x0000000000000400ULL +#define 
PCS_MII_AUTONEG_RESTART 0x0000000000000200ULL +#define PCS_MII_DUPLEX 0x0000000000000100ULL +#define PCS_MII_COLL_TEST 0x0000000000000080ULL +#define PCS_MII_1000MB_SPEED 0x0000000000000040ULL + +#define PCS_MII_STAT (FZC_MAC + 0x00008UL) +#define PCS_MII_STAT_EXT_STATUS 0x0000000000000100ULL +#define PCS_MII_STAT_AUTONEG_DONE 0x0000000000000020ULL +#define PCS_MII_STAT_REMOTE_FAULT 0x0000000000000010ULL +#define PCS_MII_STAT_AUTONEG_ABLE 0x0000000000000008ULL +#define PCS_MII_STAT_LINK_STATUS 0x0000000000000004ULL +#define PCS_MII_STAT_JABBER_DET 0x0000000000000002ULL +#define PCS_MII_STAT_EXT_CAP 0x0000000000000001ULL + +#define PCS_MII_ADV (FZC_MAC + 0x00010UL) +#define PCS_MII_ADV_NEXT_PAGE 0x0000000000008000ULL +#define PCS_MII_ADV_ACK 0x0000000000004000ULL +#define PCS_MII_ADV_REMOTE_FAULT 0x0000000000003000ULL +#define PCS_MII_ADV_ASM_DIR 0x0000000000000100ULL +#define PCS_MII_ADV_PAUSE 0x0000000000000080ULL +#define PCS_MII_ADV_HALF_DUPLEX 0x0000000000000040ULL +#define PCS_MII_ADV_FULL_DUPLEX 0x0000000000000020ULL + +#define PCS_MII_PARTNER (FZC_MAC + 0x00018UL) +#define PCS_MII_PARTNER_NEXT_PAGE 0x0000000000008000ULL +#define PCS_MII_PARTNER_ACK 0x0000000000004000ULL +#define PCS_MII_PARTNER_REMOTE_FAULT 0x0000000000002000ULL +#define PCS_MII_PARTNER_PAUSE 0x0000000000000180ULL +#define PCS_MII_PARTNER_HALF_DUPLEX 0x0000000000000040ULL +#define PCS_MII_PARTNER_FULL_DUPLEX 0x0000000000000020ULL + +#define PCS_CONF (FZC_MAC + 0x00020UL) +#define PCS_CONF_MASK 0x0000000000000040ULL +#define PCS_CONF_10MS_TMR_OVERRIDE 0x0000000000000020ULL +#define PCS_CONF_JITTER_STUDY 0x0000000000000018ULL +#define PCS_CONF_SIGDET_ACTIVE_LOW 0x0000000000000004ULL +#define PCS_CONF_SIGDET_OVERRIDE 0x0000000000000002ULL +#define PCS_CONF_ENABLE 0x0000000000000001ULL + +#define PCS_STATE (FZC_MAC + 0x00028UL) +#define PCS_STATE_D_PARTNER_FAIL 0x0000000020000000ULL +#define PCS_STATE_D_WAIT_C_CODES_ACK 0x0000000010000000ULL +#define PCS_STATE_D_SYNC_LOSS 0x0000000008000000ULL +#define PCS_STATE_D_NO_GOOD_C_CODES 0x0000000004000000ULL +#define PCS_STATE_D_SERDES 0x0000000002000000ULL +#define PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL +#define PCS_STATE_L_SIGDET 0x0000000000400000ULL +#define PCS_STATE_L_SYNC_LOSS 0x0000000000200000ULL +#define PCS_STATE_L_C_CODES 0x0000000000100000ULL +#define PCS_STATE_LINK_CFG_STATE 0x000000000001e000ULL +#define PCS_STATE_SEQ_DET_STATE 0x0000000000001800ULL +#define PCS_STATE_WORD_SYNC_STATE 0x0000000000000700ULL +#define PCS_STATE_NO_IDLE 0x000000000000000fULL + +#define PCS_INTERRUPT (FZC_MAC + 0x00030UL) +#define PCS_INTERRUPT_LSTATUS 0x0000000000000004ULL + +#define PCS_DPATH_MODE (FZC_MAC + 0x000a0UL) +#define PCS_DPATH_MODE_PCS 0x0000000000000000ULL +#define PCS_DPATH_MODE_MII 0x0000000000000002ULL +#define PCS_DPATH_MODE_LINKUP_F_ENAB 0x0000000000000001ULL + +#define PCS_PKT_CNT (FZC_MAC + 0x000c0UL) +#define PCS_PKT_CNT_RX 0x0000000007ff0000ULL +#define PCS_PKT_CNT_TX 0x00000000000007ffULL + +#define MIF_BB_MDC (FZC_MAC + 0x16000UL) +#define MIF_BB_MDC_CLK 0x0000000000000001ULL + +#define MIF_BB_MDO (FZC_MAC + 0x16008UL) +#define MIF_BB_MDO_DAT 0x0000000000000001ULL + +#define MIF_BB_MDO_EN (FZC_MAC + 0x16010UL) +#define MIF_BB_MDO_EN_VAL 0x0000000000000001ULL + +#define MIF_FRAME_OUTPUT (FZC_MAC + 0x16018UL) +#define MIF_FRAME_OUTPUT_ST 0x00000000c0000000ULL +#define MIF_FRAME_OUTPUT_ST_SHIFT 30 +#define MIF_FRAME_OUTPUT_OP_ADDR 0x0000000000000000ULL +#define MIF_FRAME_OUTPUT_OP_WRITE 0x0000000010000000ULL +#define MIF_FRAME_OUTPUT_OP_READ_INC 
0x0000000020000000ULL +#define MIF_FRAME_OUTPUT_OP_READ 0x0000000030000000ULL +#define MIF_FRAME_OUTPUT_OP_SHIFT 28 +#define MIF_FRAME_OUTPUT_PORT 0x000000000f800000ULL +#define MIF_FRAME_OUTPUT_PORT_SHIFT 23 +#define MIF_FRAME_OUTPUT_REG 0x00000000007c0000ULL +#define MIF_FRAME_OUTPUT_REG_SHIFT 18 +#define MIF_FRAME_OUTPUT_TA 0x0000000000030000ULL +#define MIF_FRAME_OUTPUT_TA_SHIFT 16 +#define MIF_FRAME_OUTPUT_DATA 0x000000000000ffffULL +#define MIF_FRAME_OUTPUT_DATA_SHIFT 0 + +#define MDIO_ADDR_OP(port, dev, reg) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_ADDR | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MDIO_READ_OP(port, dev) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_READ | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) + +#define MDIO_WRITE_OP(port, dev, data) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_WRITE | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MII_READ_OP(port, reg) \ + ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) + +#define MII_WRITE_OP(port, reg, data) \ + ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MIF_CONFIG (FZC_MAC + 0x16020UL) +#define MIF_CONFIG_ATCA_GE 0x0000000000010000ULL +#define MIF_CONFIG_INDIRECT_MODE 0x0000000000008000ULL +#define MIF_CONFIG_POLL_PRT_PHYADDR 0x0000000000003c00ULL +#define MIF_CONFIG_POLL_DEV_REG_ADDR 0x00000000000003e0ULL +#define MIF_CONFIG_BB_MODE 0x0000000000000010ULL +#define MIF_CONFIG_POLL_EN 0x0000000000000008ULL +#define MIF_CONFIG_BB_SER_SEL 0x0000000000000006ULL +#define MIF_CONFIG_MANUAL_MODE 0x0000000000000001ULL + +#define MIF_POLL_STATUS (FZC_MAC + 0x16028UL) +#define MIF_POLL_STATUS_DATA 0x00000000ffff0000ULL +#define MIF_POLL_STATUS_STAT 0x000000000000ffffULL + +#define MIF_POLL_MASK (FZC_MAC + 0x16030UL) +#define MIF_POLL_MASK_VAL 0x000000000000ffffULL + +#define MIF_SM (FZC_MAC + 0x16038UL) +#define MIF_SM_PORT_ADDR 0x00000000001f0000ULL +#define MIF_SM_MDI_1 0x0000000000004000ULL +#define MIF_SM_MDI_0 0x0000000000002400ULL +#define MIF_SM_MDCLK 0x0000000000001000ULL +#define MIF_SM_MDO_EN 0x0000000000000800ULL +#define MIF_SM_MDO 0x0000000000000400ULL +#define MIF_SM_MDI 0x0000000000000200ULL +#define MIF_SM_CTL 0x00000000000001c0ULL +#define MIF_SM_EX 0x000000000000003fULL + +#define MIF_STATUS (FZC_MAC + 0x16040UL) +#define MIF_STATUS_MDINT1 0x0000000000000020ULL +#define MIF_STATUS_MDINT0 0x0000000000000010ULL + +#define MIF_MASK (FZC_MAC + 0x16048UL) +#define MIF_MASK_MDINT1 0x0000000000000020ULL +#define MIF_MASK_MDINT0 0x0000000000000010ULL +#define MIF_MASK_PEU_ERR 0x0000000000000008ULL +#define MIF_MASK_YC 0x0000000000000004ULL +#define MIF_MASK_XGE_ERR0 0x0000000000000002ULL +#define MIF_MASK_MIF_INIT_DONE 0x0000000000000001ULL + +#define ENET_SERDES_RESET (FZC_MAC + 0x14000UL) +#define ENET_SERDES_RESET_1 0x0000000000000002ULL +#define 
ENET_SERDES_RESET_0 0x0000000000000001ULL + +#define ENET_SERDES_CFG (FZC_MAC + 0x14008UL) +#define ENET_SERDES_BE_LOOPBACK 0x0000000000000002ULL +#define ENET_SERDES_CFG_FORCE_RDY 0x0000000000000001ULL + +#define ENET_SERDES_0_PLL_CFG (FZC_MAC + 0x14010UL) +#define ENET_SERDES_PLL_FBDIV0 0x0000000000000001ULL +#define ENET_SERDES_PLL_FBDIV1 0x0000000000000002ULL +#define ENET_SERDES_PLL_FBDIV2 0x0000000000000004ULL +#define ENET_SERDES_PLL_HRATE0 0x0000000000000008ULL +#define ENET_SERDES_PLL_HRATE1 0x0000000000000010ULL +#define ENET_SERDES_PLL_HRATE2 0x0000000000000020ULL +#define ENET_SERDES_PLL_HRATE3 0x0000000000000040ULL + +#define ENET_SERDES_0_CTRL_CFG (FZC_MAC + 0x14018UL) +#define ENET_SERDES_CTRL_SDET_0 0x0000000000000001ULL +#define ENET_SERDES_CTRL_SDET_1 0x0000000000000002ULL +#define ENET_SERDES_CTRL_SDET_2 0x0000000000000004ULL +#define ENET_SERDES_CTRL_SDET_3 0x0000000000000008ULL +#define ENET_SERDES_CTRL_EMPH_0 0x0000000000000070ULL +#define ENET_SERDES_CTRL_EMPH_0_SHIFT 4 +#define ENET_SERDES_CTRL_EMPH_1 0x0000000000000380ULL +#define ENET_SERDES_CTRL_EMPH_1_SHIFT 7 +#define ENET_SERDES_CTRL_EMPH_2 0x0000000000001c00ULL +#define ENET_SERDES_CTRL_EMPH_2_SHIFT 10 +#define ENET_SERDES_CTRL_EMPH_3 0x000000000000e000ULL +#define ENET_SERDES_CTRL_EMPH_3_SHIFT 13 +#define ENET_SERDES_CTRL_LADJ_0 0x0000000000070000ULL +#define ENET_SERDES_CTRL_LADJ_0_SHIFT 16 +#define ENET_SERDES_CTRL_LADJ_1 0x0000000000380000ULL +#define ENET_SERDES_CTRL_LADJ_1_SHIFT 19 +#define ENET_SERDES_CTRL_LADJ_2 0x0000000001c00000ULL +#define ENET_SERDES_CTRL_LADJ_2_SHIFT 22 +#define ENET_SERDES_CTRL_LADJ_3 0x000000000e000000ULL +#define ENET_SERDES_CTRL_LADJ_3_SHIFT 25 +#define ENET_SERDES_CTRL_RXITERM_0 0x0000000010000000ULL +#define ENET_SERDES_CTRL_RXITERM_1 0x0000000020000000ULL +#define ENET_SERDES_CTRL_RXITERM_2 0x0000000040000000ULL +#define ENET_SERDES_CTRL_RXITERM_3 0x0000000080000000ULL + +#define ENET_SERDES_0_TEST_CFG (FZC_MAC + 0x14020UL) +#define ENET_SERDES_TEST_MD_0 0x0000000000000003ULL +#define ENET_SERDES_TEST_MD_0_SHIFT 0 +#define ENET_SERDES_TEST_MD_1 0x000000000000000cULL +#define ENET_SERDES_TEST_MD_1_SHIFT 2 +#define ENET_SERDES_TEST_MD_2 0x0000000000000030ULL +#define ENET_SERDES_TEST_MD_2_SHIFT 4 +#define ENET_SERDES_TEST_MD_3 0x00000000000000c0ULL +#define ENET_SERDES_TEST_MD_3_SHIFT 6 + +#define ENET_TEST_MD_NO_LOOPBACK 0x0 +#define ENET_TEST_MD_EWRAP 0x1 +#define ENET_TEST_MD_PAD_LOOPBACK 0x2 +#define ENET_TEST_MD_REV_LOOPBACK 0x3 + +#define ENET_SERDES_1_PLL_CFG (FZC_MAC + 0x14028UL) +#define ENET_SERDES_1_CTRL_CFG (FZC_MAC + 0x14030UL) +#define ENET_SERDES_1_TEST_CFG (FZC_MAC + 0x14038UL) + +#define ENET_RGMII_CFG_REG (FZC_MAC + 0x14040UL) + +#define ESR_INT_SIGNALS (FZC_MAC + 0x14800UL) +#define ESR_INT_SIGNALS_ALL 0x00000000ffffffffULL +#define ESR_INT_SIGNALS_P0_BITS 0x0000000033e0000fULL +#define ESR_INT_SIGNALS_P1_BITS 0x000000000c1f00f0ULL +#define ESR_INT_SRDY0_P0 0x0000000020000000ULL +#define ESR_INT_DET0_P0 0x0000000010000000ULL +#define ESR_INT_SRDY0_P1 0x0000000008000000ULL +#define ESR_INT_DET0_P1 0x0000000004000000ULL +#define ESR_INT_XSRDY_P0 0x0000000002000000ULL +#define ESR_INT_XDP_P0_CH3 0x0000000001000000ULL +#define ESR_INT_XDP_P0_CH2 0x0000000000800000ULL +#define ESR_INT_XDP_P0_CH1 0x0000000000400000ULL +#define ESR_INT_XDP_P0_CH0 0x0000000000200000ULL +#define ESR_INT_XSRDY_P1 0x0000000000100000ULL +#define ESR_INT_XDP_P1_CH3 0x0000000000080000ULL +#define ESR_INT_XDP_P1_CH2 0x0000000000040000ULL +#define ESR_INT_XDP_P1_CH1 0x0000000000020000ULL 
+#define ESR_INT_XDP_P1_CH0 0x0000000000010000ULL +#define ESR_INT_SLOSS_P1_CH3 0x0000000000000080ULL +#define ESR_INT_SLOSS_P1_CH2 0x0000000000000040ULL +#define ESR_INT_SLOSS_P1_CH1 0x0000000000000020ULL +#define ESR_INT_SLOSS_P1_CH0 0x0000000000000010ULL +#define ESR_INT_SLOSS_P0_CH3 0x0000000000000008ULL +#define ESR_INT_SLOSS_P0_CH2 0x0000000000000004ULL +#define ESR_INT_SLOSS_P0_CH1 0x0000000000000002ULL +#define ESR_INT_SLOSS_P0_CH0 0x0000000000000001ULL + +#define ESR_DEBUG_SEL (FZC_MAC + 0x14808UL) +#define ESR_DEBUG_SEL_VAL 0x000000000000003fULL + +/* SerDes registers behind MIF */ +#define NIU_ESR_DEV_ADDR 0x1e +#define ESR_BASE 0x0000 + +#define ESR_RXTX_COMM_CTRL_L (ESR_BASE + 0x0000) +#define ESR_RXTX_COMM_CTRL_H (ESR_BASE + 0x0001) + +#define ESR_RXTX_RESET_CTRL_L (ESR_BASE + 0x0002) +#define ESR_RXTX_RESET_CTRL_H (ESR_BASE + 0x0003) + +#define ESR_RX_POWER_CTRL_L (ESR_BASE + 0x0004) +#define ESR_RX_POWER_CTRL_H (ESR_BASE + 0x0005) + +#define ESR_TX_POWER_CTRL_L (ESR_BASE + 0x0006) +#define ESR_TX_POWER_CTRL_H (ESR_BASE + 0x0007) + +#define ESR_MISC_POWER_CTRL_L (ESR_BASE + 0x0008) +#define ESR_MISC_POWER_CTRL_H (ESR_BASE + 0x0009) + +#define ESR_RXTX_CTRL_L(CHAN) (ESR_BASE + 0x0080 + (CHAN) * 0x10) +#define ESR_RXTX_CTRL_H(CHAN) (ESR_BASE + 0x0081 + (CHAN) * 0x10) +#define ESR_RXTX_CTRL_BIASCNTL 0x80000000 +#define ESR_RXTX_CTRL_RESV1 0x7c000000 +#define ESR_RXTX_CTRL_TDENFIFO 0x02000000 +#define ESR_RXTX_CTRL_TDWS20 0x01000000 +#define ESR_RXTX_CTRL_VMUXLO 0x00c00000 +#define ESR_RXTX_CTRL_VMUXLO_SHIFT 22 +#define ESR_RXTX_CTRL_VPULSELO 0x00300000 +#define ESR_RXTX_CTRL_VPULSELO_SHIFT 20 +#define ESR_RXTX_CTRL_RESV2 0x000f0000 +#define ESR_RXTX_CTRL_RESV3 0x0000c000 +#define ESR_RXTX_CTRL_RXPRESWIN 0x00003000 +#define ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12 +#define ESR_RXTX_CTRL_RESV4 0x00000800 +#define ESR_RXTX_CTRL_RISEFALL 0x00000700 +#define ESR_RXTX_CTRL_RISEFALL_SHIFT 8 +#define ESR_RXTX_CTRL_RESV5 0x000000fe +#define ESR_RXTX_CTRL_ENSTRETCH 0x00000001 + +#define ESR_RXTX_TUNING_L(CHAN) (ESR_BASE + 0x0082 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING_H(CHAN) (ESR_BASE + 0x0083 + (CHAN) * 0x10) + +#define ESR_RX_SYNCCHAR_L(CHAN) (ESR_BASE + 0x0084 + (CHAN) * 0x10) +#define ESR_RX_SYNCCHAR_H(CHAN) (ESR_BASE + 0x0085 + (CHAN) * 0x10) + +#define ESR_RXTX_TEST_L(CHAN) (ESR_BASE + 0x0086 + (CHAN) * 0x10) +#define ESR_RXTX_TEST_H(CHAN) (ESR_BASE + 0x0087 + (CHAN) * 0x10) + +#define ESR_GLUE_CTRL0_L(CHAN) (ESR_BASE + 0x0088 + (CHAN) * 0x10) +#define ESR_GLUE_CTRL0_H(CHAN) (ESR_BASE + 0x0089 + (CHAN) * 0x10) +#define ESR_GLUE_CTRL0_RESV1 0xf8000000 +#define ESR_GLUE_CTRL0_BLTIME 0x07000000 +#define ESR_GLUE_CTRL0_BLTIME_SHIFT 24 +#define ESR_GLUE_CTRL0_RESV2 0x00ff0000 +#define ESR_GLUE_CTRL0_RXLOS_TEST 0x00008000 +#define ESR_GLUE_CTRL0_RESV3 0x00004000 +#define ESR_GLUE_CTRL0_RXLOSENAB 0x00002000 +#define ESR_GLUE_CTRL0_FASTRESYNC 0x00001000 +#define ESR_GLUE_CTRL0_SRATE 0x00000f00 +#define ESR_GLUE_CTRL0_SRATE_SHIFT 8 +#define ESR_GLUE_CTRL0_THCNT 0x000000ff +#define ESR_GLUE_CTRL0_THCNT_SHIFT 0 + +#define BLTIME_64_CYCLES 0 +#define BLTIME_128_CYCLES 1 +#define BLTIME_256_CYCLES 2 +#define BLTIME_300_CYCLES 3 +#define BLTIME_384_CYCLES 4 +#define BLTIME_512_CYCLES 5 +#define BLTIME_1024_CYCLES 6 +#define BLTIME_2048_CYCLES 7 + +#define ESR_GLUE_CTRL1_L(CHAN) (ESR_BASE + 0x008a + (CHAN) * 0x10) +#define ESR_GLUE_CTRL1_H(CHAN) (ESR_BASE + 0x008b + (CHAN) * 0x10) +#define ESR_RXTX_TUNING1_L(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING1_H(CHAN) (ESR_BASE + 0x00c2 
+ (CHAN) * 0x10) +#define ESR_RXTX_TUNING2_L(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING2_H(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING3_L(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING3_H(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10) + +#define NIU_ESR2_DEV_ADDR 0x1e +#define ESR2_BASE 0x8000 + +#define ESR2_TI_PLL_CFG_L (ESR2_BASE + 0x000) +#define ESR2_TI_PLL_CFG_H (ESR2_BASE + 0x001) +#define PLL_CFG_STD 0x00000c00 +#define PLL_CFG_STD_SHIFT 10 +#define PLL_CFG_LD 0x00000300 +#define PLL_CFG_LD_SHIFT 8 +#define PLL_CFG_MPY 0x0000001e +#define PLL_CFG_MPY_SHIFT 1 +#define PLL_CFG_ENPLL 0x00000001 + +#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) +#define ESR2_TI_PLL_STS_H (ESR2_BASE + 0x003) +#define PLL_STS_LOCK 0x00000001 + +#define ESR2_TI_PLL_TEST_CFG_L (ESR2_BASE + 0x004) +#define ESR2_TI_PLL_TEST_CFG_H (ESR2_BASE + 0x005) +#define PLL_TEST_INVPATT 0x00004000 +#define PLL_TEST_RATE 0x00003000 +#define PLL_TEST_RATE_SHIFT 12 +#define PLL_TEST_CFG_ENBSAC 0x00000400 +#define PLL_TEST_CFG_ENBSRX 0x00000200 +#define PLL_TEST_CFG_ENBSTX 0x00000100 +#define PLL_TEST_CFG_LOOPBACK_PAD 0x00000040 +#define PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080 +#define PLL_TEST_CFG_LOOPBACK_CML_EN 0x000000c0 +#define PLL_TEST_CFG_CLKBYP 0x00000030 +#define PLL_TEST_CFG_CLKBYP_SHIFT 4 +#define PLL_TEST_CFG_EN_RXPATT 0x00000008 +#define PLL_TEST_CFG_EN_TXPATT 0x00000004 +#define PLL_TEST_CFG_TPATT 0x00000003 +#define PLL_TEST_CFG_TPATT_SHIFT 0 + +#define ESR2_TI_PLL_TX_CFG_L(CHAN) (ESR2_BASE + 0x100 + (CHAN) * 4) +#define ESR2_TI_PLL_TX_CFG_H(CHAN) (ESR2_BASE + 0x101 + (CHAN) * 4) +#define PLL_TX_CFG_RDTCT 0x00600000 +#define PLL_TX_CFG_RDTCT_SHIFT 21 +#define PLL_TX_CFG_ENIDL 0x00100000 +#define PLL_TX_CFG_BSTX 0x00020000 +#define PLL_TX_CFG_ENFTP 0x00010000 +#define PLL_TX_CFG_DE 0x0000f000 +#define PLL_TX_CFG_DE_SHIFT 12 +#define PLL_TX_CFG_SWING_125MV 0x00000000 +#define PLL_TX_CFG_SWING_250MV 0x00000200 +#define PLL_TX_CFG_SWING_500MV 0x00000400 +#define PLL_TX_CFG_SWING_625MV 0x00000600 +#define PLL_TX_CFG_SWING_750MV 0x00000800 +#define PLL_TX_CFG_SWING_1000MV 0x00000a00 +#define PLL_TX_CFG_SWING_1250MV 0x00000c00 +#define PLL_TX_CFG_SWING_1375MV 0x00000e00 +#define PLL_TX_CFG_CM 0x00000100 +#define PLL_TX_CFG_INVPAIR 0x00000080 +#define PLL_TX_CFG_RATE 0x00000060 +#define PLL_TX_CFG_RATE_SHIFT 5 +#define PLL_TX_CFG_BUSWIDTH 0x0000001c +#define PLL_TX_CFG_BUSWIDTH_SHIFT 2 +#define PLL_TX_CFG_ENTEST 0x00000002 +#define PLL_TX_CFG_ENTX 0x00000001 + +#define ESR2_TI_PLL_TX_STS_L(CHAN) (ESR2_BASE + 0x102 + (CHAN) * 4) +#define ESR2_TI_PLL_TX_STS_H(CHAN) (ESR2_BASE + 0x103 + (CHAN) * 4) +#define PLL_TX_STS_RDTCTIP 0x00000002 +#define PLL_TX_STS_TESTFAIL 0x00000001 + +#define ESR2_TI_PLL_RX_CFG_L(CHAN) (ESR2_BASE + 0x120 + (CHAN) * 4) +#define ESR2_TI_PLL_RX_CFG_H(CHAN) (ESR2_BASE + 0x121 + (CHAN) * 4) +#define PLL_RX_CFG_BSINRXN 0x02000000 +#define PLL_RX_CFG_BSINRXP 0x01000000 +#define PLL_RX_CFG_EQ_MAX_LF 0x00000000 +#define PLL_RX_CFG_EQ_LP_ADAPTIVE 0x00080000 +#define PLL_RX_CFG_EQ_LP_1084MHZ 0x00400000 +#define PLL_RX_CFG_EQ_LP_805MHZ 0x00480000 +#define PLL_RX_CFG_EQ_LP_573MHZ 0x00500000 +#define PLL_RX_CFG_EQ_LP_402MHZ 0x00580000 +#define PLL_RX_CFG_EQ_LP_304MHZ 0x00600000 +#define PLL_RX_CFG_EQ_LP_216MHZ 0x00680000 +#define PLL_RX_CFG_EQ_LP_156MHZ 0x00700000 +#define PLL_RX_CFG_EQ_LP_135MHZ 0x00780000 +#define PLL_RX_CFG_EQ_SHIFT 19 +#define PLL_RX_CFG_CDR 0x00070000 +#define PLL_RX_CFG_CDR_SHIFT 16 +#define PLL_RX_CFG_LOS_DIS 0x00000000 
+#define PLL_RX_CFG_LOS_HTHRESH 0x00004000 +#define PLL_RX_CFG_LOS_LTHRESH 0x00008000 +#define PLL_RX_CFG_ALIGN_DIS 0x00000000 +#define PLL_RX_CFG_ALIGN_ENA 0x00001000 +#define PLL_RX_CFG_ALIGN_JOG 0x00002000 +#define PLL_RX_CFG_TERM_VDDT 0x00000000 +#define PLL_RX_CFG_TERM_0P8VDDT 0x00000100 +#define PLL_RX_CFG_TERM_FLOAT 0x00000300 +#define PLL_RX_CFG_INVPAIR 0x00000080 +#define PLL_RX_CFG_RATE 0x00000060 +#define PLL_RX_CFG_RATE_SHIFT 5 +#define PLL_RX_CFG_BUSWIDTH 0x0000001c +#define PLL_RX_CFG_BUSWIDTH_SHIFT 2 +#define PLL_RX_CFG_ENTEST 0x00000002 +#define PLL_RX_CFG_ENRX 0x00000001 + +#define ESR2_TI_PLL_RX_STS_L(CHAN) (ESR2_BASE + 0x122 + (CHAN) * 4) +#define ESR2_TI_PLL_RX_STS_H(CHAN) (ESR2_BASE + 0x123 + (CHAN) * 4) +#define PLL_RX_STS_CRCIDTCT 0x00000200 +#define PLL_RX_STS_CWDTCT 0x00000100 +#define PLL_RX_STS_BSRXN 0x00000020 +#define PLL_RX_STS_BSRXP 0x00000010 +#define PLL_RX_STS_LOSDTCT 0x00000008 +#define PLL_RX_STS_ODDCG 0x00000004 +#define PLL_RX_STS_SYNC 0x00000002 +#define PLL_RX_STS_TESTFAIL 0x00000001 + +#define ENET_VLAN_TBL(IDX) (FZC_FFLP + 0x00000UL + (IDX) * 8UL) +#define ENET_VLAN_TBL_PARITY1 0x0000000000020000ULL +#define ENET_VLAN_TBL_PARITY0 0x0000000000010000ULL +#define ENET_VLAN_TBL_VPR 0x0000000000000008ULL +#define ENET_VLAN_TBL_VLANRDCTBLN 0x0000000000000007ULL +#define ENET_VLAN_TBL_SHIFT(PORT) ((PORT) * 4) + +#define ENET_VLAN_TBL_NUM_ENTRIES 4096 + +#define FFLP_VLAN_PAR_ERR (FZC_FFLP + 0x0800UL) +#define FFLP_VLAN_PAR_ERR_ERR 0x0000000080000000ULL +#define FFLP_VLAN_PAR_ERR_M_ERR 0x0000000040000000ULL +#define FFLP_VLAN_PAR_ERR_ADDR 0x000000003ffc0000ULL +#define FFLP_VLAN_PAR_ERR_DATA 0x000000000003ffffULL + +#define L2_CLS(IDX) (FZC_FFLP + 0x20000UL + (IDX) * 8UL) +#define L2_CLS_VLD 0x0000000000010000ULL +#define L2_CLS_ETYPE 0x000000000000ffffULL +#define L2_CLS_ETYPE_SHIFT 0 + +#define L3_CLS(IDX) (FZC_FFLP + 0x20010UL + (IDX) * 8UL) +#define L3_CLS_VALID 0x0000000002000000ULL +#define L3_CLS_IPVER 0x0000000001000000ULL +#define L3_CLS_PID 0x0000000000ff0000ULL +#define L3_CLS_PID_SHIFT 16 +#define L3_CLS_TOSMASK 0x000000000000ff00ULL +#define L3_CLS_TOSMASK_SHIFT 8 +#define L3_CLS_TOS 0x00000000000000ffULL +#define L3_CLS_TOS_SHIFT 0 + +#define TCAM_KEY(IDX) (FZC_FFLP + 0x20030UL + (IDX) * 8UL) +#define TCAM_KEY_DISC 0x0000000000000008ULL +#define TCAM_KEY_TSEL 0x0000000000000004ULL +#define TCAM_KEY_IPADDR 0x0000000000000001ULL + +#define TCAM_KEY_0 (FZC_FFLP + 0x20090UL) +#define TCAM_KEY_0_KEY 0x00000000000000ffULL /* bits 192-199 */ + +#define TCAM_KEY_1 (FZC_FFLP + 0x20098UL) +#define TCAM_KEY_1_KEY 0xffffffffffffffffULL /* bits 128-191 */ + +#define TCAM_KEY_2 (FZC_FFLP + 0x200a0UL) +#define TCAM_KEY_2_KEY 0xffffffffffffffffULL /* bits 64-127 */ + +#define TCAM_KEY_3 (FZC_FFLP + 0x200a8UL) +#define TCAM_KEY_3_KEY 0xffffffffffffffffULL /* bits 0-63 */ + +#define TCAM_KEY_MASK_0 (FZC_FFLP + 0x200b0UL) +#define TCAM_KEY_MASK_0_KEY_SEL 0x00000000000000ffULL /* bits 192-199 */ + +#define TCAM_KEY_MASK_1 (FZC_FFLP + 0x200b8UL) +#define TCAM_KEY_MASK_1_KEY_SEL 0xffffffffffffffffULL /* bits 128-191 */ + +#define TCAM_KEY_MASK_2 (FZC_FFLP + 0x200c0UL) +#define TCAM_KEY_MASK_2_KEY_SEL 0xffffffffffffffffULL /* bits 64-127 */ + +#define TCAM_KEY_MASK_3 (FZC_FFLP + 0x200c8UL) +#define TCAM_KEY_MASK_3_KEY_SEL 0xffffffffffffffffULL /* bits 0-63 */ + +#define TCAM_CTL (FZC_FFLP + 0x200d0UL) +#define TCAM_CTL_RWC 0x00000000001c0000ULL +#define TCAM_CTL_RWC_TCAM_WRITE 0x0000000000000000ULL +#define TCAM_CTL_RWC_TCAM_READ 0x0000000000040000ULL +#define 
TCAM_CTL_RWC_TCAM_COMPARE 0x0000000000080000ULL +#define TCAM_CTL_RWC_RAM_WRITE 0x0000000000100000ULL +#define TCAM_CTL_RWC_RAM_READ 0x0000000000140000ULL +#define TCAM_CTL_STAT 0x0000000000020000ULL +#define TCAM_CTL_MATCH 0x0000000000010000ULL +#define TCAM_CTL_LOC 0x00000000000003ffULL + +#define TCAM_ERR (FZC_FFLP + 0x200d8UL) +#define TCAM_ERR_ERR 0x0000000080000000ULL +#define TCAM_ERR_P_ECC 0x0000000040000000ULL +#define TCAM_ERR_MULT 0x0000000020000000ULL +#define TCAM_ERR_ADDR 0x0000000000ff0000ULL +#define TCAM_ERR_SYNDROME 0x000000000000ffffULL + +#define HASH_LOOKUP_ERR_LOG1 (FZC_FFLP + 0x200e0UL) +#define HASH_LOOKUP_ERR_LOG1_ERR 0x0000000000000008ULL +#define HASH_LOOKUP_ERR_LOG1_MULT_LK 0x0000000000000004ULL +#define HASH_LOOKUP_ERR_LOG1_CU 0x0000000000000002ULL +#define HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL + +#define HASH_LOOKUP_ERR_LOG2 (FZC_FFLP + 0x200e8UL) +#define HASH_LOOKUP_ERR_LOG2_H1 0x000000007ffff800ULL +#define HASH_LOOKUP_ERR_LOG2_SUBAREA 0x0000000000000700ULL +#define HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL + +#define FFLP_CFG_1 (FZC_FFLP + 0x20100UL) +#define FFLP_CFG_1_TCAM_DIS 0x0000000004000000ULL +#define FFLP_CFG_1_PIO_DBG_SEL 0x0000000003800000ULL +#define FFLP_CFG_1_PIO_FIO_RST 0x0000000000400000ULL +#define FFLP_CFG_1_PIO_FIO_LAT 0x0000000000300000ULL +#define FFLP_CFG_1_CAMLAT 0x00000000000f0000ULL +#define FFLP_CFG_1_CAMLAT_SHIFT 16 +#define FFLP_CFG_1_CAMRATIO 0x000000000000f000ULL +#define FFLP_CFG_1_CAMRATIO_SHIFT 12 +#define FFLP_CFG_1_FCRAMRATIO 0x0000000000000f00ULL +#define FFLP_CFG_1_FCRAMRATIO_SHIFT 8 +#define FFLP_CFG_1_FCRAMOUTDR_MASK 0x00000000000000f0ULL +#define FFLP_CFG_1_FCRAMOUTDR_NORMAL 0x0000000000000000ULL +#define FFLP_CFG_1_FCRAMOUTDR_STRONG 0x0000000000000050ULL +#define FFLP_CFG_1_FCRAMOUTDR_WEAK 0x00000000000000a0ULL +#define FFLP_CFG_1_FCRAMQS 0x0000000000000008ULL +#define FFLP_CFG_1_ERRORDIS 0x0000000000000004ULL +#define FFLP_CFG_1_FFLPINITDONE 0x0000000000000002ULL +#define FFLP_CFG_1_LLCSNAP 0x0000000000000001ULL + +#define DEFAULT_FCRAMRATIO 10 + +#define DEFAULT_TCAM_LATENCY 4 +#define DEFAULT_TCAM_ACCESS_RATIO 10 + +#define TCP_CFLAG_MSK (FZC_FFLP + 0x20108UL) +#define TCP_CFLAG_MSK_MASK 0x0000000000000fffULL + +#define FCRAM_REF_TMR (FZC_FFLP + 0x20110UL) +#define FCRAM_REF_TMR_MAX 0x00000000ffff0000ULL +#define FCRAM_REF_TMR_MAX_SHIFT 16 +#define FCRAM_REF_TMR_MIN 0x000000000000ffffULL +#define FCRAM_REF_TMR_MIN_SHIFT 0 + +#define DEFAULT_FCRAM_REFRESH_MAX 512 +#define DEFAULT_FCRAM_REFRESH_MIN 512 + +#define FCRAM_FIO_ADDR (FZC_FFLP + 0x20118UL) +#define FCRAM_FIO_ADDR_ADDR 0x00000000000000ffULL + +#define FCRAM_FIO_DAT (FZC_FFLP + 0x20120UL) +#define FCRAM_FIO_DAT_DATA 0x000000000000ffffULL + +#define FCRAM_ERR_TST0 (FZC_FFLP + 0x20128UL) +#define FCRAM_ERR_TST0_SYND 0x00000000000000ffULL + +#define FCRAM_ERR_TST1 (FZC_FFLP + 0x20130UL) +#define FCRAM_ERR_TST1_DAT 0x00000000ffffffffULL + +#define FCRAM_ERR_TST2 (FZC_FFLP + 0x20138UL) +#define FCRAM_ERR_TST2_DAT 0x00000000ffffffffULL + +#define FFLP_ERR_MASK (FZC_FFLP + 0x20140UL) +#define FFLP_ERR_MASK_HSH_TBL_DAT 0x00000000000007f8ULL +#define FFLP_ERR_MASK_HSH_TBL_LKUP 0x0000000000000004ULL +#define FFLP_ERR_MASK_TCAM 0x0000000000000002ULL +#define FFLP_ERR_MASK_VLAN 0x0000000000000001ULL + +#define FFLP_DBG_TRAIN_VCT (FZC_FFLP + 0x20148UL) +#define FFLP_DBG_TRAIN_VCT_VECTOR 0x00000000ffffffffULL + +#define FCRAM_PHY_RD_LAT (FZC_FFLP + 0x20150UL) +#define FCRAM_PHY_RD_LAT_LAT 0x00000000000000ffULL + +/* Ethernet TCAM format */ 
+#define TCAM_ETHKEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_ETHKEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_ETHKEY0_CLASS_CODE_SHIFT 3 +#define TCAM_ETHKEY0_RESV2 0x0000000000000007ULL +#define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM) (0xff << ((7 - NUM) * 8)) +#define TCAM_ETHKEY2_FRAME_BYTE8 0xff00000000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56 +#define TCAM_ETHKEY2_FRAME_BYTE9 0x00ff000000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48 +#define TCAM_ETHKEY2_FRAME_BYTE10 0x0000ff0000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT 40 +#define TCAM_ETHKEY2_FRAME_RESV 0x000000ffffffffffULL +#define TCAM_ETHKEY3_FRAME_RESV 0xffffffffffffffffULL + +/* IPV4 TCAM format */ +#define TCAM_V4KEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_V4KEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_V4KEY0_CLASS_CODE_SHIFT 3 +#define TCAM_V4KEY0_RESV2 0x0000000000000007ULL +#define TCAM_V4KEY1_L2RDCNUM 0xf800000000000000ULL +#define TCAM_V4KEY1_L2RDCNUM_SHIFT 59 +#define TCAM_V4KEY1_NOPORT 0x0400000000000000ULL +#define TCAM_V4KEY1_RESV 0x03ffffffffffffffULL +#define TCAM_V4KEY2_RESV 0xffff000000000000ULL +#define TCAM_V4KEY2_TOS 0x0000ff0000000000ULL +#define TCAM_V4KEY2_TOS_SHIFT 40 +#define TCAM_V4KEY2_PROTO 0x000000ff00000000ULL +#define TCAM_V4KEY2_PROTO_SHIFT 32 +#define TCAM_V4KEY2_PORT_SPI 0x00000000ffffffffULL +#define TCAM_V4KEY2_PORT_SPI_SHIFT 0 +#define TCAM_V4KEY3_SADDR 0xffffffff00000000ULL +#define TCAM_V4KEY3_SADDR_SHIFT 32 +#define TCAM_V4KEY3_DADDR 0x00000000ffffffffULL +#define TCAM_V4KEY3_DADDR_SHIFT 0 + +/* IPV6 TCAM format */ +#define TCAM_V6KEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_V6KEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_V6KEY0_CLASS_CODE_SHIFT 3 +#define TCAM_V6KEY0_RESV2 0x0000000000000007ULL +#define TCAM_V6KEY1_L2RDCNUM 0xf800000000000000ULL +#define TCAM_V6KEY1_L2RDCNUM_SHIFT 59 +#define TCAM_V6KEY1_NOPORT 0x0400000000000000ULL +#define TCAM_V6KEY1_RESV 0x03ff000000000000ULL +#define TCAM_V6KEY1_TOS 0x0000ff0000000000ULL +#define TCAM_V6KEY1_TOS_SHIFT 40 +#define TCAM_V6KEY1_NEXT_HDR 0x000000ff00000000ULL +#define TCAM_V6KEY1_NEXT_HDR_SHIFT 32 +#define TCAM_V6KEY1_PORT_SPI 0x00000000ffffffffULL +#define TCAM_V6KEY1_PORT_SPI_SHIFT 0 +#define TCAM_V6KEY2_ADDR_HIGH 0xffffffffffffffffULL +#define TCAM_V6KEY3_ADDR_LOW 0xffffffffffffffffULL + +#define TCAM_ASSOCDATA_SYNDROME 0x000003fffc000000ULL +#define TCAM_ASSOCDATA_SYNDROME_SHIFT 26 +#define TCAM_ASSOCDATA_ZFID 0x0000000003ffc000ULL +#define TCAM_ASSOCDATA_ZFID_SHIFT 14 +#define TCAM_ASSOCDATA_V4_ECC_OK 0x0000000000002000ULL +#define TCAM_ASSOCDATA_DISC 0x0000000000001000ULL +#define TCAM_ASSOCDATA_TRES_MASK 0x0000000000000c00ULL +#define TCAM_ASSOCDATA_TRES_USE_L2RDC 0x0000000000000000ULL +#define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL +#define TCAM_ASSOCDATA_TRES_OVR_RDC 0x0000000000000800ULL +#define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF 0x0000000000000c00ULL +#define TCAM_ASSOCDATA_RDCTBL 0x0000000000000380ULL +#define TCAM_ASSOCDATA_RDCTBL_SHIFT 7 +#define TCAM_ASSOCDATA_OFFSET 0x000000000000007cULL +#define TCAM_ASSOCDATA_OFFSET_SHIFT 2 +#define TCAM_ASSOCDATA_ZFVLD 0x0000000000000002ULL +#define TCAM_ASSOCDATA_AGE 0x0000000000000001ULL + +#define FLOW_KEY(IDX) (FZC_FFLP + 0x40000UL + (IDX) * 8UL) +#define FLOW_KEY_PORT 0x0000000000000200ULL +#define FLOW_KEY_L2DA 0x0000000000000100ULL +#define FLOW_KEY_VLAN 0x0000000000000080ULL +#define FLOW_KEY_IPSA 0x0000000000000040ULL +#define FLOW_KEY_IPDA 0x0000000000000020ULL +#define FLOW_KEY_PROTO 0x0000000000000010ULL +#define 
FLOW_KEY_L4_0 0x000000000000000cULL +#define FLOW_KEY_L4_0_SHIFT 2 +#define FLOW_KEY_L4_1 0x0000000000000003ULL +#define FLOW_KEY_L4_1_SHIFT 0 + +#define FLOW_KEY_L4_NONE 0x0 +#define FLOW_KEY_L4_RESV 0x1 +#define FLOW_KEY_L4_BYTE12 0x2 +#define FLOW_KEY_L4_BYTE56 0x3 + +#define H1POLY (FZC_FFLP + 0x40060UL) +#define H1POLY_INITVAL 0x00000000ffffffffULL + +#define H2POLY (FZC_FFLP + 0x40068UL) +#define H2POLY_INITVAL 0x000000000000ffffULL + +#define FLW_PRT_SEL(IDX) (FZC_FFLP + 0x40070UL + (IDX) * 8UL) +#define FLW_PRT_SEL_EXT 0x0000000000010000ULL +#define FLW_PRT_SEL_MASK 0x0000000000001f00ULL +#define FLW_PRT_SEL_MASK_SHIFT 8 +#define FLW_PRT_SEL_BASE 0x000000000000001fULL +#define FLW_PRT_SEL_BASE_SHIFT 0 + +#define HASH_TBL_ADDR(IDX) (FFLP + 0x00000UL + (IDX) * 8192UL) +#define HASH_TBL_ADDR_AUTOINC 0x0000000000800000ULL +#define HASH_TBL_ADDR_ADDR 0x00000000007fffffULL + +#define HASH_TBL_DATA(IDX) (FFLP + 0x00008UL + (IDX) * 8192UL) +#define HASH_TBL_DATA_DATA 0xffffffffffffffffULL + +/* FCRAM hash table entries are up to 8 64-bit words in size. + * The layout of each entry is determined by the settings in the + * first word, which is the header. + * + * The indexing is controllable per partition (there is one partition + * per RDC group, thus a total of eight) using the BASE and MASK fields + * of FLW_PRT_SEL above. + */ +#define FCRAM_SIZE 0x800000 +#define FCRAM_NUM_PARTITIONS 8 + +/* Generic HASH entry header, used for all non-optimized formats. */ +#define HASH_HEADER_FMT 0x8000000000000000ULL +#define HASH_HEADER_EXT 0x4000000000000000ULL +#define HASH_HEADER_VALID 0x2000000000000000ULL +#define HASH_HEADER_RESVD 0x1000000000000000ULL +#define HASH_HEADER_L2_DADDR 0x0ffffffffffff000ULL +#define HASH_HEADER_L2_DADDR_SHIFT 12 +#define HASH_HEADER_VLAN 0x0000000000000fffULL +#define HASH_HEADER_VLAN_SHIFT 0 + +/* Optimized format, just a header with a special layout defined below. + * Set FMT and EXT both to zero to indicate this layout is being used. + */ +#define HASH_OPT_HEADER_FMT 0x8000000000000000ULL +#define HASH_OPT_HEADER_EXT 0x4000000000000000ULL +#define HASH_OPT_HEADER_VALID 0x2000000000000000ULL +#define HASH_OPT_HEADER_RDCOFF 0x1f00000000000000ULL +#define HASH_OPT_HEADER_RDCOFF_SHIFT 56 +#define HASH_OPT_HEADER_HASH2 0x00ffff0000000000ULL +#define HASH_OPT_HEADER_HASH2_SHIFT 40 +#define HASH_OPT_HEADER_RESVD 0x000000ff00000000ULL +#define HASH_OPT_HEADER_USERINFO 0x00000000ffffffffULL +#define HASH_OPT_HEADER_USERINFO_SHIFT 0 + +/* Port and protocol word used for ipv4 and ipv6 layouts. */ +#define HASH_PORT_DPORT 0xffff000000000000ULL +#define HASH_PORT_DPORT_SHIFT 48 +#define HASH_PORT_SPORT 0x0000ffff00000000ULL +#define HASH_PORT_SPORT_SHIFT 32 +#define HASH_PORT_PROTO 0x00000000ff000000ULL +#define HASH_PORT_PROTO_SHIFT 24 +#define HASH_PORT_PORT_OFF 0x0000000000c00000ULL +#define HASH_PORT_PORT_OFF_SHIFT 22 +#define HASH_PORT_PORT_RESV 0x00000000003fffffULL + +/* Action word used for ipv4 and ipv6 layouts. */ +#define HASH_ACTION_RESV1 0xe000000000000000ULL +#define HASH_ACTION_RDCOFF 0x1f00000000000000ULL +#define HASH_ACTION_RDCOFF_SHIFT 56 +#define HASH_ACTION_ZFVALID 0x0080000000000000ULL +#define HASH_ACTION_RESV2 0x0070000000000000ULL +#define HASH_ACTION_ZFID 0x000fff0000000000ULL +#define HASH_ACTION_ZFID_SHIFT 40 +#define HASH_ACTION_RESV3 0x000000ff00000000ULL +#define HASH_ACTION_USERINFO 0x00000000ffffffffULL +#define HASH_ACTION_USERINFO_SHIFT 0 + +/* IPV4 address word. Addresses are in network endian. 
*/ +#define HASH_IP4ADDR_SADDR 0xffffffff00000000ULL +#define HASH_IP4ADDR_SADDR_SHIFT 32 +#define HASH_IP4ADDR_DADDR 0x00000000ffffffffULL +#define HASH_IP4ADDR_DADDR_SHIFT 0 + +/* IPV6 address layout is 4 words, first two are saddr, next two + * are daddr. Addresses are in network endian. + */ + +struct fcram_hash_opt { + u64 header; +}; + +/* EXT=1, FMT=0 */ +struct fcram_hash_ipv4 { + u64 header; + u64 addrs; + u64 ports; + u64 action; +}; + +/* EXT=1, FMT=1 */ +struct fcram_hash_ipv6 { + u64 header; + u64 addrs[4]; + u64 ports; + u64 action; +}; + +#define HASH_TBL_DATA_LOG(IDX) (FFLP + 0x00010UL + (IDX) * 8192UL) +#define HASH_TBL_DATA_LOG_ERR 0x0000000080000000ULL +#define HASH_TBL_DATA_LOG_ADDR 0x000000007fffff00ULL +#define HASH_TBL_DATA_LOG_SYNDROME 0x00000000000000ffULL + +#define RX_DMA_CK_DIV (FZC_DMC + 0x00000UL) +#define RX_DMA_CK_DIV_CNT 0x000000000000ffffULL + +#define DEF_RDC(IDX) (FZC_DMC + 0x00008UL + (IDX) * 0x8UL) +#define DEF_RDC_VAL 0x000000000000001fULL + +#define PT_DRR_WT(IDX) (FZC_DMC + 0x00028UL + (IDX) * 0x8UL) +#define PT_DRR_WT_VAL 0x000000000000ffffULL + +#define PT_DRR_WEIGHT_DEFAULT_10G 0x0400 +#define PT_DRR_WEIGHT_DEFAULT_1G 0x0066 + +#define PT_USE(IDX) (FZC_DMC + 0x00048UL + (IDX) * 0x8UL) +#define PT_USE_CNT 0x00000000000fffffULL + +#define RED_RAN_INIT (FZC_DMC + 0x00068UL) +#define RED_RAN_INIT_OPMODE 0x0000000000010000ULL +#define RED_RAN_INIT_VAL 0x000000000000ffffULL + +#define RX_ADDR_MD (FZC_DMC + 0x00070UL) +#define RX_ADDR_MD_DBG_PT_MUX_SEL 0x000000000000000cULL +#define RX_ADDR_MD_RAM_ACC 0x0000000000000002ULL +#define RX_ADDR_MD_MODE32 0x0000000000000001ULL + +#define RDMC_PRE_PAR_ERR (FZC_DMC + 0x00078UL) +#define RDMC_PRE_PAR_ERR_ERR 0x0000000000008000ULL +#define RDMC_PRE_PAR_ERR_MERR 0x0000000000004000ULL +#define RDMC_PRE_PAR_ERR_ADDR 0x00000000000000ffULL + +#define RDMC_SHA_PAR_ERR (FZC_DMC + 0x00080UL) +#define RDMC_SHA_PAR_ERR_ERR 0x0000000000008000ULL +#define RDMC_SHA_PAR_ERR_MERR 0x0000000000004000ULL +#define RDMC_SHA_PAR_ERR_ADDR 0x00000000000000ffULL + +#define RDMC_MEM_ADDR (FZC_DMC + 0x00088UL) +#define RDMC_MEM_ADDR_PRE_SHAD 0x0000000000000100ULL +#define RDMC_MEM_ADDR_ADDR 0x00000000000000ffULL + +#define RDMC_MEM_DAT0 (FZC_DMC + 0x00090UL) +#define RDMC_MEM_DAT0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define RDMC_MEM_DAT1 (FZC_DMC + 0x00098UL) +#define RDMC_MEM_DAT1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define RDMC_MEM_DAT2 (FZC_DMC + 0x000a0UL) +#define RDMC_MEM_DAT2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define RDMC_MEM_DAT3 (FZC_DMC + 0x000a8UL) +#define RDMC_MEM_DAT3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define RDMC_MEM_DAT4 (FZC_DMC + 0x000b0UL) +#define RDMC_MEM_DAT4_DATA 0x00000000000fffffULL /* bits 147:128 */ + +#define RX_CTL_DAT_FIFO_STAT (FZC_DMC + 0x000b8UL) +#define RX_CTL_DAT_FIFO_STAT_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR 0x000000000000000fULL + +#define RX_CTL_DAT_FIFO_MASK (FZC_DMC + 0x000c0UL) +#define RX_CTL_DAT_FIFO_MASK_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR 0x000000000000000fULL + +#define RDMC_TRAINING_VECTOR (FZC_DMC + 0x000c8UL) +#define RDMC_TRAINING_VECTOR_TRAINING_VECTOR 0x00000000ffffffffULL + +#define RX_CTL_DAT_FIFO_STAT_DBG (FZC_DMC + 0x000d0UL) +#define RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH 0x0000000000000100ULL +#define 
RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR 0x000000000000000fULL + +#define RDC_TBL(TBL,SLOT) (FZC_ZCP + 0x10000UL + \ + (TBL) * (8UL * 16UL) + \ + (SLOT) * 8UL) +#define RDC_TBL_RDC 0x000000000000000fULL + +#define RX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x20000UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL +#define RX_LOG_PAGE_VLD_FUNC_SHIFT 2 +#define RX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL +#define RX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL + +#define RX_LOG_MASK1(IDX) (FZC_DMC + 0x20008UL + (IDX) * 0x40UL) +#define RX_LOG_MASK1_MASK 0x00000000ffffffffULL + +#define RX_LOG_VAL1(IDX) (FZC_DMC + 0x20010UL + (IDX) * 0x40UL) +#define RX_LOG_VAL1_VALUE 0x00000000ffffffffULL + +#define RX_LOG_MASK2(IDX) (FZC_DMC + 0x20018UL + (IDX) * 0x40UL) +#define RX_LOG_MASK2_MASK 0x00000000ffffffffULL + +#define RX_LOG_VAL2(IDX) (FZC_DMC + 0x20020UL + (IDX) * 0x40UL) +#define RX_LOG_VAL2_VALUE 0x00000000ffffffffULL + +#define RX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x20028UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL + +#define RX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x20030UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL + +#define RX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x20038UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL + +#define TX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x40000UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL +#define TX_LOG_PAGE_VLD_FUNC_SHIFT 2 +#define TX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL +#define TX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL + +#define TX_LOG_MASK1(IDX) (FZC_DMC + 0x40008UL + (IDX) * 0x200UL) +#define TX_LOG_MASK1_MASK 0x00000000ffffffffULL + +#define TX_LOG_VAL1(IDX) (FZC_DMC + 0x40010UL + (IDX) * 0x200UL) +#define TX_LOG_VAL1_VALUE 0x00000000ffffffffULL + +#define TX_LOG_MASK2(IDX) (FZC_DMC + 0x40018UL + (IDX) * 0x200UL) +#define TX_LOG_MASK2_MASK 0x00000000ffffffffULL + +#define TX_LOG_VAL2(IDX) (FZC_DMC + 0x40020UL + (IDX) * 0x200UL) +#define TX_LOG_VAL2_VALUE 0x00000000ffffffffULL + +#define TX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x40028UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL + +#define TX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x40030UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL + +#define TX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x40038UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL + +#define TX_ADDR_MD (FZC_DMC + 0x45000UL) +#define TX_ADDR_MD_MODE32 0x0000000000000001ULL + +#define RDC_RED_PARA(IDX) (FZC_DMC + 0x30000UL + (IDX) * 0x40UL) +#define RDC_RED_PARA_THRE_SYN 0x00000000fff00000ULL +#define RDC_RED_PARA_THRE_SYN_SHIFT 20 +#define RDC_RED_PARA_WIN_SYN 0x00000000000f0000ULL +#define RDC_RED_PARA_WIN_SYN_SHIFT 16 +#define RDC_RED_PARA_THRE 0x000000000000fff0ULL +#define RDC_RED_PARA_THRE_SHIFT 4 +#define RDC_RED_PARA_WIN 0x000000000000000fULL +#define RDC_RED_PARA_WIN_SHIFT 0 + +#define RED_DIS_CNT(IDX) (FZC_DMC + 0x30008UL + (IDX) * 0x40UL) +#define RED_DIS_CNT_OFLOW 0x0000000000010000ULL +#define RED_DIS_CNT_COUNT 0x000000000000ffffULL + +#define IPP_CFIG (FZC_IPP + 0x00000UL) +#define IPP_CFIG_SOFT_RST 0x0000000080000000ULL +#define IPP_CFIG_IP_MAX_PKT 0x0000000001ffff00ULL +#define IPP_CFIG_IP_MAX_PKT_SHIFT 8 +#define IPP_CFIG_FFLP_CS_PIO_W 0x0000000000000080ULL +#define IPP_CFIG_PFIFO_PIO_W 0x0000000000000040ULL +#define IPP_CFIG_DFIFO_PIO_W 0x0000000000000020ULL +#define IPP_CFIG_CKSUM_EN 
0x0000000000000010ULL +#define IPP_CFIG_DROP_BAD_CRC 0x0000000000000008ULL +#define IPP_CFIG_DFIFO_ECC_EN 0x0000000000000004ULL +#define IPP_CFIG_DEBUG_BUS_OUT_EN 0x0000000000000002ULL +#define IPP_CFIG_IPP_ENABLE 0x0000000000000001ULL + +#define IPP_PKT_DIS (FZC_IPP + 0x00020UL) +#define IPP_PKT_DIS_COUNT 0x0000000000003fffULL + +#define IPP_BAD_CS_CNT (FZC_IPP + 0x00028UL) +#define IPP_BAD_CS_CNT_COUNT 0x0000000000003fffULL + +#define IPP_ECC (FZC_IPP + 0x00030UL) +#define IPP_ECC_COUNT 0x00000000000000ffULL + +#define IPP_INT_STAT (FZC_IPP + 0x00040UL) +#define IPP_INT_STAT_SOP_MISS 0x0000000080000000ULL +#define IPP_INT_STAT_EOP_MISS 0x0000000040000000ULL +#define IPP_INT_STAT_DFIFO_UE 0x0000000030000000ULL +#define IPP_INT_STAT_DFIFO_CE 0x000000000c000000ULL +#define IPP_INT_STAT_DFIFO_ECC 0x0000000003000000ULL +#define IPP_INT_STAT_DFIFO_ECC_IDX 0x00000000007ff000ULL +#define IPP_INT_STAT_PFIFO_PERR 0x0000000000000800ULL +#define IPP_INT_STAT_ECC_ERR_MAX 0x0000000000000400ULL +#define IPP_INT_STAT_PFIFO_ERR_IDX 0x00000000000003f0ULL +#define IPP_INT_STAT_PFIFO_OVER 0x0000000000000008ULL +#define IPP_INT_STAT_PFIFO_UND 0x0000000000000004ULL +#define IPP_INT_STAT_BAD_CS_MX 0x0000000000000002ULL +#define IPP_INT_STAT_PKT_DIS_MX 0x0000000000000001ULL +#define IPP_INT_STAT_ALL 0x00000000ff7fffffULL + +#define IPP_MSK (FZC_IPP + 0x00048UL) +#define IPP_MSK_ECC_ERR_MX 0x0000000000000080ULL +#define IPP_MSK_DFIFO_EOP_SOP 0x0000000000000040ULL +#define IPP_MSK_DFIFO_UC 0x0000000000000020ULL +#define IPP_MSK_PFIFO_PAR 0x0000000000000010ULL +#define IPP_MSK_PFIFO_OVER 0x0000000000000008ULL +#define IPP_MSK_PFIFO_UND 0x0000000000000004ULL +#define IPP_MSK_BAD_CS 0x0000000000000002ULL +#define IPP_MSK_PKT_DIS_CNT 0x0000000000000001ULL +#define IPP_MSK_ALL 0x00000000000000ffULL + +#define IPP_PFIFO_RD0 (FZC_IPP + 0x00060UL) +#define IPP_PFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_PFIFO_RD1 (FZC_IPP + 0x00068UL) +#define IPP_PFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_PFIFO_RD2 (FZC_IPP + 0x00070UL) +#define IPP_PFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_PFIFO_RD3 (FZC_IPP + 0x00078UL) +#define IPP_PFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_PFIFO_RD4 (FZC_IPP + 0x00080UL) +#define IPP_PFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_PFIFO_WR0 (FZC_IPP + 0x00088UL) +#define IPP_PFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_PFIFO_WR1 (FZC_IPP + 0x00090UL) +#define IPP_PFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_PFIFO_WR2 (FZC_IPP + 0x00098UL) +#define IPP_PFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_PFIFO_WR3 (FZC_IPP + 0x000a0UL) +#define IPP_PFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_PFIFO_WR4 (FZC_IPP + 0x000a8UL) +#define IPP_PFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_PFIFO_RD_PTR (FZC_IPP + 0x000b0UL) +#define IPP_PFIFO_RD_PTR_PTR 0x000000000000003fULL + +#define IPP_PFIFO_WR_PTR (FZC_IPP + 0x000b8UL) +#define IPP_PFIFO_WR_PTR_PTR 0x000000000000007fULL + +#define IPP_DFIFO_RD0 (FZC_IPP + 0x000c0UL) +#define IPP_DFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_DFIFO_RD1 (FZC_IPP + 0x000c8UL) +#define IPP_DFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_DFIFO_RD2 (FZC_IPP + 0x000d0UL) +#define IPP_DFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_DFIFO_RD3 (FZC_IPP + 0x000d8UL) +#define 
IPP_DFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_DFIFO_RD4 (FZC_IPP + 0x000e0UL) +#define IPP_DFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_DFIFO_WR0 (FZC_IPP + 0x000e8UL) +#define IPP_DFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_DFIFO_WR1 (FZC_IPP + 0x000f0UL) +#define IPP_DFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_DFIFO_WR2 (FZC_IPP + 0x000f8UL) +#define IPP_DFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_DFIFO_WR3 (FZC_IPP + 0x00100UL) +#define IPP_DFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_DFIFO_WR4 (FZC_IPP + 0x00108UL) +#define IPP_DFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_DFIFO_RD_PTR (FZC_IPP + 0x00110UL) +#define IPP_DFIFO_RD_PTR_PTR 0x0000000000000fffULL + +#define IPP_DFIFO_WR_PTR (FZC_IPP + 0x00118UL) +#define IPP_DFIFO_WR_PTR_PTR 0x0000000000000fffULL + +#define IPP_SM (FZC_IPP + 0x00120UL) +#define IPP_SM_SM 0x00000000ffffffffULL + +#define IPP_CS_STAT (FZC_IPP + 0x00128UL) +#define IPP_CS_STAT_BCYC_CNT 0x00000000ff000000ULL +#define IPP_CS_STAT_IP_LEN 0x0000000000fff000ULL +#define IPP_CS_STAT_CS_FAIL 0x0000000000000800ULL +#define IPP_CS_STAT_TERM 0x0000000000000400ULL +#define IPP_CS_STAT_BAD_NUM 0x0000000000000200ULL +#define IPP_CS_STAT_CS_STATE 0x00000000000001ffULL + +#define IPP_FFLP_CS_INFO (FZC_IPP + 0x00130UL) +#define IPP_FFLP_CS_INFO_PKT_ID 0x0000000000003c00ULL +#define IPP_FFLP_CS_INFO_L4_PROTO 0x0000000000000300ULL +#define IPP_FFLP_CS_INFO_V4_HD_LEN 0x00000000000000f0ULL +#define IPP_FFLP_CS_INFO_L3_VER 0x000000000000000cULL +#define IPP_FFLP_CS_INFO_L2_OP 0x0000000000000003ULL + +#define IPP_DBG_SEL (FZC_IPP + 0x00138UL) +#define IPP_DBG_SEL_SEL 0x000000000000000fULL + +#define IPP_DFIFO_ECC_SYND (FZC_IPP + 0x00140UL) +#define IPP_DFIFO_ECC_SYND_SYND 0x000000000000ffffULL + +#define IPP_DFIFO_EOP_RD_PTR (FZC_IPP + 0x00148UL) +#define IPP_DFIFO_EOP_RD_PTR_PTR 0x0000000000000fffULL + +#define IPP_ECC_CTL (FZC_IPP + 0x00150UL) +#define IPP_ECC_CTL_DIS_DBL 0x0000000080000000ULL +#define IPP_ECC_CTL_COR_DBL 0x0000000000020000ULL +#define IPP_ECC_CTL_COR_SNG 0x0000000000010000ULL +#define IPP_ECC_CTL_COR_ALL 0x0000000000000400ULL +#define IPP_ECC_CTL_COR_1 0x0000000000000100ULL +#define IPP_ECC_CTL_COR_LST 0x0000000000000004ULL +#define IPP_ECC_CTL_COR_SND 0x0000000000000002ULL +#define IPP_ECC_CTL_COR_FSR 0x0000000000000001ULL + +#define NIU_DFIFO_ENTRIES 1024 +#define ATLAS_P0_P1_DFIFO_ENTRIES 2048 +#define ATLAS_P2_P3_DFIFO_ENTRIES 1024 + +#define ZCP_CFIG (FZC_ZCP + 0x00000UL) +#define ZCP_CFIG_ZCP_32BIT_MODE 0x0000000001000000ULL +#define ZCP_CFIG_ZCP_DEBUG_SEL 0x0000000000ff0000ULL +#define ZCP_CFIG_DMA_TH 0x000000000000ffe0ULL +#define ZCP_CFIG_ECC_CHK_DIS 0x0000000000000010ULL +#define ZCP_CFIG_PAR_CHK_DIS 0x0000000000000008ULL +#define ZCP_CFIG_DIS_BUFF_RSP_IF 0x0000000000000004ULL +#define ZCP_CFIG_DIS_BUFF_REQ_IF 0x0000000000000002ULL +#define ZCP_CFIG_ZC_ENABLE 0x0000000000000001ULL + +#define ZCP_INT_STAT (FZC_ZCP + 0x00008UL) +#define ZCP_INT_STAT_RRFIFO_UNDERRUN 0x0000000000008000ULL +#define ZCP_INT_STAT_RRFIFO_OVERRUN 0x0000000000004000ULL +#define ZCP_INT_STAT_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL +#define ZCP_INT_STAT_BUFFER_OVERFLOW 0x0000000000000800ULL +#define ZCP_INT_STAT_STAT_TBL_PERR 0x0000000000000400ULL +#define ZCP_INT_STAT_DYN_TBL_PERR 0x0000000000000200ULL +#define ZCP_INT_STAT_BUF_TBL_PERR 0x0000000000000100ULL +#define ZCP_INT_STAT_TT_PROGRAM_ERR 
0x0000000000000080ULL +#define ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL +#define ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL +#define ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL +#define ZCP_INT_STAT_CFIFO_ECC3 0x0000000000000008ULL +#define ZCP_INT_STAT_CFIFO_ECC2 0x0000000000000004ULL +#define ZCP_INT_STAT_CFIFO_ECC1 0x0000000000000002ULL +#define ZCP_INT_STAT_CFIFO_ECC0 0x0000000000000001ULL +#define ZCP_INT_STAT_ALL 0x000000000000ffffULL + +#define ZCP_INT_MASK (FZC_ZCP + 0x00010UL) +#define ZCP_INT_MASK_RRFIFO_UNDERRUN 0x0000000000008000ULL +#define ZCP_INT_MASK_RRFIFO_OVERRUN 0x0000000000004000ULL +#define ZCP_INT_MASK_LOJ 0x0000000000002000ULL +#define ZCP_INT_MASK_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL +#define ZCP_INT_MASK_BUFFER_OVERFLOW 0x0000000000000800ULL +#define ZCP_INT_MASK_STAT_TBL_PERR 0x0000000000000400ULL +#define ZCP_INT_MASK_DYN_TBL_PERR 0x0000000000000200ULL +#define ZCP_INT_MASK_BUF_TBL_PERR 0x0000000000000100ULL +#define ZCP_INT_MASK_TT_PROGRAM_ERR 0x0000000000000080ULL +#define ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL +#define ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL +#define ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL +#define ZCP_INT_MASK_CFIFO_ECC3 0x0000000000000008ULL +#define ZCP_INT_MASK_CFIFO_ECC2 0x0000000000000004ULL +#define ZCP_INT_MASK_CFIFO_ECC1 0x0000000000000002ULL +#define ZCP_INT_MASK_CFIFO_ECC0 0x0000000000000001ULL +#define ZCP_INT_MASK_ALL 0x000000000000ffffULL + +#define BAM4BUF (FZC_ZCP + 0x00018UL) +#define BAM4BUF_LOJ 0x0000000080000000ULL +#define BAM4BUF_EN_CK 0x0000000040000000ULL +#define BAM4BUF_IDX_END0 0x000000003ff00000ULL +#define BAM4BUF_IDX_ST0 0x00000000000ffc00ULL +#define BAM4BUF_OFFSET0 0x00000000000003ffULL + +#define BAM8BUF (FZC_ZCP + 0x00020UL) +#define BAM8BUF_LOJ 0x0000000080000000ULL +#define BAM8BUF_EN_CK 0x0000000040000000ULL +#define BAM8BUF_IDX_END1 0x000000003ff00000ULL +#define BAM8BUF_IDX_ST1 0x00000000000ffc00ULL +#define BAM8BUF_OFFSET1 0x00000000000003ffULL + +#define BAM16BUF (FZC_ZCP + 0x00028UL) +#define BAM16BUF_LOJ 0x0000000080000000ULL +#define BAM16BUF_EN_CK 0x0000000040000000ULL +#define BAM16BUF_IDX_END2 0x000000003ff00000ULL +#define BAM16BUF_IDX_ST2 0x00000000000ffc00ULL +#define BAM16BUF_OFFSET2 0x00000000000003ffULL + +#define BAM32BUF (FZC_ZCP + 0x00030UL) +#define BAM32BUF_LOJ 0x0000000080000000ULL +#define BAM32BUF_EN_CK 0x0000000040000000ULL +#define BAM32BUF_IDX_END3 0x000000003ff00000ULL +#define BAM32BUF_IDX_ST3 0x00000000000ffc00ULL +#define BAM32BUF_OFFSET3 0x00000000000003ffULL + +#define DST4BUF (FZC_ZCP + 0x00038UL) +#define DST4BUF_DS_OFFSET0 0x00000000000003ffULL + +#define DST8BUF (FZC_ZCP + 0x00040UL) +#define DST8BUF_DS_OFFSET1 0x00000000000003ffULL + +#define DST16BUF (FZC_ZCP + 0x00048UL) +#define DST16BUF_DS_OFFSET2 0x00000000000003ffULL + +#define DST32BUF (FZC_ZCP + 0x00050UL) +#define DST32BUF_DS_OFFSET3 0x00000000000003ffULL + +#define ZCP_RAM_DATA0 (FZC_ZCP + 0x00058UL) +#define ZCP_RAM_DATA0_DAT0 0x00000000ffffffffULL + +#define ZCP_RAM_DATA1 (FZC_ZCP + 0x00060UL) +#define ZCP_RAM_DAT10_DAT1 0x00000000ffffffffULL + +#define ZCP_RAM_DATA2 (FZC_ZCP + 0x00068UL) +#define ZCP_RAM_DATA2_DAT2 0x00000000ffffffffULL + +#define ZCP_RAM_DATA3 (FZC_ZCP + 0x00070UL) +#define ZCP_RAM_DATA3_DAT3 0x00000000ffffffffULL + +#define ZCP_RAM_DATA4 (FZC_ZCP + 0x00078UL) +#define ZCP_RAM_DATA4_DAT4 0x00000000000000ffULL + +#define ZCP_RAM_BE (FZC_ZCP + 0x00080UL) +#define ZCP_RAM_BE_VAL 0x000000000001ffffULL + +#define ZCP_RAM_ACC 
(FZC_ZCP + 0x00088UL) +#define ZCP_RAM_ACC_BUSY 0x0000000080000000ULL +#define ZCP_RAM_ACC_READ 0x0000000040000000ULL +#define ZCP_RAM_ACC_WRITE 0x0000000000000000ULL +#define ZCP_RAM_ACC_LOJ 0x0000000020000000ULL +#define ZCP_RAM_ACC_ZFCID 0x000000001ffe0000ULL +#define ZCP_RAM_ACC_ZFCID_SHIFT 17 +#define ZCP_RAM_ACC_RAM_SEL 0x000000000001f000ULL +#define ZCP_RAM_ACC_RAM_SEL_SHIFT 12 +#define ZCP_RAM_ACC_CFIFOADDR 0x0000000000000fffULL +#define ZCP_RAM_ACC_CFIFOADDR_SHIFT 0 + +#define ZCP_RAM_SEL_BAM(INDEX) (0x00 + (INDEX)) +#define ZCP_RAM_SEL_TT_STATIC 0x08 +#define ZCP_RAM_SEL_TT_DYNAMIC 0x09 +#define ZCP_RAM_SEL_CFIFO(PORT) (0x10 + (PORT)) + +#define NIU_CFIFO_ENTRIES 1024 +#define ATLAS_P0_P1_CFIFO_ENTRIES 2048 +#define ATLAS_P2_P3_CFIFO_ENTRIES 1024 + +#define CHK_BIT_DATA (FZC_ZCP + 0x00090UL) +#define CHK_BIT_DATA_DATA 0x000000000000ffffULL + +#define RESET_CFIFO (FZC_ZCP + 0x00098UL) +#define RESET_CFIFO_RST(PORT) (0x1 << (PORT)) + +#define CFIFO_ECC(PORT) (FZC_ZCP + 0x000a0UL + (PORT) * 8UL) +#define CFIFO_ECC_DIS_DBLBIT_ERR 0x0000000080000000ULL +#define CFIFO_ECC_DBLBIT_ERR 0x0000000000020000ULL +#define CFIFO_ECC_SINGLEBIT_ERR 0x0000000000010000ULL +#define CFIFO_ECC_ALL_PKT 0x0000000000000400ULL +#define CFIFO_ECC_LAST_LINE 0x0000000000000004ULL +#define CFIFO_ECC_2ND_LINE 0x0000000000000002ULL +#define CFIFO_ECC_1ST_LINE 0x0000000000000001ULL + +#define ZCP_TRAINING_VECTOR (FZC_ZCP + 0x000c0UL) +#define ZCP_TRAINING_VECTOR_VECTOR 0x00000000ffffffffULL + +#define ZCP_STATE_MACHINE (FZC_ZCP + 0x000c8UL) +#define ZCP_STATE_MACHINE_SM 0x00000000ffffffffULL + +/* Same bits as ZCP_INT_STAT */ +#define ZCP_INT_STAT_TEST (FZC_ZCP + 0x00108UL) + +#define RXDMA_CFIG1(IDX) (DMC + 0x00000UL + (IDX) * 0x200UL) +#define RXDMA_CFIG1_EN 0x0000000080000000ULL +#define RXDMA_CFIG1_RST 0x0000000040000000ULL +#define RXDMA_CFIG1_QST 0x0000000020000000ULL +#define RXDMA_CFIG1_MBADDR_H 0x0000000000000fffULL /* mboxaddr 43:32 */ + +#define RXDMA_CFIG2(IDX) (DMC + 0x00008UL + (IDX) * 0x200UL) +#define RXDMA_CFIG2_MBADDR_L 0x00000000ffffffc0ULL /* mboxaddr 31:6 */ +#define RXDMA_CFIG2_OFFSET 0x0000000000000006ULL +#define RXDMA_CFIG2_OFFSET_SHIFT 1 +#define RXDMA_CFIG2_FULL_HDR 0x0000000000000001ULL + +#define RBR_CFIG_A(IDX) (DMC + 0x00010UL + (IDX) * 0x200UL) +#define RBR_CFIG_A_LEN 0xffff000000000000ULL +#define RBR_CFIG_A_LEN_SHIFT 48 +#define RBR_CFIG_A_STADDR_BASE 0x00000ffffffc0000ULL +#define RBR_CFIG_A_STADDR 0x000000000003ffc0ULL + +#define RBR_CFIG_B(IDX) (DMC + 0x00018UL + (IDX) * 0x200UL) +#define RBR_CFIG_B_BLKSIZE 0x0000000003000000ULL +#define RBR_CFIG_B_BLKSIZE_SHIFT 24 +#define RBR_CFIG_B_VLD2 0x0000000000800000ULL +#define RBR_CFIG_B_BUFSZ2 0x0000000000030000ULL +#define RBR_CFIG_B_BUFSZ2_SHIFT 16 +#define RBR_CFIG_B_VLD1 0x0000000000008000ULL +#define RBR_CFIG_B_BUFSZ1 0x0000000000000300ULL +#define RBR_CFIG_B_BUFSZ1_SHIFT 8 +#define RBR_CFIG_B_VLD0 0x0000000000000080ULL +#define RBR_CFIG_B_BUFSZ0 0x0000000000000003ULL +#define RBR_CFIG_B_BUFSZ0_SHIFT 0 + +#define RBR_BLKSIZE_4K 0x0 +#define RBR_BLKSIZE_8K 0x1 +#define RBR_BLKSIZE_16K 0x2 +#define RBR_BLKSIZE_32K 0x3 +#define RBR_BUFSZ2_2K 0x0 +#define RBR_BUFSZ2_4K 0x1 +#define RBR_BUFSZ2_8K 0x2 +#define RBR_BUFSZ2_16K 0x3 +#define RBR_BUFSZ1_1K 0x0 +#define RBR_BUFSZ1_2K 0x1 +#define RBR_BUFSZ1_4K 0x2 +#define RBR_BUFSZ1_8K 0x3 +#define RBR_BUFSZ0_256 0x0 +#define RBR_BUFSZ0_512 0x1 +#define RBR_BUFSZ0_1K 0x2 +#define RBR_BUFSZ0_2K 0x3 + +#define RBR_KICK(IDX) (DMC + 0x00020UL + (IDX) * 0x200UL) +#define RBR_KICK_BKADD 
0x000000000000ffffULL + +#define RBR_STAT(IDX) (DMC + 0x00028UL + (IDX) * 0x200UL) +#define RBR_STAT_QLEN 0x000000000000ffffULL + +#define RBR_HDH(IDX) (DMC + 0x00030UL + (IDX) * 0x200UL) +#define RBR_HDH_HEAD_H 0x0000000000000fffULL + +#define RBR_HDL(IDX) (DMC + 0x00038UL + (IDX) * 0x200UL) +#define RBR_HDL_HEAD_L 0x00000000fffffffcULL + +#define RCRCFIG_A(IDX) (DMC + 0x00040UL + (IDX) * 0x200UL) +#define RCRCFIG_A_LEN 0xffff000000000000ULL +#define RCRCFIG_A_LEN_SHIFT 48 +#define RCRCFIG_A_STADDR_BASE 0x00000ffffff80000ULL +#define RCRCFIG_A_STADDR 0x000000000007ffc0ULL + +#define RCRCFIG_B(IDX) (DMC + 0x00048UL + (IDX) * 0x200UL) +#define RCRCFIG_B_PTHRES 0x00000000ffff0000ULL +#define RCRCFIG_B_PTHRES_SHIFT 16 +#define RCRCFIG_B_ENTOUT 0x0000000000008000ULL +#define RCRCFIG_B_TIMEOUT 0x000000000000003fULL +#define RCRCFIG_B_TIMEOUT_SHIFT 0 + +#define RCRSTAT_A(IDX) (DMC + 0x00050UL + (IDX) * 0x200UL) +#define RCRSTAT_A_QLEN 0x000000000000ffffULL + +#define RCRSTAT_B(IDX) (DMC + 0x00058UL + (IDX) * 0x200UL) +#define RCRSTAT_B_TIPTR_H 0x0000000000000fffULL + +#define RCRSTAT_C(IDX) (DMC + 0x00060UL + (IDX) * 0x200UL) +#define RCRSTAT_C_TIPTR_L 0x00000000fffffff8ULL + +#define RX_DMA_CTL_STAT(IDX) (DMC + 0x00070UL + (IDX) * 0x200UL) +#define RX_DMA_CTL_STAT_RBR_TMOUT 0x0020000000000000ULL +#define RX_DMA_CTL_STAT_RSP_CNT_ERR 0x0010000000000000ULL +#define RX_DMA_CTL_STAT_BYTE_EN_BUS 0x0008000000000000ULL +#define RX_DMA_CTL_STAT_RSP_DAT_ERR 0x0004000000000000ULL +#define RX_DMA_CTL_STAT_RCR_ACK_ERR 0x0002000000000000ULL +#define RX_DMA_CTL_STAT_DC_FIFO_ERR 0x0001000000000000ULL +#define RX_DMA_CTL_STAT_MEX 0x0000800000000000ULL +#define RX_DMA_CTL_STAT_RCRTHRES 0x0000400000000000ULL +#define RX_DMA_CTL_STAT_RCRTO 0x0000200000000000ULL +#define RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL +#define RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL +#define RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL +#define RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL +#define RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL +#define RX_DMA_CTL_STAT_RCRSHADOW_FULL 0x0000008000000000ULL +#define RX_DMA_CTL_STAT_CONFIG_ERR 0x0000004000000000ULL +#define RX_DMA_CTL_STAT_RCRINCON 0x0000002000000000ULL +#define RX_DMA_CTL_STAT_RCRFULL 0x0000001000000000ULL +#define RX_DMA_CTL_STAT_RBR_EMPTY 0x0000000800000000ULL +#define RX_DMA_CTL_STAT_RBRFULL 0x0000000400000000ULL +#define RX_DMA_CTL_STAT_RBRLOGPAGE 0x0000000200000000ULL +#define RX_DMA_CTL_STAT_CFIGLOGPAGE 0x0000000100000000ULL +#define RX_DMA_CTL_STAT_PTRREAD 0x00000000ffff0000ULL +#define RX_DMA_CTL_STAT_PTRREAD_SHIFT 16 +#define RX_DMA_CTL_STAT_PKTREAD 0x000000000000ffffULL +#define RX_DMA_CTL_STAT_PKTREAD_SHIFT 0 + +#define RX_DMA_CTL_STAT_CHAN_FATAL (RX_DMA_CTL_STAT_RBR_TMOUT | \ + RX_DMA_CTL_STAT_RSP_CNT_ERR | \ + RX_DMA_CTL_STAT_BYTE_EN_BUS | \ + RX_DMA_CTL_STAT_RSP_DAT_ERR | \ + RX_DMA_CTL_STAT_RCR_ACK_ERR | \ + RX_DMA_CTL_STAT_RCR_SHA_PAR | \ + RX_DMA_CTL_STAT_RBR_PRE_PAR | \ + RX_DMA_CTL_STAT_CONFIG_ERR | \ + RX_DMA_CTL_STAT_RCRINCON | \ + RX_DMA_CTL_STAT_RCRFULL | \ + RX_DMA_CTL_STAT_RBRFULL | \ + RX_DMA_CTL_STAT_RBRLOGPAGE | \ + RX_DMA_CTL_STAT_CFIGLOGPAGE) + +#define RX_DMA_CTL_STAT_PORT_FATAL (RX_DMA_CTL_STAT_DC_FIFO_ERR) + +#define RX_DMA_CTL_WRITE_CLEAR_ERRS (RX_DMA_CTL_STAT_RBR_EMPTY | \ + RX_DMA_CTL_STAT_RCRSHADOW_FULL | \ + RX_DMA_CTL_STAT_RBR_PRE_EMTY | \ + RX_DMA_CTL_STAT_WRED_DROP | \ + RX_DMA_CTL_STAT_PORT_DROP_PKT | \ + RX_DMA_CTL_STAT_RCRTO | \ + RX_DMA_CTL_STAT_RCRTHRES | \ + RX_DMA_CTL_STAT_DC_FIFO_ERR) + 
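The RX_DMA_CTL_STAT groupings above split the status word into per-channel fatal errors, a per-port fatal error, and conditions whose names indicate they are acknowledged by writing the bits back. Below is a minimal sketch of how a handler might consume these masks; example_read64()/example_write64() are hypothetical register accessors (not part of this header), and the actual reset actions are left to the caller.

/* Sketch only, not part of niu.h. */
#include <linux/types.h>

extern u64  example_read64(unsigned long reg);            /* hypothetical accessor */
extern void example_write64(unsigned long reg, u64 val);  /* hypothetical accessor */

int example_rx_error(int channel)
{
        u64 stat = example_read64(RX_DMA_CTL_STAT(channel));

        if (stat & RX_DMA_CTL_STAT_CHAN_FATAL)
                return -1;      /* caller would reset this RX channel */
        if (stat & RX_DMA_CTL_STAT_PORT_FATAL)
                return -2;      /* DC FIFO error: affects the whole port */

        /* Remaining error conditions are cleared by writing them back. */
        example_write64(RX_DMA_CTL_STAT(channel),
                        stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
        return 0;
}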
+#define RCR_FLSH(IDX) (DMC + 0x00078UL + (IDX) * 0x200UL) +#define RCR_FLSH_FLSH 0x0000000000000001ULL + +#define RXMISC(IDX) (DMC + 0x00090UL + (IDX) * 0x200UL) +#define RXMISC_OFLOW 0x0000000000010000ULL +#define RXMISC_COUNT 0x000000000000ffffULL + +#define RX_DMA_CTL_STAT_DBG(IDX) (DMC + 0x00098UL + (IDX) * 0x200UL) +#define RX_DMA_CTL_STAT_DBG_RBR_TMOUT 0x0020000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR 0x0010000000000000ULL +#define RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS 0x0008000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR 0x0004000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR 0x0002000000000000ULL +#define RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR 0x0001000000000000ULL +#define RX_DMA_CTL_STAT_DBG_MEX 0x0000800000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRTHRES 0x0000400000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRTO 0x0000200000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR 0x0000100000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR 0x0000080000000000ULL +#define RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT 0x0000040000000000ULL +#define RX_DMA_CTL_STAT_DBG_WRED_DROP 0x0000020000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY 0x0000010000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL 0x0000008000000000ULL +#define RX_DMA_CTL_STAT_DBG_CONFIG_ERR 0x0000004000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRINCON 0x0000002000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRFULL 0x0000001000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_EMPTY 0x0000000800000000ULL +#define RX_DMA_CTL_STAT_DBG_RBRFULL 0x0000000400000000ULL +#define RX_DMA_CTL_STAT_DBG_RBRLOGPAGE 0x0000000200000000ULL +#define RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE 0x0000000100000000ULL +#define RX_DMA_CTL_STAT_DBG_PTRREAD 0x00000000ffff0000ULL +#define RX_DMA_CTL_STAT_DBG_PKTREAD 0x000000000000ffffULL + +#define RX_DMA_ENT_MSK(IDX) (DMC + 0x00068UL + (IDX) * 0x200UL) +#define RX_DMA_ENT_MSK_RBR_TMOUT 0x0000000000200000ULL +#define RX_DMA_ENT_MSK_RSP_CNT_ERR 0x0000000000100000ULL +#define RX_DMA_ENT_MSK_BYTE_EN_BUS 0x0000000000080000ULL +#define RX_DMA_ENT_MSK_RSP_DAT_ERR 0x0000000000040000ULL +#define RX_DMA_ENT_MSK_RCR_ACK_ERR 0x0000000000020000ULL +#define RX_DMA_ENT_MSK_DC_FIFO_ERR 0x0000000000010000ULL +#define RX_DMA_ENT_MSK_RCRTHRES 0x0000000000004000ULL +#define RX_DMA_ENT_MSK_RCRTO 0x0000000000002000ULL +#define RX_DMA_ENT_MSK_RCR_SHA_PAR 0x0000000000001000ULL +#define RX_DMA_ENT_MSK_RBR_PRE_PAR 0x0000000000000800ULL +#define RX_DMA_ENT_MSK_PORT_DROP_PKT 0x0000000000000400ULL +#define RX_DMA_ENT_MSK_WRED_DROP 0x0000000000000200ULL +#define RX_DMA_ENT_MSK_RBR_PRE_EMTY 0x0000000000000100ULL +#define RX_DMA_ENT_MSK_RCR_SHADOW_FULL 0x0000000000000080ULL +#define RX_DMA_ENT_MSK_CONFIG_ERR 0x0000000000000040ULL +#define RX_DMA_ENT_MSK_RCRINCON 0x0000000000000020ULL +#define RX_DMA_ENT_MSK_RCRFULL 0x0000000000000010ULL +#define RX_DMA_ENT_MSK_RBR_EMPTY 0x0000000000000008ULL +#define RX_DMA_ENT_MSK_RBRFULL 0x0000000000000004ULL +#define RX_DMA_ENT_MSK_RBRLOGPAGE 0x0000000000000002ULL +#define RX_DMA_ENT_MSK_CFIGLOGPAGE 0x0000000000000001ULL +#define RX_DMA_ENT_MSK_ALL 0x00000000003f7fffULL + +#define TX_RNG_CFIG(IDX) (DMC + 0x40000UL + (IDX) * 0x200UL) +#define TX_RNG_CFIG_LEN 0x1fff000000000000ULL +#define TX_RNG_CFIG_LEN_SHIFT 48 +#define TX_RNG_CFIG_STADDR_BASE 0x00000ffffff80000ULL +#define TX_RNG_CFIG_STADDR 0x000000000007ffc0ULL + +#define TX_RING_HDL(IDX) (DMC + 0x40010UL + (IDX) * 0x200UL) +#define TX_RING_HDL_WRAP 0x0000000000080000ULL +#define TX_RING_HDL_HEAD 0x000000000007fff8ULL +#define TX_RING_HDL_HEAD_SHIFT 3 
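TX_RING_HDL above (the hardware head) and TX_RING_KICK just below (the tail written by the driver) each hold a descriptor index in bits 18:3 plus a WRAP bit. Assuming the usual convention that WRAP toggles on every roll-over, the two pointers can be compared even when the indices are equal; a minimal sketch of deriving ring occupancy from the pair, using a hypothetical example_read64() accessor and a ring length expressed in descriptors:

/* Sketch only, not part of niu.h. */
#include <linux/types.h>

extern u64 example_read64(unsigned long reg);             /* hypothetical accessor */

unsigned int example_tx_ring_pending(int channel, unsigned int ring_len)
{
        u64 hdl  = example_read64(TX_RING_HDL(channel));
        u64 kick = example_read64(TX_RING_KICK(channel));
        unsigned int head = (hdl  & TX_RING_HDL_HEAD)  >> TX_RING_HDL_HEAD_SHIFT;
        unsigned int tail = (kick & TX_RING_KICK_TAIL) >> 3;

        if ((hdl & TX_RING_HDL_WRAP) == (kick & TX_RING_KICK_WRAP))
                return tail - head;            /* same lap: plain difference */
        return ring_len - head + tail;         /* tail has wrapped past head */
}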
+ +#define TX_RING_KICK(IDX) (DMC + 0x40018UL + (IDX) * 0x200UL) +#define TX_RING_KICK_WRAP 0x0000000000080000ULL +#define TX_RING_KICK_TAIL 0x000000000007fff8ULL + +#define TX_ENT_MSK(IDX) (DMC + 0x40020UL + (IDX) * 0x200UL) +#define TX_ENT_MSK_MK 0x0000000000008000ULL +#define TX_ENT_MSK_MBOX_ERR 0x0000000000000080ULL +#define TX_ENT_MSK_PKT_SIZE_ERR 0x0000000000000040ULL +#define TX_ENT_MSK_TX_RING_OFLOW 0x0000000000000020ULL +#define TX_ENT_MSK_PREF_BUF_ECC_ERR 0x0000000000000010ULL +#define TX_ENT_MSK_NACK_PREF 0x0000000000000008ULL +#define TX_ENT_MSK_NACK_PKT_RD 0x0000000000000004ULL +#define TX_ENT_MSK_CONF_PART_ERR 0x0000000000000002ULL +#define TX_ENT_MSK_PKT_PRT_ERR 0x0000000000000001ULL + +#define TX_CS(IDX) (DMC + 0x40028UL + (IDX)*0x200UL) +#define TX_CS_PKT_CNT 0x0fff000000000000ULL +#define TX_CS_PKT_CNT_SHIFT 48 +#define TX_CS_LASTMARK 0x00000fff00000000ULL +#define TX_CS_LASTMARK_SHIFT 32 +#define TX_CS_RST 0x0000000080000000ULL +#define TX_CS_RST_STATE 0x0000000040000000ULL +#define TX_CS_MB 0x0000000020000000ULL +#define TX_CS_STOP_N_GO 0x0000000010000000ULL +#define TX_CS_SNG_STATE 0x0000000008000000ULL +#define TX_CS_MK 0x0000000000008000ULL +#define TX_CS_MMK 0x0000000000004000ULL +#define TX_CS_MBOX_ERR 0x0000000000000080ULL +#define TX_CS_PKT_SIZE_ERR 0x0000000000000040ULL +#define TX_CS_TX_RING_OFLOW 0x0000000000000020ULL +#define TX_CS_PREF_BUF_PAR_ERR 0x0000000000000010ULL +#define TX_CS_NACK_PREF 0x0000000000000008ULL +#define TX_CS_NACK_PKT_RD 0x0000000000000004ULL +#define TX_CS_CONF_PART_ERR 0x0000000000000002ULL +#define TX_CS_PKT_PRT_ERR 0x0000000000000001ULL + +#define TXDMA_MBH(IDX) (DMC + 0x40030UL + (IDX) * 0x200UL) +#define TXDMA_MBH_MBADDR 0x0000000000000fffULL + +#define TXDMA_MBL(IDX) (DMC + 0x40038UL + (IDX) * 0x200UL) +#define TXDMA_MBL_MBADDR 0x00000000ffffffc0ULL + +#define TX_DMA_PRE_ST(IDX) (DMC + 0x40040UL + (IDX) * 0x200UL) +#define TX_DMA_PRE_ST_SHADOW_HD 0x000000000007ffffULL + +#define TX_RNG_ERR_LOGH(IDX) (DMC + 0x40048UL + (IDX) * 0x200UL) +#define TX_RNG_ERR_LOGH_ERR 0x0000000080000000ULL +#define TX_RNG_ERR_LOGH_MERR 0x0000000040000000ULL +#define TX_RNG_ERR_LOGH_ERRCODE 0x0000000038000000ULL +#define TX_RNG_ERR_LOGH_ERRADDR 0x0000000000000fffULL + +#define TX_RNG_ERR_LOGL(IDX) (DMC + 0x40050UL + (IDX) * 0x200UL) +#define TX_RNG_ERR_LOGL_ERRADDR 0x00000000ffffffffULL + +#define TDMC_INTR_DBG(IDX) (DMC + 0x40060UL + (IDX) * 0x200UL) +#define TDMC_INTR_DBG_MK 0x0000000000008000ULL +#define TDMC_INTR_DBG_MBOX_ERR 0x0000000000000080ULL +#define TDMC_INTR_DBG_PKT_SIZE_ERR 0x0000000000000040ULL +#define TDMC_INTR_DBG_TX_RING_OFLOW 0x0000000000000020ULL +#define TDMC_INTR_DBG_PREF_BUF_PAR_ERR 0x0000000000000010ULL +#define TDMC_INTR_DBG_NACK_PREF 0x0000000000000008ULL +#define TDMC_INTR_DBG_NACK_PKT_RD 0x0000000000000004ULL +#define TDMC_INTR_DBG_CONF_PART_ERR 0x0000000000000002ULL +#define TDMC_INTR_DBG_PKT_PART_ERR 0x0000000000000001ULL + +#define TX_CS_DBG(IDX) (DMC + 0x40068UL + (IDX) * 0x200UL) +#define TX_CS_DBG_PKT_CNT 0x0fff000000000000ULL + +#define TDMC_INJ_PAR_ERR(IDX) (DMC + 0x45040UL + (IDX) * 0x200UL) +#define TDMC_INJ_PAR_ERR_VAL 0x000000000000ffffULL + +#define TDMC_DBG_SEL(IDX) (DMC + 0x45080UL + (IDX) * 0x200UL) +#define TDMC_DBG_SEL_DBG_SEL 0x000000000000003fULL + +#define TDMC_TRAINING_VECTOR(IDX) (DMC + 0x45088UL + (IDX) * 0x200UL) +#define TDMC_TRAINING_VECTOR_VEC 0x00000000ffffffffULL + +#define TXC_DMA_MAX(CHAN) (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL) +#define TXC_DMA_MAX_LEN(CHAN) (FZC_TXC + 0x00008UL + 
(CHAN)*0x1000UL) + +#define TXC_CONTROL (FZC_TXC + 0x20000UL) +#define TXC_CONTROL_ENABLE 0x0000000000000010ULL +#define TXC_CONTROL_PORT_ENABLE(X) (1 << (X)) + +#define TXC_TRAINING_VEC (FZC_TXC + 0x20008UL) +#define TXC_TRAINING_VEC_MASK 0x00000000ffffffffULL + +#define TXC_DEBUG (FZC_TXC + 0x20010UL) +#define TXC_DEBUG_SELECT 0x000000000000003fULL + +#define TXC_MAX_REORDER (FZC_TXC + 0x20018UL) +#define TXC_MAX_REORDER_PORT3 0x000000000f000000ULL +#define TXC_MAX_REORDER_PORT2 0x00000000000f0000ULL +#define TXC_MAX_REORDER_PORT1 0x0000000000000f00ULL +#define TXC_MAX_REORDER_PORT0 0x000000000000000fULL + +#define TXC_PORT_CTL(PORT) (FZC_TXC + 0x20020UL + (PORT)*0x100UL) +#define TXC_PORT_CTL_CLR_ALL_STAT 0x0000000000000001ULL + +#define TXC_PKT_STUFFED(PORT) (FZC_TXC + 0x20030UL + (PORT)*0x100UL) +#define TXC_PKT_STUFFED_PP_REORDER 0x00000000ffff0000ULL +#define TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL + +#define TXC_PKT_XMIT(PORT) (FZC_TXC + 0x20038UL + (PORT)*0x100UL) +#define TXC_PKT_XMIT_BYTES 0x00000000ffff0000ULL +#define TXC_PKT_XMIT_PKTS 0x000000000000ffffULL + +#define TXC_ROECC_CTL(PORT) (FZC_TXC + 0x20040UL + (PORT)*0x100UL) +#define TXC_ROECC_CTL_DISABLE_UE 0x0000000080000000ULL +#define TXC_ROECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL +#define TXC_ROECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL +#define TXC_ROECC_CTL_ALL_PKTS 0x0000000000000400ULL +#define TXC_ROECC_CTL_ALT_PKTS 0x0000000000000200ULL +#define TXC_ROECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL +#define TXC_ROECC_CTL_LST_PKT_LINE 0x0000000000000004ULL +#define TXC_ROECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL +#define TXC_ROECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL + +#define TXC_ROECC_ST(PORT) (FZC_TXC + 0x20048UL + (PORT)*0x100UL) +#define TXC_ROECC_CLR_ST 0x0000000080000000ULL +#define TXC_ROECC_CE 0x0000000000020000ULL +#define TXC_ROECC_UE 0x0000000000010000ULL +#define TXC_ROECC_ST_ECC_ADDR 0x00000000000003ffULL + +#define TXC_RO_DATA0(PORT) (FZC_TXC + 0x20050UL + (PORT)*0x100UL) +#define TXC_RO_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ + +#define TXC_RO_DATA1(PORT) (FZC_TXC + 0x20058UL + (PORT)*0x100UL) +#define TXC_RO_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ + +#define TXC_RO_DATA2(PORT) (FZC_TXC + 0x20060UL + (PORT)*0x100UL) +#define TXC_RO_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ + +#define TXC_RO_DATA3(PORT) (FZC_TXC + 0x20068UL + (PORT)*0x100UL) +#define TXC_RO_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ + +#define TXC_RO_DATA4(PORT) (FZC_TXC + 0x20070UL + (PORT)*0x100UL) +#define TXC_RO_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ + +#define TXC_SFECC_CTL(PORT) (FZC_TXC + 0x20078UL + (PORT)*0x100UL) +#define TXC_SFECC_CTL_DISABLE_UE 0x0000000080000000ULL +#define TXC_SFECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL +#define TXC_SFECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL +#define TXC_SFECC_CTL_ALL_PKTS 0x0000000000000400ULL +#define TXC_SFECC_CTL_ALT_PKTS 0x0000000000000200ULL +#define TXC_SFECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL +#define TXC_SFECC_CTL_LST_PKT_LINE 0x0000000000000004ULL +#define TXC_SFECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL +#define TXC_SFECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL + +#define TXC_SFECC_ST(PORT) (FZC_TXC + 0x20080UL + (PORT)*0x100UL) +#define TXC_SFECC_ST_CLR_ST 0x0000000080000000ULL +#define TXC_SFECC_ST_CE 0x0000000000020000ULL +#define TXC_SFECC_ST_UE 0x0000000000010000ULL +#define TXC_SFECC_ST_ECC_ADDR 0x00000000000003ffULL + +#define TXC_SF_DATA0(PORT) (FZC_TXC + 0x20088UL + (PORT)*0x100UL) +#define 
TXC_SF_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ + +#define TXC_SF_DATA1(PORT) (FZC_TXC + 0x20090UL + (PORT)*0x100UL) +#define TXC_SF_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ + +#define TXC_SF_DATA2(PORT) (FZC_TXC + 0x20098UL + (PORT)*0x100UL) +#define TXC_SF_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ + +#define TXC_SF_DATA3(PORT) (FZC_TXC + 0x200a0UL + (PORT)*0x100UL) +#define TXC_SF_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ + +#define TXC_SF_DATA4(PORT) (FZC_TXC + 0x200a8UL + (PORT)*0x100UL) +#define TXC_SF_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ + +#define TXC_RO_TIDS(PORT) (FZC_TXC + 0x200b0UL + (PORT)*0x100UL) +#define TXC_RO_TIDS_IN_USE 0x00000000ffffffffULL + +#define TXC_RO_STATE0(PORT) (FZC_TXC + 0x200b8UL + (PORT)*0x100UL) +#define TXC_RO_STATE0_DUPLICATE_TID 0x00000000ffffffffULL + +#define TXC_RO_STATE1(PORT) (FZC_TXC + 0x200c0UL + (PORT)*0x100UL) +#define TXC_RO_STATE1_UNUSED_TID 0x00000000ffffffffULL + +#define TXC_RO_STATE2(PORT) (FZC_TXC + 0x200c8UL + (PORT)*0x100UL) +#define TXC_RO_STATE2_TRANS_TIMEOUT 0x00000000ffffffffULL + +#define TXC_RO_STATE3(PORT) (FZC_TXC + 0x200d0UL + (PORT)*0x100UL) +#define TXC_RO_STATE3_ENAB_SPC_WMARK 0x0000000080000000ULL +#define TXC_RO_STATE3_RO_SPC_WMARK 0x000000007fe00000ULL +#define TXC_RO_STATE3_ROFIFO_SPC_AVAIL 0x00000000001ff800ULL +#define TXC_RO_STATE3_ENAB_RO_WMARK 0x0000000000000100ULL +#define TXC_RO_STATE3_HIGH_RO_USED 0x00000000000000f0ULL +#define TXC_RO_STATE3_NUM_RO_USED 0x000000000000000fULL + +#define TXC_RO_CTL(PORT) (FZC_TXC + 0x200d8UL + (PORT)*0x100UL) +#define TXC_RO_CTL_CLR_FAIL_STATE 0x0000000080000000ULL +#define TXC_RO_CTL_RO_ADDR 0x000000000f000000ULL +#define TXC_RO_CTL_ADDR_FAILED 0x0000000000400000ULL +#define TXC_RO_CTL_DMA_FAILED 0x0000000000200000ULL +#define TXC_RO_CTL_LEN_FAILED 0x0000000000100000ULL +#define TXC_RO_CTL_CAPT_ADDR_FAILED 0x0000000000040000ULL +#define TXC_RO_CTL_CAPT_DMA_FAILED 0x0000000000020000ULL +#define TXC_RO_CTL_CAPT_LEN_FAILED 0x0000000000010000ULL +#define TXC_RO_CTL_RO_STATE_RD_DONE 0x0000000000000080ULL +#define TXC_RO_CTL_RO_STATE_WR_DONE 0x0000000000000040ULL +#define TXC_RO_CTL_RO_STATE_RD 0x0000000000000020ULL +#define TXC_RO_CTL_RO_STATE_WR 0x0000000000000010ULL +#define TXC_RO_CTL_RO_STATE_ADDR 0x000000000000000fULL + +#define TXC_RO_ST_DATA0(PORT) (FZC_TXC + 0x200e0UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA0_DATA0 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA1(PORT) (FZC_TXC + 0x200e8UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA1_DATA1 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA2(PORT) (FZC_TXC + 0x200f0UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA2_DATA2 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA3(PORT) (FZC_TXC + 0x200f8UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA3_DATA3 0x00000000ffffffffULL + +#define TXC_PORT_PACKET_REQ(PORT) (FZC_TXC + 0x20100UL + (PORT)*0x100UL) +#define TXC_PORT_PACKET_REQ_GATHER_REQ 0x00000000f0000000ULL +#define TXC_PORT_PACKET_REQ_PKT_REQ 0x000000000fff0000ULL +#define TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL + + /* bits are same as TXC_INT_STAT */ +#define TXC_INT_STAT_DBG (FZC_TXC + 0x20420UL) + +#define TXC_INT_STAT (FZC_TXC + 0x20428UL) +#define TXC_INT_STAT_VAL_SHIFT(PORT) ((PORT) * 8) +#define TXC_INT_STAT_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_SF_CE(PORT) (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_SF_UE(PORT) (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_RO_CE(PORT) (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT)) 
+#define TXC_INT_STAT_RO_UE(PORT) (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_REORDER_ERR(PORT) (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_PKTASM_DEAD(PORT) (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT)) + +#define TXC_INT_MASK (FZC_TXC + 0x20430UL) +#define TXC_INT_MASK_VAL_SHIFT(PORT) ((PORT) * 8) +#define TXC_INT_MASK_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) + +#define TXC_INT_MASK_SF_CE 0x01 +#define TXC_INT_MASK_SF_UE 0x02 +#define TXC_INT_MASK_RO_CE 0x04 +#define TXC_INT_MASK_RO_UE 0x08 +#define TXC_INT_MASK_REORDER_ERR 0x10 +#define TXC_INT_MASK_PKTASM_DEAD 0x20 +#define TXC_INT_MASK_ALL 0x3f + +#define TXC_PORT_DMA(IDX) (FZC_TXC + 0x20028UL + (IDX)*0x100UL) + +#define ESPC_PIO_EN (FZC_PROM + 0x40000UL) +#define ESPC_PIO_EN_ENABLE 0x0000000000000001ULL + +#define ESPC_PIO_STAT (FZC_PROM + 0x40008UL) +#define ESPC_PIO_STAT_READ_START 0x0000000080000000ULL +#define ESPC_PIO_STAT_READ_END 0x0000000040000000ULL +#define ESPC_PIO_STAT_WRITE_INIT 0x0000000020000000ULL +#define ESPC_PIO_STAT_WRITE_END 0x0000000010000000ULL +#define ESPC_PIO_STAT_ADDR 0x0000000003ffff00ULL +#define ESPC_PIO_STAT_ADDR_SHIFT 8 +#define ESPC_PIO_STAT_DATA 0x00000000000000ffULL +#define ESPC_PIO_STAT_DATA_SHIFT 0 + +#define ESPC_NCR(IDX) (FZC_PROM + 0x40020UL + (IDX)*0x8UL) +#define ESPC_NCR_VAL 0x00000000ffffffffULL + +#define ESPC_MAC_ADDR0 ESPC_NCR(0) +#define ESPC_MAC_ADDR1 ESPC_NCR(1) +#define ESPC_NUM_PORTS_MACS ESPC_NCR(2) +#define ESPC_NUM_PORTS_MACS_VAL 0x00000000000000ffULL +#define ESPC_MOD_STR_LEN ESPC_NCR(4) +#define ESPC_MOD_STR_1 ESPC_NCR(5) +#define ESPC_MOD_STR_2 ESPC_NCR(6) +#define ESPC_MOD_STR_3 ESPC_NCR(7) +#define ESPC_MOD_STR_4 ESPC_NCR(8) +#define ESPC_MOD_STR_5 ESPC_NCR(9) +#define ESPC_MOD_STR_6 ESPC_NCR(10) +#define ESPC_MOD_STR_7 ESPC_NCR(11) +#define ESPC_MOD_STR_8 ESPC_NCR(12) +#define ESPC_BD_MOD_STR_LEN ESPC_NCR(13) +#define ESPC_BD_MOD_STR_1 ESPC_NCR(14) +#define ESPC_BD_MOD_STR_2 ESPC_NCR(15) +#define ESPC_BD_MOD_STR_3 ESPC_NCR(16) +#define ESPC_BD_MOD_STR_4 ESPC_NCR(17) + +#define ESPC_PHY_TYPE ESPC_NCR(18) +#define ESPC_PHY_TYPE_PORT0 0x00000000ff000000ULL +#define ESPC_PHY_TYPE_PORT0_SHIFT 24 +#define ESPC_PHY_TYPE_PORT1 0x0000000000ff0000ULL +#define ESPC_PHY_TYPE_PORT1_SHIFT 16 +#define ESPC_PHY_TYPE_PORT2 0x000000000000ff00ULL +#define ESPC_PHY_TYPE_PORT2_SHIFT 8 +#define ESPC_PHY_TYPE_PORT3 0x00000000000000ffULL +#define ESPC_PHY_TYPE_PORT3_SHIFT 0 + +#define ESPC_PHY_TYPE_1G_COPPER 3 +#define ESPC_PHY_TYPE_1G_FIBER 2 +#define ESPC_PHY_TYPE_10G_COPPER 1 +#define ESPC_PHY_TYPE_10G_FIBER 0 + +#define ESPC_MAX_FM_SZ ESPC_NCR(19) + +#define ESPC_INTR_NUM ESPC_NCR(20) +#define ESPC_INTR_NUM_PORT0 0x00000000ff000000ULL +#define ESPC_INTR_NUM_PORT1 0x0000000000ff0000ULL +#define ESPC_INTR_NUM_PORT2 0x000000000000ff00ULL +#define ESPC_INTR_NUM_PORT3 0x00000000000000ffULL + +#define ESPC_VER_IMGSZ ESPC_NCR(21) +#define ESPC_VER_IMGSZ_IMGSZ 0x00000000ffff0000ULL +#define ESPC_VER_IMGSZ_IMGSZ_SHIFT 16 +#define ESPC_VER_IMGSZ_VER 0x000000000000ffffULL +#define ESPC_VER_IMGSZ_VER_SHIFT 0 + +#define ESPC_CHKSUM ESPC_NCR(22) +#define ESPC_CHKSUM_SUM 0x00000000000000ffULL + +#define ESPC_EEPROM_SIZE 0x100000 + +#define CLASS_CODE_UNRECOG 0x00 +#define CLASS_CODE_DUMMY1 0x01 +#define CLASS_CODE_ETHERTYPE1 0x02 +#define CLASS_CODE_ETHERTYPE2 0x03 +#define CLASS_CODE_USER_PROG1 0x04 +#define CLASS_CODE_USER_PROG2 0x05 +#define CLASS_CODE_USER_PROG3 0x06 +#define CLASS_CODE_USER_PROG4 0x07 +#define CLASS_CODE_TCP_IPV4 0x08 +#define CLASS_CODE_UDP_IPV4 0x09 
+#define CLASS_CODE_AH_ESP_IPV4 0x0a +#define CLASS_CODE_SCTP_IPV4 0x0b +#define CLASS_CODE_TCP_IPV6 0x0c +#define CLASS_CODE_UDP_IPV6 0x0d +#define CLASS_CODE_AH_ESP_IPV6 0x0e +#define CLASS_CODE_SCTP_IPV6 0x0f +#define CLASS_CODE_ARP 0x10 +#define CLASS_CODE_RARP 0x11 +#define CLASS_CODE_DUMMY2 0x12 +#define CLASS_CODE_DUMMY3 0x13 +#define CLASS_CODE_DUMMY4 0x14 +#define CLASS_CODE_DUMMY5 0x15 +#define CLASS_CODE_DUMMY6 0x16 +#define CLASS_CODE_DUMMY7 0x17 +#define CLASS_CODE_DUMMY8 0x18 +#define CLASS_CODE_DUMMY9 0x19 +#define CLASS_CODE_DUMMY10 0x1a +#define CLASS_CODE_DUMMY11 0x1b +#define CLASS_CODE_DUMMY12 0x1c +#define CLASS_CODE_DUMMY13 0x1d +#define CLASS_CODE_DUMMY14 0x1e +#define CLASS_CODE_DUMMY15 0x1f + +/* Logical devices and device groups */ +#define LDN_RXDMA(CHAN) (0 + (CHAN)) +#define LDN_RESV1(OFF) (16 + (OFF)) +#define LDN_TXDMA(CHAN) (32 + (CHAN)) +#define LDN_RESV2(OFF) (56 + (OFF)) +#define LDN_MIF 63 +#define LDN_MAC(PORT) (64 + (PORT)) +#define LDN_DEVICE_ERROR 68 +#define LDN_MAX LDN_DEVICE_ERROR + +#define NIU_LDG_MIN 0 +#define NIU_LDG_MAX 63 +#define NIU_NUM_LDG 64 +#define LDG_INVALID 0xff + +/* PHY stuff */ +#define NIU_PMA_PMD_DEV_ADDR 1 +#define NIU_PCS_DEV_ADDR 3 + +#define NIU_PHY_ID_MASK 0xfffff0f0 +#define NIU_PHY_ID_BCM8704 0x00206030 +#define NIU_PHY_ID_BCM5464R 0x002060b0 + +#define BCM8704_PMA_PMD_DEV_ADDR 1 +#define BCM8704_PCS_DEV_ADDR 2 +#define BCM8704_USER_DEV3_ADDR 3 +#define BCM8704_PHYXS_DEV_ADDR 4 +#define BCM8704_USER_DEV4_ADDR 4 + +#define BCM8704_PMD_RCV_SIGDET 0x000a +#define PMD_RCV_SIGDET_LANE3 0x0010 +#define PMD_RCV_SIGDET_LANE2 0x0008 +#define PMD_RCV_SIGDET_LANE1 0x0004 +#define PMD_RCV_SIGDET_LANE0 0x0002 +#define PMD_RCV_SIGDET_GLOBAL 0x0001 + +#define BCM8704_PCS_10G_R_STATUS 0x0020 +#define PCS_10G_R_STATUS_LINKSTAT 0x1000 +#define PCS_10G_R_STATUS_PRBS31_ABLE 0x0004 +#define PCS_10G_R_STATUS_HI_BER 0x0002 +#define PCS_10G_R_STATUS_BLK_LOCK 0x0001 + +#define BCM8704_USER_CONTROL 0xc800 +#define USER_CONTROL_OPTXENB_LVL 0x8000 +#define USER_CONTROL_OPTXRST_LVL 0x4000 +#define USER_CONTROL_OPBIASFLT_LVL 0x2000 +#define USER_CONTROL_OBTMPFLT_LVL 0x1000 +#define USER_CONTROL_OPPRFLT_LVL 0x0800 +#define USER_CONTROL_OPTXFLT_LVL 0x0400 +#define USER_CONTROL_OPRXLOS_LVL 0x0200 +#define USER_CONTROL_OPRXFLT_LVL 0x0100 +#define USER_CONTROL_OPTXON_LVL 0x0080 +#define USER_CONTROL_RES1 0x007f +#define USER_CONTROL_RES1_SHIFT 0 + +#define BCM8704_USER_ANALOG_CLK 0xc801 +#define BCM8704_USER_PMD_RX_CONTROL 0xc802 + +#define BCM8704_USER_PMD_TX_CONTROL 0xc803 +#define USER_PMD_TX_CTL_RES1 0xfe00 +#define USER_PMD_TX_CTL_XFP_CLKEN 0x0100 +#define USER_PMD_TX_CTL_TX_DAC_TXD 0x00c0 +#define USER_PMD_TX_CTL_TX_DAC_TXD_SH 6 +#define USER_PMD_TX_CTL_TX_DAC_TXCK 0x0030 +#define USER_PMD_TX_CTL_TX_DAC_TXCK_SH 4 +#define USER_PMD_TX_CTL_TSD_LPWREN 0x0008 +#define USER_PMD_TX_CTL_TSCK_LPWREN 0x0004 +#define USER_PMD_TX_CTL_CMU_LPWREN 0x0002 +#define USER_PMD_TX_CTL_SFIFORST 0x0001 + +#define BCM8704_USER_ANALOG_STATUS0 0xc804 +#define BCM8704_USER_OPT_DIGITAL_CTRL 0xc808 +#define BCM8704_USER_TX_ALARM_STATUS 0x9004 + +#define USER_ODIG_CTRL_FMODE 0x8000 +#define USER_ODIG_CTRL_TX_PDOWN 0x4000 +#define USER_ODIG_CTRL_RX_PDOWN 0x2000 +#define USER_ODIG_CTRL_EFILT_EN 0x1000 +#define USER_ODIG_CTRL_OPT_RST 0x0800 +#define USER_ODIG_CTRL_PCS_TIB 0x0400 +#define USER_ODIG_CTRL_PCS_RI 0x0200 +#define USER_ODIG_CTRL_RESV1 0x0180 +#define USER_ODIG_CTRL_GPIOS 0x0060 +#define USER_ODIG_CTRL_GPIOS_SHIFT 5 +#define USER_ODIG_CTRL_RESV2 0x0010 +#define 
USER_ODIG_CTRL_LB_ERR_DIS 0x0008 +#define USER_ODIG_CTRL_RESV3 0x0006 +#define USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001 + +#define BCM8704_PHYXS_XGXS_LANE_STAT 0x0018 +#define PHYXS_XGXS_LANE_STAT_ALINGED 0x1000 +#define PHYXS_XGXS_LANE_STAT_PATTEST 0x0800 +#define PHYXS_XGXS_LANE_STAT_MAGIC 0x0400 +#define PHYXS_XGXS_LANE_STAT_LANE3 0x0008 +#define PHYXS_XGXS_LANE_STAT_LANE2 0x0004 +#define PHYXS_XGXS_LANE_STAT_LANE1 0x0002 +#define PHYXS_XGXS_LANE_STAT_LANE0 0x0001 + +#define BCM5464R_AUX_CTL 24 +#define BCM5464R_AUX_CTL_EXT_LB 0x8000 +#define BCM5464R_AUX_CTL_EXT_PLEN 0x4000 +#define BCM5464R_AUX_CTL_ER1000 0x3000 +#define BCM5464R_AUX_CTL_ER1000_SHIFT 12 +#define BCM5464R_AUX_CTL_RESV1 0x0800 +#define BCM5464R_AUX_CTL_WRITE_1 0x0400 +#define BCM5464R_AUX_CTL_RESV2 0x0300 +#define BCM5464R_AUX_CTL_PRESP_DIS 0x0080 +#define BCM5464R_AUX_CTL_RESV3 0x0040 +#define BCM5464R_AUX_CTL_ER100 0x0030 +#define BCM5464R_AUX_CTL_ER100_SHIFT 4 +#define BCM5464R_AUX_CTL_DIAG_MODE 0x0008 +#define BCM5464R_AUX_CTL_SR_SEL 0x0007 +#define BCM5464R_AUX_CTL_SR_SEL_SHIFT 0 + +#define BCM5464R_CTRL1000_AS_MASTER 0x0800 +#define BCM5464R_CTRL1000_ENABLE_AS_MASTER 0x1000 + +#define RCR_ENTRY_MULTI 0x8000000000000000ULL +#define RCR_ENTRY_PKT_TYPE 0x6000000000000000ULL +#define RCR_ENTRY_PKT_TYPE_SHIFT 61 +#define RCR_ENTRY_ZERO_COPY 0x1000000000000000ULL +#define RCR_ENTRY_NOPORT 0x0800000000000000ULL +#define RCR_ENTRY_PROMISC 0x0400000000000000ULL +#define RCR_ENTRY_ERROR 0x0380000000000000ULL +#define RCR_ENTRY_DCF_ERR 0x0040000000000000ULL +#define RCR_ENTRY_L2_LEN 0x003fff0000000000ULL +#define RCR_ENTRY_L2_LEN_SHIFT 40 +#define RCR_ENTRY_PKTBUFSZ 0x000000c000000000ULL +#define RCR_ENTRY_PKTBUFSZ_SHIFT 38 +#define RCR_ENTRY_PKT_BUF_ADDR 0x0000003fffffffffULL /* bits 43:6 */ +#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT 6 + +#define RCR_PKT_TYPE_OTHER 0x0 +#define RCR_PKT_TYPE_TCP 0x1 +#define RCR_PKT_TYPE_UDP 0x2 +#define RCR_PKT_TYPE_SCTP 0x3 + +#define NIU_RXPULL_MAX ETH_HLEN + +struct rx_pkt_hdr0 { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 inputport:2, + maccheck:1, + class:4; + u8 vlan:1, + llcsnap:1, + noport:1, + badip:1, + tcamhit:1, + tres:2, + tzfvld:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 class:4, + maccheck:1, + inputport:2; + u8 tzfvld:1, + tres:2, + tcamhit:1, + badip:1, + noport:1, + llcsnap:1, + vlan:1; +#endif +}; + +struct rx_pkt_hdr1 { + u8 hwrsvd1; + u8 tcammatch; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd2:2, + hashit:1, + exact:1, + hzfvld:1, + hashsidx:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 hashsidx:3, + hzfvld:1, + exact:1, + hashit:1, + hwrsvd2:2; +#endif + u8 zcrsvd; + + /* Bits 11:8 of zero copy flow ID. */ +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd3:4, zflowid0:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 zflowid0:4, hwrsvd3:4; +#endif + + /* Bits 7:0 of zero copy flow ID. */ + u8 zflowid1; + + /* Bits 15:8 of hash value, H2. */ + u8 hashval2_0; + + /* Bits 7:0 of hash value, H2. */ + u8 hashval2_1; + + /* Bits 19:16 of hash value, H1. */ +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd4:4, hashval1_0:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 hashval1_0:4, hwrsvd4:4; +#endif + + /* Bits 15:8 of hash value, H1. */ + u8 hashval1_1; + + /* Bits 7:0 of hash value, H1. */ + u8 hashval1_2; + + u8 usrdata_0; /* Bits 39:32 of user data. */ + u8 usrdata_1; /* Bits 31:24 of user data. */ + u8 usrdata_2; /* Bits 23:16 of user data. */ + u8 usrdata_3; /* Bits 15:8 of user data. */ + u8 usrdata_4; /* Bits 7:0 of user data. 
*/ +}; + +struct tx_dma_mbox { + u64 tx_dma_pre_st; + u64 tx_cs; + u64 tx_ring_kick; + u64 tx_ring_hdl; + u64 resv1; + u32 tx_rng_err_logl; + u32 tx_rng_err_logh; + u64 resv2; + u64 resv3; +}; + +struct tx_pkt_hdr { + __le64 flags; +#define TXHDR_PAD 0x0000000000000007ULL +#define TXHDR_PAD_SHIFT 0 +#define TXHDR_LEN 0x000000003fff0000ULL +#define TXHDR_LEN_SHIFT 16 +#define TXHDR_L4STUFF 0x0000003f00000000ULL +#define TXHDR_L4STUFF_SHIFT 32 +#define TXHDR_L4START 0x00003f0000000000ULL +#define TXHDR_L4START_SHIFT 40 +#define TXHDR_L3START 0x000f000000000000ULL +#define TXHDR_L3START_SHIFT 48 +#define TXHDR_IHL 0x00f0000000000000ULL +#define TXHDR_IHL_SHIFT 52 +#define TXHDR_VLAN 0x0100000000000000ULL +#define TXHDR_LLC 0x0200000000000000ULL +#define TXHDR_IP_VER 0x2000000000000000ULL +#define TXHDR_CSUM_NONE 0x0000000000000000ULL +#define TXHDR_CSUM_TCP 0x4000000000000000ULL +#define TXHDR_CSUM_UDP 0x8000000000000000ULL +#define TXHDR_CSUM_SCTP 0xc000000000000000ULL + __le64 resv; +}; + +#define TX_DESC_SOP 0x8000000000000000ULL +#define TX_DESC_MARK 0x4000000000000000ULL +#define TX_DESC_NUM_PTR 0x3c00000000000000ULL +#define TX_DESC_NUM_PTR_SHIFT 58 +#define TX_DESC_TR_LEN 0x01fff00000000000ULL +#define TX_DESC_TR_LEN_SHIFT 44 +#define TX_DESC_SAD 0x00000fffffffffffULL +#define TX_DESC_SAD_SHIFT 0 + +struct tx_buff_info { + struct sk_buff *skb; + u64 mapping; +}; + +struct txdma_mailbox { + __le64 tx_dma_pre_st; + __le64 tx_cs; + __le64 tx_ring_kick; + __le64 tx_ring_hdl; + __le64 resv1; + __le32 tx_rng_err_logl; + __le32 tx_rng_err_logh; + __le64 resv2[2]; +} __attribute__((aligned(64))); + +#define MAX_TX_RING_SIZE 256 +#define MAX_TX_DESC_LEN 4076 + +struct tx_ring_info { + struct tx_buff_info tx_buffs[MAX_TX_RING_SIZE]; + struct niu *np; + u64 tx_cs; + int pending; + int prod; + int cons; + int wrap_bit; + u16 last_pkt_cnt; + u16 tx_channel; + u16 mark_counter; + u16 mark_freq; + u16 mark_pending; + u16 __pad; + struct txdma_mailbox *mbox; + __le64 *descr; + + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; + + u64 mbox_dma; + u64 descr_dma; + int max_burst; +}; + +#define NEXT_TX(tp, index) \ + (((index) + 1) < (tp)->pending ? 
((index) + 1) : 0) + +static inline u32 niu_tx_avail(struct tx_ring_info *tp) +{ + return (tp->pending - + ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1))); +} + +struct rxdma_mailbox { + __le64 rx_dma_ctl_stat; + __le64 rbr_stat; + __le32 rbr_hdl; + __le32 rbr_hdh; + __le64 resv1; + __le32 rcrstat_c; + __le32 rcrstat_b; + __le64 rcrstat_a; + __le64 resv2[2]; +} __attribute__((aligned(64))); + +#define MAX_RBR_RING_SIZE 128 +#define MAX_RCR_RING_SIZE (MAX_RBR_RING_SIZE * 2) + +#define RBR_REFILL_MIN 16 + +#define RX_SKB_ALLOC_SIZE 128 + NET_IP_ALIGN + +struct rx_ring_info { + struct niu *np; + int rx_channel; + u16 rbr_block_size; + u16 rbr_blocks_per_page; + u16 rbr_sizes[4]; + unsigned int rcr_index; + unsigned int rcr_table_size; + unsigned int rbr_index; + unsigned int rbr_pending; + unsigned int rbr_refill_pending; + unsigned int rbr_kick_thresh; + unsigned int rbr_table_size; + struct page **rxhash; + struct rxdma_mailbox *mbox; + __le64 *rcr; + __le32 *rbr; +#define RBR_DESCR_ADDR_SHIFT 12 + + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; + + u64 mbox_dma; + u64 rcr_dma; + u64 rbr_dma; + + /* WRED */ + int nonsyn_window; + int nonsyn_threshold; + int syn_window; + int syn_threshold; + + /* interrupt mitigation */ + int rcr_pkt_threshold; + int rcr_timeout; +}; + +#define NEXT_RCR(rp, index) \ + (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0) +#define NEXT_RBR(rp, index) \ + (((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0) + +#define NIU_MAX_PORTS 4 +#define NIU_NUM_RXCHAN 16 +#define NIU_NUM_TXCHAN 24 +#define MAC_NUM_HASH 16 + +#define NIU_MAX_MTU 9216 + +#define NIU_VPD_MIN_MAJOR 3 +#define NIU_VPD_MIN_MINOR 4 + +#define NIU_VPD_MODEL_MAX 32 +#define NIU_VPD_BD_MODEL_MAX 16 +#define NIU_VPD_VERSION_MAX 64 +#define NIU_VPD_PHY_TYPE_MAX 8 + +struct niu_vpd { + char model[NIU_VPD_MODEL_MAX]; + char board_model[NIU_VPD_BD_MODEL_MAX]; + char version[NIU_VPD_VERSION_MAX]; + char phy_type[NIU_VPD_PHY_TYPE_MAX]; + u8 mac_num; + u8 __pad; + u8 local_mac[6]; + int fcode_major; + int fcode_minor; +}; + +struct niu_altmac_rdc { + u8 alt_mac_num; + u8 rdc_num; + u8 mac_pref; +}; + +struct niu_vlan_rdc { + u8 rdc_num; + u8 vlan_pref; +}; + +struct niu_classifier { + struct niu_altmac_rdc alt_mac_mappings[16]; + struct niu_vlan_rdc vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES]; + + u16 tcam_index; + u16 num_alt_mac_mappings; + + u32 h1_init; + u16 h2_init; +}; + +#define NIU_NUM_RDC_TABLES 8 +#define NIU_RDC_TABLE_SLOTS 16 + +struct rdc_table { + u8 rxdma_channel[NIU_RDC_TABLE_SLOTS]; +}; + +struct niu_rdc_tables { + struct rdc_table tables[NIU_NUM_RDC_TABLES]; + int first_table_num; + int num_tables; +}; + +#define PHY_TYPE_PMA_PMD 0 +#define PHY_TYPE_PCS 1 +#define PHY_TYPE_MII 2 +#define PHY_TYPE_MAX 3 + +struct phy_probe_info { + u32 phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS]; + u8 phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS]; + u8 cur[PHY_TYPE_MAX]; + + struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; + struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; + struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; +}; + +struct niu_tcam_entry { + u64 key[4]; + u64 key_mask[4]; + u64 assoc_data; +}; + +struct device_node; +union niu_parent_id { + struct { + int domain; + int bus; + int device; + } pci; + struct device_node *of; +}; + +struct niu; +struct niu_parent { + struct platform_device *plat_dev; + int index; + + union niu_parent_id id; + + struct niu *ports[NIU_MAX_PORTS]; + + atomic_t refcnt; + struct list_head 
list; + + spinlock_t lock; + + u32 flags; +#define PARENT_FLGS_CLS_HWINIT 0x00000001 + + u32 port_phy; +#define PORT_PHY_UNKNOWN 0x00000000 +#define PORT_PHY_INVALID 0xffffffff +#define PORT_TYPE_10G 0x01 +#define PORT_TYPE_1G 0x02 +#define PORT_TYPE_MASK 0x03 + + u8 rxchan_per_port[NIU_MAX_PORTS]; + u8 txchan_per_port[NIU_MAX_PORTS]; + + struct niu_rdc_tables rdc_group_cfg[NIU_MAX_PORTS]; + u8 rdc_default[NIU_MAX_PORTS]; + + u8 ldg_map[LDN_MAX + 1]; + + u8 plat_type; +#define PLAT_TYPE_INVALID 0x00 +#define PLAT_TYPE_ATLAS 0x01 +#define PLAT_TYPE_NIU 0x02 +#define PLAT_TYPE_VF_P0 0x03 +#define PLAT_TYPE_VF_P1 0x04 + + u8 num_ports; + + u16 tcam_num_entries; +#define NIU_PCI_TCAM_ENTRIES 256 +#define NIU_NONPCI_TCAM_ENTRIES 128 +#define NIU_TCAM_ENTRIES_MAX 256 + + int rxdma_clock_divider; + + struct phy_probe_info phy_probe_info; + + struct niu_tcam_entry tcam[NIU_TCAM_ENTRIES_MAX]; + u64 l2_cls[2]; + u64 l3_cls[4]; + u64 tcam_key[12]; + u64 flow_key[12]; +}; + +struct niu_ops { + void *(*alloc_coherent)(struct device *dev, size_t size, + u64 *handle, gfp_t flag); + void (*free_coherent)(struct device *dev, size_t size, + void *cpu_addr, u64 handle); + u64 (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction); + void (*unmap_page)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); + u64 (*map_single)(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction); + void (*unmap_single)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); +}; + +struct niu_link_config { + /* Describes what we're trying to get. */ + u32 advertising; + u32 supported; + u16 speed; + u8 duplex; + u8 autoneg; + + /* Describes what we actually have. 
*/ + u16 active_speed; + u8 active_duplex; +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff +#define AUTONEG_INVALID 0xff + + u8 loopback_mode; +#define LOOPBACK_DISABLED 0x00 +#define LOOPBACK_PHY 0x01 +#define LOOPBACK_MAC 0x02 +}; + +struct niu_ldg { + struct napi_struct napi; + struct niu *np; + u8 ldg_num; + u8 timer; + u64 v0, v1, v2; + unsigned int irq; +}; + +struct niu_xmac_stats { + u64 tx_frames; + u64 tx_bytes; + u64 tx_fifo_errors; + u64 tx_overflow_errors; + u64 tx_max_pkt_size_errors; + u64 tx_underflow_errors; + + u64 rx_local_faults; + u64 rx_remote_faults; + u64 rx_link_faults; + u64 rx_align_errors; + u64 rx_frags; + u64 rx_mcasts; + u64 rx_bcasts; + u64 rx_hist_cnt1; + u64 rx_hist_cnt2; + u64 rx_hist_cnt3; + u64 rx_hist_cnt4; + u64 rx_hist_cnt5; + u64 rx_hist_cnt6; + u64 rx_hist_cnt7; + u64 rx_octets; + u64 rx_code_violations; + u64 rx_len_errors; + u64 rx_crc_errors; + u64 rx_underflows; + u64 rx_overflows; + + u64 pause_off_state; + u64 pause_on_state; + u64 pause_received; +}; + +struct niu_bmac_stats { + u64 tx_underflow_errors; + u64 tx_max_pkt_size_errors; + u64 tx_bytes; + u64 tx_frames; + + u64 rx_overflows; + u64 rx_frames; + u64 rx_align_errors; + u64 rx_crc_errors; + u64 rx_len_errors; + + u64 pause_off_state; + u64 pause_on_state; + u64 pause_received; +}; + +union niu_mac_stats { + struct niu_xmac_stats xmac; + struct niu_bmac_stats bmac; +}; + +struct niu_phy_ops { + int (*serdes_init)(struct niu *np); + int (*xcvr_init)(struct niu *np); + int (*link_status)(struct niu *np, int *); +}; + +struct of_device; +struct niu { + void __iomem *regs; + struct net_device *dev; + struct pci_dev *pdev; + struct device *device; + struct niu_parent *parent; + + u32 flags; +#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ +#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ +#define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */ +#define NIU_FLAGS_VPD_VALID 0x00080000 /* VPD has valid version */ +#define NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */ +#define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */ +#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */ + + u32 msg_enable; + + /* Protects hw programming, and ring state. */ + spinlock_t lock; + + const struct niu_ops *ops; + struct net_device_stats net_stats; + union niu_mac_stats mac_stats; + + struct rx_ring_info *rx_rings; + struct tx_ring_info *tx_rings; + int num_rx_rings; + int num_tx_rings; + + struct niu_ldg ldg[NIU_NUM_LDG]; + int num_ldg; + + void __iomem *mac_regs; + unsigned long ipp_off; + unsigned long pcs_off; + unsigned long xpcs_off; + + struct timer_list timer; + const struct niu_phy_ops *phy_ops; + int phy_addr; + + struct niu_link_config link_config; + + struct work_struct reset_task; + + u8 port; + u8 mac_xcvr; +#define MAC_XCVR_MII 1 +#define MAC_XCVR_PCS 2 +#define MAC_XCVR_XPCS 3 + + struct niu_classifier clas; + + struct niu_vpd vpd; + u32 eeprom_len; + + struct of_device *op; + void __iomem *vir_regs_1; + void __iomem *vir_regs_2; +}; + +#endif /* _NIU_H */ diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 104aab3c957f..ea71f6d82661 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -1,4 +1,4 @@ -#define VERSION "0.22" +#define VERSION "0.23" /* ns83820.c by Benjamin LaHaise with contributions. * * Questions/comments/discussion to linux-ns83820@kvack.org. 
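As an illustrative aside (not part of either patch), the TXHDR_* masks and shifts in the niu.h block above are meant to be OR-ed into the single 64-bit flags word of struct tx_pkt_hdr. A minimal sketch follows, assuming a hypothetical build_txhdr_flags() helper and using only the shift constants quoted in that header excerpt; it is not the driver's actual transmit path.

/* Hypothetical helper, illustration only: pack a niu Tx header word
 * from the TXHDR_* shifts defined in the niu.h excerpt above.
 */
#include <stdint.h>

#define TXHDR_LEN_SHIFT      16
#define TXHDR_L4STUFF_SHIFT  32
#define TXHDR_L4START_SHIFT  40
#define TXHDR_CSUM_TCP       0x4000000000000000ULL

static uint64_t build_txhdr_flags(uint64_t pkt_len, uint64_t l4_start,
				  uint64_t l4_stuff)
{
	uint64_t flags = 0;

	flags |= pkt_len  << TXHDR_LEN_SHIFT;		/* TXHDR_LEN field */
	flags |= l4_start << TXHDR_L4START_SHIFT;	/* TXHDR_L4START field */
	flags |= l4_stuff << TXHDR_L4STUFF_SHIFT;	/* TXHDR_L4STUFF field */
	flags |= TXHDR_CSUM_TCP;			/* request hardware TCP checksum */

	return flags;
}

Whether the start/stuff offsets are expressed in bytes or 2-byte units is defined by the hardware and the driver proper, not by this sketch.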
@@ -1247,6 +1247,149 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) return &dev->stats; } +/* Let ethtool retrieve info */ +static int ns83820_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg, tanar, tbicr; + int have_optical = 0; + int fullduplex = 0; + + /* + * Here's the list of available ethtool commands from other drivers: + * cmd->advertising = + * cmd->speed = + * cmd->duplex = + * cmd->port = 0; + * cmd->phy_address = + * cmd->transceiver = 0; + * cmd->autoneg = + * cmd->maxtxpkt = 0; + * cmd->maxrxpkt = 0; + */ + + /* read current configuration */ + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + tanar = readl(dev->base + TANAR); + tbicr = readl(dev->base + TBICR); + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have an optical interface */ + have_optical = 1; + fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; + + } else { + /* We have copper */ + fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; + } + + cmd->supported = SUPPORTED_Autoneg; + + /* we have optical interface */ + if (dev->CFG_cache & CFG_TBI_EN) { + cmd->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } /* TODO: else copper related support */ + + cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; + switch (cfg / CFG_SPDSTS0 & 3) { + case 2: + cmd->speed = SPEED_1000; + break; + case 1: + cmd->speed = SPEED_100; + break; + default: + cmd->speed = SPEED_10; + break; + } + cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0; + return 0; +} + +/* Let ethool change settings*/ +static int ns83820_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg, tanar; + int have_optical = 0; + int fullduplex = 0; + + /* read current configuration */ + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + tanar = readl(dev->base + TANAR); + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have optical */ + have_optical = 1; + fullduplex = (tanar & TANAR_FULL_DUP); + + } else { + /* we have copper */ + fullduplex = cfg & CFG_DUPSTS; + } + + spin_lock_irq(&dev->misc_lock); + spin_lock(&dev->tx_lock); + + /* Set duplex */ + if (cmd->duplex != fullduplex) { + if (have_optical) { + /*set full duplex*/ + if (cmd->duplex == DUPLEX_FULL) { + /* force full duplex */ + writel(readl(dev->base + TXCFG) + | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, + dev->base + RXCFG); + /* Light up full duplex LED */ + writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, + dev->base + GPIOR); + } else { + /*TODO: set half duplex */ + } + + } else { + /*we have copper*/ + /* TODO: Set duplex for copper cards */ + } + printk(KERN_INFO "%s: Duplex set via ethtool\n", + ndev->name); + } + + /* Set autonegotiation */ + if (1) { + if (cmd->autoneg == AUTONEG_ENABLE) { + /* restart auto negotiation */ + writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, + dev->base + TBICR); + writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); + dev->linkstate = LINK_AUTONEGOTIATE; + + printk(KERN_INFO "%s: autoneg enabled via ethtool\n", + ndev->name); + } else { + /* disable auto negotiation */ + writel(0x00000000, dev->base + TBICR); + } + + printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, + cmd->autoneg ? 
"ENABLED" : "DISABLED"); + } + + phy_intr(ndev); + spin_unlock(&dev->tx_lock); + spin_unlock_irq(&dev->misc_lock); + + return 0; +} +/* end ethtool get/set support -df */ + static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ns83820 *dev = PRIV(ndev); @@ -1263,8 +1406,10 @@ static u32 ns83820_get_link(struct net_device *ndev) } static const struct ethtool_ops ops = { - .get_drvinfo = ns83820_get_drvinfo, - .get_link = ns83820_get_link + .get_settings = ns83820_get_settings, + .set_settings = ns83820_set_settings, + .get_drvinfo = ns83820_get_drvinfo, + .get_link = ns83820_get_link }; /* this function is called in irq context from the ISR */ @@ -1582,7 +1727,7 @@ static void ns83820_set_multicast(struct net_device *ndev) else and_mask &= ~(RFCR_AAU | RFCR_AAM); - if (ndev->flags & IFF_ALLMULTI) + if (ndev->flags & IFF_ALLMULTI || ndev->mc_count) or_mask |= RFCR_AAM; else and_mask &= ~RFCR_AAM; @@ -1817,6 +1962,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ long addr; int err; int using_dac = 0; + DECLARE_MAC_BUF(mac); /* See if we can set the dma mask early on; failure is fatal. */ if (sizeof(dma_addr_t) == 8 && @@ -1843,7 +1989,6 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ spin_lock_init(&dev->misc_lock); dev->pci_dev = pci_dev; - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pci_dev->dev); INIT_WORK(&dev->tq_refill, queue_refill); @@ -2082,13 +2227,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ ndev->features |= NETIF_F_HIGHDMA; } - printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %02x:%02x:%02x:%02x:%02x:%02x io=0x%08lx irq=%d f=%s\n", + printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %s io=0x%08lx irq=%d f=%s\n", ndev->name, (unsigned)readl(dev->base + SRR) >> 8, (unsigned)readl(dev->base + SRR) & 0xff, - ndev->dev_addr[0], ndev->dev_addr[1], - ndev->dev_addr[2], ndev->dev_addr[3], - ndev->dev_addr[4], ndev->dev_addr[5], + print_mac(mac, ndev->dev_addr), addr, pci_dev->irq, (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg" ); diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 0b3066a6fe40..9f9a421c99b3 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -34,24 +34,29 @@ #include <net/checksum.h> #include <asm/irq.h> +#include <asm/firmware.h> #include "pasemi_mac.h" +/* We have our own align, since ppc64 in general has it at 0 because + * of design flaws in some of the server bridge chips. However, for + * PWRficient doing the unaligned copies is more expensive than doing + * unaligned DMA, so make sure the data is aligned instead. 
+ */ +#define LOCAL_SKB_ALIGN 2 /* TODO list * - * - Get rid of pci_{read,write}_config(), map registers with ioremap - * for performance - * - PHY support * - Multicast support * - Large MTU support - * - Other performance improvements + * - SW LRO + * - Multiqueue RX/TX */ /* Must be a power of two */ -#define RX_RING_SIZE 512 -#define TX_RING_SIZE 512 +#define RX_RING_SIZE 4096 +#define TX_RING_SIZE 4096 #define DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV | \ @@ -63,12 +68,16 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) -#define TX_DESC(mac, num) ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)]) -#define TX_DESC_INFO(mac, num) ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)]) -#define RX_DESC(mac, num) ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)]) -#define RX_DESC_INFO(mac, num) ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)]) +#define TX_RING(mac, num) ((mac)->tx->ring[(num) & (TX_RING_SIZE-1)]) +#define TX_RING_INFO(mac, num) ((mac)->tx->ring_info[(num) & (TX_RING_SIZE-1)]) +#define RX_RING(mac, num) ((mac)->rx->ring[(num) & (RX_RING_SIZE-1)]) +#define RX_RING_INFO(mac, num) ((mac)->rx->ring_info[(num) & (RX_RING_SIZE-1)]) #define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)]) +#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ + & ((ring)->size - 1)) +#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) + #define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ MODULE_LICENSE("GPL"); @@ -81,6 +90,43 @@ MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); static struct pasdma_status *dma_status; +static int translation_enabled(void) +{ +#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) + return 1; +#else + return firmware_has_feature(FW_FEATURE_LPAR); +#endif +} + +static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + out_le32(mac->iob_regs+reg, val); +} + +static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg) +{ + return in_le32(mac->regs+reg); +} + +static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + out_le32(mac->regs+reg, val); +} + +static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg) +{ + return in_le32(mac->dma_regs+reg); +} + +static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + out_le32(mac->dma_regs+reg, val); +} + static int pasemi_get_mac_addr(struct pasemi_mac *mac) { struct pci_dev *pdev = mac->pdev; @@ -128,11 +174,36 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) return 0; } +static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, + struct sk_buff *skb, + dma_addr_t *dmas) +{ + int f; + int nfrags = skb_shinfo(skb)->nr_frags; + + pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb), + PCI_DMA_TODEVICE); + + for (f = 0; f < nfrags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size, + PCI_DMA_TODEVICE); + } + dev_kfree_skb_irq(skb); + + /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs, + * aligned up to a power of 2 + */ + return (nfrags + 3) & ~1; +} + static int pasemi_mac_setup_rx_resources(struct net_device *dev) { struct pasemi_mac_rxring *ring; struct pasemi_mac *mac = netdev_priv(dev); int chan_id = mac->dma_rxch; + unsigned int cfg; ring = kzalloc(sizeof(*ring), GFP_KERNEL); @@ -141,22 +212,22 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) spin_lock_init(&ring->lock); - ring->desc_info = kzalloc(sizeof(struct 
pasemi_mac_buffer) * + ring->size = RX_RING_SIZE; + ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * RX_RING_SIZE, GFP_KERNEL); - if (!ring->desc_info) - goto out_desc_info; + if (!ring->ring_info) + goto out_ring_info; /* Allocate descriptors */ - ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * - sizeof(struct pas_dma_xct_descr), + ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev, + RX_RING_SIZE * sizeof(u64), &ring->dma, GFP_KERNEL); - if (!ring->desc) - goto out_desc; + if (!ring->ring) + goto out_ring_desc; - memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr)); + memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64)); ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), @@ -166,22 +237,34 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id), - PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma)); + write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id), PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma)); + + write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id), + PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) | + PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); + + cfg = PAS_DMA_RXCHAN_CFG_HBU(2); + + if (translation_enabled()) + cfg |= PAS_DMA_RXCHAN_CFG_CTR; - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id), - PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) | - PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2)); + write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id), cfg); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id), - PAS_DMA_RXCHAN_CFG_HBU(1)); + write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if), + PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if), - PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers))); + write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if), + PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | + PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if), - PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) | - PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); + cfg = PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 | + PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | + PAS_DMA_RXINT_CFG_HEN; + + if (translation_enabled()) + cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; + + write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if), cfg); ring->next_to_fill = 0; ring->next_to_clean = 0; @@ -194,11 +277,11 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) out_buffers: dma_free_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * sizeof(struct pas_dma_xct_descr), - mac->rx->desc, mac->rx->dma); -out_desc: - kfree(ring->desc_info); -out_desc_info: + RX_RING_SIZE * sizeof(u64), + mac->rx->ring, mac->rx->dma); +out_ring_desc: + kfree(ring->ring_info); +out_ring_info: kfree(ring); out_ring: return -ENOMEM; @@ -211,6 +294,7 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) u32 val; int chan_id = mac->dma_txch; struct pasemi_mac_txring *ring; + unsigned int cfg; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) @@ -218,35 +302,39 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) spin_lock_init(&ring->lock); - ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) * + ring->size = TX_RING_SIZE; + ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * TX_RING_SIZE, GFP_KERNEL); - if (!ring->desc_info) - goto 
out_desc_info; + if (!ring->ring_info) + goto out_ring_info; /* Allocate descriptors */ - ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev, - TX_RING_SIZE * - sizeof(struct pas_dma_xct_descr), + ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev, + TX_RING_SIZE * sizeof(u64), &ring->dma, GFP_KERNEL); - if (!ring->desc) - goto out_desc; + if (!ring->ring) + goto out_ring_desc; - memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr)); + memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id), - PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); + write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(chan_id), + PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32); - val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2); + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); + + write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(chan_id), val); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val); + cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | + PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | + PAS_DMA_TXCHAN_CFG_UP | + PAS_DMA_TXCHAN_CFG_WT(2); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id), - PAS_DMA_TXCHAN_CFG_TY_IFACE | - PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | - PAS_DMA_TXCHAN_CFG_UP | - PAS_DMA_TXCHAN_CFG_WT(2)); + if (translation_enabled()) + cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; - ring->next_to_use = 0; + write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(chan_id), cfg); + + ring->next_to_fill = 0; ring->next_to_clean = 0; snprintf(ring->irq_name, sizeof(ring->irq_name), @@ -255,9 +343,9 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) return 0; -out_desc: - kfree(ring->desc_info); -out_desc_info: +out_ring_desc: + kfree(ring->ring_info); +out_ring_info: kfree(ring); out_ring: return -ENOMEM; @@ -266,33 +354,37 @@ out_ring: static void pasemi_mac_free_tx_resources(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); - unsigned int i; + unsigned int i, j; struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; - - for (i = 0; i < TX_RING_SIZE; i++) { - info = &TX_DESC_INFO(mac, i); - dp = &TX_DESC(mac, i); - if (info->dma) { - if (info->skb) { - pci_unmap_single(mac->dma_pdev, - info->dma, - info->skb->len, - PCI_DMA_TODEVICE); - dev_kfree_skb_any(info->skb); - } - info->dma = 0; - info->skb = NULL; - dp->mactx = 0; - dp->ptr = 0; - } + dma_addr_t dmas[MAX_SKB_FRAGS+1]; + int freed; + int start, limit; + + start = mac->tx->next_to_clean; + limit = mac->tx->next_to_fill; + + /* Compensate for when fill has wrapped and clean has not */ + if (start > limit) + limit += TX_RING_SIZE; + + for (i = start; i < limit; i += freed) { + info = &TX_RING_INFO(mac, i+1); + if (info->dma && info->skb) { + for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++) + dmas[j] = TX_RING_INFO(mac, i+1+j).dma; + freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas); + } else + freed = 2; } + for (i = 0; i < TX_RING_SIZE; i++) + TX_RING(mac, i) = 0; + dma_free_coherent(&mac->dma_pdev->dev, - TX_RING_SIZE * sizeof(struct pas_dma_xct_descr), - mac->tx->desc, mac->tx->dma); + TX_RING_SIZE * sizeof(u64), + mac->tx->ring, mac->tx->dma); - kfree(mac->tx->desc_info); + kfree(mac->tx->ring_info); kfree(mac->tx); mac->tx = NULL; } @@ -302,72 +394,66 @@ static void pasemi_mac_free_rx_resources(struct net_device *dev) struct pasemi_mac *mac = netdev_priv(dev); unsigned int i; struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; for (i = 0; i < RX_RING_SIZE; 
i++) { - info = &RX_DESC_INFO(mac, i); - dp = &RX_DESC(mac, i); - if (info->skb) { - if (info->dma) { - pci_unmap_single(mac->dma_pdev, - info->dma, - info->skb->len, - PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(info->skb); - } - info->dma = 0; - info->skb = NULL; - dp->macrx = 0; - dp->ptr = 0; + info = &RX_RING_INFO(mac, i); + if (info->skb && info->dma) { + pci_unmap_single(mac->dma_pdev, + info->dma, + info->skb->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(info->skb); } + info->dma = 0; + info->skb = NULL; } + for (i = 0; i < RX_RING_SIZE; i++) + RX_RING(mac, i) = 0; + dma_free_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * sizeof(struct pas_dma_xct_descr), - mac->rx->desc, mac->rx->dma); + RX_RING_SIZE * sizeof(u64), + mac->rx->ring, mac->rx->dma); dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), mac->rx->buffers, mac->rx->buf_dma); - kfree(mac->rx->desc_info); + kfree(mac->rx->ring_info); kfree(mac->rx); mac->rx = NULL; } -static void pasemi_mac_replenish_rx_ring(struct net_device *dev) +static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit) { struct pasemi_mac *mac = netdev_priv(dev); - unsigned int i; - int start = mac->rx->next_to_fill; - unsigned int limit, count; - - limit = (mac->rx->next_to_clean + RX_RING_SIZE - - mac->rx->next_to_fill) & (RX_RING_SIZE - 1); - - /* Check to see if we're doing first-time setup */ - if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0)) - limit = RX_RING_SIZE; + int fill, count; if (limit <= 0) return; - i = start; - for (count = limit; count; count--) { - struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i); - u64 *buff = &RX_BUFF(mac, i); + fill = mac->rx->next_to_fill; + for (count = 0; count < limit; count++) { + struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill); + u64 *buff = &RX_BUFF(mac, fill); struct sk_buff *skb; dma_addr_t dma; + /* Entry in use? 
*/ + WARN_ON(*buff); + /* skb might still be in there for recycle on short receives */ if (info->skb) skb = info->skb; - else + else { skb = dev_alloc_skb(BUF_SIZE); + skb_reserve(skb, LOCAL_SKB_ALIGN); + } if (unlikely(!skb)) break; - dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, + dma = pci_map_single(mac->dma_pdev, skb->data, + BUF_SIZE - LOCAL_SKB_ALIGN, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(dma))) { @@ -378,19 +464,15 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev) info->skb = skb; info->dma = dma; *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); - i++; + fill++; } wmb(); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_INCR(mac->dma_rxch), - limit - count); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_INCR(mac->dma_if), - limit - count); + write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count); - mac->rx->next_to_fill += limit - count; + mac->rx->next_to_fill = (mac->rx->next_to_fill + count) & + (RX_RING_SIZE - 1); } static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) @@ -404,9 +486,7 @@ static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), - reg); + write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); } static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) @@ -418,69 +498,96 @@ static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); + write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); } +static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx) +{ + unsigned int rcmdsta, ccmdsta; + + if (!netif_msg_rx_err(mac)) + return; + + rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); + + printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", + macrx, *mac->rx_status); + + printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", + rcmdsta, ccmdsta); +} + +static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx) +{ + unsigned int cmdsta; + + if (!netif_msg_tx_err(mac)) + return; + + cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); + + printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ + "tx status 0x%016lx\n", mactx, *mac->tx_status); + + printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); +} + static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) { unsigned int n; int count; - struct pas_dma_xct_descr *dp; struct pasemi_mac_buffer *info; struct sk_buff *skb; - unsigned int i, len; + unsigned int len; u64 macrx; dma_addr_t dma; + int buf_index; + u64 eval; spin_lock(&mac->rx->lock); n = mac->rx->next_to_clean; - for (count = limit; count; count--) { + prefetch(RX_RING(mac, n)); - rmb(); + for (count = 0; count < limit; count++) { + macrx = RX_RING(mac, n); - dp = &RX_DESC(mac, n); - macrx = dp->macrx; + if ((macrx & XCT_MACRX_E) || + (*mac->rx_status & PAS_STATUS_ERROR)) + pasemi_mac_rx_error(mac, macrx); if (!(macrx & XCT_MACRX_O)) break; - info = NULL; - /* We have to scan for our skb since there's no way - * to back-map them from the descriptor, and if we - * have several receive channels then they might not - * show up in the same order as they were put on the - * interface ring. 
- */ + BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); - dma = (dp->ptr & XCT_PTR_ADDR_M); - for (i = n; i < (n + RX_RING_SIZE); i++) { - info = &RX_DESC_INFO(mac, i); - if (info->dma == dma) - break; - } + eval = (RX_RING(mac, n+1) & XCT_RXRES_8B_EVAL_M) >> + XCT_RXRES_8B_EVAL_S; + buf_index = eval-1; + + dma = (RX_RING(mac, n+2) & XCT_PTR_ADDR_M); + info = &RX_RING_INFO(mac, buf_index); skb = info->skb; - info->dma = 0; - pci_unmap_single(mac->dma_pdev, dma, skb->len, - PCI_DMA_FROMDEVICE); + prefetch(skb); + prefetch(&skb->data_len); len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; if (len < 256) { - struct sk_buff *new_skb = - netdev_alloc_skb(mac->netdev, len + NET_IP_ALIGN); + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(mac->netdev, + len + LOCAL_SKB_ALIGN); if (new_skb) { - skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(new_skb->data - NET_IP_ALIGN, - skb->data - NET_IP_ALIGN, - len + NET_IP_ALIGN); + skb_reserve(new_skb, LOCAL_SKB_ALIGN); + memcpy(new_skb->data, skb->data, len); /* save the skb in buffer_info as good */ skb = new_skb; } @@ -488,73 +595,131 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) } else info->skb = NULL; - skb_put(skb, len); + pci_unmap_single(mac->dma_pdev, dma, len, PCI_DMA_FROMDEVICE); - skb->protocol = eth_type_trans(skb, mac->netdev); + info->dma = 0; + + skb_put(skb, len); - if ((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) { - skb->ip_summed = CHECKSUM_COMPLETE; + if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum = (macrx & XCT_MACRX_CSUM_M) >> XCT_MACRX_CSUM_S; } else skb->ip_summed = CHECKSUM_NONE; - mac->stats.rx_bytes += len; - mac->stats.rx_packets++; + mac->netdev->stats.rx_bytes += len; + mac->netdev->stats.rx_packets++; + skb->protocol = eth_type_trans(skb, mac->netdev); netif_receive_skb(skb); - dp->ptr = 0; - dp->macrx = 0; + RX_RING(mac, n) = 0; + RX_RING(mac, n+1) = 0; + + /* Need to zero it out since hardware doesn't, since the + * replenish loop uses it to tell when it's done. + */ + RX_BUFF(mac, buf_index) = 0; + + n += 4; + } - n++; + if (n > RX_RING_SIZE) { + /* Errata 5971 workaround: L2 target of headers */ + write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0); + n &= (RX_RING_SIZE-1); } - mac->rx->next_to_clean += limit - count; - pasemi_mac_replenish_rx_ring(mac->netdev); + mac->rx->next_to_clean = n; + + /* Increase is in number of 16-byte entries, and since each descriptor + * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with + * count*2. 
+ */ + write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1); + + pasemi_mac_replenish_rx_ring(mac->netdev, count); spin_unlock(&mac->rx->lock); return count; } +/* Can't make this too large or we blow the kernel stack limits */ +#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) + static int pasemi_mac_clean_tx(struct pasemi_mac *mac) { - int i; - struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; - int start, count; - int flags; - + int i, j; + unsigned int start, descr_count, buf_count, batch_limit; + unsigned int ring_limit; + unsigned int total_count; + unsigned long flags; + struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; + dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; + + total_count = 0; + batch_limit = TX_CLEAN_BATCHSIZE; +restart: spin_lock_irqsave(&mac->tx->lock, flags); start = mac->tx->next_to_clean; - count = 0; + ring_limit = mac->tx->next_to_fill; - for (i = start; i < mac->tx->next_to_use; i++) { - dp = &TX_DESC(mac, i); - if (!dp || (dp->mactx & XCT_MACTX_O)) + /* Compensate for when fill has wrapped but clean has not */ + if (start > ring_limit) + ring_limit += TX_RING_SIZE; + + buf_count = 0; + descr_count = 0; + + for (i = start; + descr_count < batch_limit && i < ring_limit; + i += buf_count) { + u64 mactx = TX_RING(mac, i); + struct sk_buff *skb; + + if ((mactx & XCT_MACTX_E) || + (*mac->tx_status & PAS_STATUS_ERROR)) + pasemi_mac_tx_error(mac, mactx); + + if (unlikely(mactx & XCT_MACTX_O)) + /* Not yet transmitted */ break; - count++; + skb = TX_RING_INFO(mac, i+1).skb; + skbs[descr_count] = skb; - info = &TX_DESC_INFO(mac, i); + buf_count = 2 + skb_shinfo(skb)->nr_frags; + for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++) + dmas[descr_count][j] = TX_RING_INFO(mac, i+1+j).dma; - pci_unmap_single(mac->dma_pdev, info->dma, - info->skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(info->skb); + TX_RING(mac, i) = 0; + TX_RING(mac, i+1) = 0; - info->skb = NULL; - info->dma = 0; - dp->mactx = 0; - dp->ptr = 0; + /* Since we always fill with an even number of entries, make + * sure we skip any unused one at the end as well. + */ + if (buf_count & 1) + buf_count++; + descr_count++; } - mac->tx->next_to_clean += count; - spin_unlock_irqrestore(&mac->tx->lock, flags); + mac->tx->next_to_clean = i & (TX_RING_SIZE-1); + spin_unlock_irqrestore(&mac->tx->lock, flags); netif_wake_queue(mac->netdev); - return count; + for (i = 0; i < descr_count; i++) + pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]); + + total_count += descr_count; + + /* If the batch was full, try to clean more */ + if (descr_count == batch_limit) + goto restart; + + return total_count; } @@ -567,15 +732,10 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) if (!(*mac->rx_status & PAS_STATUS_CAUSE_M)) return IRQ_NONE; - if (*mac->rx_status & PAS_STATUS_ERROR) - printk("rx_status reported error\n"); - /* Don't reset packet count so it won't fire again but clear * all others. 
*/ - pci_read_config_dword(mac->dma_pdev, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), ®); - reg = 0; if (*mac->rx_status & PAS_STATUS_SOFT) reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; @@ -584,11 +744,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) if (*mac->rx_status & PAS_STATUS_TIMER) reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; - netif_rx_schedule(dev); - - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); + netif_rx_schedule(dev, &mac->napi); + write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); return IRQ_HANDLED; } @@ -613,9 +771,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) if (*mac->tx_status & PAS_STATUS_ERROR) reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), - reg); + write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); return IRQ_HANDLED; } @@ -641,7 +797,7 @@ static void pasemi_adjust_link(struct net_device *dev) } else netif_carrier_on(dev); - pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags); + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | PAS_MAC_CFG_PCFG_TSR_M); @@ -673,7 +829,7 @@ static void pasemi_adjust_link(struct net_device *dev) mac->link = mac->phydev->link; if (new_flags != flags) - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, new_flags); + write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); if (msg && netif_msg_link(mac)) printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", @@ -736,39 +892,30 @@ static int pasemi_mac_open(struct net_device *dev) int ret; /* enable rx section */ - pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD, - PAS_DMA_COM_RXCMD_EN); + write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); /* enable tx section */ - pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD, - PAS_DMA_COM_TXCMD_EN); + write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags); + write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); - flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE | - PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; - - flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; - - pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), - PAS_IOB_DMA_RXCH_CFG_CNTTH(0)); + write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), + PAS_IOB_DMA_RXCH_CFG_CNTTH(0)); - pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch), - PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); + write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch), + PAS_IOB_DMA_TXCH_CFG_CNTTH(128)); /* Clear out any residual packet count state from firmware */ pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); /* 0xffffff is max value, about 16ms */ - pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG, - PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff)); - - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags); + write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG, + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff)); ret = pasemi_mac_setup_rx_resources(dev); if (ret) @@ -778,27 +925,48 @@ static int pasemi_mac_open(struct net_device *dev) if (ret) goto out_tx_resources; - pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL, - PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) | - PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch)); + 
write_mac_reg(mac, PAS_MAC_IPC_CHNL, + PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) | + PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch)); /* enable rx if */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), - PAS_DMA_RXINT_RCMDSTA_EN); + write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), + PAS_DMA_RXINT_RCMDSTA_EN | + PAS_DMA_RXINT_RCMDSTA_DROPS_M | + PAS_DMA_RXINT_RCMDSTA_BP | + PAS_DMA_RXINT_RCMDSTA_OO | + PAS_DMA_RXINT_RCMDSTA_BT); /* enable rx channel */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), - PAS_DMA_RXCHAN_CCMDSTA_EN | - PAS_DMA_RXCHAN_CCMDSTA_DU); + write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), + PAS_DMA_RXCHAN_CCMDSTA_EN | + PAS_DMA_RXCHAN_CCMDSTA_DU | + PAS_DMA_RXCHAN_CCMDSTA_OD | + PAS_DMA_RXCHAN_CCMDSTA_FD | + PAS_DMA_RXCHAN_CCMDSTA_DT); /* enable tx channel */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), - PAS_DMA_TXCHAN_TCMDSTA_EN); + write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), + PAS_DMA_TXCHAN_TCMDSTA_EN | + PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA); + + pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); + + write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1); - pasemi_mac_replenish_rx_ring(dev); + flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE | + PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; + + if (mac->type == MAC_TYPE_GMAC) + flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; + else + flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; + + /* Enable interface in MAC */ + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); ret = pasemi_mac_phy_init(dev); /* Some configs don't have PHYs (XAUI etc), so don't complain about @@ -808,7 +976,7 @@ static int pasemi_mac_open(struct net_device *dev) dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret); netif_start_queue(dev); - netif_poll_enable(dev); + napi_enable(&mac->napi); /* Interrupts are a bit different for our DMA controller: While * it's got one a regular PCI device header, the interrupt there @@ -845,7 +1013,7 @@ static int pasemi_mac_open(struct net_device *dev) out_rx_int: free_irq(mac->tx_irq, dev); out_tx_int: - netif_poll_disable(dev); + napi_disable(&mac->napi); netif_stop_queue(dev); pasemi_mac_free_tx_resources(dev); out_tx_resources: @@ -860,7 +1028,7 @@ out_rx_resources: static int pasemi_mac_close(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); - unsigned int stat; + unsigned int sta; int retries; if (mac->phydev) { @@ -869,68 +1037,74 @@ static int pasemi_mac_close(struct net_device *dev) } netif_stop_queue(dev); + napi_disable(&mac->napi); + + sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | + PAS_DMA_RXINT_RCMDSTA_OO | + PAS_DMA_RXINT_RCMDSTA_BT)) + printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); + + sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); + if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | + PAS_DMA_RXCHAN_CCMDSTA_OD | + PAS_DMA_RXCHAN_CCMDSTA_FD | + PAS_DMA_RXCHAN_CCMDSTA_DT)) + printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); + + sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); + if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA)) + printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); /* Clean out any pending buffers */ pasemi_mac_clean_tx(mac); 
pasemi_mac_clean_rx(mac, RX_RING_SIZE); /* Disable interface */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), - PAS_DMA_TXCHAN_TCMDSTA_ST); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), - PAS_DMA_RXINT_RCMDSTA_ST); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), - PAS_DMA_RXCHAN_CCMDSTA_ST); + write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), PAS_DMA_TXCHAN_TCMDSTA_ST); + write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), PAS_DMA_RXINT_RCMDSTA_ST); + write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), PAS_DMA_RXCHAN_CCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { - pci_read_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), - &stat); - if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) + sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); + if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) break; cond_resched(); } - if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT) + if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); for (retries = 0; retries < MAX_RETRIES; retries++) { - pci_read_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), - &stat); - if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)) + sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); + if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) break; cond_resched(); } - if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT) + if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); for (retries = 0; retries < MAX_RETRIES; retries++) { - pci_read_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), - &stat); - if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT)) + sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) break; cond_resched(); } - if (stat & PAS_DMA_RXINT_RCMDSTA_ACT) + if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n"); /* Then, disable the channel. This must be done separately from * stopping, since you can't disable when active. 
*/ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); + write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0); + write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0); + write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); free_irq(mac->tx_irq, dev); free_irq(mac->rx_irq, dev); @@ -946,11 +1120,11 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); struct pasemi_mac_txring *txring; - struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; - u64 dflags; - dma_addr_t map; - int flags; + u64 dflags, mactx; + dma_addr_t map[MAX_SKB_FRAGS+1]; + unsigned int map_size[MAX_SKB_FRAGS+1]; + unsigned long flags; + int i, nfrags; dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; @@ -971,71 +1145,87 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) } } - map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + nfrags = skb_shinfo(skb)->nr_frags; - if (dma_mapping_error(map)) - return NETDEV_TX_BUSY; + map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + map_size[0] = skb_headlen(skb); + if (dma_mapping_error(map[0])) + goto out_err_nolock; + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map[i+1] = pci_map_page(mac->dma_pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + map_size[i+1] = frag->size; + if (dma_mapping_error(map[i+1])) { + nfrags = i; + goto out_err_nolock; + } + } + + mactx = dflags | XCT_MACTX_LLEN(skb->len); txring = mac->tx; spin_lock_irqsave(&txring->lock, flags); - if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) { - spin_unlock_irqrestore(&txring->lock, flags); - pasemi_mac_clean_tx(mac); - pasemi_mac_restart_tx_intr(mac); - spin_lock_irqsave(&txring->lock, flags); - - if (txring->next_to_clean - txring->next_to_use == - TX_RING_SIZE) { - /* Still no room -- stop the queue and wait for tx - * intr when there's room. - */ - netif_stop_queue(dev); - goto out_err; - } + /* Avoid stepping on the same cache line that the DMA controller + * is currently about to send, so leave at least 8 words available. + * Total free space needed is mactx + fragments + 8 + */ + if (RING_AVAIL(txring) < nfrags + 10) { + /* no room -- stop the queue and wait for tx intr */ + netif_stop_queue(dev); + goto out_err; } + TX_RING(mac, txring->next_to_fill) = mactx; + txring->next_to_fill++; + TX_RING_INFO(mac, txring->next_to_fill).skb = skb; + for (i = 0; i <= nfrags; i++) { + TX_RING(mac, txring->next_to_fill+i) = + XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); + TX_RING_INFO(mac, txring->next_to_fill+i).dma = map[i]; + } - dp = &TX_DESC(mac, txring->next_to_use); - info = &TX_DESC_INFO(mac, txring->next_to_use); + /* We have to add an even number of 8-byte entries to the ring + * even if the last one is unused. That means always an odd number + * of pointers + one mactx descriptor. 
+ */ + if (nfrags & 1) + nfrags++; - dp->mactx = dflags | XCT_MACTX_LLEN(skb->len); - dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map); - info->dma = map; - info->skb = skb; + txring->next_to_fill = (txring->next_to_fill + nfrags + 1) & + (TX_RING_SIZE-1); - txring->next_to_use++; - mac->stats.tx_packets++; - mac->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; spin_unlock_irqrestore(&txring->lock, flags); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1); + write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(mac->dma_txch), (nfrags+2) >> 1); return NETDEV_TX_OK; out_err: spin_unlock_irqrestore(&txring->lock, flags); - pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE); - return NETDEV_TX_BUSY; -} +out_err_nolock: + while (nfrags--) + pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], + PCI_DMA_TODEVICE); -static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev) -{ - struct pasemi_mac *mac = netdev_priv(dev); - - return &mac->stats; + return NETDEV_TX_BUSY; } - static void pasemi_mac_set_rx_mode(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int flags; - pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags); + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); /* Set promiscuous */ if (dev->flags & IFF_PROMISC) @@ -1043,30 +1233,92 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev) else flags &= ~PAS_MAC_CFG_PCFG_PR; - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags); + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); } -static int pasemi_mac_poll(struct net_device *dev, int *budget) +static int pasemi_mac_poll(struct napi_struct *napi, int budget) { - int pkts, limit = min(*budget, dev->quota); - struct pasemi_mac *mac = netdev_priv(dev); - - pkts = pasemi_mac_clean_rx(mac, limit); - - dev->quota -= pkts; - *budget -= pkts; + struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); + struct net_device *dev = mac->netdev; + int pkts; - if (pkts < limit) { + pasemi_mac_clean_tx(mac); + pkts = pasemi_mac_clean_rx(mac, budget); + if (pkts < budget) { /* all done, no more packets present */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); pasemi_mac_restart_rx_intr(mac); - return 0; - } else { - /* used up our quantum, so reschedule */ - return 1; } + return pkts; +} + +static void __iomem * __devinit map_onedev(struct pci_dev *p, int index) +{ + struct device_node *dn; + void __iomem *ret; + + dn = pci_device_to_OF_node(p); + if (!dn) + goto fallback; + + ret = of_iomap(dn, index); + if (!ret) + goto fallback; + + return ret; +fallback: + /* This is hardcoded and ugly, but we have some firmware versions + * that don't provide the register space in the device tree. Luckily + * they are at well-known locations so we can just do the math here. 
+ */ + return ioremap(0xe0000000 + (p->devfn << 12), 0x2000); +} + +static int __devinit pasemi_mac_map_regs(struct pasemi_mac *mac) +{ + struct resource res; + struct device_node *dn; + int err; + + mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); + if (!mac->dma_pdev) { + dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); + return -ENODEV; + } + + mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); + if (!mac->iob_pdev) { + dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); + return -ENODEV; + } + + mac->regs = map_onedev(mac->pdev, 0); + mac->dma_regs = map_onedev(mac->dma_pdev, 0); + mac->iob_regs = map_onedev(mac->iob_pdev, 0); + + if (!mac->regs || !mac->dma_regs || !mac->iob_regs) { + dev_err(&mac->pdev->dev, "Can't map registers\n"); + return -ENODEV; + } + + /* The dma status structure is located in the I/O bridge, and + * is cache coherent. + */ + if (!dma_status) { + dn = pci_device_to_OF_node(mac->iob_pdev); + if (dn) + err = of_address_to_resource(dn, 1, &res); + if (!dn || err) { + /* Fallback for old firmware */ + res.start = 0xfd800000; + res.end = res.start + 0x1000; + } + dma_status = __ioremap(res.start, res.end-res.start, 0); + } + + return 0; } static int __devinit @@ -1076,6 +1328,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *dev; struct pasemi_mac *mac; int err; + DECLARE_MAC_BUF(mac_buf); err = pci_enable_device(pdev); if (err) @@ -1089,7 +1342,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } - SET_MODULE_OWNER(dev); pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -1097,21 +1349,10 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mac->pdev = pdev; mac->netdev = dev; - mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); - if (!mac->dma_pdev) { - dev_err(&pdev->dev, "Can't find DMA Controller\n"); - err = -ENODEV; - goto out_free_netdev; - } + netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); - mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); - - if (!mac->iob_pdev) { - dev_err(&pdev->dev, "Can't find I/O Bridge\n"); - err = -ENODEV; - goto out_put_dma_pdev; - } + dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG; /* These should come out of the device tree eventually */ mac->dma_txch = index; @@ -1148,18 +1389,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->open = pasemi_mac_open; dev->stop = pasemi_mac_close; dev->hard_start_xmit = pasemi_mac_start_tx; - dev->get_stats = pasemi_mac_get_stats; dev->set_multicast_list = pasemi_mac_set_rx_mode; - dev->weight = 64; - dev->poll = pasemi_mac_poll; - dev->features = NETIF_F_HW_CSUM; - /* The dma status structure is located in the I/O bridge, and - * is cache coherent. - */ - if (!dma_status) - /* XXXOJN This should come from the device tree */ - dma_status = __ioremap(0xfd800000, 0x1000, 0); + err = pasemi_mac_map_regs(mac); + if (err) + goto out; mac->rx_status = &dma_status->rx_sta[mac->dma_rxch]; mac->tx_status = &dma_status->tx_sta[mac->dma_txch]; @@ -1175,21 +1409,27 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", err); goto out; - } else + } else if netif_msg_probe(mac) printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, " - "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n", + "hw addr %s\n", dev->name, mac->type == MAC_TYPE_GMAC ? 
"GMAC" : "XAUI", mac->dma_if, mac->dma_txch, mac->dma_rxch, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + print_mac(mac_buf, dev->dev_addr)); return err; out: - pci_dev_put(mac->iob_pdev); -out_put_dma_pdev: - pci_dev_put(mac->dma_pdev); -out_free_netdev: + if (mac->iob_pdev) + pci_dev_put(mac->iob_pdev); + if (mac->dma_pdev) + pci_dev_put(mac->dma_pdev); + if (mac->dma_regs) + iounmap(mac->dma_regs); + if (mac->iob_regs) + iounmap(mac->iob_regs); + if (mac->regs) + iounmap(mac->regs); + free_netdev(dev); out_disable_device: pci_disable_device(pdev); @@ -1213,6 +1453,10 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev) pci_dev_put(mac->dma_pdev); pci_dev_put(mac->iob_pdev); + iounmap(mac->regs); + iounmap(mac->dma_regs); + iounmap(mac->iob_regs); + pci_set_drvdata(pdev, NULL); free_netdev(netdev); } diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index c29ee159c33d..60368df72634 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h @@ -28,35 +28,38 @@ struct pasemi_mac_txring { spinlock_t lock; - struct pas_dma_xct_descr *desc; + u64 *ring; dma_addr_t dma; unsigned int size; - unsigned int next_to_use; + unsigned int next_to_fill; unsigned int next_to_clean; - struct pasemi_mac_buffer *desc_info; + struct pasemi_mac_buffer *ring_info; char irq_name[10]; /* "eth%d tx" */ }; struct pasemi_mac_rxring { spinlock_t lock; - struct pas_dma_xct_descr *desc; /* RX channel descriptor ring */ + u64 *ring; /* RX channel descriptor ring */ dma_addr_t dma; u64 *buffers; /* RX interface buffer ring */ dma_addr_t buf_dma; unsigned int size; unsigned int next_to_fill; unsigned int next_to_clean; - struct pasemi_mac_buffer *desc_info; + struct pasemi_mac_buffer *ring_info; char irq_name[10]; /* "eth%d rx" */ }; struct pasemi_mac { struct net_device *netdev; + void __iomem *regs; + void __iomem *dma_regs; + void __iomem *iob_regs; struct pci_dev *pdev; struct pci_dev *dma_pdev; struct pci_dev *iob_pdev; struct phy_device *phydev; - struct net_device_stats stats; + struct napi_struct napi; /* Pointer to the cacheable per-channel status registers */ u64 *rx_status; @@ -85,7 +88,7 @@ struct pasemi_mac { char phy_id[BUS_ID_SIZE]; }; -/* Software status descriptor (desc_info) */ +/* Software status descriptor (ring_info) */ struct pasemi_mac_buffer { struct sk_buff *skb; dma_addr_t dma; @@ -98,20 +101,7 @@ struct pasdma_status { u64 tx_sta[20]; }; -/* descriptor structure */ -struct pas_dma_xct_descr { - union { - u64 mactx; - u64 macrx; - }; - union { - u64 ptr; - u64 rxb; - }; -}; - /* MAC CFG register offsets */ - enum { PAS_MAC_CFG_PCFG = 0x80, PAS_MAC_CFG_TXP = 0x98, @@ -215,6 +205,20 @@ enum { #define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000 #define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000 #define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17 +#define PAS_DMA_RXINT_CFG(i) (0x204+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_CFG_RBP 0x80000000 +#define PAS_DMA_RXINT_CFG_ITRR 0x40000000 +#define PAS_DMA_RXINT_CFG_DHL_M 0x07000000 +#define PAS_DMA_RXINT_CFG_DHL_S 24 +#define PAS_DMA_RXINT_CFG_DHL(x) (((x) << PAS_DMA_RXINT_CFG_DHL_S) & \ + PAS_DMA_RXINT_CFG_DHL_M) +#define PAS_DMA_RXINT_CFG_ITR 0x00400000 +#define PAS_DMA_RXINT_CFG_LW 0x00200000 +#define PAS_DMA_RXINT_CFG_L2 0x00100000 +#define PAS_DMA_RXINT_CFG_HEN 0x00080000 +#define PAS_DMA_RXINT_CFG_WIF 0x00000002 +#define PAS_DMA_RXINT_CFG_WIL 0x00000001 + #define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE) #define PAS_DMA_RXINT_INCR_INCR_M 
0x0000ffff #define PAS_DMA_RXINT_INCR_INCR_S 0 @@ -241,6 +245,10 @@ enum { #define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */ #define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */ #define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */ +#define PAS_DMA_TXCHAN_TCMDSTA_SZ 0x00000800 +#define PAS_DMA_TXCHAN_TCMDSTA_DB 0x00000400 +#define PAS_DMA_TXCHAN_TCMDSTA_DE 0x00000200 +#define PAS_DMA_TXCHAN_TCMDSTA_DA 0x00000100 #define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */ #define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c @@ -251,9 +259,11 @@ enum { #define PAS_DMA_TXCHAN_CFG_WT_S 6 #define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \ PAS_DMA_TXCHAN_CFG_WT_M) -#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */ -#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */ +#define PAS_DMA_TXCHAN_CFG_TRD 0x00010000 /* translate data */ +#define PAS_DMA_TXCHAN_CFG_TRR 0x00008000 /* translate rings */ #define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */ +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */ +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */ #define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0 @@ -283,7 +293,11 @@ enum { #define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */ #define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */ #define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000 +#define PAS_DMA_RXCHAN_CCMDSTA_OD 0x00002000 +#define PAS_DMA_RXCHAN_CCMDSTA_FD 0x00001000 +#define PAS_DMA_RXCHAN_CCMDSTA_DT 0x00000800 #define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_CFG_CTR 0x00000400 #define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380 #define PAS_DMA_RXCHAN_CFG_HBU_S 7 #define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \ @@ -317,6 +331,12 @@ enum { #define PAS_STATUS_SOFT 0x4000000000000000ull #define PAS_STATUS_INT 0x8000000000000000ull +#define PAS_IOB_COM_PKTHDRCNT 0x120 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_M 0x0fff0000 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_S 16 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_M 0x00000fff +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_S 0 + #define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4) #define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff #define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0 @@ -412,10 +432,9 @@ enum { /* Receive descriptor fields */ #define XCT_MACRX_T 0x8000000000000000ull #define XCT_MACRX_ST 0x4000000000000000ull -#define XCT_MACRX_NORES 0x0000000000000000ull -#define XCT_MACRX_8BRES 0x1000000000000000ull -#define XCT_MACRX_24BRES 0x2000000000000000ull -#define XCT_MACRX_40BRES 0x3000000000000000ull +#define XCT_MACRX_RR_M 0x3000000000000000ull +#define XCT_MACRX_RR_NORES 0x0000000000000000ull +#define XCT_MACRX_RR_8BRES 0x1000000000000000ull #define XCT_MACRX_O 0x0400000000000000ull #define XCT_MACRX_E 0x0200000000000000ull #define XCT_MACRX_FF 0x0100000000000000ull @@ -463,6 +482,17 @@ enum { #define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \ XCT_PTR_ADDR_M) +/* Receive interface 8byte result fields */ +#define XCT_RXRES_8B_L4O_M 0xff00000000000000ull +#define XCT_RXRES_8B_L4O_S 56 +#define XCT_RXRES_8B_RULE_M 0x00ffff0000000000ull +#define XCT_RXRES_8B_RULE_S 40 +#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull +#define XCT_RXRES_8B_EVAL_S 24 +#define XCT_RXRES_8B_HTYPE_M 
0x0000000000f00000ull +#define XCT_RXRES_8B_HASH_M 0x00000000000fffffull +#define XCT_RXRES_8B_HASH_S 0 + /* Receive interface buffer fields */ #define XCT_RXB_LEN_M 0x0ffff00000000000ull #define XCT_RXB_LEN_S 44 diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 3cdbe118200b..ed402e00e730 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -457,7 +457,6 @@ struct netdrv_private { void *mmio_addr; int drv_flags; struct pci_dev *pci_dev; - struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ unsigned char *rx_ring; unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ @@ -505,7 +504,6 @@ static int netdrv_start_xmit (struct sk_buff *skb, static irqreturn_t netdrv_interrupt (int irq, void *dev_instance); static int netdrv_close (struct net_device *dev); static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct net_device_stats *netdrv_get_stats (struct net_device *dev); static void netdrv_set_rx_mode (struct net_device *dev); static void netdrv_hw_start (struct net_device *dev); @@ -604,7 +602,6 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, DPRINTK ("EXIT, returning -ENOMEM\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = dev->priv; @@ -740,6 +737,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, int i, addr_len, option; void *ioaddr = NULL; static int board_idx = -1; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -776,7 +774,6 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, dev->open = netdrv_open; dev->hard_start_xmit = netdrv_start_xmit; dev->stop = netdrv_close; - dev->get_stats = netdrv_get_stats; dev->set_multicast_list = netdrv_set_rx_mode; dev->do_ioctl = netdrv_ioctl; dev->tx_timeout = netdrv_tx_timeout; @@ -800,15 +797,11 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, tp->phys[0] = 32; - printk (KERN_INFO "%s: %s at 0x%lx, " - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " - "IRQ %d\n", + printk (KERN_INFO "%s: %s at 0x%lx, %sIRQ %d\n", dev->name, board_info[ent->driver_data].name, dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", @@ -1277,7 +1270,7 @@ static void netdrv_tx_clear (struct netdrv_private *tp) if (rp->skb) { dev_kfree_skb (rp->skb); rp->skb = NULL; - tp->stats.tx_dropped++; + dev->stats.tx_dropped++; } } } @@ -1390,25 +1383,25 @@ static void netdrv_tx_interrupt (struct net_device *dev, /* There was an major error, log it. */ DPRINTK ("%s: Transmit error, Tx status %8.8x.\n", dev->name, txstatus); - tp->stats.tx_errors++; + dev->stats.tx_errors++; if (txstatus & TxAborted) { - tp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift)); } if (txstatus & TxCarrierLost) - tp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (txstatus & TxOutOfWindow) - tp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; } else { if (txstatus & TxUnderrun) { /* Add 64 to the Tx FIFO threshold. 
*/ if (tp->tx_flag < 0x00300000) tp->tx_flag += 0x00020000; - tp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } - tp->stats.collisions += (txstatus >> 24) & 15; - tp->stats.tx_bytes += txstatus & 0x7ff; - tp->stats.tx_packets++; + dev->stats.collisions += (txstatus >> 24) & 15; + dev->stats.tx_bytes += txstatus & 0x7ff; + dev->stats.tx_packets++; } /* Free the original skb. */ @@ -1461,13 +1454,13 @@ static void netdrv_rx_err (u32 rx_status, struct net_device *dev, dev->name, rx_status); /* A.C.: The chip hangs here. */ } - tp->stats.rx_errors++; + dev->stats.rx_errors++; if (rx_status & (RxBadSymbol | RxBadAlign)) - tp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & (RxRunt | RxTooLong)) - tp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rx_status & RxCRCErr) - tp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; /* Reset the receiver, based on RealTek recommendation. (Bug?) */ tp->cur_rx = 0; @@ -1573,13 +1566,13 @@ static void netdrv_rx_interrupt (struct net_device *dev, skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; - tp->stats.rx_bytes += pkt_size; - tp->stats.rx_packets++; + dev->stats.rx_bytes += pkt_size; + dev->stats.rx_packets++; } else { printk (KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - tp->stats.rx_dropped++; + dev->stats.rx_dropped++; } cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; @@ -1608,7 +1601,7 @@ static void netdrv_weird_interrupt (struct net_device *dev, assert (ioaddr != NULL); /* Update the error count. */ - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); + dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); NETDRV_W32 (RxMissed, 0); if ((status & RxUnderrun) && link_changed && @@ -1629,14 +1622,14 @@ static void netdrv_weird_interrupt (struct net_device *dev, /* XXX along with netdrv_rx_err, are we double-counting errors? */ if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver)) - tp->stats.rx_errors++; + dev->stats.rx_errors++; if (status & (PCSTimeout)) - tp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & (RxUnderrun | RxFIFOOver)) - tp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (status & RxOverflow) { - tp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN; NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16); } @@ -1740,7 +1733,7 @@ static int netdrv_close (struct net_device *dev) NETDRV_W16 (IntrMask, 0x0000); /* Update the error counts. */ - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); + dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); NETDRV_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); @@ -1807,31 +1800,6 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) return rc; } - -static struct net_device_stats *netdrv_get_stats (struct net_device *dev) -{ - struct netdrv_private *tp = dev->priv; - void *ioaddr = tp->mmio_addr; - - DPRINTK ("ENTER\n"); - - assert (tp != NULL); - - if (netif_running(dev)) { - unsigned long flags; - - spin_lock_irqsave (&tp->lock, flags); - - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); - NETDRV_W32 (RxMissed, 0); - - spin_unlock_irqrestore (&tp->lock, flags); - } - - DPRINTK ("EXIT\n"); - return &tp->stats; -} - /* Set or clear the multicast filter for this adaptor. This routine is not state sensitive and need not be SMP locked. 
*/ @@ -1909,7 +1877,7 @@ static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear)); /* Update the error counts. */ - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); + dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); NETDRV_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 2b395ee21f75..73dcbb7296da 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c @@ -343,6 +343,7 @@ static int tc574_config(struct pcmcia_device *link) u16 *phys_addr; char *cardname; union wn3_config config; + DECLARE_MAC_BUF(mac); phys_addr = (u16 *)dev->dev_addr; @@ -458,10 +459,10 @@ static int tc574_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: %s at io %#3lx, irq %d, hw_addr ", - dev->name, cardname, dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : ".\n")); + printk(KERN_INFO "%s: %s at io %#3lx, irq %d, " + "hw_addr %s.\n", + dev->name, cardname, dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n", 8 << config.u.ram_size, ram_split[config.u.ram_split], config.u.autoselect ? "autoselect " : ""); diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 503f2685fb73..32076ca6a9e1 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c @@ -197,7 +197,6 @@ static int tc589_probe(struct pcmcia_device *link) link->conf.ConfigIndex = 1; /* The EL3-specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &el3_start_xmit; dev->set_config = &el3_config; dev->get_stats = &el3_get_stats; @@ -256,6 +255,7 @@ static int tc589_config(struct pcmcia_device *link) int last_fn, last_ret, i, j, multi = 0, fifo; kio_addr_t ioaddr; char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + DECLARE_MAC_BUF(mac); DEBUG(0, "3c589_config(0x%p)\n", link); @@ -331,11 +331,10 @@ static int tc589_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, hw_addr ", - dev->name, (multi ? "562" : "589"), dev->base_addr, - dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " + "hw_addr %s\n", + dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n", (fifo & 7) ? 
32 : 8, ram_split[(fifo >> 16) & 3], if_names[dev->if_port]); diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 50dff1b81d34..a95a2cae6b23 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -232,7 +232,7 @@ static int get_prom(struct pcmcia_device *link) axnet_reset_8390(dev); mdelay(10); - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); for (i = 0; i < 6; i += 2) { @@ -292,6 +292,7 @@ static int axnet_config(struct pcmcia_device *link) cisparse_t parse; int i, j, last_ret, last_fn; u_short buf[64]; + DECLARE_MAC_BUF(mac); DEBUG(0, "axnet_config(0x%p)\n", link); @@ -403,11 +404,11 @@ static int axnet_config(struct pcmcia_device *link) strcpy(info->node.dev_name, dev->name); - printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, hw_addr ", + printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " + "hw_addr %s\n", dev->name, ((info->flags & IS_AX88790) ? 7 : 1), - dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); if (info->phy_id != -1) { DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j); } else { @@ -771,6 +772,7 @@ static struct pcmcia_device_id axnet_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), + PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090), PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), @@ -1728,9 +1730,6 @@ static void axdev_setup(struct net_device *dev) if (ei_debug > 1) printk(version_8390); - SET_MODULE_OWNER(dev); - - ei_local = (struct ei_device *)netdev_priv(dev); spin_lock_init(&ei_local->page_lock); diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c index 0d1c7a41c9c6..ea9414c4d900 100644 --- a/drivers/net/pcmcia/com20020_cs.c +++ b/drivers/net/pcmcia/com20020_cs.c @@ -147,7 +147,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) DEBUG(0, "com20020_attach()\n"); /* Create new network device */ - info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); + info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); if (!info) goto fail_alloc_info; @@ -155,7 +155,6 @@ static int com20020_probe(struct pcmcia_device *p_dev) if (!dev) goto fail_alloc_dev; - memset(info, 0, sizeof(struct com20020_dev_t)); lp = dev->priv; lp->timeout = timeout; lp->backplane = backplane; diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 85d5f2ca4bb5..62844677c784 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -259,7 +259,6 @@ static int fmvj18x_probe(struct pcmcia_device *link) link->conf.IntType = INT_MEMORY_AND_IO; /* The FMVJ18x specific entries in the device structure. 
*/ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &fjn_start_xmit; dev->set_config = &fjn_config; dev->get_stats = &fjn_get_stats; @@ -347,6 +346,7 @@ static int fmvj18x_config(struct pcmcia_device *link) cardtype_t cardtype; char *card_name = "unknown"; u_char *node_id; + DECLARE_MAC_BUF(mac); DEBUG(0, "fmvj18x_config(0x%p)\n", link); @@ -534,11 +534,10 @@ static int fmvj18x_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); /* print current configuration */ - printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, hw_addr ", + printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, " + "hw_addr %s\n", dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", - dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + dev->base_addr, dev->irq, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c index 4ecb8ca5a992..4eafa4f42cff 100644 --- a/drivers/net/pcmcia/ibmtr_cs.c +++ b/drivers/net/pcmcia/ibmtr_cs.c @@ -146,9 +146,8 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link) DEBUG(0, "ibmtr_attach()\n"); /* Create new token-ring device */ - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; - memset(info,0,sizeof(*info)); dev = alloc_trdev(sizeof(struct tok_info)); if (!dev) { kfree(info); diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 73da611fd536..a355a93b908b 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -474,7 +474,6 @@ static int nmclan_probe(struct pcmcia_device *link) lp->tx_free_frames=AM2150_MAX_TX_FRAMES; - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &mace_start_xmit; dev->set_config = &mace_config; dev->get_stats = &mace_get_stats; @@ -659,6 +658,7 @@ static int nmclan_config(struct pcmcia_device *link) u_char buf[64]; int i, last_ret, last_fn; kio_addr_t ioaddr; + DECLARE_MAC_BUF(mac); DEBUG(0, "nmclan_config(0x%p)\n", link); @@ -717,10 +717,10 @@ static int nmclan_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port, hw_addr ", - dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port," + " hw_addr %s\n", + dev->name, dev->base_addr, dev->irq, if_names[dev->if_port], + print_mac(mac, dev->dev_addr)); return 0; cs_failed: @@ -996,7 +996,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; mace_private *lp = netdev_priv(dev); - kio_addr_t ioaddr = dev->base_addr; + kio_addr_t ioaddr; int status; int IntrCnt = MACE_MAX_IR_ITERATIONS; @@ -1006,6 +1006,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) return IRQ_NONE; } + ioaddr = dev->base_addr; + if (lp->tx_irq_disabled) { printk( (lp->tx_irq_disabled? 
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 63de89e93b70..9d45e9696e16 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -38,7 +38,7 @@ #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> -#include <../drivers/net/8390.h> +#include "../8390.h" #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> @@ -207,7 +207,7 @@ static hw_info_t hw_info[] = { { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 } }; -#define NR_INFO (sizeof(hw_info)/sizeof(hw_info_t)) +#define NR_INFO ARRAY_SIZE(hw_info) static hw_info_t default_info = { 0, 0, 0, 0, 0 }; static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; @@ -259,7 +259,6 @@ static int pcnet_probe(struct pcmcia_device *link) link->conf.Attributes = CONF_ENABLE_IRQ; link->conf.IntType = INT_MEMORY_AND_IO; - SET_MODULE_OWNER(dev); dev->open = &pcnet_open; dev->stop = &pcnet_close; dev->set_config = &set_config; @@ -375,7 +374,7 @@ static hw_info_t *get_prom(struct pcmcia_device *link) pcnet_reset_8390(dev); mdelay(10); - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); for (i = 0; i < 32; i++) @@ -522,6 +521,7 @@ static int pcnet_config(struct pcmcia_device *link) int has_shmem = 0; u_short buf[64]; hw_info_t *hw_info; + DECLARE_MAC_BUF(mac); DEBUG(0, "pcnet_config(0x%p)\n", link); @@ -671,9 +671,7 @@ static int pcnet_config(struct pcmcia_device *link) printk (" mem %#5lx,", dev->mem_start); if (info->flags & HAS_MISC_REG) printk(" %s xcvr,", if_names[dev->if_port]); - printk(" hw_addr "); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(" hw_addr %s\n", print_mac(mac, dev->dev_addr)); return 0; cs_failed: @@ -1664,6 +1662,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab), + PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97), PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78), PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11), PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d), diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 7912dbd14251..58d716fd17cf 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -336,7 +336,6 @@ static int smc91c92_probe(struct pcmcia_device *link) link->conf.IntType = INT_MEMORY_AND_IO; /* The SMC91c92-specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &smc_start_xmit; dev->get_stats = &smc_get_stats; dev->set_config = &s9k_config; @@ -963,6 +962,7 @@ static int smc91c92_config(struct pcmcia_device *link) int i, j, rev; kio_addr_t ioaddr; u_long mir; + DECLARE_MAC_BUF(mac); DEBUG(0, "smc91c92_config(0x%p)\n", link); @@ -1075,10 +1075,9 @@ static int smc91c92_config(struct pcmcia_device *link) strcpy(smc->node.dev_name, dev->name); printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " - "hw_addr ", dev->name, name, (rev & 0x0f), dev->base_addr, - dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? 
":" : "\n")); + "hw_addr %s\n", + dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); if (rev > 0) { if (mir & 0x3ff) @@ -1368,6 +1367,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) kio_addr_t ioaddr = dev->base_addr; u_short num_pages; short time_out, ir; + unsigned long flags; netif_stop_queue(dev); @@ -1395,6 +1395,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) /* A packet is now waiting. */ smc->packets_waiting++; + spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */ /* need MC_RESET to keep the memory consistent. errata? */ @@ -1411,6 +1412,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Acknowledge the interrupt, send the packet. */ outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT); smc_hardware_send_packet(dev); /* Send the packet now.. */ + spin_unlock_irqrestore(&smc->lock, flags); return 0; } } @@ -1418,6 +1420,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Otherwise defer until the Tx-space-allocated interrupt. */ DEBUG(2, "%s: memory allocation deferred.\n", dev->name); outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); + spin_unlock_irqrestore(&smc->lock, flags); return 0; } @@ -1523,6 +1526,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name, irq, ioaddr); + spin_lock(&smc->lock); smc->watchdog = 0; saved_bank = inw(ioaddr + BANK_SELECT); if ((saved_bank & 0xff00) != 0x3300) { @@ -1620,6 +1624,7 @@ irq_done: readb(smc->base+MEGAHERTZ_ISR); } #endif + spin_unlock(&smc->lock); return IRQ_RETVAL(handled); } @@ -1902,6 +1907,9 @@ static void media_check(u_long arg) kio_addr_t ioaddr = dev->base_addr; u_short i, media, saved_bank; u_short link; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); saved_bank = inw(ioaddr + BANK_SELECT); @@ -1934,6 +1942,7 @@ static void media_check(u_long arg) smc->media.expires = jiffies + HZ/100; add_timer(&smc->media); SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); return; } @@ -2007,6 +2016,7 @@ reschedule: smc->media.expires = jiffies + HZ; add_timer(&smc->media); SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); } static int smc_link_ok(struct net_device *dev) @@ -2094,14 +2104,14 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) u16 saved_bank = inw(ioaddr + BANK_SELECT); int ret; - SMC_SELECT_BANK(3); spin_lock_irq(&smc->lock); + SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) ret = mii_ethtool_gset(&smc->mii_if, ecmd); else ret = smc_netdev_get_ecmd(dev, ecmd); - spin_unlock_irq(&smc->lock); SMC_SELECT_BANK(saved_bank); + spin_unlock_irq(&smc->lock); return ret; } @@ -2112,14 +2122,14 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) u16 saved_bank = inw(ioaddr + BANK_SELECT); int ret; - SMC_SELECT_BANK(3); spin_lock_irq(&smc->lock); + SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) ret = mii_ethtool_sset(&smc->mii_if, ecmd); else ret = smc_netdev_set_ecmd(dev, ecmd); - spin_unlock_irq(&smc->lock); SMC_SELECT_BANK(saved_bank); + spin_unlock_irq(&smc->lock); return ret; } @@ -2130,11 +2140,11 @@ static u32 smc_get_link(struct net_device *dev) u16 saved_bank = inw(ioaddr + BANK_SELECT); u32 ret; - SMC_SELECT_BANK(3); spin_lock_irq(&smc->lock); + SMC_SELECT_BANK(3); ret = smc_link_ok(dev); - spin_unlock_irq(&smc->lock); 
SMC_SELECT_BANK(saved_bank); + spin_unlock_irq(&smc->lock); return ret; } diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 258d6f396186..c3b69602e275 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -580,7 +580,6 @@ xirc2ps_probe(struct pcmcia_device *link) link->irq.Instance = dev; /* Fill in card specific entries */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &do_start_xmit; dev->set_config = &do_config; dev->get_stats = &do_get_stats; @@ -732,6 +731,7 @@ xirc2ps_config(struct pcmcia_device * link) u_char buf[64]; cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data; cistpl_cftable_entry_t *cf = &parse.cftable_entry; + DECLARE_MAC_BUF(mac); local->dingo_ccr = NULL; @@ -1033,11 +1033,9 @@ xirc2ps_config(struct pcmcia_device * link) strcpy(local->node.dev_name, dev->name); /* give some infos about the hardware */ - printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr", - dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq); - for (i = 0; i < 6; i++) - printk("%c%02X", i?':':' ', dev->dev_addr[i]); - printk("\n"); + printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %s\n", + dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, + print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 465485a3fbc6..5f994b5beda1 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -23,11 +23,11 @@ #define DRV_NAME "pcnet32" #ifdef CONFIG_PCNET32_NAPI -#define DRV_VERSION "1.33-NAPI" +#define DRV_VERSION "1.34-NAPI" #else -#define DRV_VERSION "1.33" +#define DRV_VERSION "1.34" #endif -#define DRV_RELDATE "27.Jun.2006" +#define DRV_RELDATE "14.Aug.2007" #define PFX DRV_NAME ": " static const char *const version = @@ -210,31 +210,31 @@ static int homepna[MAX_UNITS]; /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { - u32 base; - s16 buf_length; /* two`s complement of length */ - s16 status; - u32 msg_length; - u32 reserved; + __le32 base; + __le16 buf_length; /* two`s complement of length */ + __le16 status; + __le32 msg_length; + __le32 reserved; }; struct pcnet32_tx_head { - u32 base; - s16 length; /* two`s complement of length */ - s16 status; - u32 misc; - u32 reserved; + __le32 base; + __le16 length; /* two`s complement of length */ + __le16 status; + __le32 misc; + __le32 reserved; }; /* The PCNET32 32-Bit initialization block, described in databook. */ struct pcnet32_init_block { - u16 mode; - u16 tlen_rlen; + __le16 mode; + __le16 tlen_rlen; u8 phys_addr[6]; - u16 reserved; - u32 filter[2]; + __le16 reserved; + __le32 filter[2]; /* Receive and transmit ring base, along with extra bits. */ - u32 rx_ring; - u32 tx_ring; + __le32 rx_ring; + __le32 tx_ring; }; /* PCnet32 access functions */ @@ -280,6 +280,8 @@ struct pcnet32_private { unsigned int dirty_rx, /* ring entries to be freed. 
*/ dirty_tx; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; char tx_full; char phycount; /* number of phys found */ @@ -440,15 +442,21 @@ static struct pcnet32_access pcnet32_dwio = { static void pcnet32_netif_stop(struct net_device *dev) { + struct pcnet32_private *lp = netdev_priv(dev); dev->trans_start = jiffies; - netif_poll_disable(dev); +#ifdef CONFIG_PCNET32_NAPI + napi_disable(&lp->napi); +#endif netif_tx_disable(dev); } static void pcnet32_netif_start(struct net_device *dev) { + struct pcnet32_private *lp = netdev_priv(dev); netif_wake_queue(dev); - netif_poll_enable(dev); +#ifdef CONFIG_PCNET32_NAPI + napi_enable(&lp->napi); +#endif } /* @@ -602,9 +610,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, new_dma_addr_list[new] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]); - new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); - new_rx_ring[new].status = le16_to_cpu(0x8000); + new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); + new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); + new_rx_ring[new].status = cpu_to_le16(0x8000); } /* and free any unneeded buffers */ for (; new < lp->rx_ring_size; new++) { @@ -816,7 +824,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, if ((1 << i) != lp->rx_ring_size) pcnet32_realloc_rx_ring(dev, lp, i); - dev->weight = lp->rx_ring_size / 2; + lp->napi.weight = lp->rx_ring_size / 2; if (netif_running(dev)) { pcnet32_netif_start(dev); @@ -839,9 +847,14 @@ static void pcnet32_get_strings(struct net_device *dev, u32 stringset, memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); } -static int pcnet32_self_test_count(struct net_device *dev) +static int pcnet32_get_sset_count(struct net_device *dev, int sset) { - return PCNET32_TEST_LEN; + switch (sset) { + case ETH_SS_TEST: + return PCNET32_TEST_LEN; + default: + return -EOPNOTSUPP; + } } static void pcnet32_ethtool_test(struct net_device *dev, @@ -875,7 +888,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) int x, i; /* counters */ int numbuffs = 4; /* number of TX/RX buffers and descs */ u16 status = 0x8300; /* TX ring status */ - u16 teststatus; /* test of ring status */ + __le16 teststatus; /* test of ring status */ int rc; /* return code */ int size; /* size of packets */ unsigned char *packet; /* source packet data */ @@ -922,7 +935,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) packet = skb->data; skb_put(skb, size); /* create space for data */ lp->tx_skbuff[x] = skb; - lp->tx_ring[x].length = le16_to_cpu(-skb->len); + lp->tx_ring[x].length = cpu_to_le16(-skb->len); lp->tx_ring[x].misc = 0; /* put DA and SA into the skb */ @@ -942,10 +955,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[x].base = - (u32) le32_to_cpu(lp->tx_dma_addr[x]); + lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[x].status = le16_to_cpu(status); + lp->tx_ring[x].status = cpu_to_le16(status); } } @@ -956,7 +968,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) x = a->read_csr(ioaddr, CSR15) & 0xfffc; lp->a.write_csr(ioaddr, CSR15, x | 0x0044); - teststatus = le16_to_cpu(0x8000); + teststatus = 
cpu_to_le16(0x8000); lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ /* Check status of descriptors */ @@ -1086,6 +1098,7 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data) mod_timer(&lp->blink_timer, jiffies); set_current_state(TASK_INTERRUPTIBLE); + /* AV: the limit here makes no sense whatsoever */ if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))) data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ); @@ -1211,7 +1224,7 @@ static void pcnet32_rx_entry(struct net_device *dev, newskb->data, PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - rxp->base = le32_to_cpu(lp->rx_dma_addr[entry]); + rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); rx_in_place = 1; } else skb = NULL; @@ -1255,7 +1268,7 @@ static void pcnet32_rx_entry(struct net_device *dev, return; } -static int pcnet32_rx(struct net_device *dev, int quota) +static int pcnet32_rx(struct net_device *dev, int budget) { struct pcnet32_private *lp = netdev_priv(dev); int entry = lp->cur_rx & lp->rx_mod_mask; @@ -1263,16 +1276,16 @@ static int pcnet32_rx(struct net_device *dev, int quota) int npackets = 0; /* If we own the next entry, it's a new packet. Send it up. */ - while (quota > npackets && (short)le16_to_cpu(rxp->status) >= 0) { + while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { pcnet32_rx_entry(dev, lp, rxp, entry); npackets += 1; /* * The docs say that the buffer length isn't touched, but Andrew * Boyd of QNX reports that some revs of the 79C965 clear it. */ - rxp->buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ); wmb(); /* Make sure owner changes after others are visible */ - rxp->status = le16_to_cpu(0x8000); + rxp->status = cpu_to_le16(0x8000); entry = (++lp->cur_rx) & lp->rx_mod_mask; rxp = &lp->rx_ring[entry]; } @@ -1379,15 +1392,16 @@ static int pcnet32_tx(struct net_device *dev) } #ifdef CONFIG_PCNET32_NAPI -static int pcnet32_poll(struct net_device *dev, int *budget) +static int pcnet32_poll(struct napi_struct *napi, int budget) { - struct pcnet32_private *lp = netdev_priv(dev); - int quota = min(dev->quota, *budget); + struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); + struct net_device *dev = lp->dev; unsigned long ioaddr = dev->base_addr; unsigned long flags; + int work_done; u16 val; - quota = pcnet32_rx(dev, quota); + work_done = pcnet32_rx(dev, budget); spin_lock_irqsave(&lp->lock, flags); if (pcnet32_tx(dev)) { @@ -1399,28 +1413,22 @@ static int pcnet32_poll(struct net_device *dev, int *budget) } spin_unlock_irqrestore(&lp->lock, flags); - *budget -= quota; - dev->quota -= quota; - - if (dev->quota == 0) { - return 1; - } - - netif_rx_complete(dev); - - spin_lock_irqsave(&lp->lock, flags); + if (work_done < budget) { + spin_lock_irqsave(&lp->lock, flags); - /* clear interrupt masks */ - val = lp->a.read_csr(ioaddr, CSR3); - val &= 0x00ff; - lp->a.write_csr(ioaddr, CSR3, val); + __netif_rx_complete(dev, napi); - /* Set interrupt enable. */ - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); - mmiowb(); - spin_unlock_irqrestore(&lp->lock, flags); + /* clear interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); - return 0; + /* Set interrupt enable. 
*/ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); + mmiowb(); + spin_unlock_irqrestore(&lp->lock, flags); + } + return work_done; } #endif @@ -1506,16 +1514,12 @@ static const struct ethtool_ops pcnet32_ethtool_ops = { .get_link = pcnet32_get_link, .get_ringparam = pcnet32_get_ringparam, .set_ringparam = pcnet32_set_ringparam, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, .get_strings = pcnet32_get_strings, - .self_test_count = pcnet32_self_test_count, .self_test = pcnet32_ethtool_test, .phys_id = pcnet32_phys_id, .get_regs_len = pcnet32_get_regs_len, .get_regs = pcnet32_get_regs, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_sset_count = pcnet32_get_sset_count, }; /* only probes for non-PCI devices, the rest are handled by @@ -1816,9 +1820,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } lp->pci_dev = pdev; + lp->dev = dev; + spin_lock_init(&lp->lock); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); lp->name = chipname; lp->shared_irq = shared; @@ -1844,6 +1849,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->mii_if.mdio_read = mdio_read; lp->mii_if.mdio_write = mdio_write; +#ifdef CONFIG_PCNET32_NAPI + netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); +#endif + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) lp->options |= PCNET32_PORT_FD; @@ -1866,15 +1875,15 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) && dev->dev_addr[2] == 0x75) lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; - lp->init_block->mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ + lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ lp->init_block->tlen_rlen = - le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block->phys_addr[i] = dev->dev_addr[i]; lp->init_block->filter[0] = 0x00000000; lp->init_block->filter[1] = 0x00000000; - lp->init_block->rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block->tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); /* switch pcnet32 to 32bit mode */ a->write_bcr(ioaddr, 20, 2); @@ -1954,10 +1963,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dev->ethtool_ops = &pcnet32_ethtool_ops; dev->tx_timeout = pcnet32_tx_timeout; dev->watchdog_timeo = (5 * HZ); - dev->weight = lp->rx_ring_size / 2; -#ifdef CONFIG_PCNET32_NAPI - dev->poll = pcnet32_poll; -#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = pcnet32_poll_controller; @@ -2269,7 +2274,7 @@ static int pcnet32_open(struct net_device *dev) #endif lp->init_block->mode = - le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); pcnet32_load_multicast(dev); if (pcnet32_init_ring(dev)) { @@ -2277,6 +2282,10 @@ static int pcnet32_open(struct net_device *dev) goto err_free_ring; } +#ifdef CONFIG_PCNET32_NAPI + napi_enable(&lp->napi); +#endif + /* Re-initialize the PCNET32, and start it when done. 
*/ lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); @@ -2392,10 +2401,10 @@ static int pcnet32_init_ring(struct net_device *dev) lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]); - lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); wmb(); /* Make sure owner changes after all others are visible */ - lp->rx_ring[i].status = le16_to_cpu(0x8000); + lp->rx_ring[i].status = cpu_to_le16(0x8000); } /* The Tx buffer address is filled in as needed, but we do need to clear * the upper ownership bit. */ @@ -2407,11 +2416,11 @@ static int pcnet32_init_ring(struct net_device *dev) } lp->init_block->tlen_rlen = - le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block->phys_addr[i] = dev->dev_addr[i]; - lp->init_block->rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block->tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); wmb(); /* Make sure all changes are visible */ return 0; } @@ -2520,16 +2529,16 @@ static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Caution: the write order is important here, set the status * with the "ownership" bits last. */ - lp->tx_ring[entry].length = le16_to_cpu(-skb->len); + lp->tx_ring[entry].length = cpu_to_le16(-skb->len); lp->tx_ring[entry].misc = 0x00000000; lp->tx_skbuff[entry] = skb; lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]); + lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[entry].status = le16_to_cpu(status); + lp->tx_ring[entry].status = cpu_to_le16(status); lp->cur_tx++; lp->stats.tx_bytes += skb->len; @@ -2600,18 +2609,18 @@ pcnet32_interrupt(int irq, void *dev_id) /* unlike for the lance, there is no restart needed */ } #ifdef CONFIG_PCNET32_NAPI - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &lp->napi)) { u16 val; /* set interrupt masks */ val = lp->a.read_csr(ioaddr, CSR3); val |= 0x5f00; lp->a.write_csr(ioaddr, CSR3, val); mmiowb(); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &lp->napi); break; } #else - pcnet32_rx(dev, dev->weight); + pcnet32_rx(dev, lp->napi.weight); if (pcnet32_tx(dev)) { /* reset the chip to clear the error condition, then restart */ lp->a.reset(ioaddr); @@ -2646,6 +2655,9 @@ static int pcnet32_close(struct net_device *dev) del_timer_sync(&lp->watchdog_timer); netif_stop_queue(dev); +#ifdef CONFIG_PCNET32_NAPI + napi_disable(&lp->napi); +#endif spin_lock_irqsave(&lp->lock, flags); @@ -2697,7 +2709,7 @@ static void pcnet32_load_multicast(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); volatile struct pcnet32_init_block *ib = lp->init_block; - volatile u16 *mcast_table = (u16 *) & ib->filter; + volatile __le16 *mcast_table = (__le16 *)ib->filter; struct dev_mc_list *dmi = dev->mc_list; unsigned long ioaddr = dev->base_addr; char *addrs; @@ -2706,8 +2718,8 @@ static void pcnet32_load_multicast(struct net_device *dev) /* set all 
multicast bits */ if (dev->flags & IFF_ALLMULTI) { - ib->filter[0] = 0xffffffff; - ib->filter[1] = 0xffffffff; + ib->filter[0] = cpu_to_le32(~0U); + ib->filter[1] = cpu_to_le32(~0U); lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); @@ -2729,9 +2741,7 @@ static void pcnet32_load_multicast(struct net_device *dev) crc = ether_crc_le(6, addrs); crc = crc >> 26; - mcast_table[crc >> 4] = - le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | - (1 << (crc & 0xf))); + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); } for (i = 0; i < 4; i++) lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, @@ -2757,12 +2767,12 @@ static void pcnet32_set_multicast_list(struct net_device *dev) printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); lp->init_block->mode = - le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); } else { lp->init_block->mode = - le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); pcnet32_load_multicast(dev); } @@ -2944,6 +2954,33 @@ static void pcnet32_watchdog(struct net_device *dev) mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); } +static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) { + netif_device_detach(dev); + pcnet32_close(dev); + } + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int pcnet32_pm_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (netif_running(dev)) { + pcnet32_open(dev); + netif_device_attach(dev); + } + return 0; +} + static void __devexit pcnet32_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -2967,6 +3004,8 @@ static struct pci_driver pcnet32_driver = { .probe = pcnet32_probe_pci, .remove = __devexit_p(pcnet32_remove_one), .id_table = pcnet32_pci_tbl, + .suspend = pcnet32_pm_suspend, + .resume = pcnet32_pm_resume, }; /* An additional parameter that may be passed in... */ diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index dd09011c7ee5..54b2ba996640 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -76,4 +76,27 @@ config FIXED_MII_100_FDX bool "Emulation for 100M Fdx fixed PHY behavior" depends on FIXED_PHY +config FIXED_MII_1000_FDX + bool "Emulation for 1000M Fdx fixed PHY behavior" + depends on FIXED_PHY + +config FIXED_MII_AMNT + int "Number of emulated PHYs to allocate " + depends on FIXED_PHY + default "1" + ---help--- + Sometimes it is required to have several independent emulated + PHYs on the bus (in case of multi-eth but phy-less HW for instance). + This control will have specified number allocated for each fixed + PHY type enabled. + +config MDIO_BITBANG + tristate "Support for bitbanged MDIO buses" + help + This module implements the MDIO bus protocol in software, + for use by low level drivers that export the ability to + drive the relevant pins. + + If in doubt, say N. 
+ endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 8885650647ff..3d6cc7b67a80 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_BROADCOM_PHY) += broadcom.o obj-$(CONFIG_ICPLUS_PHY) += icplus.o obj-$(CONFIG_FIXED_PHY) += fixed.o +obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index bb966911a137..56191822fa26 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -30,53 +30,31 @@ #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> -#define MII_REGS_NUM 7 - -/* - The idea is to emulate normal phy behavior by responding with - pre-defined values to mii BMCR read, so that read_status hook could - take all the needed info. -*/ - -struct fixed_phy_status { - u8 link; - u16 speed; - u8 duplex; -}; - -/*----------------------------------------------------------------------------- - * Private information hoder for mii_bus - *-----------------------------------------------------------------------------*/ -struct fixed_info { - u16 *regs; - u8 regs_num; - struct fixed_phy_status phy_status; - struct phy_device *phydev; /* pointer to the container */ - /* link & speed cb */ - int(*link_update)(struct net_device*, struct fixed_phy_status*); - -}; +/* we need to track the allocated pointers in order to free them on exit */ +static struct fixed_info *fixed_phy_ptrs[CONFIG_FIXED_MII_AMNT*MAX_PHY_AMNT]; /*----------------------------------------------------------------------------- * If something weird is required to be done with link/speed, * network driver is able to assign a function to implement this. * May be useful for PHY's that need to be software-driven. 
*-----------------------------------------------------------------------------*/ -int fixed_mdio_set_link_update(struct phy_device* phydev, - int(*link_update)(struct net_device*, struct fixed_phy_status*)) +int fixed_mdio_set_link_update(struct phy_device *phydev, + int (*link_update) (struct net_device *, + struct fixed_phy_status *)) { struct fixed_info *fixed; - if(link_update == NULL) + if (link_update == NULL) return -EINVAL; - if(phydev) { - if(phydev->bus) { + if (phydev) { + if (phydev->bus) { fixed = phydev->bus->priv; fixed->link_update = link_update; return 0; @@ -84,54 +62,64 @@ int fixed_mdio_set_link_update(struct phy_device* phydev, } return -EINVAL; } + EXPORT_SYMBOL(fixed_mdio_set_link_update); +struct fixed_info *fixed_mdio_get_phydev (int phydev_ind) +{ + if (phydev_ind >= MAX_PHY_AMNT) + return NULL; + return fixed_phy_ptrs[phydev_ind]; +} + +EXPORT_SYMBOL(fixed_mdio_get_phydev); + /*----------------------------------------------------------------------------- * This is used for updating internal mii regs from the status *-----------------------------------------------------------------------------*/ -#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) +#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) || defined(CONFIG_FIXED_MII_1000_FDX) static int fixed_mdio_update_regs(struct fixed_info *fixed) { u16 *regs = fixed->regs; u16 bmsr = 0; u16 bmcr = 0; - if(!regs) { + if (!regs) { printk(KERN_ERR "%s: regs not set up", __FUNCTION__); return -EINVAL; } - if(fixed->phy_status.link) + if (fixed->phy_status.link) bmsr |= BMSR_LSTATUS; - if(fixed->phy_status.duplex) { + if (fixed->phy_status.duplex) { bmcr |= BMCR_FULLDPLX; - switch ( fixed->phy_status.speed ) { + switch (fixed->phy_status.speed) { case 100: bmsr |= BMSR_100FULL; bmcr |= BMCR_SPEED100; - break; + break; case 10: bmsr |= BMSR_10FULL; - break; + break; } } else { - switch ( fixed->phy_status.speed ) { + switch (fixed->phy_status.speed) { case 100: bmsr |= BMSR_100HALF; bmcr |= BMCR_SPEED100; - break; + break; case 10: bmsr |= BMSR_100HALF; - break; + break; } } - regs[MII_BMCR] = bmcr; - regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/ + regs[MII_BMCR] = bmcr; + regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx */ return 0; } @@ -141,29 +129,30 @@ static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location) struct fixed_info *fixed = bus->priv; /* if user has registered link update callback, use it */ - if(fixed->phydev) - if(fixed->phydev->attached_dev) { - if(fixed->link_update) { + if (fixed->phydev) + if (fixed->phydev->attached_dev) { + if (fixed->link_update) { fixed->link_update(fixed->phydev->attached_dev, - &fixed->phy_status); + &fixed->phy_status); fixed_mdio_update_regs(fixed); } - } + } if ((unsigned int)location >= fixed->regs_num) return -1; return fixed->regs[location]; } -static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, + u16 val) { - /* do nothing for now*/ + /* do nothing for now */ return 0; } static int fixed_mii_reset(struct mii_bus *bus) { - /*nothing here - no way/need to reset it*/ + /*nothing here - no way/need to reset it */ return 0; } #endif @@ -171,8 +160,8 @@ static int fixed_mii_reset(struct mii_bus *bus) static int fixed_config_aneg(struct phy_device *phydev) { /* :TODO:03/13/2006 09:45:37 PM:: - The full autoneg funcionality can be emulated, - but no need to have anything here 
for now + The full autoneg funcionality can be emulated, + but no need to have anything here for now */ return 0; } @@ -182,59 +171,79 @@ static int fixed_config_aneg(struct phy_device *phydev) * match will never return true... *-----------------------------------------------------------------------------*/ static struct phy_driver fixed_mdio_driver = { - .name = "Fixed PHY", - .features = PHY_BASIC_FEATURES, - .config_aneg = fixed_config_aneg, - .read_status = genphy_read_status, - .driver = { .owner = THIS_MODULE,}, + .name = "Fixed PHY", +#ifdef CONFIG_FIXED_MII_1000_FDX + .features = PHY_GBIT_FEATURES, +#else + .features = PHY_BASIC_FEATURES, +#endif + .config_aneg = fixed_config_aneg, + .read_status = genphy_read_status, + .driver = { .owner = THIS_MODULE, }, }; +static void fixed_mdio_release(struct device *dev) +{ + struct phy_device *phydev = container_of(dev, struct phy_device, dev); + struct mii_bus *bus = phydev->bus; + struct fixed_info *fixed = bus->priv; + + kfree(phydev); + kfree(bus->dev); + kfree(bus); + kfree(fixed->regs); + kfree(fixed); +} + /*----------------------------------------------------------------------------- * This func is used to create all the necessary stuff, bind * the fixed phy driver and register all it on the mdio_bus_type. - * speed is either 10 or 100, duplex is boolean. + * speed is either 10 or 100 or 1000, duplex is boolean. * number is used to create multiple fixed PHYs, so that several devices can * utilize them simultaneously. + * + * The device on mdio bus will look like [bus_id]:[phy_id], + * bus_id = number + * phy_id = speed+duplex. *-----------------------------------------------------------------------------*/ -#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) -static int fixed_mdio_register_device(int number, int speed, int duplex) +#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) || defined(CONFIG_FIXED_MII_1000_FDX) +struct fixed_info *fixed_mdio_register_device( + int bus_id, int speed, int duplex, u8 phy_id) { struct mii_bus *new_bus; struct fixed_info *fixed; struct phy_device *phydev; - int err = 0; + int err; - struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL); + struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (NULL == dev) - return -ENOMEM; + if (dev == NULL) + goto err_dev_alloc; new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); - if (NULL == new_bus) { - kfree(dev); - return -ENOMEM; - } + if (new_bus == NULL) + goto err_bus_alloc; + fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL); - if (NULL == fixed) { - kfree(dev); - kfree(new_bus); - return -ENOMEM; - } + if (fixed == NULL) + goto err_fixed_alloc; + + fixed->regs = kzalloc(MII_REGS_NUM * sizeof(int), GFP_KERNEL); + if (NULL == fixed->regs) + goto err_fixed_regs_alloc; - fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL); fixed->regs_num = MII_REGS_NUM; fixed->phy_status.speed = speed; fixed->phy_status.duplex = duplex; fixed->phy_status.link = 1; - new_bus->name = "Fixed MII Bus", - new_bus->read = &fixed_mii_read, - new_bus->write = &fixed_mii_write, - new_bus->reset = &fixed_mii_reset, - - /*set up workspace*/ + new_bus->name = "Fixed MII Bus"; + new_bus->read = &fixed_mii_read; + new_bus->write = &fixed_mii_write; + new_bus->reset = &fixed_mii_reset; + /*set up workspace */ fixed_mdio_update_regs(fixed); new_bus->priv = fixed; @@ -243,119 +252,110 @@ static int fixed_mdio_register_device(int number, int speed, int duplex) /* create phy_device and register it on the 
mdio bus */ phydev = phy_device_create(new_bus, 0, 0); + if (phydev == NULL) + goto err_phy_dev_create; /* - Put the phydev pointer into the fixed pack so that bus read/write code could - be able to access for instance attached netdev. Well it doesn't have to do - so, only in case of utilizing user-specified link-update... + * Put the phydev pointer into the fixed pack so that bus read/write + * code could be able to access for instance attached netdev. Well it + * doesn't have to do so, only in case of utilizing user-specified + * link-update... */ - fixed->phydev = phydev; - if(NULL == phydev) { - err = -ENOMEM; - goto device_create_fail; - } + fixed->phydev = phydev; + phydev->speed = speed; + phydev->duplex = duplex; phydev->irq = PHY_IGNORE_INTERRUPT; phydev->dev.bus = &mdio_bus_type; - if(number) - snprintf(phydev->dev.bus_id, BUS_ID_SIZE, - "fixed_%d@%d:%d", number, speed, duplex); - else - snprintf(phydev->dev.bus_id, BUS_ID_SIZE, - "fixed@%d:%d", speed, duplex); - phydev->bus = new_bus; + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + PHY_ID_FMT, bus_id, phy_id); - err = device_register(&phydev->dev); - if(err) { - printk(KERN_ERR "Phy %s failed to register\n", - phydev->dev.bus_id); - goto bus_register_fail; - } + phydev->bus = new_bus; - /* - the mdio bus has phy_id match... In order not to do it - artificially, we are binding the driver here by hand; - it will be the same for all the fixed phys anyway. - */ phydev->dev.driver = &fixed_mdio_driver.driver; - + phydev->dev.release = fixed_mdio_release; err = phydev->dev.driver->probe(&phydev->dev); - if(err < 0) { - printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); - goto probe_fail; + if (err < 0) { + printk(KERN_ERR "Phy %s: problems with fixed driver\n", + phydev->dev.bus_id); + goto err_out; } + err = device_register(&phydev->dev); + if (err) { + printk(KERN_ERR "Phy %s failed to register\n", + phydev->dev.bus_id); + goto err_out; + } + //phydev->state = PHY_RUNNING; /* make phy go up quick, but in 10Mbit/HDX + return fixed; - err = device_bind_driver(&phydev->dev); - if (err) - goto probe_fail; - - return 0; - -probe_fail: - device_unregister(&phydev->dev); -bus_register_fail: +err_out: kfree(phydev); -device_create_fail: - kfree(dev); - kfree(new_bus); +err_phy_dev_create: + kfree(fixed->regs); +err_fixed_regs_alloc: kfree(fixed); +err_fixed_alloc: + kfree(new_bus); +err_bus_alloc: + kfree(dev); +err_dev_alloc: + + return NULL; - return err; } #endif - MODULE_DESCRIPTION("Fixed PHY device & driver for PAL"); MODULE_AUTHOR("Vitaly Bordug"); MODULE_LICENSE("GPL"); static int __init fixed_init(void) { -#if 0 - int ret; - int duplex = 0; -#endif - - /* register on the bus... Not expected to be matched with anything there... */ + int cnt = 0; + int i; +/* register on the bus... Not expected to be matched + * with anything there... + * + */ phy_driver_register(&fixed_mdio_driver); - /* So let the fun begin... - We will create several mdio devices here, and will bound the upper - driver to them. - - Then the external software can lookup the phy bus by searching - fixed@speed:duplex, e.g. fixed@100:1, to be connected to the - virtual 100M Fdx phy. - - In case several virtual PHYs required, the bus_id will be in form - fixed_<num>@<speed>:<duplex>, which make it able even to define - driver-specific link control callback, if for instance PHY is completely - SW-driven. 
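[Editorial aside - a rough sketch, not part of the patch; all names are hypothetical.] The rewritten fixed_mdio_register_device() above replaces the repeated free-everything-allocated-so-far-and-return blocks with the usual kernel unwind idiom: one label per acquired resource, unwound in reverse order, with callers checking the returned pointer for NULL (as the new fixed_init() below does). The skeleton of that idiom:

#include <linux/slab.h>

struct foo {
	void *buf;
};

static int foo_hw_init(struct foo *f)		/* hypothetical third step */
{
	return 0;
}

static struct foo *foo_create(void)
{
	struct foo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f == NULL)
		goto err_alloc_foo;

	f->buf = kzalloc(64, GFP_KERNEL);
	if (f->buf == NULL)
		goto err_alloc_buf;

	if (foo_hw_init(f) < 0)
		goto err_hw_init;

	return f;

err_hw_init:
	kfree(f->buf);
err_alloc_buf:
	kfree(f);
err_alloc_foo:
	return NULL;
}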
- - */ - -#ifdef CONFIG_FIXED_MII_DUPLEX -#if 0 - duplex = 1; -#endif +/* We will create several mdio devices here, and will bound the upper + * driver to them. + * + * Then the external software can lookup the phy bus by searching + * for 0:101, to be connected to the virtual 100M Fdx phy. + * + * In case several virtual PHYs required, the bus_id will be in form + * [num]:[duplex]+[speed], which make it able even to define + * driver-specific link control callback, if for instance PHY is + * completely SW-driven. + */ + for (i=1; i <= CONFIG_FIXED_MII_AMNT; i++) { +#ifdef CONFIG_FIXED_MII_1000_FDX + fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(0, 1000, 1, i); #endif - #ifdef CONFIG_FIXED_MII_100_FDX - fixed_mdio_register_device(0, 100, 1); + fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(1, 100, 1, i); #endif - #ifdef CONFIG_FIXED_MII_10_FDX - fixed_mdio_register_device(0, 10, 1); + fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(2, 10, 1, i); #endif + } + return 0; } static void __exit fixed_exit(void) { + int i; + phy_driver_unregister(&fixed_mdio_driver); - /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */ + for (i=0; i < MAX_PHY_AMNT; i++) + if ( fixed_phy_ptrs[i] ) + device_unregister(&fixed_phy_ptrs[i]->phydev->dev); } module_init(fixed_init); diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c new file mode 100644 index 000000000000..8cd243d92af3 --- /dev/null +++ b/drivers/net/phy/mdio-bitbang.c @@ -0,0 +1,187 @@ +/* + * Bitbanged MDIO support. + * + * Author: Scott Wood <scottwood@freescale.com> + * Copyright (c) 2007 Freescale Semiconductor + * + * Based on CPM2 MDIO code which is: + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou <panto@intracom.gr> + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/module.h> +#include <linux/mdio-bitbang.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/delay.h> + +#define MDIO_READ 1 +#define MDIO_WRITE 0 + +#define MDIO_SETUP_TIME 10 +#define MDIO_HOLD_TIME 10 + +/* Minimum MDC period is 400 ns, plus some margin for error. MDIO_DELAY + * is done twice per period. + */ +#define MDIO_DELAY 250 + +/* The PHY may take up to 300 ns to produce data, plus some margin + * for error. + */ +#define MDIO_READ_DELAY 350 + +/* MDIO must already be configured as output. */ +static void mdiobb_send_bit(struct mdiobb_ctrl *ctrl, int val) +{ + const struct mdiobb_ops *ops = ctrl->ops; + + ops->set_mdio_data(ctrl, val); + ndelay(MDIO_DELAY); + ops->set_mdc(ctrl, 1); + ndelay(MDIO_DELAY); + ops->set_mdc(ctrl, 0); +} + +/* MDIO must already be configured as input. */ +static int mdiobb_get_bit(struct mdiobb_ctrl *ctrl) +{ + const struct mdiobb_ops *ops = ctrl->ops; + + ndelay(MDIO_DELAY); + ops->set_mdc(ctrl, 1); + ndelay(MDIO_READ_DELAY); + ops->set_mdc(ctrl, 0); + + return ops->get_mdio_data(ctrl); +} + +/* MDIO must already be configured as output. */ +static void mdiobb_send_num(struct mdiobb_ctrl *ctrl, u16 val, int bits) +{ + int i; + + for (i = bits - 1; i >= 0; i--) + mdiobb_send_bit(ctrl, (val >> i) & 1); +} + +/* MDIO must already be configured as input. 
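[Editorial aside - a rough sketch, not part of the patch; the foo_* and foo_gpio_* names are hypothetical, while the mdiobb_* names are the ones used by the new code itself.] The new mdio-bitbang.c is a small library: it clocks the standard MDIO frame (32-bit preamble, two start bits, a two-bit opcode, 5-bit PHY and register addresses, a turnaround, then 16 data bits) through four pin callbacks supplied via struct mdiobb_ops, and exposes the result as an ordinary mii_bus through alloc_mdio_bitbang() further down in the file. A user of the library might look roughly like this:

#include <linux/mdio-bitbang.h>
#include <linux/module.h>

static void foo_gpio_set(int pin, int value);		/* hypothetical board helpers */
static void foo_gpio_direction(int pin, int output);
static int foo_gpio_get(int pin);

struct foo_mdio {
	struct mdiobb_ctrl ctrl;	/* ctrl.ops points at foo_mdiobb_ops */
	int mdc_pin;
	int mdio_pin;
};

static void foo_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	struct foo_mdio *p = container_of(ctrl, struct foo_mdio, ctrl);

	foo_gpio_set(p->mdc_pin, level);
}

static void foo_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	struct foo_mdio *p = container_of(ctrl, struct foo_mdio, ctrl);

	foo_gpio_direction(p->mdio_pin, output);
}

static void foo_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	struct foo_mdio *p = container_of(ctrl, struct foo_mdio, ctrl);

	foo_gpio_set(p->mdio_pin, value);
}

static int foo_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct foo_mdio *p = container_of(ctrl, struct foo_mdio, ctrl);

	return foo_gpio_get(p->mdio_pin);
}

static const struct mdiobb_ops foo_mdiobb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= foo_set_mdc,
	.set_mdio_dir	= foo_set_mdio_dir,
	.set_mdio_data	= foo_set_mdio_data,
	.get_mdio_data	= foo_get_mdio_data,
};

At probe time the platform code sets p->ctrl.ops = &foo_mdiobb_ops, calls bus = alloc_mdio_bitbang(&p->ctrl), and registers the returned mii_bus as usual.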
*/ +static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) +{ + int i; + u16 ret = 0; + + for (i = bits - 1; i >= 0; i--) { + ret <<= 1; + ret |= mdiobb_get_bit(ctrl); + } + + return ret; +} + +/* Utility to send the preamble, address, and + * register (common to read and write). + */ +static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) +{ + const struct mdiobb_ops *ops = ctrl->ops; + int i; + + ops->set_mdio_dir(ctrl, 1); + + /* + * Send a 32 bit preamble ('1's) with an extra '1' bit for good + * measure. The IEEE spec says this is a PHY optional + * requirement. The AMD 79C874 requires one after power up and + * one after a MII communications error. This means that we are + * doing more preambles than we need, but it is safer and will be + * much more robust. + */ + + for (i = 0; i < 32; i++) + mdiobb_send_bit(ctrl, 1); + + /* send the start bit (01) and the read opcode (10) or write (10) */ + mdiobb_send_bit(ctrl, 0); + mdiobb_send_bit(ctrl, 1); + mdiobb_send_bit(ctrl, read); + mdiobb_send_bit(ctrl, !read); + + mdiobb_send_num(ctrl, phy, 5); + mdiobb_send_num(ctrl, reg, 5); +} + + +static int mdiobb_read(struct mii_bus *bus, int phy, int reg) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + int ret, i; + + mdiobb_cmd(ctrl, MDIO_READ, phy, reg); + ctrl->ops->set_mdio_dir(ctrl, 0); + + /* check the turnaround bit: the PHY should be driving it to zero */ + if (mdiobb_get_bit(ctrl) != 0) { + /* PHY didn't drive TA low -- flush any bits it + * may be trying to send. + */ + for (i = 0; i < 32; i++) + mdiobb_get_bit(ctrl); + + return 0xffff; + } + + ret = mdiobb_get_num(ctrl, 16); + mdiobb_get_bit(ctrl); + return ret; +} + +static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); + + /* send the turnaround (10) */ + mdiobb_send_bit(ctrl, 1); + mdiobb_send_bit(ctrl, 0); + + mdiobb_send_num(ctrl, val, 16); + + ctrl->ops->set_mdio_dir(ctrl, 0); + mdiobb_get_bit(ctrl); + return 0; +} + +struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) +{ + struct mii_bus *bus; + + bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + if (!bus) + return NULL; + + __module_get(ctrl->ops->owner); + + bus->read = mdiobb_read; + bus->write = mdiobb_write; + bus->priv = ctrl; + + return bus; +} + +void free_mdio_bitbang(struct mii_bus *bus) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + module_put(ctrl->ops->owner); + kfree(bus); +} diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index f71dab347667..9bc11773705b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -7,7 +7,7 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * Copyright (c) 2006 Maciej W. Rozycki + * Copyright (c) 2006, 2007 Maciej W. 
Rozycki * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -35,6 +35,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> +#include <asm/atomic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> @@ -204,7 +205,7 @@ static const struct phy_setting settings[] = { }, }; -#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting)) +#define MAX_NUM_SETTINGS ARRAY_SIZE(settings) /** * phy_find_setting - find a PHY settings array entry that matches speed & duplex @@ -261,7 +262,7 @@ void phy_sanitize_settings(struct phy_device *phydev) /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) - phydev->autoneg = 0; + phydev->autoneg = AUTONEG_DISABLE; idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex), features); @@ -374,7 +375,7 @@ int phy_mii_ioctl(struct phy_device *phydev, if (mii_data->phy_id == phydev->addr) { switch(mii_data->reg_num) { case MII_BMCR: - if (val & (BMCR_RESET|BMCR_ANENABLE)) + if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0) phydev->autoneg = AUTONEG_DISABLE; else phydev->autoneg = AUTONEG_ENABLE; @@ -409,6 +410,7 @@ int phy_mii_ioctl(struct phy_device *phydev, return 0; } +EXPORT_SYMBOL(phy_mii_ioctl); /** * phy_start_aneg - start auto-negotiation for this PHY device @@ -423,7 +425,7 @@ int phy_start_aneg(struct phy_device *phydev) { int err; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (AUTONEG_DISABLE == phydev->autoneg) phy_sanitize_settings(phydev); @@ -444,7 +446,7 @@ int phy_start_aneg(struct phy_device *phydev) } out_unlock: - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); return err; } EXPORT_SYMBOL(phy_start_aneg); @@ -489,10 +491,10 @@ void phy_stop_machine(struct phy_device *phydev) { del_timer_sync(&phydev->phy_timer); - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (phydev->state > PHY_UP) phydev->state = PHY_UP; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); phydev->adjust_state = NULL; } @@ -536,9 +538,9 @@ static void phy_force_reduction(struct phy_device *phydev) */ void phy_error(struct phy_device *phydev) { - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); phydev->state = PHY_HALTED; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); } /** @@ -561,6 +563,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) * queue will write the PHY to disable and clear the * interrupt, and then reenable the irq line. */ disable_irq_nosync(irq); + atomic_inc(&phydev->irq_disable); schedule_work(&phydev->phy_queue); @@ -631,6 +634,7 @@ int phy_start_interrupts(struct phy_device *phydev) INIT_WORK(&phydev->phy_queue, phy_change); + atomic_set(&phydev->irq_disable, 0); if (request_irq(phydev->irq, phy_interrupt, IRQF_SHARED, "phy_interrupt", @@ -661,13 +665,22 @@ int phy_stop_interrupts(struct phy_device *phydev) if (err) phy_error(phydev); + free_irq(phydev->irq, phydev); + /* - * Finish any pending work; we might have been scheduled to be called - * from keventd ourselves, but cancel_work_sync() handles that. + * Cannot call flush_scheduled_work() here as desired because + * of rtnl_lock(), but we do not really care about what would + * be done, except from enable_irq(), so cancel any work + * possibly pending and take care of the matter below. 
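[Editorial aside - a rough sketch, not part of the patch; the foo_* names are hypothetical.] The new phydev->irq_disable counter in the phy.c hunks above exists because disable_irq_nosync() and enable_irq() nest: every disable must eventually be matched by exactly one enable, and the work item that normally does the enable can be cancelled during teardown, which is what the comment here (continued below) explains. The balanced pattern in isolation:

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

struct foo_dev {
	int irq;
	atomic_t irq_disable;		/* outstanding disable_irq_nosync() calls */
	struct work_struct work;
};

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_dev *fd = dev_id;

	disable_irq_nosync(irq);	/* quiesce the (possibly shared) line */
	atomic_inc(&fd->irq_disable);	/* ...and remember that we did */
	schedule_work(&fd->work);
	return IRQ_HANDLED;
}

static void foo_work(struct work_struct *work)
{
	struct foo_dev *fd = container_of(work, struct foo_dev, work);

	/* ...acknowledge the device in process context... */
	atomic_dec(&fd->irq_disable);
	enable_irq(fd->irq);		/* normal re-enable path */
}

static void foo_stop(struct foo_dev *fd)
{
	free_irq(fd->irq, fd);
	cancel_work_sync(&fd->work);
	/* Re-enable once for every disable whose work item never ran. */
	while (atomic_dec_return(&fd->irq_disable) >= 0)
		enable_irq(fd->irq);
}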
*/ cancel_work_sync(&phydev->phy_queue); - - free_irq(phydev->irq, phydev); + /* + * If work indeed has been cancelled, disable_irq() will have + * been left unbalanced from phy_interrupt() and enable_irq() + * has to be called so that other devices on the line work. + */ + while (atomic_dec_return(&phydev->irq_disable) >= 0) + enable_irq(phydev->irq); return err; } @@ -689,11 +702,12 @@ static void phy_change(struct work_struct *work) if (err) goto phy_err; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) phydev->state = PHY_CHANGELINK; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); + atomic_dec(&phydev->irq_disable); enable_irq(phydev->irq); /* Reenable interrupts */ @@ -707,6 +721,7 @@ static void phy_change(struct work_struct *work) irq_enable_err: disable_irq(phydev->irq); + atomic_inc(&phydev->irq_disable); phy_err: phy_error(phydev); } @@ -717,13 +732,11 @@ phy_err: */ void phy_stop(struct phy_device *phydev) { - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (PHY_HALTED == phydev->state) goto out_unlock; - phydev->state = PHY_HALTED; - if (phydev->irq != PHY_POLL) { /* Disable PHY Interrupts */ phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); @@ -732,8 +745,10 @@ void phy_stop(struct phy_device *phydev) phy_clear_interrupt(phydev); } + phydev->state = PHY_HALTED; + out_unlock: - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); /* * Cannot call flush_scheduled_work() here as desired because @@ -755,7 +770,7 @@ out_unlock: */ void phy_start(struct phy_device *phydev) { - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); switch (phydev->state) { case PHY_STARTING: @@ -769,7 +784,7 @@ void phy_start(struct phy_device *phydev) default: break; } - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); } EXPORT_SYMBOL(phy_stop); EXPORT_SYMBOL(phy_start); @@ -781,7 +796,7 @@ static void phy_timer(unsigned long data) int needs_aneg = 0; int err = 0; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (phydev->adjust_state) phydev->adjust_state(phydev->attached_dev); @@ -947,7 +962,7 @@ static void phy_timer(unsigned long data) break; } - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); if (needs_aneg) err = phy_start_aneg(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index a8b74cdab1ea..c0461217b108 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -364,7 +364,7 @@ EXPORT_SYMBOL(genphy_config_advert); */ int genphy_setup_forced(struct phy_device *phydev) { - int ctl = BMCR_RESET; + int ctl = 0; phydev->pause = phydev->asym_pause = 0; @@ -644,7 +644,7 @@ static int phy_probe(struct device *dev) if (!(phydrv->flags & PHY_HAS_INTERRUPT)) phydev->irq = PHY_POLL; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); /* Start out supporting everything. 
Eventually, * a controller will attach, and may modify one @@ -658,7 +658,7 @@ static int phy_probe(struct device *dev) if (phydev->drv->probe) err = phydev->drv->probe(phydev); - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); return err; @@ -670,9 +670,9 @@ static int phy_remove(struct device *dev) phydev = to_phy_device(dev); - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); phydev->state = PHY_DOWN; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); if (phydev->drv->remove) phydev->drv->remove(phydev); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 596222b260d6..8874497b6bbf 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -21,6 +21,10 @@ /* Vitesse Extended Control Register 1 */ #define MII_VSC8244_EXT_CON1 0x17 #define MII_VSC8244_EXTCON1_INIT 0x0000 +#define MII_VSC8244_EXTCON1_TX_SKEW_MASK 0x0c00 +#define MII_VSC8244_EXTCON1_RX_SKEW_MASK 0x0300 +#define MII_VSC8244_EXTCON1_TX_SKEW 0x0800 +#define MII_VSC8244_EXTCON1_RX_SKEW 0x0200 /* Vitesse Interrupt Mask Register */ #define MII_VSC8244_IMASK 0x19 @@ -39,7 +43,7 @@ /* Vitesse Auxiliary Control/Status Register */ #define MII_VSC8244_AUX_CONSTAT 0x1c -#define MII_VSC8244_AUXCONSTAT_INIT 0x0004 +#define MII_VSC8244_AUXCONSTAT_INIT 0x0000 #define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020 #define MII_VSC8244_AUXCONSTAT_SPEED 0x0018 #define MII_VSC8244_AUXCONSTAT_GBIT 0x0010 @@ -51,6 +55,7 @@ MODULE_LICENSE("GPL"); static int vsc824x_config_init(struct phy_device *phydev) { + int extcon; int err; err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT, @@ -58,14 +63,34 @@ static int vsc824x_config_init(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_VSC8244_EXT_CON1, - MII_VSC8244_EXTCON1_INIT); + extcon = phy_read(phydev, MII_VSC8244_EXT_CON1); + + if (extcon < 0) + return err; + + extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK | + MII_VSC8244_EXTCON1_RX_SKEW_MASK); + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + extcon |= (MII_VSC8244_EXTCON1_TX_SKEW | + MII_VSC8244_EXTCON1_RX_SKEW); + + err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon); + return err; } static int vsc824x_ack_interrupt(struct phy_device *phydev) { - int err = phy_read(phydev, MII_VSC8244_ISTAT); + int err = 0; + + /* + * Don't bother to ACK the interrupts if interrupts + * are disabled. The 824x cannot clear the interrupts + * if they are disabled. + */ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + err = phy_read(phydev, MII_VSC8244_ISTAT); return (err < 0) ? 
err : 0; } @@ -77,8 +102,19 @@ static int vsc824x_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) err = phy_write(phydev, MII_VSC8244_IMASK, MII_VSC8244_IMASK_MASK); - else + else { + /* + * The Vitesse PHY cannot clear the interrupt + * once it has disabled them, so we clear them first + */ + err = phy_read(phydev, MII_VSC8244_ISTAT); + + if (err < 0) + return err; + err = phy_write(phydev, MII_VSC8244_IMASK, 0); + } + return err; } diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 8754cf3356b0..b5e9981d1060 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -148,13 +148,12 @@ static void plip_interrupt(int irq, void *dev_id); /* Functions for DEV methods */ static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev); static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len); -static int plip_hard_header_cache(struct neighbour *neigh, + unsigned short type, const void *daddr, + const void *saddr, unsigned len); +static int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh); static int plip_open(struct net_device *dev); static int plip_close(struct net_device *dev); -static struct net_device_stats *plip_get_stats(struct net_device *dev); static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static int plip_preempt(void *handle); static void plip_wakeup(void *handle); @@ -206,7 +205,6 @@ struct plip_local { }; struct net_local { - struct net_device_stats enet_stats; struct net_device *dev; struct work_struct immediate; struct delayed_work deferred; @@ -221,11 +219,6 @@ struct net_local { int is_deferred; int port_owner; int should_relinquish; - int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len); - int (*orig_hard_header_cache)(struct neighbour *neigh, - struct hh_cache *hh); spinlock_t lock; atomic_t kill_timer; struct semaphore killed_timer_sem; @@ -267,6 +260,11 @@ static inline unsigned char read_status (struct net_device *dev) return port->ops->read_status (port); } +static const struct header_ops plip_header_ops = { + .create = plip_hard_header, + .cache = plip_hard_header_cache, +}; + /* Entry point of PLIP driver. Probe the hardware, and register/initialize the driver. @@ -285,19 +283,13 @@ plip_init_netdev(struct net_device *dev) dev->hard_start_xmit = plip_tx_packet; dev->open = plip_open; dev->stop = plip_close; - dev->get_stats = plip_get_stats; dev->do_ioctl = plip_ioctl; - dev->header_cache_update = NULL; + dev->tx_queue_len = 10; dev->flags = IFF_POINTOPOINT|IFF_NOARP; memset(dev->dev_addr, 0xfc, ETH_ALEN); - /* Set the private structure */ - nl->orig_hard_header = dev->hard_header; - dev->hard_header = plip_hard_header; - - nl->orig_hard_header_cache = dev->hard_header_cache; - dev->hard_header_cache = plip_hard_header_cache; + dev->header_ops = &plip_header_ops; nl->port_owner = 0; @@ -430,8 +422,8 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, dev->name, snd->state, c0); } else error = HS_TIMEOUT; - nl->enet_stats.tx_errors++; - nl->enet_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } else if (nl->connection == PLIP_CN_RECEIVE) { if (rcv->state == PLIP_PK_TRIGGER) { /* Transmission was interrupted. 
*/ @@ -448,7 +440,7 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n", dev->name, rcv->state, c0); } - nl->enet_stats.rx_dropped++; + dev->stats.rx_dropped++; } rcv->state = PLIP_PK_DONE; if (rcv->skb) { @@ -661,7 +653,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, &rcv->nibble, &rcv->data)) return TIMEOUT; if (rcv->data != rcv->checksum) { - nl->enet_stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (net_debug) printk(KERN_DEBUG "%s: checksum error\n", dev->name); return ERROR; @@ -673,8 +665,8 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, rcv->skb->protocol=plip_type_trans(rcv->skb, dev); netif_rx(rcv->skb); dev->last_rx = jiffies; - nl->enet_stats.rx_bytes += rcv->length.h; - nl->enet_stats.rx_packets++; + dev->stats.rx_bytes += rcv->length.h; + dev->stats.rx_packets++; rcv->skb = NULL; if (net_debug > 2) printk(KERN_DEBUG "%s: receive end\n", dev->name); @@ -776,7 +768,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, if (nl->connection == PLIP_CN_RECEIVE) { spin_unlock_irq(&nl->lock); /* Interrupted. */ - nl->enet_stats.collisions++; + dev->stats.collisions++; return OK; } c0 = read_status(dev); @@ -792,7 +784,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, {enable,disable}_irq *counts* them. -- AV */ ENABLE(dev->irq); - nl->enet_stats.collisions++; + dev->stats.collisions++; return OK; } disable_parport_interrupts (dev); @@ -840,9 +832,9 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, &snd->nibble, snd->checksum)) return TIMEOUT; - nl->enet_stats.tx_bytes += snd->skb->len; + dev->stats.tx_bytes += snd->skb->len; dev_kfree_skb(snd->skb); - nl->enet_stats.tx_packets++; + dev->stats.tx_packets++; snd->state = PLIP_PK_DONE; case PLIP_PK_DONE: @@ -996,14 +988,14 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev) } static void -plip_rewrite_address(struct net_device *dev, struct ethhdr *eth) +plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth) { - struct in_device *in_dev; + const struct in_device *in_dev = dev->ip_ptr; - if ((in_dev=dev->ip_ptr) != NULL) { + if (in_dev) { /* Any address will do - we take the first */ - struct in_ifaddr *ifa=in_dev->ifa_list; - if (ifa != NULL) { + const struct in_ifaddr *ifa = in_dev->ifa_list; + if (ifa) { memcpy(eth->h_source, dev->dev_addr, 6); memset(eth->h_dest, 0xfc, 2); memcpy(eth->h_dest+2, &ifa->ifa_address, 4); @@ -1013,26 +1005,25 @@ plip_rewrite_address(struct net_device *dev, struct ethhdr *eth) static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { - struct net_local *nl = netdev_priv(dev); int ret; - if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0) + ret = eth_header(skb, dev, type, daddr, saddr, len); + if (ret >= 0) plip_rewrite_address (dev, (struct ethhdr *)skb->data); return ret; } -int plip_hard_header_cache(struct neighbour *neigh, +int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { - struct net_local *nl = neigh->dev->priv; int ret; - if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0) - { + ret = eth_header_cache(neigh, hh); + if (ret == 0) { struct ethhdr *eth; eth = (struct ethhdr*)(((u8*)hh->hh_data) + @@ -1199,15 +1190,6 @@ plip_wakeup(void *handle) return; } -static struct net_device_stats * 
-plip_get_stats(struct net_device *dev) -{ - struct net_local *nl = netdev_priv(dev); - struct net_device_stats *r = &nl->enet_stats; - - return r; -} - static int plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -1278,7 +1260,6 @@ static void plip_attach (struct parport *port) strcpy(dev->name, name); - SET_MODULE_OWNER(dev); dev->irq = port->irq; dev->base_addr = port->base; if (port->irq == -1) { diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index caabbc408c34..27f5b904f48e 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -159,12 +159,11 @@ ppp_asynctty_open(struct tty_struct *tty) int err; err = -ENOMEM; - ap = kmalloc(sizeof(*ap), GFP_KERNEL); + ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (ap == 0) goto out; /* initialize the asyncppp structure */ - memset(ap, 0, sizeof(*ap)); ap->tty = tty; ap->mru = PPP_MRU; spin_lock_init(&ap->xmit_lock); diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 72c8d6628f58..eb98b661efba 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c @@ -121,12 +121,11 @@ static void *z_comp_alloc(unsigned char *options, int opt_len) if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) return NULL; - state = kmalloc(sizeof(*state), + state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) return NULL; - memset (state, 0, sizeof (struct ppp_deflate_state)); state->strm.next_in = NULL; state->w_size = w_size; state->strm.workspace = vmalloc(zlib_deflate_workspacesize()); @@ -341,11 +340,10 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len) if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) return NULL; - state = kmalloc(sizeof(*state), GFP_KERNEL); + state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) return NULL; - memset (state, 0, sizeof (struct ppp_deflate_state)); state->w_size = w_size; state->strm.next_out = NULL; state->strm.workspace = kmalloc(zlib_inflate_workspacesize(), diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 3ef0092dc09c..4b49d0e8c7eb 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -899,17 +899,9 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Put the 2-byte PPP protocol number on the front, making sure there is room for the address and control fields. 
*/ - if (skb_headroom(skb) < PPP_HDRLEN) { - struct sk_buff *ns; - - ns = alloc_skb(skb->len + dev->hard_header_len, GFP_ATOMIC); - if (ns == 0) - goto outf; - skb_reserve(ns, dev->hard_header_len); - skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len); - kfree_skb(skb); - skb = ns; - } + if (skb_cow_head(skb, PPP_HDRLEN)) + goto outf; + pp = skb_push(skb, 2); proto = npindex_to_proto[npi]; pp[0] = proto >> 8; @@ -1533,7 +1525,7 @@ ppp_input_error(struct ppp_channel *chan, int code) static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) { - if (skb->len >= 2) { + if (pskb_may_pull(skb, 2)) { #ifdef CONFIG_PPP_MULTILINK /* XXX do channel-level decompression here */ if (PPP_PROTO(skb) == PPP_MP) @@ -1585,7 +1577,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP)) goto err; - if (skb_tailroom(skb) < 124) { + if (skb_tailroom(skb) < 124 || skb_cloned(skb)) { /* copy to a new sk_buff with more tailroom */ ns = dev_alloc_skb(skb->len + 128); if (ns == 0) { @@ -1656,23 +1648,29 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* check if the packet passes the pass and active filters */ /* the filter instructions are constructed assuming a four-byte PPP header on each packet */ - *skb_push(skb, 2) = 0; - if (ppp->pass_filter - && sk_run_filter(skb, ppp->pass_filter, - ppp->pass_len) == 0) { - if (ppp->debug & 1) - printk(KERN_DEBUG "PPP: inbound frame not passed\n"); - kfree_skb(skb); - return; - } - if (!(ppp->active_filter - && sk_run_filter(skb, ppp->active_filter, - ppp->active_len) == 0)) - ppp->last_recv = jiffies; - skb_pull(skb, 2); -#else - ppp->last_recv = jiffies; + if (ppp->pass_filter || ppp->active_filter) { + if (skb_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto err; + + *skb_push(skb, 2) = 0; + if (ppp->pass_filter + && sk_run_filter(skb, ppp->pass_filter, + ppp->pass_len) == 0) { + if (ppp->debug & 1) + printk(KERN_DEBUG "PPP: inbound frame " + "not passed\n"); + kfree_skb(skb); + return; + } + if (!(ppp->active_filter + && sk_run_filter(skb, ppp->active_filter, + ppp->active_len) == 0)) + ppp->last_recv = jiffies; + __skb_pull(skb, 2); + } else #endif /* CONFIG_PPP_FILTER */ + ppp->last_recv = jiffies; if ((ppp->dev->flags & IFF_UP) == 0 || ppp->npmode[npi] != NPMODE_PASS) { @@ -1726,7 +1724,7 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) } /* the decompressor still expects the A/C bytes in the hdr */ len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, - skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN); + skb->len + 2, ns->data, obuff_size); if (len < 0) { /* Pass the compressed frame to pppd as an error indication. */ @@ -1770,7 +1768,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) struct channel *ch; int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? 
MPHDRLEN_SSN: MPHDRLEN; - if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0) + if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) goto err; /* no good, throw it away */ /* Decode sequence number and begin/end bits */ @@ -2684,8 +2682,7 @@ static void __exit ppp_cleanup(void) if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) printk(KERN_ERR "PPP: removing module but units remain!\n"); cardmap_destroy(&all_ppp_units); - if (unregister_chrdev(PPP_MAJOR, "ppp") != 0) - printk(KERN_ERR "PPP: failed to unregister PPP device\n"); + unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); } diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index d5bdd2574659..c0b6d19d1457 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c @@ -136,7 +136,7 @@ struct ppp_mppe_state { * Key Derivation, from RFC 3078, RFC 3079. * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. */ -static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) +static void get_new_key_from_sha(struct ppp_mppe_state * state) { struct hash_desc desc; struct scatterlist sg[4]; @@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I desc.flags = 0; crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); - - memcpy(InterimKey, state->sha1_digest, state->keylen); } /* @@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I */ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { - unsigned char InterimKey[MPPE_MAX_KEY_LEN]; struct scatterlist sg_in[1], sg_out[1]; struct blkcipher_desc desc = { .tfm = state->arc4 }; - get_new_key_from_sha(state, InterimKey); + get_new_key_from_sha(state); if (!initial_key) { - crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); - setup_sg(sg_in, InterimKey, state->keylen); + crypto_blkcipher_setkey(state->arc4, state->sha1_digest, + state->keylen); + setup_sg(sg_in, state->sha1_digest, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, state->keylen) != 0) { printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { - memcpy(state->session_key, InterimKey, state->keylen); + memcpy(state->session_key, state->sha1_digest, state->keylen); } if (state->keylen == 8) { /* See RFC 3078 */ @@ -200,11 +198,10 @@ static void *mppe_alloc(unsigned char *options, int optlen) || options[0] != CI_MPPE || options[1] != CILEN_MPPE) goto out; - state = kmalloc(sizeof(*state), GFP_KERNEL); + state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) goto out; - memset(state, 0, sizeof(*state)); state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->arc4)) { diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 5918fab38349..ce64032a465a 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -207,13 +207,12 @@ ppp_sync_open(struct tty_struct *tty) struct syncppp *ap; int err; - ap = kmalloc(sizeof(*ap), GFP_KERNEL); + ap = kzalloc(sizeof(*ap), GFP_KERNEL); err = -ENOMEM; if (ap == 0) goto out; /* initialize the syncppp structure */ - memset(ap, 0, sizeof(*ap)); ap->tty = tty; ap->mru = PPP_MRU; spin_lock_init(&ap->xmit_lock); diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 6f98834e6ace..8936ed3469cf 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -78,6 +78,7 @@ #include 
<linux/proc_fs.h> #include <linux/seq_file.h> +#include <net/net_namespace.h> #include <net/sock.h> #include <asm/uaccess.h> @@ -102,25 +103,30 @@ static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) (memcmp(a->remote, b->remote, ETH_ALEN) == 0)); } -static inline int cmp_addr(struct pppoe_addr *a, unsigned long sid, char *addr) +static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) { return (a->sid == sid && (memcmp(a->remote,addr,ETH_ALEN) == 0)); } -static int hash_item(unsigned long sid, unsigned char *addr) +#if 8%PPPOE_HASH_BITS +#error 8 must be a multiple of PPPOE_HASH_BITS +#endif + +static int hash_item(__be16 sid, unsigned char *addr) { - char hash = 0; - int i, j; + unsigned char hash = 0; + unsigned int i; - for (i = 0; i < ETH_ALEN ; ++i) { - for (j = 0; j < 8/PPPOE_HASH_BITS ; ++j) { - hash ^= addr[i] >> ( j * PPPOE_HASH_BITS ); - } + for (i = 0 ; i < ETH_ALEN ; i++) { + hash ^= addr[i]; + } + for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ + hash ^= (__force __u32)sid>>i; + } + for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) { + hash ^= hash>>i; } - - for (i = 0; i < (sizeof(unsigned long)*8) / PPPOE_HASH_BITS ; ++i) - hash ^= sid >> (i*PPPOE_HASH_BITS); return hash & ( PPPOE_HASH_SIZE - 1 ); } @@ -133,7 +139,7 @@ static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE]; * Set/get/delete/rehash items (internal versions) * **********************************************************************/ -static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex) +static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret; @@ -165,7 +171,7 @@ static int __set_item(struct pppox_sock *po) return 0; } -static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex) +static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret, **src; @@ -191,7 +197,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifind * Set/get/delete/rehash items * **********************************************************************/ -static inline struct pppox_sock *get_item(unsigned long sid, +static inline struct pppox_sock *get_item(__be16 sid, unsigned char *addr, int ifindex) { struct pppox_sock *po; @@ -210,7 +216,7 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) struct net_device *dev; int ifindex; - dev = dev_get_by_name(sp->sa_addr.pppoe.dev); + dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); if(!dev) return NULL; ifindex = dev->ifindex; @@ -218,7 +224,7 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); } -static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) +static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex) { struct pppox_sock *ret; @@ -295,6 +301,9 @@ static int pppoe_device_event(struct notifier_block *this, { struct net_device *dev = (struct net_device *) ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* Only look at sockets that are using this specific device. 
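[Editorial aside - a rough sketch, not part of the patch; the helper name is hypothetical.] The rewritten hash_item() above XORs the six MAC-address bytes and the bytes of the (now __be16) session id into an 8-bit accumulator, then folds that accumulator down to PPPOE_HASH_BITS bits by repeatedly XOR-ing the top half into the bottom half; the new build-time #error enforces that 8 is a multiple of PPPOE_HASH_BITS so the fold lands exactly on the table size. The fold step in isolation:

static unsigned int foo_fold_to_bits(unsigned char hash, unsigned int bits)
{
	unsigned int i;

	for (i = 8; (i >>= 1) >= bits; )
		hash ^= hash >> i;	/* e.g. bits == 4: fold the top nibble once */

	return hash & ((1U << bits) - 1);
}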
*/ switch (event) { case NETDEV_CHANGEMTU: @@ -380,15 +389,18 @@ static int pppoe_rcv(struct sk_buff *skb, struct pppoe_hdr *ph; struct pppox_sock *po; - if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) - goto drop; - if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; + if (dev->nd_net != &init_net) + goto drop; + + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) + goto drop; + ph = pppoe_hdr(skb); - po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); + po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po != NULL) return sk_receive_skb(sk_pppox(po), skb, 0); drop: @@ -412,6 +424,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct pppoe_hdr *ph; struct pppox_sock *po; + if (dev->nd_net != &init_net) + goto abort; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; @@ -422,7 +437,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb, if (ph->code != PADT_CODE) goto abort; - po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); + po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po) { struct sock *sk = sk_pppox(po); @@ -471,12 +486,12 @@ static struct proto pppoe_sk_proto = { * Initialize a new struct sock. * **********************************************************************/ -static int pppoe_create(struct socket *sock) +static int pppoe_create(struct net *net, struct socket *sock) { int error = -ENOMEM; struct sock *sk; - sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, 1); + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, 1); if (!sk) goto out; @@ -588,7 +603,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, /* Don't re-bind if sid==0 */ if (sp->sa_addr.pppoe.sid != 0) { - dev = dev_get_by_name(sp->sa_addr.pppoe.dev); + dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); error = -ENODEV; if (!dev) @@ -664,8 +679,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); - int val = 0; - int err = 0; + int val; + int err; switch (cmd) { case PPPIOCGMRU: @@ -754,8 +769,9 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, err = 0; break; - default:; - }; + default: + err = -ENOTTY; + } return err; } @@ -773,6 +789,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct net_device *dev; char *start; + lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { error = -ENOTCONN; goto end; @@ -783,8 +800,6 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, hdr.code = 0; hdr.sid = po->num; - lock_sock(sk); - dev = po->pppoe_dev; error = -EMSGSIZE; @@ -819,8 +834,8 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, } error = total_len; - dev->hard_header(skb, dev, ETH_P_PPP_SES, - po->pppoe_pa.remote, NULL, total_len); + dev_hard_header(skb, dev, ETH_P_PPP_SES, + po->pppoe_pa.remote, NULL, total_len); memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); @@ -843,71 +858,44 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = po->pppoe_dev; - struct pppoe_hdr hdr; struct pppoe_hdr *ph; - int headroom = skb_headroom(skb); int data_len = skb->len; - struct sk_buff *skb2; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; - hdr.ver = 1; - hdr.type = 1; - hdr.code = 0; - hdr.sid = po->num; - hdr.length = htons(skb->len); - if (!dev) goto abort; - /* Copy the skb if there is no 
space for the header. */ - if (headroom < (sizeof(struct pppoe_hdr) + dev->hard_header_len)) { - skb2 = dev_alloc_skb(32+skb->len + - sizeof(struct pppoe_hdr) + - dev->hard_header_len); - - if (skb2 == NULL) - goto abort; - - skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr)); - skb_copy_from_linear_data(skb, skb_put(skb2, skb->len), - skb->len); - } else { - /* Make a clone so as to not disturb the original skb, - * give dev_queue_xmit something it can free. - */ - skb2 = skb_clone(skb, GFP_ATOMIC); - - if (skb2 == NULL) - goto abort; - } - - ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr)); - memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); - skb2->protocol = __constant_htons(ETH_P_PPP_SES); + /* Copy the data if there is no space for the header or if it's + * read-only. + */ + if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) + goto abort; - skb_reset_network_header(skb2); + __skb_push(skb, sizeof(*ph)); + skb_reset_network_header(skb); - skb2->dev = dev; + ph = pppoe_hdr(skb); + ph->ver = 1; + ph->type = 1; + ph->code = 0; + ph->sid = po->num; + ph->length = htons(data_len); - dev->hard_header(skb2, dev, ETH_P_PPP_SES, - po->pppoe_pa.remote, NULL, data_len); + skb->protocol = __constant_htons(ETH_P_PPP_SES); + skb->dev = dev; - /* We're transmitting skb2, and assuming that dev_queue_xmit - * will free it. The generic ppp layer however, is expecting - * that we give back 'skb' (not 'skb2') in case of failure, - * but free it in case of success. - */ + dev_hard_header(skb, dev, ETH_P_PPP_SES, + po->pppoe_pa.remote, NULL, data_len); - if (dev_queue_xmit(skb2) < 0) - goto abort; + dev_queue_xmit(skb); - kfree_skb(skb); return 1; abort: - return 0; + kfree_skb(skb); + return 1; } @@ -967,6 +955,7 @@ static int pppoe_seq_show(struct seq_file *seq, void *v) { struct pppox_sock *po; char *dev_name; + DECLARE_MAC_BUF(mac); if (v == SEQ_START_TOKEN) { seq_puts(seq, "Id Address Device\n"); @@ -976,11 +965,8 @@ static int pppoe_seq_show(struct seq_file *seq, void *v) po = v; dev_name = po->pppoe_pa.dev; - seq_printf(seq, "%08X %02X:%02X:%02X:%02X:%02X:%02X %8s\n", - po->pppoe_pa.sid, - po->pppoe_pa.remote[0], po->pppoe_pa.remote[1], - po->pppoe_pa.remote[2], po->pppoe_pa.remote[3], - po->pppoe_pa.remote[4], po->pppoe_pa.remote[5], dev_name); + seq_printf(seq, "%08X %s %8s\n", + po->pppoe_pa.sid, print_mac(mac, po->pppoe_pa.remote), dev_name); out: return 0; } @@ -1064,7 +1050,7 @@ static int __init pppoe_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("net/pppoe", S_IRUGO, NULL); + p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net); if (!p) return -ENOMEM; @@ -1135,7 +1121,7 @@ static void __exit pppoe_exit(void) dev_remove_pack(&pppoes_ptype); dev_remove_pack(&pppoed_ptype); unregister_netdevice_notifier(&pppoe_notifier); - remove_proc_entry("net/pppoe", NULL); + remove_proc_entry("pppoe", init_net.proc_net); proto_unregister(&pppoe_sk_proto); } diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 5891a0fbdc8b..921d4ef6d14b 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -91,6 +91,7 @@ #include <linux/hash.h> #include <linux/sort.h> #include <linux/proc_fs.h> +#include <net/net_namespace.h> #include <net/dst.h> #include <net/ip.h> #include <net/udp.h> @@ -491,44 +492,46 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) u16 hdrflags; u16 tunnel_id, session_id; int length; - struct udphdr *uh; + int offset; tunnel = pppol2tp_sock_to_tunnel(sock); if (tunnel == NULL) goto error; 
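[Editorial aside - a rough sketch, not part of the patch; the foo_* header is hypothetical.] Several transmit paths in this patch (ppp_start_xmit and __pppoe_xmit above, pppol2tp_xmit below) drop their hand-rolled "reallocate if there is not enough headroom" code in favour of skb_cow_head(skb, len), which guarantees len bytes of writable headroom and un-shares or reallocates the skb head only when actually needed. The idiom in isolation:

#include <linux/skbuff.h>

struct foo_hdr {
	__be16 sid;
	__be16 length;
};

static int foo_push_header(struct sk_buff *skb, __be16 sid)
{
	struct foo_hdr *h;

	if (skb_cow_head(skb, sizeof(*h)))
		return -ENOMEM;		/* caller drops the packet */

	h = (struct foo_hdr *)__skb_push(skb, sizeof(*h));
	h->sid = sid;
	h->length = htons(skb->len - sizeof(*h));
	return 0;
}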
+ /* UDP always verifies the packet length. */ + __skb_pull(skb, sizeof(struct udphdr)); + /* Short packet? */ - if (skb->len < sizeof(struct udphdr)) { + if (!pskb_may_pull(skb, 12)) { PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); goto error; } /* Point to L2TP header */ - ptr = skb->data + sizeof(struct udphdr); + ptr = skb->data; /* Get L2TP header flags */ hdrflags = ntohs(*(__be16*)ptr); /* Trace packet contents, if enabled */ if (tunnel->debug & PPPOL2TP_MSG_DATA) { + length = min(16u, skb->len); + if (!pskb_may_pull(skb, length)) + goto error; + printk(KERN_DEBUG "%s: recv: ", tunnel->name); - for (length = 0; length < 16; length++) - printk(" %02X", ptr[length]); + offset = 0; + do { + printk(" %02X", ptr[offset]); + } while (++offset < length); + printk("\n"); } /* Get length of L2TP packet */ - uh = (struct udphdr *) skb_transport_header(skb); - length = ntohs(uh->len) - sizeof(struct udphdr); - - /* Too short? */ - if (length < 12) { - PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, - "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length); - goto error; - } + length = skb->len; /* If type is control packet, it is handled by userspace. */ if (hdrflags & L2TP_HDRFLAG_T) { @@ -606,7 +609,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) "%s: recv data has no seq numbers when required. " "Discarding\n", session->name); session->stats.rx_seq_discards++; - session->stats.rx_errors++; goto discard; } @@ -625,7 +627,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) "%s: recv data has no seq numbers when required. " "Discarding\n", session->name); session->stats.rx_seq_discards++; - session->stats.rx_errors++; goto discard; } @@ -634,10 +635,14 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) } /* If offset bit set, skip it. */ - if (hdrflags & L2TP_HDRFLAG_O) - ptr += 2 + ntohs(*(__be16 *) ptr); + if (hdrflags & L2TP_HDRFLAG_O) { + offset = ntohs(*(__be16 *)ptr); + skb->transport_header += 2 + offset; + if (!pskb_may_pull(skb, skb_transport_offset(skb) + 2)) + goto discard; + } - skb_pull(skb, ptr - skb->data); + __skb_pull(skb, skb_transport_offset(skb)); /* Skip PPP header, if present. 
In testing, Microsoft L2TP clients * don't send the PPP header (PPP header compression enabled), but @@ -673,7 +678,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) */ if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { session->stats.rx_seq_discards++; - session->stats.rx_errors++; PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, "%s: oos pkt %hu len %d discarded, " "waiting for %hu, reorder_q_len=%d\n", @@ -698,6 +702,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) return 0; discard: + session->stats.rx_errors++; kfree_skb(skb); sock_put(session->sock); @@ -824,6 +829,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh struct pppol2tp_session *session; struct pppol2tp_tunnel *tunnel; struct udphdr *uh; + unsigned int len; error = -ENOTCONN; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) @@ -912,14 +918,15 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh } /* Queue the packet to IP for output */ + len = skb->len; error = ip_queue_xmit(skb, 1); /* Update stats */ if (error >= 0) { tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += skb->len; + tunnel->stats.tx_bytes += len; session->stats.tx_packets++; - session->stats.tx_bytes += skb->len; + session->stats.tx_bytes += len; } else { tunnel->stats.tx_errors++; session->stats.tx_errors++; @@ -956,8 +963,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) int data_len = skb->len; struct inet_sock *inet; __wsum csum = 0; - struct sk_buff *skb2 = NULL; struct udphdr *uh; + unsigned int len; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; @@ -986,41 +993,30 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) */ headroom = NET_SKB_PAD + sizeof(struct iphdr) + sizeof(struct udphdr) + hdr_len + sizeof(ppph); - if (skb_headroom(skb) < headroom) { - skb2 = skb_realloc_headroom(skb, headroom); - if (skb2 == NULL) - goto abort; - } else - skb2 = skb; - - /* Check that the socket has room */ - if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf) - skb_set_owner_w(skb2, sk_tun); - else - goto discard; + if (skb_cow_head(skb, headroom)) + goto abort; /* Setup PPP header */ - skb_push(skb2, sizeof(ppph)); - skb2->data[0] = ppph[0]; - skb2->data[1] = ppph[1]; + __skb_push(skb, sizeof(ppph)); + skb->data[0] = ppph[0]; + skb->data[1] = ppph[1]; /* Setup L2TP header */ - skb_push(skb2, hdr_len); - pppol2tp_build_l2tp_header(session, skb2->data); + pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len)); /* Setup UDP header */ inet = inet_sk(sk_tun); - skb_push(skb2, sizeof(struct udphdr)); - skb_reset_transport_header(skb2); - uh = (struct udphdr *) skb2->data; + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); uh->source = inet->sport; uh->dest = inet->dport; uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len); uh->check = 0; - /* Calculate UDP checksum if configured to do so */ + /* *BROKEN* Calculate UDP checksum if configured to do so */ if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT) - csum = udp_csum_outgoing(sk_tun, skb2); + csum = udp_csum_outgoing(sk_tun, skb); /* Debug */ if (session->send_seq) @@ -1033,7 +1029,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (session->debug & PPPOL2TP_MSG_DATA) { int i; - unsigned char *datap = skb2->data; + unsigned char *datap = skb->data; printk(KERN_DEBUG "%s: xmit:", session->name); for (i = 
0; i < data_len; i++) { @@ -1046,34 +1042,36 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) printk("\n"); } + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); + nf_reset(skb); + /* Get routing info from the tunnel socket */ - skb2->dst = sk_dst_get(sk_tun); + dst_release(skb->dst); + skb->dst = sk_dst_get(sk_tun); /* Queue the packet to IP for output */ - rc = ip_queue_xmit(skb2, 1); + len = skb->len; + rc = ip_queue_xmit(skb, 1); /* Update stats */ if (rc >= 0) { tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += skb2->len; + tunnel->stats.tx_bytes += len; session->stats.tx_packets++; - session->stats.tx_bytes += skb2->len; + session->stats.tx_bytes += len; } else { tunnel->stats.tx_errors++; session->stats.tx_errors++; } - /* Free the original skb */ - kfree_skb(skb); - return 1; -discard: - /* Free the new skb. Caller will free original skb. */ - if (skb2 != skb) - kfree_skb(skb2); abort: - return 0; + /* Free the original skb */ + kfree_skb(skb); + return 1; } /***************************************************************************** @@ -1316,12 +1314,14 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, goto err; } + sk = sock->sk; + /* Quick sanity checks */ - err = -ESOCKTNOSUPPORT; - if (sock->type != SOCK_DGRAM) { + err = -EPROTONOSUPPORT; + if (sk->sk_protocol != IPPROTO_UDP) { PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, - "tunl %hu: fd %d wrong type, got %d, expected %d\n", - tunnel_id, fd, sock->type, SOCK_DGRAM); + "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", + tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); goto err; } err = -EAFNOSUPPORT; @@ -1333,7 +1333,6 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, } err = -ENOTCONN; - sk = sock->sk; /* Check if this socket has already been prepped */ tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data; @@ -1412,12 +1411,12 @@ static struct proto pppol2tp_sk_proto = { /* socket() handler. Initialize a new struct sock. 
*/ -static int pppol2tp_create(struct socket *sock) +static int pppol2tp_create(struct net *net, struct socket *sock) { int error = -ENOMEM; struct sock *sk; - sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1); + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1); if (!sk) goto out; @@ -2044,7 +2043,7 @@ end: */ static int pppol2tp_tunnel_getsockopt(struct sock *sk, struct pppol2tp_tunnel *tunnel, - int optname, int __user *val) + int optname, int *val) { int err = 0; @@ -2067,7 +2066,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk, */ static int pppol2tp_session_getsockopt(struct sock *sk, struct pppol2tp_session *session, - int optname, int __user *val) + int optname, int *val) { int err = 0; @@ -2446,7 +2445,7 @@ static int __init pppol2tp_init(void) goto out_unregister_pppol2tp_proto; #ifdef CONFIG_PROC_FS - pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net); + pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net); if (!pppol2tp_proc) { err = -ENOMEM; goto out_unregister_pppox_proto; @@ -2471,7 +2470,7 @@ static void __exit pppol2tp_exit(void) unregister_pppox_proto(PX_PROTO_OL2TP); #ifdef CONFIG_PROC_FS - remove_proc_entry("pppol2tp", proc_net); + remove_proc_entry("pppol2tp", init_net.proc_net); #endif proto_unregister(&pppol2tp_sk_proto); } diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index f3e47d0c2b3c..c6898c1fc54d 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c @@ -73,7 +73,7 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); - int rc = 0; + int rc; lock_sock(sk); @@ -94,12 +94,9 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) break; } default: - if (pppox_protos[sk->sk_protocol]->ioctl) - rc = pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, - arg); - - break; - }; + rc = pppox_protos[sk->sk_protocol]->ioctl ? 
+ pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY; + } release_sock(sk); return rc; @@ -107,10 +104,13 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) EXPORT_SYMBOL(pppox_ioctl); -static int pppox_create(struct socket *sock, int protocol) +static int pppox_create(struct net *net, struct socket *sock, int protocol) { int rc = -EPROTOTYPE; + if (net != &init_net) + return -EAFNOSUPPORT; + if (protocol < 0 || protocol > PX_MAX_PROTO) goto out; @@ -126,7 +126,7 @@ static int pppox_create(struct socket *sock, int protocol) !try_module_get(pppox_protos[protocol]->owner)) goto out; - rc = pppox_protos[protocol]->create(sock); + rc = pppox_protos[protocol]->create(net, sock); module_put(pppox_protos[protocol]->owner); out: diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index 08d25066f051..0a42bf517465 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -290,7 +290,8 @@ static void gelic_net_release_rx_chain(struct gelic_net_card *card) descr->buf_addr = 0; dev_kfree_skb_any(descr->skb); descr->skb = NULL; - descr->dmac_cmd_status = GELIC_NET_DESCR_NOT_IN_USE; + gelic_net_set_descr_status(descr, + GELIC_NET_DESCR_NOT_IN_USE); } descr = descr->next; } while (descr != card->rx_chain.head); @@ -350,19 +351,13 @@ static int gelic_net_alloc_rx_skbs(struct gelic_net_card *card) static void gelic_net_release_tx_descr(struct gelic_net_card *card, struct gelic_net_descr *descr) { - struct sk_buff *skb; + struct sk_buff *skb = descr->skb; + BUG_ON(!(descr->data_status & (1 << GELIC_NET_TXDESC_TAIL))); - if (descr->data_status & (1 << GELIC_NET_TXDESC_TAIL)) { - /* 2nd descriptor */ - skb = descr->skb; - dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, - DMA_TO_DEVICE); - dev_kfree_skb_any(skb); - } else { - dma_unmap_single(ctodev(card), descr->buf_addr, - descr->buf_size, DMA_TO_DEVICE); - } + dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); descr->buf_addr = 0; descr->buf_size = 0; @@ -374,7 +369,7 @@ static void gelic_net_release_tx_descr(struct gelic_net_card *card, descr->skb = NULL; /* set descr status */ - descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOT_IN_USE; + gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); } /** @@ -403,23 +398,26 @@ static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop) "%s: forcing end of tx descriptor " \ "with status %x\n", __func__, status); - card->netdev_stats.tx_dropped++; + card->netdev->stats.tx_dropped++; break; case GELIC_NET_DESCR_COMPLETE: - card->netdev_stats.tx_packets++; - card->netdev_stats.tx_bytes += - tx_chain->tail->skb->len; + if (tx_chain->tail->skb) { + card->netdev->stats.tx_packets++; + card->netdev->stats.tx_bytes += + tx_chain->tail->skb->len; + } break; case GELIC_NET_DESCR_CARDOWNED: /* pending tx request */ default: /* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */ - goto out; + if (!stop) + goto out; } gelic_net_release_tx_descr(card, tx_chain->tail); - release = 1; + release ++; } out: if (!stop && release) @@ -552,7 +550,7 @@ static int gelic_net_stop(struct net_device *netdev) { struct gelic_net_card *card = netdev_priv(netdev); - netif_poll_disable(netdev); + napi_disable(&card->napi); netif_stop_queue(netdev); /* turn off DMA, force end */ @@ -589,13 +587,10 @@ gelic_net_get_next_tx_descr(struct gelic_net_card *card) { if (!card->tx_chain.head) return NULL; - /* see if we can two consecutive free descrs */ + /* see if the next descriptor is free 
*/ if (card->tx_chain.tail != card->tx_chain.head->next && gelic_net_get_descr_status(card->tx_chain.head) == - GELIC_NET_DESCR_NOT_IN_USE && - card->tx_chain.tail != card->tx_chain.head->next->next && - gelic_net_get_descr_status(card->tx_chain.head->next) == - GELIC_NET_DESCR_NOT_IN_USE ) + GELIC_NET_DESCR_NOT_IN_USE) return card->tx_chain.head; else return NULL; @@ -606,44 +601,66 @@ gelic_net_get_next_tx_descr(struct gelic_net_card *card) * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field * @descr: descriptor structure to fill out * @skb: packet to consider - * @middle: middle of frame * * fills out the command and status field of the descriptor structure, * depending on hardware checksum settings. This function assumes a wmb() * has executed before. */ static void gelic_net_set_txdescr_cmdstat(struct gelic_net_descr *descr, - struct sk_buff *skb, int middle) + struct sk_buff *skb) { - u32 eofr; - - if (middle) - eofr = 0; - else - eofr = GELIC_NET_DMAC_CMDSTAT_END_FRAME; - if (skb->ip_summed != CHECKSUM_PARTIAL) - descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | eofr; + descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; else { /* is packet ip? * if yes: tcp? udp? */ if (skb->protocol == htons(ETH_P_IP)) { if (ip_hdr(skb)->protocol == IPPROTO_TCP) descr->dmac_cmd_status = - GELIC_NET_DMAC_CMDSTAT_TCPCS | eofr; + GELIC_NET_DMAC_CMDSTAT_TCPCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; + else if (ip_hdr(skb)->protocol == IPPROTO_UDP) descr->dmac_cmd_status = - GELIC_NET_DMAC_CMDSTAT_UDPCS | eofr; + GELIC_NET_DMAC_CMDSTAT_UDPCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; else /* * the stack should checksum non-tcp and non-udp * packets on his own: NETIF_F_IP_CSUM */ descr->dmac_cmd_status = - GELIC_NET_DMAC_CMDSTAT_NOCS | eofr; + GELIC_NET_DMAC_CMDSTAT_NOCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; } } } +static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, + unsigned short tag) +{ + struct vlan_ethhdr *veth; + static unsigned int c; + + if (skb_headroom(skb) < VLAN_HLEN) { + struct sk_buff *sk_tmp = skb; + pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c); + skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN); + if (!skb) + return NULL; + dev_kfree_skb_any(sk_tmp); + } + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the top of buffer */ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + + veth->h_vlan_proto = __constant_htons(ETH_P_8021Q); + veth->h_vlan_TCI = htons(tag); + + return skb; +} + /** * gelic_net_prepare_tx_descr_v - get dma address of skb_data * @card: card structure @@ -657,66 +674,35 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card, struct gelic_net_descr *descr, struct sk_buff *skb) { - dma_addr_t buf[2]; - unsigned int vlan_len; + dma_addr_t buf; - if (skb->len < GELIC_NET_VLAN_POS) - return -EINVAL; - - memcpy(&descr->vlan, skb->data, GELIC_NET_VLAN_POS); if (card->vlan_index != -1) { - descr->vlan.h_vlan_proto = htons(ETH_P_8021Q); /* vlan 0x8100*/ - descr->vlan.h_vlan_TCI = htons(card->vlan_id[card->vlan_index]); - vlan_len = GELIC_NET_VLAN_POS + VLAN_HLEN; /* VLAN_HLEN=4 */ - } else - vlan_len = GELIC_NET_VLAN_POS; /* no vlan tag */ - - /* first descr */ - buf[0] = dma_map_single(ctodev(card), &descr->vlan, - vlan_len, DMA_TO_DEVICE); - - if (!buf[0]) { - dev_err(ctodev(card), - "dma map 1 failed (%p, %i). 
Dropping packet\n", - skb->data, vlan_len); - return -ENOMEM; + struct sk_buff *skb_tmp; + skb_tmp = gelic_put_vlan_tag(skb, + card->vlan_id[card->vlan_index]); + if (!skb_tmp) + return -ENOMEM; + skb = skb_tmp; } - descr->buf_addr = buf[0]; - descr->buf_size = vlan_len; - descr->skb = skb; /* not used */ - descr->data_status = 0; - gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */ - - /* second descr */ - card->tx_chain.head = card->tx_chain.head->next; - descr->next_descr_addr = descr->next->bus_addr; - descr = descr->next; - if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE) - /* XXX will be removed */ - dev_err(ctodev(card), "descr is not free!\n"); + buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); - buf[1] = dma_map_single(ctodev(card), skb->data + GELIC_NET_VLAN_POS, - skb->len - GELIC_NET_VLAN_POS, - DMA_TO_DEVICE); - - if (!buf[1]) { + if (!buf) { dev_err(ctodev(card), "dma map 2 failed (%p, %i). Dropping packet\n", - skb->data + GELIC_NET_VLAN_POS, - skb->len - GELIC_NET_VLAN_POS); - dma_unmap_single(ctodev(card), buf[0], vlan_len, - DMA_TO_DEVICE); + skb->data, skb->len); return -ENOMEM; } - descr->buf_addr = buf[1]; - descr->buf_size = skb->len - GELIC_NET_VLAN_POS; + descr->buf_addr = buf; + descr->buf_size = skb->len; descr->skb = skb; descr->data_status = 0; descr->next_descr_addr = 0; /* terminate hw descr */ - gelic_net_set_txdescr_cmdstat(descr, skb, 0); + gelic_net_set_txdescr_cmdstat(descr, skb); + /* bump free descriptor pointer */ + card->tx_chain.head = descr->next; return 0; } @@ -729,26 +715,18 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card, static int gelic_net_kick_txdma(struct gelic_net_card *card, struct gelic_net_descr *descr) { - int status = -ENXIO; - int count = 10; + int status = 0; if (card->tx_dma_progress) return 0; if (gelic_net_get_descr_status(descr) == GELIC_NET_DESCR_CARDOWNED) { card->tx_dma_progress = 1; - /* sometimes we need retry here */ - while (count--) { - status = lv1_net_start_tx_dma(bus_id(card), - dev_id(card), - descr->bus_addr, 0); - if (!status) - break; - } - if (!count) + status = lv1_net_start_tx_dma(bus_id(card), dev_id(card), + descr->bus_addr, 0); + if (status) dev_info(ctodev(card), "lv1_net_start_txdma failed," \ - "status=%d %#lx\n", - status, card->irq_status); + "status=%d\n", status); } return status; } @@ -763,47 +741,62 @@ static int gelic_net_kick_txdma(struct gelic_net_card *card, static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gelic_net_card *card = netdev_priv(netdev); - struct gelic_net_descr *descr = NULL; + struct gelic_net_descr *descr; int result; unsigned long flags; spin_lock_irqsave(&card->tx_dma_lock, flags); gelic_net_release_tx_chain(card, 0); - if (!skb) - goto kick; + descr = gelic_net_get_next_tx_descr(card); if (!descr) { + /* + * no more descriptors free + */ netif_stop_queue(netdev); spin_unlock_irqrestore(&card->tx_dma_lock, flags); return NETDEV_TX_BUSY; } - result = gelic_net_prepare_tx_descr_v(card, descr, skb); - - if (result) - goto error; - - card->tx_chain.head = card->tx_chain.head->next; - if (descr->prev) - descr->prev->next_descr_addr = descr->bus_addr; -kick: + result = gelic_net_prepare_tx_descr_v(card, descr, skb); + if (result) { + /* + * DMA map failed. 
As chanses are that failure + * would continue, just release skb and return + */ + card->netdev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&card->tx_dma_lock, flags); + return NETDEV_TX_OK; + } + /* + * link this prepared descriptor to previous one + * to achieve high performance + */ + descr->prev->next_descr_addr = descr->bus_addr; /* * as hardware descriptor is modified in the above lines, * ensure that the hardware sees it */ wmb(); - if (gelic_net_kick_txdma(card, card->tx_chain.tail)) - goto error; + if (gelic_net_kick_txdma(card, descr)) { + /* + * kick failed. + * release descriptors which were just prepared + */ + card->netdev->stats.tx_dropped++; + gelic_net_release_tx_descr(card, descr); + gelic_net_release_tx_descr(card, descr->next); + card->tx_chain.tail = descr->next->next; + dev_info(ctodev(card), "%s: kick failure\n", __func__); + } else { + /* OK, DMA started/reserved */ + netdev->trans_start = jiffies; + } - netdev->trans_start = jiffies; spin_unlock_irqrestore(&card->tx_dma_lock, flags); return NETDEV_TX_OK; - -error: - card->netdev_stats.tx_dropped++; - spin_unlock_irqrestore(&card->tx_dma_lock, flags); - return NETDEV_TX_LOCKED; } /** @@ -854,8 +847,8 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr, skb->ip_summed = CHECKSUM_NONE; /* update netdevice statistics */ - card->netdev_stats.rx_packets++; - card->netdev_stats.rx_bytes += skb->len; + card->netdev->stats.rx_packets++; + card->netdev->stats.rx_bytes += skb->len; /* pass skb up to stack */ netif_receive_skb(skb); @@ -895,38 +888,67 @@ static int gelic_net_decode_one_descr(struct gelic_net_card *card) (status == GELIC_NET_DESCR_FORCE_END)) { dev_info(ctodev(card), "dropping RX descriptor with state %x\n", status); - card->netdev_stats.rx_dropped++; + card->netdev->stats.rx_dropped++; goto refill; } - if ((status != GELIC_NET_DESCR_COMPLETE) && - (status != GELIC_NET_DESCR_FRAME_END)) { + if (status == GELIC_NET_DESCR_BUFFER_FULL) { + /* + * Buffer full would occur if and only if + * the frame length was longer than the size of this + * descriptor's buffer. If the frame length was equal + * to or shorter than buffer'size, FRAME_END condition + * would occur. + * Anyway this frame was longer than the MTU, + * just drop it. + */ + dev_info(ctodev(card), "overlength frame\n"); + goto refill; + } + /* + * descriptoers any other than FRAME_END here should + * be treated as error. + */ + if (status != GELIC_NET_DESCR_FRAME_END) { dev_dbg(ctodev(card), "RX descriptor with state %x\n", status); goto refill; } /* ok, we've got a packet in descr */ - gelic_net_pass_skb_up(descr, card); /* 1: skb_up sccess */ - + gelic_net_pass_skb_up(descr, card); refill: - descr->next_descr_addr = 0; /* unlink the descr */ + /* + * So that always DMAC can see the end + * of the descriptor chain to avoid + * from unwanted DMAC overrun. + */ + descr->next_descr_addr = 0; /* change the descriptor state: */ gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); - /* refill one desc - * FIXME: this can fail, but for now, just leave this - * descriptor without skb + /* + * this call can fail, but for now, just leave this + * decriptor without skb */ gelic_net_prepare_rx_descr(card, descr); + chain->head = descr; chain->tail = descr->next; + + /* + * Set this descriptor the end of the chain. + */ descr->prev->next_descr_addr = descr->bus_addr; + /* + * If dmac chain was met, DMAC stopped. 
+ * thus re-enable it + */ if (dmac_chain_ended) { - gelic_net_enable_rxdmac(card); - dev_dbg(ctodev(card), "reenable rx dma\n"); + card->rx_dma_restart_required = 1; + dev_dbg(ctodev(card), "reenable rx dma scheduled\n"); } return 1; @@ -941,47 +963,25 @@ refill: * if the quota is exceeded, but the driver has still packets. * */ -static int gelic_net_poll(struct net_device *netdev, int *budget) +static int gelic_net_poll(struct napi_struct *napi, int budget) { - struct gelic_net_card *card = netdev_priv(netdev); - int packets_to_do, packets_done = 0; - int no_more_packets = 0; - - packets_to_do = min(*budget, netdev->quota); + struct gelic_net_card *card = container_of(napi, struct gelic_net_card, napi); + struct net_device *netdev = card->netdev; + int packets_done = 0; - while (packets_to_do) { - if (gelic_net_decode_one_descr(card)) { - packets_done++; - packets_to_do--; - } else { - /* no more packets for the stack */ - no_more_packets = 1; + while (packets_done < budget) { + if (!gelic_net_decode_one_descr(card)) break; - } - } - netdev->quota -= packets_done; - *budget -= packets_done; - if (no_more_packets) { - netif_rx_complete(netdev); - gelic_net_rx_irq_on(card); - return 0; - } else - return 1; -} -/** - * gelic_net_get_stats - get interface statistics - * @netdev: interface device structure - * - * returns the interface statistics residing in the gelic_net_card struct - */ -static struct net_device_stats *gelic_net_get_stats(struct net_device *netdev) -{ - struct gelic_net_card *card = netdev_priv(netdev); + packets_done++; + } - return &card->netdev_stats; + if (packets_done < budget) { + netif_rx_complete(netdev, napi); + gelic_net_rx_irq_on(card); + } + return packets_done; } - /** * gelic_net_change_mtu - changes the MTU of an interface * @netdev: interface device structure @@ -1016,17 +1016,23 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr) if (!status) return IRQ_NONE; + if (card->rx_dma_restart_required) { + card->rx_dma_restart_required = 0; + gelic_net_enable_rxdmac(card); + } + if (status & GELIC_NET_RXINT) { gelic_net_rx_irq_off(card); - netif_rx_schedule(netdev); + netif_rx_schedule(netdev, &card->napi); } if (status & GELIC_NET_TXINT) { spin_lock_irqsave(&card->tx_dma_lock, flags); card->tx_dma_progress = 0; + gelic_net_release_tx_chain(card, 0); + /* kick outstanding tx descriptor if any */ + gelic_net_kick_txdma(card, card->tx_chain.tail); spin_unlock_irqrestore(&card->tx_dma_lock, flags); - /* start pending DMA */ - gelic_net_xmit(NULL, netdev); } return IRQ_HANDLED; } @@ -1068,7 +1074,7 @@ static int gelic_net_open_device(struct gelic_net_card *card) } result = request_irq(card->netdev->irq, gelic_net_interrupt, - IRQF_DISABLED, "gelic network", card->netdev); + IRQF_DISABLED, card->netdev->name, card->netdev); if (result) { dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n", @@ -1107,7 +1113,7 @@ static int gelic_net_open(struct net_device *netdev) card->descr, GELIC_NET_TX_DESCRIPTORS)) goto alloc_tx_failed; if (gelic_net_init_chain(card, &card->rx_chain, - card->descr + GELIC_NET_RX_DESCRIPTORS, + card->descr + GELIC_NET_TX_DESCRIPTORS, GELIC_NET_RX_DESCRIPTORS)) goto alloc_rx_failed; @@ -1121,6 +1127,8 @@ static int gelic_net_open(struct net_device *netdev) if (gelic_net_alloc_rx_skbs(card)) goto alloc_skbs_failed; + napi_enable(&card->napi); + card->tx_dma_progress = 0; card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT; @@ -1129,7 +1137,6 @@ static int gelic_net_open(struct net_device *netdev) netif_start_queue(netdev); 
netif_carrier_on(netdev); - netif_poll_enable(netdev); return 0; @@ -1141,7 +1148,6 @@ alloc_tx_failed: return -ENOMEM; } -#ifdef GELIC_NET_ETHTOOL static void gelic_net_get_drvinfo (struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -1261,7 +1267,6 @@ static struct ethtool_ops gelic_net_ethtool_ops = { .get_rx_csum = gelic_net_get_rx_csum, .set_rx_csum = gelic_net_set_rx_csum, }; -#endif /** * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout @@ -1320,18 +1325,12 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev) netdev->open = &gelic_net_open; netdev->stop = &gelic_net_stop; netdev->hard_start_xmit = &gelic_net_xmit; - netdev->get_stats = &gelic_net_get_stats; netdev->set_multicast_list = &gelic_net_set_multi; netdev->change_mtu = &gelic_net_change_mtu; /* tx watchdog */ netdev->tx_timeout = &gelic_net_tx_timeout; netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; - /* NAPI */ - netdev->poll = &gelic_net_poll; - netdev->weight = GELIC_NET_NAPI_WEIGHT; -#ifdef GELIC_NET_ETHTOOL netdev->ethtool_ops = &gelic_net_ethtool_ops; -#endif } /** @@ -1349,8 +1348,8 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) unsigned int i; int status; u64 v1, v2; + DECLARE_MAC_BUF(mac); - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &card->dev->core); spin_lock_init(&card->tx_dma_lock); @@ -1358,6 +1357,9 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) gelic_net_setup_netdev_ops(netdev); + netif_napi_add(netdev, &card->napi, + gelic_net_poll, GELIC_NET_NAPI_WEIGHT); + netdev->features = NETIF_F_IP_CSUM; status = lv1_net_control(bus_id(card), dev_id(card), @@ -1372,10 +1374,8 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) v1 <<= 16; memcpy(addr.sa_data, &v1, ETH_ALEN); memcpy(netdev->dev_addr, addr.sa_data, ETH_ALEN); - dev_info(ctodev(card), "MAC addr %02x:%02x:%02x:%02x:%02x:%02x\n", - netdev->dev_addr[0], netdev->dev_addr[1], - netdev->dev_addr[2], netdev->dev_addr[3], - netdev->dev_addr[4], netdev->dev_addr[5]); + dev_info(ctodev(card), "MAC addr %s\n", + print_mac(mac, netdev->dev_addr)); card->vlan_index = -1; /* no vlan */ for (i = 0; i < GELIC_NET_VLAN_MAX; i++) { @@ -1398,8 +1398,11 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) dev_dbg(ctodev(card), "vlan_id:%d, %lx\n", i, v1); } } - if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) + + if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) { card->vlan_index = GELIC_NET_VLAN_WIRED - 1; + netdev->hard_header_len += VLAN_HLEN; + } status = register_netdev(netdev); if (status) { diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h index 5e1c28654e16..968560269a3b 100644 --- a/drivers/net/ps3_gelic_net.h +++ b/drivers/net/ps3_gelic_net.h @@ -28,21 +28,12 @@ #ifndef _GELIC_NET_H #define _GELIC_NET_H -#define GELIC_NET_DRV_NAME "Gelic Network Driver" -#define GELIC_NET_DRV_VERSION "1.0" - -#define GELIC_NET_ETHTOOL /* use ethtool */ - -/* ioctl */ -#define GELIC_NET_GET_MODE (SIOCDEVPRIVATE + 0) -#define GELIC_NET_SET_MODE (SIOCDEVPRIVATE + 1) - /* descriptors */ #define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */ #define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */ -#define GELIC_NET_MAX_MTU 2308 -#define GELIC_NET_MIN_MTU 64 +#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN +#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN #define GELIC_NET_RXBUF_ALIGN 128 #define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */ #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ @@ -90,7 +81,8 @@ enum gelic_net_int1_status { */ #define GELIC_NET_RXVLNPKT 
0x00200000 /* VLAN packet */ /* bit 20..16 reserved */ -#define GELIC_NET_RXRECNUM 0x0000ff00 /* reception receipt number */ +#define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */ +#define GELIC_NET_RXRRECNUM_SHIFT 8 /* bit 7..0 reserved */ #define GELIC_NET_TXDESC_TAIL 0 @@ -133,19 +125,19 @@ enum gelic_net_int1_status { * interrupt status */ #define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */ -#define GELIC_NET_DMAC_CMDSTAT_NOT_IN_USE 0xb0000000 #define GELIC_NET_DESCR_IND_PROC_SHIFT 28 #define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff enum gelic_net_descr_status { - GELIC_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ + GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */ + GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */ GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */ GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ - GELIC_NET_DESCR_NOT_IN_USE /* any other value */ + GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */ }; /* for lv1_net_control */ #define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001 @@ -202,6 +194,7 @@ struct gelic_net_descr_chain { struct gelic_net_card { struct net_device *netdev; + struct napi_struct napi; /* * hypervisor requires irq_status should be * 8 bytes aligned, but u64 member is @@ -216,10 +209,10 @@ struct gelic_net_card { struct gelic_net_descr_chain tx_chain; struct gelic_net_descr_chain rx_chain; + int rx_dma_restart_required; /* gurad dmac descriptor chain*/ spinlock_t chain_lock; - struct net_device_stats netdev_stats; int rx_csum; /* guard tx_dma_progress */ spinlock_t tx_dma_lock; diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 8be8be451ada..30adf726743c 100755..100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -31,7 +31,6 @@ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/mm.h> @@ -82,7 +81,7 @@ typedef enum { } PHY_DEVICE_et; typedef struct { - PHY_DEVICE_et phyDevice; + PHY_DEVICE_et phyDevice; u32 phyIdOUI; u16 phyIdModel; char *name; @@ -331,7 +330,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; @@ -885,14 +884,14 @@ static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) u16 reg; /* Enable Auto-negotiation sense */ - ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, + ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, PHYAddr[qdev->mac_index]); reg |= PETBI_TBI_AUTO_SENSE; - ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, + ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, - PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, @@ -946,7 +945,7 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); /* Write new PHYAD w/bit 5 set */ ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); - /* + /* * Disable diagnostic mode bit 2 = 0 *
Power up device bit 11 = 0 * Link up (on) and activity (blink) @@ -956,18 +955,18 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) ql_mii_write_reg(qdev, 0x1c, 0xfaf0); } -static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, +static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, u16 phyIdReg0, u16 phyIdReg1) { PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; - u32 oui; + u32 oui; u16 model; - int i; + int i; if (phyIdReg0 == 0xffff) { return result; } - + if (phyIdReg1 == 0xffff) { return result; } @@ -985,7 +984,7 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, printk(KERN_INFO "%s: Phy: %s\n", qdev->ndev->name, PHY_DEVICES[i].name); - + break; } } @@ -1034,7 +1033,7 @@ static int ql_is_full_dup(struct ql3_adapter *qdev) { if (ql_mii_read_reg(qdev, 0x1A, &reg)) return 0; - + return ((reg & 0x0080) && (reg & 0x1000)) != 0; } case PHY_VITESSE_VSC8211: @@ -1083,19 +1082,19 @@ static int PHY_Setup(struct ql3_adapter *qdev) /* Check if we have a Agere PHY */ if ((reg1 == 0xffff) || (reg2 == 0xffff)) { - /* Determine which MII address we should be using + /* Determine which MII address we should be using determined by the index of the card */ if (qdev->mac_index == 0) { miiAddr = MII_AGERE_ADDR_1; } else { miiAddr = MII_AGERE_ADDR_2; } - + err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", qdev->ndev->name); - return err; + return err; } err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); @@ -1104,9 +1103,9 @@ static int PHY_Setup(struct ql3_adapter *qdev) qdev->ndev->name); return err; } - + /* We need to remember to initialize the Agere PHY */ - agereAddrChangeNeeded = true; + agereAddrChangeNeeded = true; } /* Determine the particular PHY we have on board to apply @@ -1115,7 +1114,7 @@ static int PHY_Setup(struct ql3_adapter *qdev) if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { /* need this here so address gets changed */ - phyAgereSpecificInit(qdev, miiAddr); + phyAgereSpecificInit(qdev, miiAddr); } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); return -EIO; @@ -1428,7 +1427,7 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) static void ql_phy_reset_ex(struct ql3_adapter *qdev) { - ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, + ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, PHYAddr[qdev->mac_index]); } @@ -1439,7 +1438,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) if(qdev->phyType == PHY_AGERE_ET1011C) { /* turn off external loopback */ - ql_mii_write_reg(qdev, 0x13, 0x0000); + ql_mii_write_reg(qdev, 0x13, 0x0000); } if(qdev->mac_index == 0) @@ -1453,23 +1452,23 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) portConfiguration = PORT_CONFIG_DEFAULT; /* Set the 1000 advertisements */ - ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, + ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, PHYAddr[qdev->mac_index]); reg &= ~PHY_GIG_ALL_PARAMS; - if(portConfiguration & + if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED & PORT_CONFIG_1000MB_SPEED) { reg |= PHY_GIG_ADV_1000F; } - - if(portConfiguration & + + if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED & PORT_CONFIG_1000MB_SPEED) { reg |= PHY_GIG_ADV_1000H; } - ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, + ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, PHYAddr[qdev->mac_index]); /* Set the 10/100 & pause negotiation advertisements
*/ @@ -1483,7 +1482,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { if(portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100F; - + if(portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10F; } @@ -1491,22 +1490,22 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { if(portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100H; - + if(portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10H; } if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { - reg |= 1; + reg |= 1; } - ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, + ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, PHYAddr[qdev->mac_index]); ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]); - - ql_mii_write_reg_ex(qdev, CONTROL_REG, + + ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, PHYAddr[qdev->mac_index]); } @@ -1661,7 +1660,7 @@ static void ql_link_state_machine(struct ql3_adapter *qdev) "%s: Reset in progress, skip processing link " "state.\n", qdev->ndev->name); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return; } @@ -1753,7 +1752,7 @@ static int ql_mii_setup(struct ql3_adapter *qdev) return -1; if (qdev->device_id == QL3032_DEVICE_ID) - ql_write_page0_reg(qdev, + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 0x0f00000); /* Divide 125MHz clock by 28 to meet PHY timing requirements */ @@ -1866,8 +1865,6 @@ static void ql_get_drvinfo(struct net_device *ndev, strncpy(drvinfo->version, ql3xxx_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } @@ -1904,7 +1901,6 @@ static void ql_get_pauseparam(struct net_device *ndev, static const struct ethtool_ops ql3xxx_ethtool_ops = { .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, - .get_perm_addr = ethtool_op_get_perm_addr, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, @@ -1940,7 +1936,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; @@ -2048,14 +2044,14 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); } - + tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; /* Check the transmit response flags for any errors */ if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); - qdev->stats.tx_errors++; + qdev->ndev->stats.tx_errors++; retval = -EIO; goto frame_not_sent; } @@ -2063,7 +2059,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, if(tx_cb->seg_count == 0) { printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); - qdev->stats.tx_errors++; + qdev->ndev->stats.tx_errors++; retval = -EIO; goto invalid_seg_count; } @@ -2082,8 +2078,8 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, PCI_DMA_TODEVICE); } } - qdev->stats.tx_packets++; - qdev->stats.tx_bytes += tx_cb->skb->len; +
qdev->ndev->stats.tx_packets++; + qdev->ndev->stats.tx_bytes += tx_cb->skb->len; frame_not_sent: dev_kfree_skb_irq(tx_cb->skb); @@ -2112,13 +2108,13 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) /* * The difference between 3022 and 3032 for inbound completions: - * 3022 uses two buffers per completion. The first buffer contains - * (some) header info, the second the remainder of the headers plus - * the data. For this chip we reserve some space at the top of the - * receive buffer so that the header info in buffer one can be - * prepended to the buffer two. Buffer two is the sent up while + * 3022 uses two buffers per completion. The first buffer contains + * (some) header info, the second the remainder of the headers plus + * the data. For this chip we reserve some space at the top of the + * receive buffer so that the header info in buffer one can be + * prepended to the buffer two. Buffer two is the sent up while * buffer one is returned to the hardware to be reused. - * 3032 receives all of it's data and headers in one buffer for a + * 3032 receives all of it's data and headers in one buffer for a * simpler process. 3032 also supports checksum verification as * can be seen in ql_process_macip_rx_intr(). */ @@ -2142,8 +2138,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, lrg_buf_cb2 = ql_get_lbuf(qdev); skb = lrg_buf_cb2->skb; - qdev->stats.rx_packets++; - qdev->stats.rx_bytes += length; + qdev->ndev->stats.rx_packets++; + qdev->ndev->stats.rx_bytes += length; skb_put(skb, length); pci_unmap_single(qdev->pdev, @@ -2209,13 +2205,13 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, skb_push(skb2, size), size); } else { u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); - if (checksum & - (IB_IP_IOCB_RSP_3032_ICE | - IB_IP_IOCB_RSP_3032_CE)) { + if (checksum & + (IB_IP_IOCB_RSP_3032_ICE | + IB_IP_IOCB_RSP_3032_CE)) { printk(KERN_ERR "%s: Bad checksum for this %s packet, checksum = %x.\n", __func__, - ((checksum & + ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : "UDP"),checksum); } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || @@ -2227,8 +2223,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, skb2->protocol = eth_type_trans(skb2, qdev->ndev); netif_receive_skb(skb2); - qdev->stats.rx_packets++; - qdev->stats.rx_bytes += length; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; ndev->last_rx = jiffies; lrg_buf_cb2->skb = NULL; @@ -2249,6 +2245,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, qdev->rsp_consumer_index) && (work_done < work_to_do)) { net_rsp = qdev->rsp_current; + rmb(); + /* + * Fix 4032 chipe undocumented "feature" where bit-8 is set if the + * inbound completion is for a VLAN. 
+ */ + if (qdev->device_id == QL3032_DEVICE_ID) + net_rsp->opcode &= 0x7f; switch (net_rsp->opcode) { case OPCODE_OB_MAC_IOCB_FN0: @@ -2304,10 +2307,10 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, return work_done; } -static int ql_poll(struct net_device *ndev, int *budget) +static int ql_poll(struct napi_struct *napi, int budget) { - struct ql3_adapter *qdev = netdev_priv(ndev); - int work_to_do = min(*budget, ndev->quota); + struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); + struct net_device *ndev = qdev->ndev; int rx_cleaned = 0, tx_cleaned = 0; unsigned long hw_flags; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; @@ -2315,16 +2318,13 @@ static int ql_poll(struct net_device *ndev, int *budget) if (!netif_carrier_ok(ndev)) goto quit_polling; - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do); - *budget -= rx_cleaned; - ndev->quota -= rx_cleaned; + ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); - if( tx_cleaned + rx_cleaned != work_to_do || + if (tx_cleaned + rx_cleaned != budget || !netif_running(ndev)) { quit_polling: - netif_rx_complete(ndev); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + __netif_rx_complete(ndev, napi); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, @@ -2332,9 +2332,8 @@ quit_polling: spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ql_enable_interrupts(qdev); - return 0; } - return 1; + return tx_cleaned + rx_cleaned; } static irqreturn_t ql3xxx_isr(int irq, void *dev_id) @@ -2384,8 +2383,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); - if (likely(netif_rx_schedule_prep(ndev))) { - __netif_rx_schedule(ndev); + if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) { + __netif_rx_schedule(ndev, &qdev->napi); } } else { return IRQ_NONE; @@ -2395,12 +2394,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) } /* - * Get the total number of segments needed for the + * Get the total number of segments needed for the * given number of fragments. This is necessary because * outbound address lists (OAL) will be used when more than - * two frags are given. Each address list has 5 addr/len + * two frags are given. Each address list has 5 addr/len * pairs. The 5th pair in each AOL is used to point to - * the next AOL if more frags are coming. + * the next AOL if more frags are coming. * That is why the frags:segment count ratio is not linear. 
*/ static int ql_get_seg_count(struct ql3_adapter *qdev, @@ -2477,12 +2476,12 @@ static int ql_send_map(struct ql3_adapter *qdev, err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); return NETDEV_TX_BUSY; } - + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); @@ -2512,7 +2511,7 @@ static int ql_send_map(struct ql3_adapter *qdev, err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", + printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", qdev->ndev->name, err); goto map_error; } @@ -2538,7 +2537,7 @@ static int ql_send_map(struct ql3_adapter *qdev, err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", qdev->ndev->name, err); goto map_error; } @@ -2559,10 +2558,10 @@ static int ql_send_map(struct ql3_adapter *qdev, map_error: /* A PCI mapping failed and now we will need to back out - * We need to traverse through the oal's and associated pages which + * We need to traverse through the oal's and associated pages which * have been mapped and now we must unmap them to clean up properly */ - + seg = 1; oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal = tx_cb->oal; @@ -2600,11 +2599,11 @@ map_error: * The difference between 3022 and 3032 sends: * 3022 only supports a simple single segment transmission. * 3032 supports checksumming and scatter/gather lists (fragments). - * The 3032 supports sglists by using the 3 addr/len pairs (ALP) - * in the IOCB plus a chain of outbound address lists (OAL) that - * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) - * will used to point to an OAL when more ALP entries are required. - * The IOCB is always the top of the chain followed by one or more + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) + * in the IOCB plus a chain of outbound address lists (OAL) that + * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) + * will used to point to an OAL when more ALP entries are required. + * The IOCB is always the top of the chain followed by one or more * OALs (when necessary). 
*/ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) @@ -2618,14 +2617,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) if (unlikely(atomic_read(&qdev->tx_count) < 2)) { return NETDEV_TX_BUSY; } - + tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; if((tx_cb->seg_count = ql_get_seg_count(qdev, (skb_shinfo(skb)->nr_frags))) == -1) { printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); return NETDEV_TX_OK; } - + mac_iocb_ptr = tx_cb->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); mac_iocb_ptr->opcode = qdev->mac_ob_opcode; @@ -2637,12 +2636,12 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) if (qdev->device_id == QL3032_DEVICE_ID && skb->ip_summed == CHECKSUM_PARTIAL) ql_hw_csum_setup(skb, mac_iocb_ptr); - + if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); return NETDEV_TX_BUSY; } - + wmb(); qdev->req_producer_index++; if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) @@ -2740,7 +2739,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); return -ENOMEM; } - + qdev->lrg_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, @@ -3556,6 +3555,7 @@ static void ql_display_dev_info(struct net_device *ndev) { struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); struct pci_dev *pdev = qdev->pdev; + DECLARE_MAC_BUF(mac); printk(KERN_INFO PFX "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", @@ -3581,10 +3581,8 @@ static void ql_display_dev_info(struct net_device *ndev) if (netif_msg_probe(qdev)) printk(KERN_INFO PFX - "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", - ndev->name, ndev->dev_addr[0], ndev->dev_addr[1], - ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4], - ndev->dev_addr[5]); + "%s: MAC address %s\n", + ndev->name, print_mac(mac, ndev->dev_addr)); } static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) @@ -3611,7 +3609,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) del_timer_sync(&qdev->adapter_timer); - netif_poll_disable(ndev); + napi_disable(&qdev->napi); if (do_reset) { int soft_reset; @@ -3699,7 +3697,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev) mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); - netif_poll_enable(ndev); + napi_enable(&qdev->napi); ql_enable_interrupts(qdev); return 0; @@ -3752,12 +3750,6 @@ static int ql3xxx_open(struct net_device *ndev) return (ql_adapter_up(qdev)); } -static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev) -{ - struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv; - return &qdev->stats; -} - static void ql3xxx_set_multicast_list(struct net_device *ndev) { /* @@ -4010,7 +4002,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, goto err_out_free_regions; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, ndev); @@ -4048,15 +4039,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, ndev->open = ql3xxx_open; ndev->hard_start_xmit = ql3xxx_send; ndev->stop = ql3xxx_close; - ndev->get_stats = ql3xxx_get_stats; ndev->set_multicast_list = ql3xxx_set_multicast_list; SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); ndev->set_mac_address = ql3xxx_set_mac_address; ndev->tx_timeout = ql3xxx_tx_timeout; ndev->watchdog_timeo = 5 * HZ; - ndev->poll = &ql_poll; - ndev->weight = 64; + netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 
ndev->irq = pdev->irq; diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 4a832c46c274..fbcb0b949639 100755..100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h @@ -556,7 +556,7 @@ enum { IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, IP_ADDR_INDEX_REG_6 = 0x0008, IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, - IP_ADDR_INDEX_REG_E = 0x0040, + IP_ADDR_INDEX_REG_E = 0x0040, }; enum { QL3032_PORT_CONTROL_DS = 0x0001, @@ -1112,7 +1112,7 @@ struct ql_rcv_buf_cb { * OAL has 5 entries: * 1 thru 4 point to frags * fifth points to next oal. - */ + */ #define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) struct oal_entry { @@ -1137,7 +1137,7 @@ struct ql_tx_buf_cb { struct ob_mac_iocb_req *queue_entry ; int seg_count; struct oal *oal; - struct map_list map[MAX_SKB_FRAGS+1]; + struct map_list map[MAX_SKB_FRAGS+1]; }; /* definitions for type field */ @@ -1175,6 +1175,8 @@ struct ql3_adapter { struct pci_dev *pdev; struct net_device *ndev; /* Parent NET device */ + struct napi_struct napi; + /* Hardware information */ u8 chip_rev_id; u8 pci_slot; @@ -1281,7 +1283,6 @@ struct ql3_adapter { u32 update_ob_opcode; /* Opcode to use for updating NCB */ u32 mb_bit_mask; /* MA Bits mask to use on transmission */ u32 numPorts; - struct net_device_stats stats; struct workqueue_struct *workqueue; struct delayed_work reset_work; struct delayed_work tx_timeout_work; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bb6896ae3151..419c00cbe6e9 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -384,6 +384,7 @@ struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; /* Index of PCI device */ struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; /* statistics of net device */ spinlock_t lock; /* spin lock flag */ u32 msg_enable; @@ -443,13 +444,13 @@ static void rtl_set_rx_mode(struct net_device *dev); static void rtl8169_tx_timeout(struct net_device *dev); static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, - void __iomem *); + void __iomem *, u32 budget); static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); static void rtl8169_down(struct net_device *dev); static void rtl8169_rx_clear(struct rtl8169_private *tp); #ifdef CONFIG_R8169_NAPI -static int rtl8169_poll(struct net_device *dev, int *budget); +static int rtl8169_poll(struct napi_struct *napi, int budget); #endif static const unsigned int rtl8169_rx_config = @@ -725,6 +726,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (tp->mac_version == RTL_GIGA_MAC_VER_12) { + /* Vendor specific (0x1f) and reserved (0x0e) MII registers. 
*/ + mdio_write(ioaddr, 0x1f, 0x0000); + mdio_write(ioaddr, 0x0e, 0x0000); + } + tp->phy_auto_nego_reg = auto_nego; tp->phy_1000_ctrl_reg = giga_ctrl; @@ -970,24 +977,29 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { }; struct rtl8169_counters { - u64 tx_packets; - u64 rx_packets; - u64 tx_errors; - u32 rx_errors; - u16 rx_missed; - u16 align_errors; - u32 tx_one_collision; - u32 tx_multi_collision; - u64 rx_unicast; - u64 rx_broadcast; - u32 rx_multicast; - u16 tx_aborted; - u16 tx_underun; + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; }; -static int rtl8169_get_stats_count(struct net_device *dev) +static int rtl8169_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(rtl8169_gstrings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } } static void rtl8169_get_ethtool_stats(struct net_device *dev, @@ -1054,19 +1066,15 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .set_msglevel = rtl8169_set_msglevel, .get_rx_csum = rtl8169_get_rx_csum, .set_rx_csum = rtl8169_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_regs = rtl8169_get_regs, .get_wol = rtl8169_get_wol, .set_wol = rtl8169_set_wol, .get_strings = rtl8169_get_strings, - .get_stats_count = rtl8169_get_stats_count, + .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, @@ -1223,7 +1231,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev) return; } - /* phy config for RTL8169s mac_version C chip */ + if ((tp->mac_version != RTL_GIGA_MAC_VER_02) && + (tp->mac_version != RTL_GIGA_MAC_VER_03)) + return; + mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1 mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000 mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7 @@ -1497,7 +1508,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->dev = dev; @@ -1648,8 +1658,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->set_mac_address = rtl_set_mac_address; #ifdef CONFIG_R8169_NAPI - dev->poll = rtl8169_poll; - dev->weight = R8169_NAPI_WEIGHT; + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); #endif #ifdef CONFIG_R8169_VLAN @@ -1769,6 +1778,10 @@ static int rtl8169_open(struct net_device *dev) if (retval < 0) goto err_release_ring_2; +#ifdef CONFIG_R8169_NAPI + napi_enable(&tp->napi); +#endif + rtl_hw_start(dev); rtl8169_request_timer(dev); @@ -1910,7 +1923,11 @@ static void rtl_hw_start_8169(struct net_device *dev) rtl_set_rx_max_size(ioaddr); - rtl_set_rx_tx_config_registers(tp); + if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || + (tp->mac_version == RTL_GIGA_MAC_VER_02) || + (tp->mac_version == RTL_GIGA_MAC_VER_03) || + (tp->mac_version == RTL_GIGA_MAC_VER_04)) + rtl_set_rx_tx_config_registers(tp); tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; @@ -1933,6 +1950,14 @@ static void rtl_hw_start_8169(struct net_device 
*dev) rtl_set_rx_tx_desc_registers(tp, ioaddr); + if ((tp->mac_version != RTL_GIGA_MAC_VER_01) && + (tp->mac_version != RTL_GIGA_MAC_VER_02) && + (tp->mac_version != RTL_GIGA_MAC_VER_03) && + (tp->mac_version != RTL_GIGA_MAC_VER_04)) { + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + } + RTL_W8(Cfg9346, Cfg9346_Lock); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ @@ -1947,8 +1972,6 @@ static void rtl_hw_start_8169(struct net_device *dev) /* Enable all known interrupts by setting the interrupt mask. */ RTL_W16(IntrMask, tp->intr_event); - - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); } static void rtl_hw_start_8168(struct net_device *dev) @@ -2064,7 +2087,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) if (ret < 0) goto out; - netif_poll_enable(dev); +#ifdef CONFIG_R8169_NAPI + napi_enable(&tp->napi); +#endif rtl_hw_start(dev); @@ -2256,11 +2281,15 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) synchronize_irq(dev->irq); /* Wait for any pending NAPI task to complete */ - netif_poll_disable(dev); +#ifdef CONFIG_R8169_NAPI + napi_disable(&tp->napi); +#endif rtl8169_irq_mask_and_ack(ioaddr); - netif_poll_enable(dev); +#ifdef CONFIG_R8169_NAPI + napi_enable(&tp->napi); +#endif } static void rtl8169_reinit_task(struct work_struct *work) @@ -2304,7 +2333,7 @@ static void rtl8169_reset_task(struct work_struct *work) rtl8169_wait_for_quiescence(dev); - rtl8169_rx_interrupt(dev, tp, tp->mmio_addr); + rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0); rtl8169_tx_clear(tp); if (tp->dirty_rx == tp->cur_rx) { @@ -2562,6 +2591,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev, (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { netif_wake_queue(dev); } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). 
-- FR + */ + smp_rmb(); + if (tp->cur_tx != dirty_tx) + RTL_W8(TxPoll, NPQ); } } @@ -2609,14 +2647,14 @@ out: static int rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, - void __iomem *ioaddr) + void __iomem *ioaddr, u32 budget) { unsigned int cur_rx, rx_left; unsigned int delta, count; cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; - rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); + rx_left = rtl8169_rx_quota(rx_left, budget); for (; rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; @@ -2761,20 +2799,22 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) rtl8169_check_link_status(dev, tp, ioaddr); #ifdef CONFIG_R8169_NAPI - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; - - if (likely(netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); - else if (netif_msg_intr(tp)) { - printk(KERN_INFO "%s: interrupt %04x taken in poll\n", - dev->name, status); + if (status & tp->napi_event) { + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); + tp->intr_mask = ~tp->napi_event; + + if (likely(netif_rx_schedule_prep(dev, &tp->napi))) + __netif_rx_schedule(dev, &tp->napi); + else if (netif_msg_intr(tp)) { + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } } break; #else /* Rx interrupt */ if (status & (RxOK | RxOverflow | RxFIFOOver)) - rtl8169_rx_interrupt(dev, tp, ioaddr); + rtl8169_rx_interrupt(dev, tp, ioaddr, ~(u32)0); /* Tx interrupt */ if (status & (TxOK | TxErr)) @@ -2797,20 +2837,18 @@ out: } #ifdef CONFIG_R8169_NAPI -static int rtl8169_poll(struct net_device *dev, int *budget) +static int rtl8169_poll(struct napi_struct *napi, int budget) { - unsigned int work_done, work_to_do = min(*budget, dev->quota); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->mmio_addr; + int work_done; - work_done = rtl8169_rx_interrupt(dev, tp, ioaddr); + work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); rtl8169_tx_interrupt(dev, tp, ioaddr); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done < work_to_do) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); tp->intr_mask = 0xffff; /* * 20040426: the barrier is not strictly required but the @@ -2822,7 +2860,7 @@ static int rtl8169_poll(struct net_device *dev, int *budget) RTL_W16(IntrMask, tp->intr_event); } - return (work_done >= work_to_do); + return work_done; } #endif @@ -2851,7 +2889,7 @@ core_down: synchronize_irq(dev->irq); if (!poll_locked) { - netif_poll_disable(dev); + napi_disable(&tp->napi); poll_locked++; } @@ -2889,8 +2927,6 @@ static int rtl8169_close(struct net_device *dev) free_irq(dev->irq, dev); - netif_poll_enable(dev); - pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index df6b73872fdb..e7fd08adbbac 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -53,7 +53,6 @@ struct rionet_private { struct rio_mport *mport; struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; - struct net_device_stats stats; int rx_slot; int tx_slot; int tx_cnt; @@ -91,12 +90,6 @@ static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES]; #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 
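/*
 * Illustrative sketch, not part of the diff above: the stats conversion
 * applied here to rionet (and to rrunner, qla3xxx, 3c501, ...).  Drivers
 * used to keep a private struct net_device_stats plus a get_stats hook;
 * struct net_device now carries its own "stats" member and the core
 * installs a default get_stats that returns &dev->stats, so both the
 * private copy and the hook can be deleted.  Names prefixed example_ are
 * invented for illustration only.
 */
static int example_rx_one(struct net_device *dev, unsigned int len)
{
	/* before:  rnet->stats.rx_packets++;  rnet->stats.rx_bytes += len; */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	return 0;
}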
#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) -static struct net_device_stats *rionet_stats(struct net_device *ndev) -{ - struct rionet_private *rnet = ndev->priv; - return &rnet->stats; -} - static int rionet_rx_clean(struct net_device *ndev) { int i; @@ -120,15 +113,15 @@ static int rionet_rx_clean(struct net_device *ndev) error = netif_rx(rnet->rx_skb[i]); if (error == NET_RX_DROP) { - rnet->stats.rx_dropped++; + ndev->stats.rx_dropped++; } else if (error == NET_RX_BAD) { if (netif_msg_rx_err(rnet)) printk(KERN_WARNING "%s: bad rx packet\n", DRV_NAME); - rnet->stats.rx_errors++; + ndev->stats.rx_errors++; } else { - rnet->stats.rx_packets++; - rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE; } } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); @@ -163,8 +156,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); rnet->tx_skb[rnet->tx_slot] = skb; - rnet->stats.tx_packets++; - rnet->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; if (++rnet->tx_cnt == RIONET_TX_RING_SIZE) netif_stop_queue(ndev); @@ -439,6 +432,7 @@ static int rionet_setup_netdev(struct rio_mport *mport) struct net_device *ndev = NULL; struct rionet_private *rnet; u16 device_id; + DECLARE_MAC_BUF(mac); /* Allocate our net_device structure */ ndev = alloc_etherdev(sizeof(struct rionet_private)); @@ -466,13 +460,10 @@ static int rionet_setup_netdev(struct rio_mport *mport) ndev->open = &rionet_open; ndev->hard_start_xmit = &rionet_start_xmit; ndev->stop = &rionet_close; - ndev->get_stats = &rionet_stats; ndev->mtu = RIO_MAX_MSG_SIZE - 14; ndev->features = NETIF_F_LLTX; SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); - SET_MODULE_OWNER(ndev); - spin_lock_init(&rnet->lock); spin_lock_init(&rnet->tx_lock); @@ -482,13 +473,12 @@ static int rionet_setup_netdev(struct rio_mport *mport) if (rc != 0) goto out; - printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + printk("%s: %s %s Version %s, MAC %s\n", ndev->name, DRV_NAME, DRV_DESC, DRV_VERSION, - ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], - ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + print_mac(mac, ndev->dev_addr)); out: return rc; diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 5c2e41fac6d2..19152f54ef2b 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c @@ -109,7 +109,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev, rrpriv = netdev_priv(dev); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, "rrunner")) { @@ -127,7 +126,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev, dev->open = &rr_open; dev->hard_start_xmit = &rr_start_xmit; dev->stop = &rr_close; - dev->get_stats = &rr_get_stats; dev->do_ioctl = &rr_ioctl; dev->base_addr = pci_resource_start(pdev, 0); @@ -522,7 +520,7 @@ static int __devinit rr_init(struct net_device *dev) struct rr_regs __iomem *regs; struct eeprom *hw = NULL; u32 sram_size, rev; - int i; + DECLARE_MAC_BUF(mac); rrpriv = netdev_priv(dev); regs = rrpriv->regs; @@ -560,11 +558,7 @@ static int __devinit rr_init(struct net_device *dev) *(u32 *)(dev->dev_addr+2) = htonl(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA[4])); - printk(" MAC: "); - - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x\n", dev->dev_addr[i]); + printk(" MAC: %s\n", print_mac(mac, dev->dev_addr)); sram_size = 
rr_read_eeprom_word(rrpriv, (void *)8); printk(" SRAM size 0x%06x\n", sram_size); @@ -809,7 +803,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) case E_CON_REJ: printk(KERN_WARNING "%s: Connection rejected\n", dev->name); - rrpriv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; break; case E_CON_TMOUT: printk(KERN_WARNING "%s: Connection timeout\n", @@ -818,7 +812,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) case E_DISC_ERR: printk(KERN_WARNING "%s: HIPPI disconnect error\n", dev->name); - rrpriv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; break; case E_INT_PRTY: printk(KERN_ERR "%s: HIPPI Internal Parity error\n", @@ -834,7 +828,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) case E_TX_LINK_DROP: printk(KERN_WARNING "%s: Link lost during transmit\n", dev->name); - rrpriv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT, ®s->HostCtrl); wmb(); @@ -974,7 +968,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) printk("len %x, mode %x\n", pkt_len, desc->mode); #endif if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ - rrpriv->stats.rx_dropped++; + dev->stats.rx_dropped++; goto defer; } @@ -987,7 +981,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) skb = alloc_skb(pkt_len, GFP_ATOMIC); if (skb == NULL){ printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len); - rrpriv->stats.rx_dropped++; + dev->stats.rx_dropped++; goto defer; } else { pci_dma_sync_single_for_cpu(rrpriv->pci_dev, @@ -1025,7 +1019,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) } else { printk("%s: Out of memory, deferring " "packet\n", dev->name); - rrpriv->stats.rx_dropped++; + dev->stats.rx_dropped++; goto defer; } } @@ -1034,8 +1028,8 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) netif_rx(skb); /* send it up */ dev->last_rx = jiffies; - rrpriv->stats.rx_packets++; - rrpriv->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } defer: desc->mode = 0; @@ -1103,8 +1097,8 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id) desc = &(rrpriv->tx_ring[txcon]); skb = rrpriv->tx_skbuff[txcon]; - rrpriv->stats.tx_packets++; - rrpriv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, skb->len, @@ -1492,16 +1486,6 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) } -static struct net_device_stats *rr_get_stats(struct net_device *dev) -{ - struct rr_private *rrpriv; - - rrpriv = netdev_priv(dev); - - return(&rrpriv->stats); -} - - /* * Read the firmware out of the EEPROM and put it into the SRAM * (or from user space - later) diff --git a/drivers/net/rrunner.h b/drivers/net/rrunner.h index 9f3e050c4dc6..6a79825bc8cf 100644 --- a/drivers/net/rrunner.h +++ b/drivers/net/rrunner.h @@ -819,7 +819,6 @@ struct rr_private u32 tx_full; u32 fw_rev; volatile short fw_running; - struct net_device_stats stats; struct pci_dev *pci_dev; }; @@ -834,7 +833,6 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id); static int rr_open(struct net_device *dev); static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev); static int rr_close(struct net_device *dev); -static struct net_device_stats *rr_get_stats(struct net_device *dev); static int 
rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static unsigned int rr_read_eeprom(struct rr_private *rrpriv, unsigned long offset, diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 4cb710bbe729..aef66e2d98d2 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -220,7 +220,7 @@ struct XENA_dev_config { u64 scheduled_int_ctrl; #define SCHED_INT_CTRL_TIMER_EN BIT(0) #define SCHED_INT_CTRL_ONE_SHOT BIT(1) -#define SCHED_INT_CTRL_INT2MSI TBD +#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6) #define SCHED_INT_PERIOD TBD u64 txreqtimeout; @@ -325,33 +325,66 @@ struct XENA_dev_config { #define TXDMA_TPA_INT BIT(5) #define TXDMA_SM_INT BIT(6) u64 pfc_err_reg; +#define PFC_ECC_SG_ERR BIT(7) +#define PFC_ECC_DB_ERR BIT(15) +#define PFC_SM_ERR_ALARM BIT(23) +#define PFC_MISC_0_ERR BIT(31) +#define PFC_MISC_1_ERR BIT(32) +#define PFC_PCIX_ERR BIT(39) u64 pfc_err_mask; u64 pfc_err_alarm; u64 tda_err_reg; +#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8) +#define TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8) +#define TDA_SM0_ERR_ALARM BIT(22) +#define TDA_SM1_ERR_ALARM BIT(23) +#define TDA_PCIX_ERR BIT(39) u64 tda_err_mask; u64 tda_err_alarm; u64 pcc_err_reg; -#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8) +#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8) +#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8) +#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8) +#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8) +#define PCC_SM_ERR_ALARM vBIT(0xff,32,8) +#define PCC_WR_ERR_ALARM vBIT(0xff,40,8) +#define PCC_N_SERR vBIT(0xff,48,8) +#define PCC_6_COF_OV_ERR BIT(56) +#define PCC_7_COF_OV_ERR BIT(57) +#define PCC_6_LSO_OV_ERR BIT(58) +#define PCC_7_LSO_OV_ERR BIT(59) #define PCC_ENABLE_FOUR vBIT(0x0F,0,8) - u64 pcc_err_mask; u64 pcc_err_alarm; u64 tti_err_reg; +#define TTI_ECC_SG_ERR BIT(7) +#define TTI_ECC_DB_ERR BIT(15) +#define TTI_SM_ERR_ALARM BIT(23) u64 tti_err_mask; u64 tti_err_alarm; u64 lso_err_reg; +#define LSO6_SEND_OFLOW BIT(12) +#define LSO7_SEND_OFLOW BIT(13) +#define LSO6_ABORT BIT(14) +#define LSO7_ABORT BIT(15) +#define LSO6_SM_ERR_ALARM BIT(22) +#define LSO7_SM_ERR_ALARM BIT(23) u64 lso_err_mask; u64 lso_err_alarm; u64 tpa_err_reg; +#define TPA_TX_FRM_DROP BIT(7) +#define TPA_SM_ERR_ALARM BIT(23) + u64 tpa_err_mask; u64 tpa_err_alarm; u64 sm_err_reg; +#define SM_SM_ERR_ALARM BIT(15) u64 sm_err_mask; u64 sm_err_alarm; @@ -450,22 +483,52 @@ struct XENA_dev_config { #define RXDMA_INT_RTI_INT_M BIT(3) u64 rda_err_reg; +#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8) +#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8) +#define RDA_FRM_ECC_SG_ERR BIT(23) +#define RDA_FRM_ECC_DB_N_AERR BIT(31) +#define RDA_SM1_ERR_ALARM BIT(38) +#define RDA_SM0_ERR_ALARM BIT(39) +#define RDA_MISC_ERR BIT(47) +#define RDA_PCIX_ERR BIT(55) +#define RDA_RXD_ECC_DB_SERR BIT(63) u64 rda_err_mask; u64 rda_err_alarm; u64 rc_err_reg; +#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8) +#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8) +#define RC_FTC_ECC_SG_ERR BIT(23) +#define RC_FTC_ECC_DB_ERR BIT(31) +#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8) +#define RC_FTC_SM_ERR_ALARM BIT(47) +#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8) u64 rc_err_mask; u64 rc_err_alarm; u64 prc_pcix_err_reg; +#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8) +#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8) +#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8) +#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8) +#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8) +#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8) u64 prc_pcix_err_mask; u64 prc_pcix_err_alarm; u64 rpa_err_reg; +#define RPA_ECC_SG_ERR BIT(7) +#define RPA_ECC_DB_ERR BIT(15) 
+#define RPA_FLUSH_REQUEST BIT(22) +#define RPA_SM_ERR_ALARM BIT(23) +#define RPA_CREDIT_ERR BIT(31) u64 rpa_err_mask; u64 rpa_err_alarm; u64 rti_err_reg; +#define RTI_ECC_SG_ERR BIT(7) +#define RTI_ECC_DB_ERR BIT(15) +#define RTI_SM_ERR_ALARM BIT(23) u64 rti_err_mask; u64 rti_err_alarm; @@ -582,17 +645,43 @@ struct XENA_dev_config { #define MAC_INT_STATUS_RMAC_INT BIT(1) u64 mac_tmac_err_reg; -#define TMAC_ERR_REG_TMAC_ECC_DB_ERR BIT(15) -#define TMAC_ERR_REG_TMAC_TX_BUF_OVRN BIT(23) -#define TMAC_ERR_REG_TMAC_TX_CRI_ERR BIT(31) +#define TMAC_ECC_SG_ERR BIT(7) +#define TMAC_ECC_DB_ERR BIT(15) +#define TMAC_TX_BUF_OVRN BIT(23) +#define TMAC_TX_CRI_ERR BIT(31) +#define TMAC_TX_SM_ERR BIT(39) +#define TMAC_DESC_ECC_SG_ERR BIT(47) +#define TMAC_DESC_ECC_DB_ERR BIT(55) + u64 mac_tmac_err_mask; u64 mac_tmac_err_alarm; u64 mac_rmac_err_reg; -#define RMAC_ERR_REG_RX_BUFF_OVRN BIT(0) -#define RMAC_ERR_REG_RTS_ECC_DB_ERR BIT(14) -#define RMAC_ERR_REG_ECC_DB_ERR BIT(15) -#define RMAC_LINK_STATE_CHANGE_INT BIT(31) +#define RMAC_RX_BUFF_OVRN BIT(0) +#define RMAC_FRM_RCVD_INT BIT(1) +#define RMAC_UNUSED_INT BIT(2) +#define RMAC_RTS_PNUM_ECC_SG_ERR BIT(5) +#define RMAC_RTS_DS_ECC_SG_ERR BIT(6) +#define RMAC_RD_BUF_ECC_SG_ERR BIT(7) +#define RMAC_RTH_MAP_ECC_SG_ERR BIT(8) +#define RMAC_RTH_SPDM_ECC_SG_ERR BIT(9) +#define RMAC_RTS_VID_ECC_SG_ERR BIT(10) +#define RMAC_DA_SHADOW_ECC_SG_ERR BIT(11) +#define RMAC_RTS_PNUM_ECC_DB_ERR BIT(13) +#define RMAC_RTS_DS_ECC_DB_ERR BIT(14) +#define RMAC_RD_BUF_ECC_DB_ERR BIT(15) +#define RMAC_RTH_MAP_ECC_DB_ERR BIT(16) +#define RMAC_RTH_SPDM_ECC_DB_ERR BIT(17) +#define RMAC_RTS_VID_ECC_DB_ERR BIT(18) +#define RMAC_DA_SHADOW_ECC_DB_ERR BIT(19) +#define RMAC_LINK_STATE_CHANGE_INT BIT(31) +#define RMAC_RX_SM_ERR BIT(39) +#define RMAC_SINGLE_ECC_ERR (BIT(5) | BIT(6) | BIT(7) |\ + BIT(8) | BIT(9) | BIT(10)|\ + BIT(11)) +#define RMAC_DOUBLE_ECC_ERR (BIT(13) | BIT(14) | BIT(15) |\ + BIT(16) | BIT(17) | BIT(18)|\ + BIT(19)) u64 mac_rmac_err_mask; u64 mac_rmac_err_alarm; @@ -747,10 +836,10 @@ struct XENA_dev_config { #define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23) #define MC_ERR_REG_SM_ERR BIT(31) #define MC_ERR_REG_ECC_ALL_SNG (BIT(2) | BIT(3) | BIT(4) | BIT(5) |\ - BIT(6) | BIT(7) | BIT(17) | BIT(19)) + BIT(17) | BIT(19)) #define MC_ERR_REG_ECC_ALL_DBL (BIT(10) | BIT(11) | BIT(12) |\ - BIT(13) | BIT(14) | BIT(15) |\ - BIT(18) | BIT(20)) + BIT(13) | BIT(18) | BIT(20)) +#define PLL_LOCK_N BIT(39) u64 mc_err_mask; u64 mc_err_alarm; @@ -824,11 +913,17 @@ struct XENA_dev_config { #define XGXS_INT_MASK_RXGXS BIT(1) u64 xgxs_txgxs_err_reg; -#define TXGXS_ECC_DB_ERR BIT(15) +#define TXGXS_ECC_SG_ERR BIT(7) +#define TXGXS_ECC_DB_ERR BIT(15) +#define TXGXS_ESTORE_UFLOW BIT(31) +#define TXGXS_TX_SM_ERR BIT(39) + u64 xgxs_txgxs_err_mask; u64 xgxs_txgxs_err_alarm; u64 xgxs_rxgxs_err_reg; +#define RXGXS_ESTORE_OFLOW BIT(7) +#define RXGXS_RX_SM_ERR BIT(39) u64 xgxs_rxgxs_err_mask; u64 xgxs_rxgxs_err_alarm; diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index afef6c0c59fe..22e4054d4fcb 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -32,13 +32,13 @@ * rx_ring_sz: This defines the number of receive blocks each ring can have. * This is also an array of size 8. * rx_ring_mode: This defines the operation mode of all 8 rings. The valid - * values are 1, 2 and 3. + * values are 1, 2. * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. * tx_fifo_len: This too is an array of 8. 
Each element defines the number of * Tx descriptors that can be associated with each corresponding FIFO. * intr_type: This defines the type of interrupt. The values can be 0(INTA), - * 1(MSI), 2(MSI_X). Default value is '0(INTA)' - * lro: Specifies whether to enable Large Receive Offload (LRO) or not. + * 2(MSI_X). Default value is '2(MSI_X)' + * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not. * Possible values '1' for enable '0' for disable. Default is '0' * lro_max_pkts: This parameter defines maximum number of packets can be * aggregated as a single large packet @@ -84,14 +84,14 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.23.1" +#define DRV_VERSION "2.0.26.5" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; static char s2io_driver_version[] = DRV_VERSION; -static int rxd_size[4] = {32,48,48,64}; -static int rxd_count[4] = {127,85,85,63}; +static int rxd_size[2] = {32,48}; +static int rxd_count[2] = {127,85}; static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) { @@ -130,6 +130,11 @@ static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring) return 0; } +static inline int is_s2io_card_up(const struct s2io_nic * sp) +{ + return test_bit(__S2IO_STATE_CARD_UP, &sp->state); +} + /* Ethtool related variables and Macros. */ static char s2io_gstrings[][ETH_GSTRING_LEN] = { "Register test\t(offline)", @@ -263,46 +268,71 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { {"serious_err_cnt"}, {"soft_reset_cnt"}, {"fifo_full_cnt"}, - {"ring_full_cnt"}, - ("alarm_transceiver_temp_high"), - ("alarm_transceiver_temp_low"), - ("alarm_laser_bias_current_high"), - ("alarm_laser_bias_current_low"), - ("alarm_laser_output_power_high"), - ("alarm_laser_output_power_low"), - ("warn_transceiver_temp_high"), - ("warn_transceiver_temp_low"), - ("warn_laser_bias_current_high"), - ("warn_laser_bias_current_low"), - ("warn_laser_output_power_high"), - ("warn_laser_output_power_low"), - ("lro_aggregated_pkts"), - ("lro_flush_both_count"), - ("lro_out_of_sequence_pkts"), - ("lro_flush_due_to_max_pkts"), - ("lro_avg_aggr_pkts"), - ("mem_alloc_fail_cnt"), - ("watchdog_timer_cnt"), - ("mem_allocated"), - ("mem_freed"), - ("link_up_cnt"), - ("link_down_cnt"), - ("link_up_time"), - ("link_down_time"), - ("tx_tcode_buf_abort_cnt"), - ("tx_tcode_desc_abort_cnt"), - ("tx_tcode_parity_err_cnt"), - ("tx_tcode_link_loss_cnt"), - ("tx_tcode_list_proc_err_cnt"), - ("rx_tcode_parity_err_cnt"), - ("rx_tcode_abort_cnt"), - ("rx_tcode_parity_abort_cnt"), - ("rx_tcode_rda_fail_cnt"), - ("rx_tcode_unkn_prot_cnt"), - ("rx_tcode_fcs_err_cnt"), - ("rx_tcode_buf_size_err_cnt"), - ("rx_tcode_rxd_corrupt_cnt"), - ("rx_tcode_unkn_err_cnt") + {"ring_0_full_cnt"}, + {"ring_1_full_cnt"}, + {"ring_2_full_cnt"}, + {"ring_3_full_cnt"}, + {"ring_4_full_cnt"}, + {"ring_5_full_cnt"}, + {"ring_6_full_cnt"}, + {"ring_7_full_cnt"}, + {"alarm_transceiver_temp_high"}, + {"alarm_transceiver_temp_low"}, + {"alarm_laser_bias_current_high"}, + {"alarm_laser_bias_current_low"}, + {"alarm_laser_output_power_high"}, + {"alarm_laser_output_power_low"}, + {"warn_transceiver_temp_high"}, + {"warn_transceiver_temp_low"}, + {"warn_laser_bias_current_high"}, + {"warn_laser_bias_current_low"}, + {"warn_laser_output_power_high"}, + {"warn_laser_output_power_low"}, + {"lro_aggregated_pkts"}, + {"lro_flush_both_count"}, + {"lro_out_of_sequence_pkts"}, + {"lro_flush_due_to_max_pkts"}, + {"lro_avg_aggr_pkts"}, + {"mem_alloc_fail_cnt"}, + {"pci_map_fail_cnt"}, 
+ {"watchdog_timer_cnt"}, + {"mem_allocated"}, + {"mem_freed"}, + {"link_up_cnt"}, + {"link_down_cnt"}, + {"link_up_time"}, + {"link_down_time"}, + {"tx_tcode_buf_abort_cnt"}, + {"tx_tcode_desc_abort_cnt"}, + {"tx_tcode_parity_err_cnt"}, + {"tx_tcode_link_loss_cnt"}, + {"tx_tcode_list_proc_err_cnt"}, + {"rx_tcode_parity_err_cnt"}, + {"rx_tcode_abort_cnt"}, + {"rx_tcode_parity_abort_cnt"}, + {"rx_tcode_rda_fail_cnt"}, + {"rx_tcode_unkn_prot_cnt"}, + {"rx_tcode_fcs_err_cnt"}, + {"rx_tcode_buf_size_err_cnt"}, + {"rx_tcode_rxd_corrupt_cnt"}, + {"rx_tcode_unkn_err_cnt"}, + {"tda_err_cnt"}, + {"pfc_err_cnt"}, + {"pcc_err_cnt"}, + {"tti_err_cnt"}, + {"tpa_err_cnt"}, + {"sm_err_cnt"}, + {"lso_err_cnt"}, + {"mac_tmac_err_cnt"}, + {"mac_rmac_err_cnt"}, + {"xgxs_txgxs_err_cnt"}, + {"xgxs_rxgxs_err_cnt"}, + {"rc_err_cnt"}, + {"prc_pcix_err_cnt"}, + {"rpa_err_cnt"}, + {"rda_err_cnt"}, + {"rti_err_cnt"}, + {"mc_err_cnt"} }; #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN @@ -325,6 +355,16 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { timer.data = (unsigned long) arg; \ mod_timer(&timer, (jiffies + exp)) \ +/* copy mac addr to def_mac_addr array */ +static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) +{ + sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr); + sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8); + sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16); + sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24); + sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); + sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); +} /* Add the vlan */ static void s2io_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) @@ -422,14 +462,15 @@ S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); S2IO_PARM_INT(shared_splits, 0); S2IO_PARM_INT(tmac_util_period, 5); S2IO_PARM_INT(rmac_util_period, 5); -S2IO_PARM_INT(bimodal, 0); S2IO_PARM_INT(l3l4hdr_size, 128); /* Frequency of Rx desc syncs expressed as power of 2 */ S2IO_PARM_INT(rxsync_frequency, 3); -/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ -S2IO_PARM_INT(intr_type, 0); +/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ +S2IO_PARM_INT(intr_type, 2); /* Large receive offload feature */ -S2IO_PARM_INT(lro, 0); +static unsigned int lro_enable; +module_param_named(lro, lro_enable, uint, 0); + /* Max pkts to be aggregated by LRO at one time. 
If not specified, * aggregation happens until we hit max IP pkt size(64K) */ @@ -531,7 +572,7 @@ static int init_shared_mem(struct s2io_nic *nic) for (i = 0; i < config->tx_fifo_num; i++) { int fifo_len = config->tx_cfg[i].fifo_len; int list_holder_size = fifo_len * sizeof(struct list_info_hold); - mac_control->fifos[i].list_info = kmalloc(list_holder_size, + mac_control->fifos[i].list_info = kzalloc(list_holder_size, GFP_KERNEL); if (!mac_control->fifos[i].list_info) { DBG_PRINT(INFO_DBG, @@ -539,7 +580,6 @@ static int init_shared_mem(struct s2io_nic *nic) return -ENOMEM; } mem_allocated += list_holder_size; - memset(mac_control->fifos[i].list_info, 0, list_holder_size); } for (i = 0; i < config->tx_fifo_num; i++) { int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, @@ -670,7 +710,7 @@ static int init_shared_mem(struct s2io_nic *nic) GFP_KERNEL); if (!rx_blocks->rxds) return -ENOMEM; - mem_allocated += + mem_allocated += (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]); for (l=0; l<rxd_count[nic->rxd_mode];l++) { rx_blocks->rxds[l].virt_addr = @@ -701,7 +741,7 @@ static int init_shared_mem(struct s2io_nic *nic) (u64) tmp_p_addr_next; } } - if (nic->rxd_mode >= RXD_MODE_3A) { + if (nic->rxd_mode == RXD_MODE_3B) { /* * Allocation of Storages for buffer addresses in 2BUFF mode * and the buffers as well. @@ -732,7 +772,7 @@ static int init_shared_mem(struct s2io_nic *nic) (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); if (!ba->ba_0_org) return -ENOMEM; - mem_allocated += + mem_allocated += (BUF0_LEN + ALIGN_SIZE); tmp = (unsigned long)ba->ba_0_org; tmp += ALIGN_SIZE; @@ -743,7 +783,7 @@ static int init_shared_mem(struct s2io_nic *nic) (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); if (!ba->ba_1_org) return -ENOMEM; - mem_allocated + mem_allocated += (BUF1_LEN + ALIGN_SIZE); tmp = (unsigned long) ba->ba_1_org; tmp += ALIGN_SIZE; @@ -828,7 +868,7 @@ static void free_shared_mem(struct s2io_nic *nic) mac_control->fifos[i]. list_info[mem_blks]. list_phy_addr); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += PAGE_SIZE; } /* If we got a zero DMA address during allocation, @@ -843,11 +883,11 @@ static void free_shared_mem(struct s2io_nic *nic) dev->name); DBG_PRINT(INIT_DBG, "Virtual address %p\n", mac_control->zerodma_virt_addr); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += PAGE_SIZE; } kfree(mac_control->fifos[i].list_info); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold)); } @@ -865,12 +905,12 @@ static void free_shared_mem(struct s2io_nic *nic) tmp_v_addr, tmp_p_addr); nic->mac_control.stats_info->sw_stat.mem_freed += size; kfree(mac_control->rings[i].rx_blocks[j].rxds); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]); } } - if (nic->rxd_mode >= RXD_MODE_3A) { + if (nic->rxd_mode == RXD_MODE_3B) { /* Freeing buffer storage addresses in 2BUFF mode. 
*/ for (i = 0; i < config->rx_ring_num; i++) { blk_cnt = config->rx_cfg[i].num_rxd / @@ -891,11 +931,12 @@ static void free_shared_mem(struct s2io_nic *nic) k++; } kfree(mac_control->rings[i].ba[j]); - nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) * - (rxd_count[nic->rxd_mode] + 1)); + nic->mac_control.stats_info->sw_stat.mem_freed += + (sizeof(struct buffAdd) * + (rxd_count[nic->rxd_mode] + 1)); } kfree(mac_control->rings[i].ba); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd *) * blk_cnt); } } @@ -905,12 +946,12 @@ static void free_shared_mem(struct s2io_nic *nic) mac_control->stats_mem_sz, mac_control->stats_mem, mac_control->stats_mem_phy); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += mac_control->stats_mem_sz; } if (nic->ufo_in_band_v) { kfree(nic->ufo_in_band_v); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (ufo_size * sizeof(u64)); } } @@ -1455,7 +1496,7 @@ static int init_nic(struct s2io_nic *nic) &bar0->rts_frm_len_n[i]); } } - + /* Disable differentiated services steering logic */ for (i = 0; i < 64; i++) { if (rts_ds_steer(nic, i, 0) == FAILURE) { @@ -1535,90 +1576,57 @@ static int init_nic(struct s2io_nic *nic) time++; } - if (nic->config.bimodal) { - int k = 0; - for (k = 0; k < config->rx_ring_num; k++) { - val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; - val64 |= TTI_CMD_MEM_OFFSET(0x38+k); - writeq(val64, &bar0->tti_command_mem); - + /* RTI Initialization */ + if (nic->device_type == XFRAME_II_DEVICE) { /* - * Once the operation completes, the Strobe bit of the command - * register will be reset. We poll for this particular condition - * We wait for a maximum of 500ms for the operation to complete, - * if it's not complete by then we return error. 
- */ - time = 0; - while (TRUE) { - val64 = readq(&bar0->tti_command_mem); - if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) { - break; - } - if (time > 10) { - DBG_PRINT(ERR_DBG, - "%s: TTI init Failed\n", - dev->name); - return -1; - } - time++; - msleep(50); - } - } - } else { - - /* RTI Initialization */ - if (nic->device_type == XFRAME_II_DEVICE) { - /* - * Programmed to generate Apprx 500 Intrs per - * second - */ - int count = (nic->config.bus_speed * 125)/4; - val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count); - } else { - val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); - } - val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | - RTI_DATA1_MEM_RX_URNG_B(0x10) | - RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; - - writeq(val64, &bar0->rti_data1_mem); + * Programmed to generate Apprx 500 Intrs per + * second + */ + int count = (nic->config.bus_speed * 125)/4; + val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count); + } else + val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); + val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | + RTI_DATA1_MEM_RX_URNG_B(0x10) | + RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; + + writeq(val64, &bar0->rti_data1_mem); + + val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | + RTI_DATA2_MEM_RX_UFC_B(0x2) ; + if (nic->config.intr_type == MSI_X) + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \ + RTI_DATA2_MEM_RX_UFC_D(0x40)); + else + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \ + RTI_DATA2_MEM_RX_UFC_D(0x80)); + writeq(val64, &bar0->rti_data2_mem); - val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | - RTI_DATA2_MEM_RX_UFC_B(0x2) ; - if (nic->intr_type == MSI_X) - val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \ - RTI_DATA2_MEM_RX_UFC_D(0x40)); - else - val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \ - RTI_DATA2_MEM_RX_UFC_D(0x80)); - writeq(val64, &bar0->rti_data2_mem); + for (i = 0; i < config->rx_ring_num; i++) { + val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD + | RTI_CMD_MEM_OFFSET(i); + writeq(val64, &bar0->rti_command_mem); - for (i = 0; i < config->rx_ring_num; i++) { - val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD - | RTI_CMD_MEM_OFFSET(i); - writeq(val64, &bar0->rti_command_mem); + /* + * Once the operation completes, the Strobe bit of the + * command register will be reset. We poll for this + * particular condition. We wait for a maximum of 500ms + * for the operation to complete, if it's not complete + * by then we return error. + */ + time = 0; + while (TRUE) { + val64 = readq(&bar0->rti_command_mem); + if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) + break; - /* - * Once the operation completes, the Strobe bit of the - * command register will be reset. We poll for this - * particular condition. We wait for a maximum of 500ms - * for the operation to complete, if it's not complete - * by then we return error. 
- */ - time = 0; - while (TRUE) { - val64 = readq(&bar0->rti_command_mem); - if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) { - break; - } - if (time > 10) { - DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", - dev->name); - return -1; - } - time++; - msleep(50); + if (time > 10) { + DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", + dev->name); + return -1; } + time++; + msleep(50); } } @@ -1723,7 +1731,7 @@ static int init_nic(struct s2io_nic *nic) static int s2io_link_fault_indication(struct s2io_nic *nic) { - if (nic->intr_type != INTA) + if (nic->config.intr_type != INTA) return MAC_RMAC_ERR_TIMER; if (nic->device_type == XFRAME_II_DEVICE) return LINK_UP_DOWN_INTERRUPT; @@ -1732,6 +1740,150 @@ static int s2io_link_fault_indication(struct s2io_nic *nic) } /** + * do_s2io_write_bits - update alarm bits in alarm register + * @value: alarm bits + * @flag: interrupt status + * @addr: address value + * Description: update alarm bits in alarm register + * Return Value: + * NONE. + */ +static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr) +{ + u64 temp64; + + temp64 = readq(addr); + + if(flag == ENABLE_INTRS) + temp64 &= ~((u64) value); + else + temp64 |= ((u64) value); + writeq(temp64, addr); +} + +static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 gen_int_mask = 0; + + if (mask & TX_DMA_INTR) { + + gen_int_mask |= TXDMA_INT_M; + + do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT | + TXDMA_PCC_INT | TXDMA_TTI_INT | + TXDMA_LSO_INT | TXDMA_TPA_INT | + TXDMA_SM_INT, flag, &bar0->txdma_int_mask); + + do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | + PFC_MISC_0_ERR | PFC_MISC_1_ERR | + PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag, + &bar0->pfc_err_mask); + + do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | + TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR | + TDA_PCIX_ERR, flag, &bar0->tda_err_mask); + + do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR | + PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | + PCC_N_SERR | PCC_6_COF_OV_ERR | + PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | + PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR | + PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask); + + do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR | + TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); + + do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT | + LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM | + LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, + flag, &bar0->lso_err_mask); + + do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP, + flag, &bar0->tpa_err_mask); + + do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); + + } + + if (mask & TX_MAC_INTR) { + gen_int_mask |= TXMAC_INT_M; + do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag, + &bar0->mac_int_mask); + do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR | + TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | + TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, + flag, &bar0->mac_tmac_err_mask); + } + + if (mask & TX_XGXS_INTR) { + gen_int_mask |= TXXGXS_INT_M; + do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag, + &bar0->xgxs_int_mask); + do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR | + TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, + flag, &bar0->xgxs_txgxs_err_mask); + } + + if (mask & RX_DMA_INTR) { + gen_int_mask |= RXDMA_INT_M; + do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M | + RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M, + flag, &bar0->rxdma_int_mask); + do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | + RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM | + RC_PRCn_ECC_SG_ERR | 
RC_FTC_ECC_SG_ERR | + RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); + do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | + PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn | + PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag, + &bar0->prc_pcix_err_mask); + do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR | + RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag, + &bar0->rpa_err_mask); + do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR | + RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM | + RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR | + RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR, + flag, &bar0->rda_err_mask); + do_s2io_write_bits(RTI_SM_ERR_ALARM | + RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, + flag, &bar0->rti_err_mask); + } + + if (mask & RX_MAC_INTR) { + gen_int_mask |= RXMAC_INT_M; + do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, + &bar0->mac_int_mask); + do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | + RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | + RMAC_DOUBLE_ECC_ERR | + RMAC_LINK_STATE_CHANGE_INT, + flag, &bar0->mac_rmac_err_mask); + } + + if (mask & RX_XGXS_INTR) + { + gen_int_mask |= RXXGXS_INT_M; + do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag, + &bar0->xgxs_int_mask); + do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag, + &bar0->xgxs_rxgxs_err_mask); + } + + if (mask & MC_INTR) { + gen_int_mask |= MC_INT_M; + do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask); + do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG | + MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag, + &bar0->mc_err_mask); + } + nic->general_int_mask = gen_int_mask; + + /* Remove this line when alarm interrupts are enabled */ + nic->general_int_mask = 0; +} +/** * en_dis_able_nic_intrs - Enable or Disable the interrupts * @nic: device private variable, * @mask: A mask indicating which Intr block must be modified and, @@ -1745,17 +1897,16 @@ static int s2io_link_fault_indication(struct s2io_nic *nic) static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) { struct XENA_dev_config __iomem *bar0 = nic->bar0; - register u64 val64 = 0, temp64 = 0; + register u64 temp64 = 0, intr_mask = 0; + + intr_mask = nic->general_int_mask; /* Top level interrupt classification */ /* PIC Interrupts */ - if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) { + if (mask & TX_PIC_INTR) { /* Enable PIC Intrs in the general intr mask register */ - val64 = TXPIC_INT_M; + intr_mask |= TXPIC_INT_M; if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); /* * If Hercules adapter enable GPIO otherwise * disable all PCIX, Flash, MDIO, IIC and GPIO @@ -1764,64 +1915,25 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) */ if (s2io_link_fault_indication(nic) == LINK_UP_DOWN_INTERRUPT ) { - temp64 = readq(&bar0->pic_int_mask); - temp64 &= ~((u64) PIC_INT_GPIO); - writeq(temp64, &bar0->pic_int_mask); - temp64 = readq(&bar0->gpio_int_mask); - temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP); - writeq(temp64, &bar0->gpio_int_mask); - } else { + do_s2io_write_bits(PIC_INT_GPIO, flag, + &bar0->pic_int_mask); + do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag, + &bar0->gpio_int_mask); + } else writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); - } - /* - * No MSI Support is available presently, so TTI and - * RTI interrupts are also disabled. 
- */ } else if (flag == DISABLE_INTRS) { /* * Disable PIC Intrs in the general * intr mask register */ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); - } - } - - /* MAC Interrupts */ - /* Enabling/Disabling MAC interrupts */ - if (mask & (TX_MAC_INTR | RX_MAC_INTR)) { - val64 = TXMAC_INT_M | RXMAC_INT_M; - if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); - /* - * All MAC block error interrupts are disabled for now - * TODO - */ - } else if (flag == DISABLE_INTRS) { - /* - * Disable MAC Intrs in the general intr mask register - */ - writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask); - writeq(DISABLE_ALL_INTRS, - &bar0->mac_rmac_err_mask); - - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); } } /* Tx traffic interrupts */ if (mask & TX_TRAFFIC_INTR) { - val64 = TXTRAFFIC_INT_M; + intr_mask |= TXTRAFFIC_INT_M; if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); /* * Enable all the Tx side interrupts * writing 0 Enables all 64 TX interrupt levels @@ -1833,19 +1945,13 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) * register. */ writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); } } /* Rx traffic interrupts */ if (mask & RX_TRAFFIC_INTR) { - val64 = RXTRAFFIC_INT_M; + intr_mask |= RXTRAFFIC_INT_M; if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); /* writing 0 Enables all 8 RX interrupt levels */ writeq(0x0, &bar0->rx_traffic_mask); } else if (flag == DISABLE_INTRS) { @@ -1854,11 +1960,17 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) * register. */ writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); } } + + temp64 = readq(&bar0->general_int_mask); + if (flag == ENABLE_INTRS) + temp64 &= ~((u64) intr_mask); + else + temp64 = DISABLE_ALL_INTRS; + writeq(temp64, &bar0->general_int_mask); + + nic->general_int_mask = readq(&bar0->general_int_mask); } /** @@ -1871,7 +1983,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag) int ret = 0, herc; struct XENA_dev_config __iomem *bar0 = sp->bar0; u64 val64 = readq(&bar0->adapter_status); - + herc = (sp->device_type == XFRAME_II_DEVICE); if (flag == FALSE) { @@ -2017,8 +2129,6 @@ static int start_nic(struct s2io_nic *nic) &bar0->prc_rxd0_n[i]); val64 = readq(&bar0->prc_ctrl_n[i]); - if (nic->config.bimodal) - val64 |= PRC_CTRL_BIMODAL_INTERRUPT; if (nic->rxd_mode == RXD_MODE_1) val64 |= PRC_CTRL_RC_ENABLED; else @@ -2062,14 +2172,6 @@ static int start_nic(struct s2io_nic *nic) writeq(val64, &bar0->adapter_control); /* - * Clearing any possible Link state change interrupts that - * could have popped up just before Enabling the card. - */ - val64 = readq(&bar0->mac_rmac_err_reg); - if (val64) - writeq(val64, &bar0->mac_rmac_err_reg); - - /* * Verify if the device is ready to be enabled, if so enable * it. 
*/ @@ -2186,7 +2288,7 @@ static void free_tx_buffers(struct s2io_nic *nic) mac_control->fifos[i].list_info[j].list_virt_addr; skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); if (skb) { - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; dev_kfree_skb(skb); cnt++; @@ -2222,9 +2324,9 @@ static void stop_nic(struct s2io_nic *nic) config = &nic->config; /* Disable all interrupts */ + en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS); interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; - interruptible |= TX_PIC_INTR | RX_PIC_INTR; - interruptible |= TX_MAC_INTR | RX_MAC_INTR; + interruptible |= TX_PIC_INTR; en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */ @@ -2233,44 +2335,6 @@ static void stop_nic(struct s2io_nic *nic) writeq(val64, &bar0->adapter_control); } -static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \ - sk_buff *skb) -{ - struct net_device *dev = nic->dev; - struct sk_buff *frag_list; - void *tmp; - - /* Buffer-1 receives L3/L4 headers */ - ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single - (nic->pdev, skb->data, l3l4hdr_size + 4, - PCI_DMA_FROMDEVICE); - - /* skb_shinfo(skb)->frag_list will have L4 data payload */ - skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE); - if (skb_shinfo(skb)->frag_list == NULL) { - nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; - DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name); - return -ENOMEM ; - } - frag_list = skb_shinfo(skb)->frag_list; - skb->truesize += frag_list->truesize; - nic->mac_control.stats_info->sw_stat.mem_allocated - += frag_list->truesize; - frag_list->next = NULL; - tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); - frag_list->data = tmp; - skb_reset_tail_pointer(frag_list); - - /* Buffer-2 receives L4 data payload */ - ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, - frag_list->data, dev->mtu, - PCI_DMA_FROMDEVICE); - rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); - rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu); - - return SUCCESS; -} - /** * fill_rx_buffers - Allocates the Rx side skbs * @nic: device private variable @@ -2307,6 +2371,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) unsigned long flags; struct RxD_t *first_rxdp = NULL; u64 Buffer0_ptr = 0, Buffer1_ptr = 0; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; + struct swStat *stats = &nic->mac_control.stats_info->sw_stat; mac_control = &nic->mac_control; config = &nic->config; @@ -2359,7 +2426,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; } if ((rxdp->Control_1 & RXD_OWN_XENA) && - ((nic->rxd_mode >= RXD_MODE_3A) && + ((nic->rxd_mode == RXD_MODE_3B) && (rxdp->Control_2 & BIT(0)))) { mac_control->rings[ring_no].rx_curr_put_info. 
offset = off; @@ -2370,10 +2437,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) HEADER_802_2_SIZE + HEADER_SNAP_SIZE; if (nic->rxd_mode == RXD_MODE_1) size += NET_IP_ALIGN; - else if (nic->rxd_mode == RXD_MODE_3B) - size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; else - size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; + size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; /* allocate skb */ skb = dev_alloc_skb(size); @@ -2388,37 +2453,39 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) mem_alloc_fail_cnt++; return -ENOMEM ; } - nic->mac_control.stats_info->sw_stat.mem_allocated + nic->mac_control.stats_info->sw_stat.mem_allocated += skb->truesize; if (nic->rxd_mode == RXD_MODE_1) { /* 1 buffer mode - normal operation mode */ + rxdp1 = (struct RxD1*)rxdp; memset(rxdp, 0, sizeof(struct RxD1)); skb_reserve(skb, NET_IP_ALIGN); - ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single + rxdp1->Buffer0_ptr = pci_map_single (nic->pdev, skb->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - rxdp->Control_2 = + if( (rxdp1->Buffer0_ptr == 0) || + (rxdp1->Buffer0_ptr == + DMA_ERROR_CODE)) + goto pci_map_failed; + + rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); - } else if (nic->rxd_mode >= RXD_MODE_3A) { + } else if (nic->rxd_mode == RXD_MODE_3B) { /* - * 2 or 3 buffer mode - - * Both 2 buffer mode and 3 buffer mode provides 128 + * 2 buffer mode - + * 2 buffer mode provides 128 * byte aligned receive buffers. - * - * 3 buffer mode provides header separation where in - * skb->data will have L3/L4 headers where as - * skb_shinfo(skb)->frag_list will have the L4 data - * payload */ + rxdp3 = (struct RxD3*)rxdp; /* save buffer pointers to avoid frequent dma mapping */ - Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr; - Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr; + Buffer0_ptr = rxdp3->Buffer0_ptr; + Buffer1_ptr = rxdp3->Buffer1_ptr; memset(rxdp, 0, sizeof(struct RxD3)); /* restore the buffer pointers for dma sync*/ - ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr; - ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr; + rxdp3->Buffer0_ptr = Buffer0_ptr; + rxdp3->Buffer1_ptr = Buffer1_ptr; ba = &mac_control->rings[ring_no].ba[block_no][off]; skb_reserve(skb, BUF0_LEN); @@ -2428,14 +2495,18 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) skb->data = (void *) (unsigned long)tmp; skb_reset_tail_pointer(skb); - if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) - ((struct RxD3*)rxdp)->Buffer0_ptr = + if (!(rxdp3->Buffer0_ptr)) + rxdp3->Buffer0_ptr = pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); else pci_dma_sync_single_for_device(nic->pdev, - (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr, + (dma_addr_t) rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); + if( (rxdp3->Buffer0_ptr == 0) || + (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) + goto pci_map_failed; + rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (nic->rxd_mode == RXD_MODE_3B) { /* Two buffer mode */ @@ -2444,33 +2515,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) * Buffer2 will have L3/L4 header plus * L4 payload */ - ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single + rxdp3->Buffer2_ptr = pci_map_single (nic->pdev, skb->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - /* Buffer-1 will be dummy buffer. 
Not used */ - if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) { - ((struct RxD3*)rxdp)->Buffer1_ptr = + if( (rxdp3->Buffer2_ptr == 0) || + (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) + goto pci_map_failed; + + rxdp3->Buffer1_ptr = pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); + if( (rxdp3->Buffer1_ptr == 0) || + (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { + pci_unmap_single + (nic->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); + goto pci_map_failed; } rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3 (dev->mtu + 4); - } else { - /* 3 buffer mode */ - if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) { - nic->mac_control.stats_info->sw_stat.\ - mem_freed += skb->truesize; - dev_kfree_skb_irq(skb); - if (first_rxdp) { - wmb(); - first_rxdp->Control_1 |= - RXD_OWN_XENA; - } - return -ENOMEM ; - } } rxdp->Control_2 |= BIT(0); } @@ -2505,6 +2573,11 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) } return SUCCESS; +pci_map_failed: + stats->pci_map_fail_cnt++; + stats->mem_freed += skb->truesize; + dev_kfree_skb_irq(skb); + return -ENOMEM; } static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) @@ -2515,6 +2588,8 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) struct RxD_t *rxdp; struct mac_info *mac_control; struct buffAdd *ba; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; mac_control = &sp->mac_control; for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { @@ -2526,40 +2601,30 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) continue; } if (sp->rxd_mode == RXD_MODE_1) { + rxdp1 = (struct RxD1*)rxdp; pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD1*)rxdp)->Buffer0_ptr, - dev->mtu + - HEADER_ETHERNET_II_802_3_SIZE - + HEADER_802_2_SIZE + - HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + rxdp1->Buffer0_ptr, + dev->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + + HEADER_SNAP_SIZE, + PCI_DMA_FROMDEVICE); memset(rxdp, 0, sizeof(struct RxD1)); } else if(sp->rxd_mode == RXD_MODE_3B) { + rxdp3 = (struct RxD3*)rxdp; ba = &mac_control->rings[ring_no]. ba[blk][j]; pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer1_ptr, - BUF1_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); - memset(rxdp, 0, sizeof(struct RxD3)); - } else { - pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN, + rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer1_ptr, - l3l4hdr_size + 4, + rxdp3->Buffer1_ptr, + BUF1_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu, + rxdp3->Buffer2_ptr, + dev->mtu + 4, PCI_DMA_FROMDEVICE); memset(rxdp, 0, sizeof(struct RxD3)); } @@ -2604,7 +2669,7 @@ static void free_rx_buffers(struct s2io_nic *sp) /** * s2io_poll - Rx interrupt handler for NAPI support - * @dev : pointer to the device structure. + * @napi : pointer to the napi structure. * @budget : The number of packets that were budgeted to be processed * during one pass through the 'Poll" function. * Description: @@ -2615,22 +2680,23 @@ static void free_rx_buffers(struct s2io_nic *sp) * 0 on success and 1 if there are No Rx packets to be processed. 
*/ -static int s2io_poll(struct net_device *dev, int *budget) +static int s2io_poll(struct napi_struct *napi, int budget) { - struct s2io_nic *nic = dev->priv; + struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); + struct net_device *dev = nic->dev; int pkt_cnt = 0, org_pkts_to_process; struct mac_info *mac_control; struct config_param *config; struct XENA_dev_config __iomem *bar0 = nic->bar0; int i; - atomic_inc(&nic->isr_cnt); + if (!is_s2io_card_up(nic)) + return 0; + mac_control = &nic->mac_control; config = &nic->config; - nic->pkts_to_process = *budget; - if (nic->pkts_to_process > dev->quota) - nic->pkts_to_process = dev->quota; + nic->pkts_to_process = budget; org_pkts_to_process = nic->pkts_to_process; writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); @@ -2644,12 +2710,8 @@ static int s2io_poll(struct net_device *dev, int *budget) goto no_rx; } } - if (!pkt_cnt) - pkt_cnt = 1; - dev->quota -= pkt_cnt; - *budget -= pkt_cnt; - netif_rx_complete(dev); + netif_rx_complete(dev, napi); for (i = 0; i < config->rx_ring_num; i++) { if (fill_rx_buffers(nic, i) == -ENOMEM) { @@ -2661,13 +2723,9 @@ static int s2io_poll(struct net_device *dev, int *budget) /* Re enable the Rx interrupts. */ writeq(0x0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); - atomic_dec(&nic->isr_cnt); - return 0; + return pkt_cnt; no_rx: - dev->quota -= pkt_cnt; - *budget -= pkt_cnt; - for (i = 0; i < config->rx_ring_num; i++) { if (fill_rx_buffers(nic, i) == -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); @@ -2675,8 +2733,7 @@ no_rx: break; } } - atomic_dec(&nic->isr_cnt); - return 1; + return pkt_cnt; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2703,7 +2760,6 @@ static void s2io_netpoll(struct net_device *dev) disable_irq(dev->irq); - atomic_inc(&nic->isr_cnt); mac_control = &nic->mac_control; config = &nic->config; @@ -2728,7 +2784,6 @@ static void s2io_netpoll(struct net_device *dev) break; } } - atomic_dec(&nic->isr_cnt); enable_irq(dev->irq); return; } @@ -2756,14 +2811,10 @@ static void rx_intr_handler(struct ring_info *ring_data) struct sk_buff *skb; int pkt_cnt = 0; int i; + struct RxD1* rxdp1; + struct RxD3* rxdp3; spin_lock(&nic->rx_lock); - if (atomic_read(&nic->card_state) == CARD_DOWN) { - DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", - __FUNCTION__, dev->name); - spin_unlock(&nic->rx_lock); - return; - } get_info = ring_data->rx_curr_get_info; get_block = get_info.block_index; @@ -2796,32 +2847,23 @@ static void rx_intr_handler(struct ring_info *ring_data) return; } if (nic->rxd_mode == RXD_MODE_1) { + rxdp1 = (struct RxD1*)rxdp; pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD1*)rxdp)->Buffer0_ptr, - dev->mtu + - HEADER_ETHERNET_II_802_3_SIZE + - HEADER_802_2_SIZE + - HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + rxdp1->Buffer0_ptr, + dev->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + + HEADER_SNAP_SIZE, + PCI_DMA_FROMDEVICE); } else if (nic->rxd_mode == RXD_MODE_3B) { + rxdp3 = (struct RxD3*)rxdp; pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, - BUF0_LEN, PCI_DMA_FROMDEVICE); - pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); - } else { - pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer1_ptr, - l3l4hdr_size + 4, - PCI_DMA_FROMDEVICE); + rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); 
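/*
 * Illustrative note, not part of the diff: the hunks around this point
 * replace repeated "((struct RxD3 *)rxdp)->..." casts with a typed local
 * taken once per descriptor, e.g.
 *
 *	struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
 *
 *	pci_unmap_single(nic->pdev, (dma_addr_t)rxdp3->Buffer2_ptr,
 *			 dev->mtu + 4, PCI_DMA_FROMDEVICE);
 *
 * Behaviour is unchanged; the cast-once form is a readability cleanup that
 * keeps the 2-buffer (RXD_MODE_3B) sync/unmap sizes easy to audit now that
 * the 3-buffer mode has been removed.
 */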
pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, - dev->mtu, PCI_DMA_FROMDEVICE); + rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); } prefetch(skb->data); rx_osm_handler(ring_data, rxdp); @@ -3207,135 +3249,6 @@ static void s2io_updt_xpak_counter(struct net_device *dev) } /** - * alarm_intr_handler - Alarm Interrrupt handler - * @nic: device private variable - * Description: If the interrupt was neither because of Rx packet or Tx - * complete, this function is called. If the interrupt was to indicate - * a loss of link, the OSM link status handler is invoked for any other - * alarm interrupt the block that raised the interrupt is displayed - * and a H/W reset is issued. - * Return Value: - * NONE -*/ - -static void alarm_intr_handler(struct s2io_nic *nic) -{ - struct net_device *dev = (struct net_device *) nic->dev; - struct XENA_dev_config __iomem *bar0 = nic->bar0; - register u64 val64 = 0, err_reg = 0; - u64 cnt; - int i; - if (atomic_read(&nic->card_state) == CARD_DOWN) - return; - if (pci_channel_offline(nic->pdev)) - return; - nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; - /* Handling the XPAK counters update */ - if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { - /* waiting for an hour */ - nic->mac_control.stats_info->xpak_stat.xpak_timer_count++; - } else { - s2io_updt_xpak_counter(dev); - /* reset the count to zero */ - nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0; - } - - /* Handling link status change error Intr */ - if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { - err_reg = readq(&bar0->mac_rmac_err_reg); - writeq(err_reg, &bar0->mac_rmac_err_reg); - if (err_reg & RMAC_LINK_STATE_CHANGE_INT) { - schedule_work(&nic->set_link_task); - } - } - - /* Handling Ecc errors */ - val64 = readq(&bar0->mc_err_reg); - writeq(val64, &bar0->mc_err_reg); - if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) { - if (val64 & MC_ERR_REG_ECC_ALL_DBL) { - nic->mac_control.stats_info->sw_stat. - double_ecc_errs++; - DBG_PRINT(INIT_DBG, "%s: Device indicates ", - dev->name); - DBG_PRINT(INIT_DBG, "double ECC error!!\n"); - if (nic->device_type != XFRAME_II_DEVICE) { - /* Reset XframeI only if critical error */ - if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | - MC_ERR_REG_MIRI_ECC_DB_ERR_1)) { - netif_stop_queue(dev); - schedule_work(&nic->rst_timer_task); - nic->mac_control.stats_info->sw_stat. - soft_reset_cnt++; - } - } - } else { - nic->mac_control.stats_info->sw_stat. - single_ecc_errs++; - } - } - - /* In case of a serious error, the device will be Reset. */ - val64 = readq(&bar0->serr_source); - if (val64 & SERR_SOURCE_ANY) { - nic->mac_control.stats_info->sw_stat.serious_err_cnt++; - DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name); - DBG_PRINT(ERR_DBG, "serious error %llx!!\n", - (unsigned long long)val64); - netif_stop_queue(dev); - schedule_work(&nic->rst_timer_task); - nic->mac_control.stats_info->sw_stat.soft_reset_cnt++; - } - - /* - * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC - * Error occurs, the adapter will be recycled by disabling the - * adapter enable bit and enabling it again after the device - * becomes Quiescent. 
- */ - val64 = readq(&bar0->pcc_err_reg); - writeq(val64, &bar0->pcc_err_reg); - if (val64 & PCC_FB_ECC_DB_ERR) { - u64 ac = readq(&bar0->adapter_control); - ac &= ~(ADAPTER_CNTL_EN); - writeq(ac, &bar0->adapter_control); - ac = readq(&bar0->adapter_control); - schedule_work(&nic->set_link_task); - } - /* Check for data parity error */ - val64 = readq(&bar0->pic_int_status); - if (val64 & PIC_INT_GPIO) { - val64 = readq(&bar0->gpio_int_reg); - if (val64 & GPIO_INT_REG_DP_ERR_INT) { - nic->mac_control.stats_info->sw_stat.parity_err_cnt++; - schedule_work(&nic->rst_timer_task); - nic->mac_control.stats_info->sw_stat.soft_reset_cnt++; - } - } - - /* Check for ring full counter */ - if (nic->device_type & XFRAME_II_DEVICE) { - val64 = readq(&bar0->ring_bump_counter1); - for (i=0; i<4; i++) { - cnt = ( val64 & vBIT(0xFFFF,(i*16),16)); - cnt >>= 64 - ((i+1)*16); - nic->mac_control.stats_info->sw_stat.ring_full_cnt - += cnt; - } - - val64 = readq(&bar0->ring_bump_counter2); - for (i=0; i<4; i++) { - cnt = ( val64 & vBIT(0xFFFF,(i*16),16)); - cnt >>= 64 - ((i+1)*16); - nic->mac_control.stats_info->sw_stat.ring_full_cnt - += cnt; - } - } - - /* Other type of interrupts are not being handled now, TODO */ -} - -/** * wait_for_cmd_complete - waits for a command to complete. * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. @@ -3425,23 +3338,8 @@ static void s2io_reset(struct s2io_nic * sp) /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); - if (sp->device_type == XFRAME_II_DEVICE) { - int ret; - ret = pci_set_power_state(sp->pdev, 3); - if (!ret) - ret = pci_set_power_state(sp->pdev, 0); - else { - DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n", - __FUNCTION__); - goto old_way; - } - msleep(20); - goto new_way; - } -old_way: val64 = SW_RESET_ALL; writeq(val64, &bar0->sw_reset); -new_way: if (strstr(sp->product_name, "CX4")) { msleep(750); } @@ -3484,7 +3382,7 @@ new_way: /* Reset device statistics maintained by OS */ memset(&sp->stats, 0, sizeof (struct net_device_stats)); - + up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt; down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt; up_time = sp->mac_control.stats_info->sw_stat.link_up_time; @@ -3526,7 +3424,7 @@ new_way: } /* restore the previously assigned mac address */ - s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr); + do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr); sp->device_enabled_once = FALSE; } @@ -3623,7 +3521,7 @@ static int s2io_set_swapper(struct s2io_nic * sp) SWAPPER_CTRL_RXF_W_FE | SWAPPER_CTRL_XMSI_FE | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); - if (sp->intr_type == INTA) + if (sp->config.intr_type == INTA) val64 |= SWAPPER_CTRL_XMSI_SE; writeq(val64, &bar0->swapper_ctrl); #else @@ -3646,7 +3544,7 @@ static int s2io_set_swapper(struct s2io_nic * sp) SWAPPER_CTRL_RXF_W_FE | SWAPPER_CTRL_XMSI_FE | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); - if (sp->intr_type == INTA) + if (sp->config.intr_type == INTA) val64 |= SWAPPER_CTRL_XMSI_SE; writeq(val64, &bar0->swapper_ctrl); #endif @@ -3731,56 +3629,6 @@ static void store_xmsi_data(struct s2io_nic *nic) } } -int s2io_enable_msi(struct s2io_nic *nic) -{ - struct XENA_dev_config __iomem *bar0 = nic->bar0; - u16 msi_ctrl, msg_val; - struct config_param *config = &nic->config; - struct net_device *dev = nic->dev; - u64 val64, tx_mat, rx_mat; - int i, err; - - val64 = readq(&bar0->pic_control); - 
val64 &= ~BIT(1); - writeq(val64, &bar0->pic_control); - - err = pci_enable_msi(nic->pdev); - if (err) { - DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n", - nic->dev->name); - return err; - } - - /* - * Enable MSI and use MSI-1 in stead of the standard MSI-0 - * for interrupt handling. - */ - pci_read_config_word(nic->pdev, 0x4c, &msg_val); - msg_val ^= 0x1; - pci_write_config_word(nic->pdev, 0x4c, msg_val); - pci_read_config_word(nic->pdev, 0x4c, &msg_val); - - pci_read_config_word(nic->pdev, 0x42, &msi_ctrl); - msi_ctrl |= 0x10; - pci_write_config_word(nic->pdev, 0x42, msi_ctrl); - - /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */ - tx_mat = readq(&bar0->tx_mat0_n[0]); - for (i=0; i<config->tx_fifo_num; i++) { - tx_mat |= TX_MAT_SET(i, 1); - } - writeq(tx_mat, &bar0->tx_mat0_n[0]); - - rx_mat = readq(&bar0->rx_mat); - for (i=0; i<config->rx_ring_num; i++) { - rx_mat |= RX_MAT_SET(i, 1); - } - writeq(rx_mat, &bar0->rx_mat); - - dev->irq = nic->pdev->irq; - return 0; -} - static int s2io_enable_msi_x(struct s2io_nic *nic) { struct XENA_dev_config __iomem *bar0 = nic->bar0; @@ -3788,34 +3636,31 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) u16 msi_control; /* Temp variable */ int ret, i, j, msix_indx = 1; - nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry), + nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), GFP_KERNEL); - if (nic->entries == NULL) { + if (!nic->entries) { DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ __FUNCTION__); nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; return -ENOMEM; } - nic->mac_control.stats_info->sw_stat.mem_allocated + nic->mac_control.stats_info->sw_stat.mem_allocated += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); - memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); nic->s2io_entries = - kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry), + kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), GFP_KERNEL); - if (nic->s2io_entries == NULL) { - DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", + if (!nic->s2io_entries) { + DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__); nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; kfree(nic->entries); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); return -ENOMEM; } - nic->mac_control.stats_info->sw_stat.mem_allocated + nic->mac_control.stats_info->sw_stat.mem_allocated += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); - memset(nic->s2io_entries, 0, - MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); for (i=0; i< MAX_REQUESTED_MSI_X; i++) { nic->entries[i].entry = i; @@ -3833,27 +3678,15 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) } writeq(tx_mat, &bar0->tx_mat0_n[0]); - if (!nic->config.bimodal) { - rx_mat = readq(&bar0->rx_mat); - for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) { - rx_mat |= RX_MAT_SET(j, msix_indx); - nic->s2io_entries[msix_indx].arg - = &nic->mac_control.rings[j]; - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; - } - writeq(rx_mat, &bar0->rx_mat); - } else { - tx_mat = readq(&bar0->tx_mat0_n[7]); - for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) { - tx_mat |= TX_MAT_SET(i, msix_indx); - nic->s2io_entries[msix_indx].arg - = &nic->mac_control.rings[j]; - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; - nic->s2io_entries[msix_indx].in_use = 
MSIX_FLG; - } - writeq(tx_mat, &bar0->tx_mat0_n[7]); + rx_mat = readq(&bar0->rx_mat); + for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { + rx_mat |= RX_MAT_SET(j, msix_indx); + nic->s2io_entries[msix_indx].arg + = &nic->mac_control.rings[j]; + nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; + nic->s2io_entries[msix_indx].in_use = MSIX_FLG; } + writeq(rx_mat, &bar0->rx_mat); nic->avail_msix_vectors = 0; ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); @@ -3865,10 +3698,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) if (ret) { DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); kfree(nic->entries); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); kfree(nic->s2io_entries); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); nic->entries = NULL; nic->s2io_entries = NULL; @@ -3889,6 +3722,59 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) return 0; } +/* Handle software interrupt used during MSI(X) test */ +static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id) +{ + struct s2io_nic *sp = dev_id; + + sp->msi_detected = 1; + wake_up(&sp->msi_wait); + + return IRQ_HANDLED; +} + +/* Test interrupt path by forcing a a software IRQ */ +static int __devinit s2io_test_msi(struct s2io_nic *sp) +{ + struct pci_dev *pdev = sp->pdev; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + int err; + u64 val64, saved64; + + err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, + sp->name, sp); + if (err) { + DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n", + sp->dev->name, pci_name(pdev), pdev->irq); + return err; + } + + init_waitqueue_head (&sp->msi_wait); + sp->msi_detected = 0; + + saved64 = val64 = readq(&bar0->scheduled_int_ctrl); + val64 |= SCHED_INT_CTRL_ONE_SHOT; + val64 |= SCHED_INT_CTRL_TIMER_EN; + val64 |= SCHED_INT_CTRL_INT2MSI(1); + writeq(val64, &bar0->scheduled_int_ctrl); + + wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10); + + if (!sp->msi_detected) { + /* MSI(X) test failed, go back to INTx mode */ + DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated" + "using MSI(X) during test\n", sp->dev->name, + pci_name(pdev)); + + err = -EOPNOTSUPP; + } + + free_irq(sp->entries[1].vector, sp); + + writeq(saved64, &bar0->scheduled_int_ctrl); + + return err; +} /* ********************************************************* * * Functions defined below concern the OS part of the driver * * ********************************************************* */ @@ -3917,6 +3803,50 @@ static int s2io_open(struct net_device *dev) netif_carrier_off(dev); sp->last_link_state = 0; + napi_enable(&sp->napi); + + if (sp->config.intr_type == MSI_X) { + int ret = s2io_enable_msi_x(sp); + + if (!ret) { + u16 msi_control; + + ret = s2io_test_msi(sp); + + /* rollback MSI-X, will re-enable during add_isr() */ + kfree(sp->entries); + sp->mac_control.stats_info->sw_stat.mem_freed += + (MAX_REQUESTED_MSI_X * + sizeof(struct msix_entry)); + kfree(sp->s2io_entries); + sp->mac_control.stats_info->sw_stat.mem_freed += + (MAX_REQUESTED_MSI_X * + sizeof(struct s2io_msix_entry)); + sp->entries = NULL; + sp->s2io_entries = NULL; + + pci_read_config_word(sp->pdev, 0x42, &msi_control); + msi_control &= 0xFFFE; /* Disable MSI */ + pci_write_config_word(sp->pdev, 0x42, msi_control); + + pci_disable_msix(sp->pdev); + + } + if (ret) { + + 
DBG_PRINT(ERR_DBG, + "%s: MSI-X requested but failed to enable\n", + dev->name); + sp->config.intr_type = INTA; + } + } + + /* NAPI doesn't work well with MSI(X) */ + if (sp->config.intr_type != INTA) { + if(sp->config.napi) + sp->config.napi = 0; + } + /* Initialize H/W and enable interrupts */ err = s2io_card_up(sp); if (err) { @@ -3925,7 +3855,7 @@ static int s2io_open(struct net_device *dev) goto hw_init_failed; } - if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { + if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) { DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); s2io_card_down(sp); err = -ENODEV; @@ -3936,15 +3866,16 @@ static int s2io_open(struct net_device *dev) return 0; hw_init_failed: - if (sp->intr_type == MSI_X) { + napi_disable(&sp->napi); + if (sp->config.intr_type == MSI_X) { if (sp->entries) { kfree(sp->entries); - sp->mac_control.stats_info->sw_stat.mem_freed + sp->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); } if (sp->s2io_entries) { kfree(sp->s2io_entries); - sp->mac_control.stats_info->sw_stat.mem_freed + sp->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); } } @@ -3969,6 +3900,7 @@ static int s2io_close(struct net_device *dev) struct s2io_nic *sp = dev->priv; netif_stop_queue(dev); + napi_disable(&sp->napi); /* Reset card, kill tasklet and free Tx and Rx buffers. */ s2io_card_down(sp); @@ -4001,6 +3933,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) struct mac_info *mac_control; struct config_param *config; int offload_type; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; mac_control = &sp->mac_control; config = &sp->config; @@ -4014,7 +3947,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) } spin_lock_irqsave(&sp->tx_lock, flags); - if (atomic_read(&sp->card_state) == CARD_DOWN) { + if (!is_s2io_card_up(sp)) { DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", dev->name); spin_unlock_irqrestore(&sp->tx_lock, flags); @@ -4085,11 +4018,18 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Buffer_Pointer = pci_map_single(sp->pdev, sp->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); + if((txdp->Buffer_Pointer == 0) || + (txdp->Buffer_Pointer == DMA_ERROR_CODE)) + goto pci_map_failed; txdp++; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); + if((txdp->Buffer_Pointer == 0) || + (txdp->Buffer_Pointer == DMA_ERROR_CODE)) + goto pci_map_failed; + txdp->Host_Control = (unsigned long) skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); if (offload_type == SKB_GSO_UDP) @@ -4146,14 +4086,22 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&sp->tx_lock, flags); return 0; +pci_map_failed: + stats->pci_map_fail_cnt++; + netif_stop_queue(dev); + stats->mem_freed += skb->truesize; + dev_kfree_skb(skb); + spin_unlock_irqrestore(&sp->tx_lock, flags); + return 0; } static void s2io_alarm_handle(unsigned long data) { struct s2io_nic *sp = (struct s2io_nic *)data; + struct net_device *dev = sp->dev; - alarm_intr_handler(sp); + s2io_handle_errors(dev); mod_timer(&sp->alarm_timer, jiffies + HZ / 2); } @@ -4186,50 +4134,17 @@ static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) return 0; } -static irqreturn_t s2io_msi_handle(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct s2io_nic *sp = dev->priv; - int i; - struct mac_info *mac_control; - struct config_param 
*config; - - atomic_inc(&sp->isr_cnt); - mac_control = &sp->mac_control; - config = &sp->config; - DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__); - - /* If Intr is because of Rx Traffic */ - for (i = 0; i < config->rx_ring_num; i++) - rx_intr_handler(&mac_control->rings[i]); - - /* If Intr is because of Tx Traffic */ - for (i = 0; i < config->tx_fifo_num; i++) - tx_intr_handler(&mac_control->fifos[i]); - - /* - * If the Rx buffer count is below the panic threshold then - * reallocate the buffers from the interrupt handler itself, - * else schedule a tasklet to reallocate the buffers. - */ - for (i = 0; i < config->rx_ring_num; i++) - s2io_chk_rx_buffers(sp, i); - - atomic_dec(&sp->isr_cnt); - return IRQ_HANDLED; -} - static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) { struct ring_info *ring = (struct ring_info *)dev_id; struct s2io_nic *sp = ring->nic; - atomic_inc(&sp->isr_cnt); + if (!is_s2io_card_up(sp)) + return IRQ_HANDLED; rx_intr_handler(ring); s2io_chk_rx_buffers(sp, ring->ring_no); - atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; } @@ -4238,9 +4153,10 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) struct fifo_info *fifo = (struct fifo_info *)dev_id; struct s2io_nic *sp = fifo->nic; - atomic_inc(&sp->isr_cnt); + if (!is_s2io_card_up(sp)) + return IRQ_HANDLED; + tx_intr_handler(fifo); - atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; } static void s2io_txpic_intr_handle(struct s2io_nic *sp) @@ -4305,6 +4221,292 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp) } /** + * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter + * @value: alarm bits + * @addr: address value + * @cnt: counter variable + * Description: Check for alarm and increment the counter + * Return Value: + * 1 - if alarm bit set + * 0 - if alarm bit is not set + */ +static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr, + unsigned long long *cnt) +{ + u64 val64; + val64 = readq(addr); + if ( val64 & value ) { + writeq(val64, addr); + (*cnt)++; + return 1; + } + return 0; + +} + +/** + * s2io_handle_errors - Xframe error indication handler + * @nic: device private variable + * Description: Handle alarms such as loss of link, single or + * double ECC errors, critical and serious errors. + * Return Value: + * NONE + */ +static void s2io_handle_errors(void * dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct s2io_nic *sp = dev->priv; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 temp64 = 0,val64=0; + int i = 0; + + struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; + struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat; + + if (!is_s2io_card_up(sp)) + return; + + if (pci_channel_offline(sp->pdev)) + return; + + memset(&sw_stat->ring_full_cnt, 0, + sizeof(sw_stat->ring_full_cnt)); + + /* Handling the XPAK counters update */ + if(stats->xpak_timer_count < 72000) { + /* waiting for an hour */ + stats->xpak_timer_count++; + } else { + s2io_updt_xpak_counter(dev); + /* reset the count to zero */ + stats->xpak_timer_count = 0; + } + + /* Handling link status change error Intr */ + if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) { + val64 = readq(&bar0->mac_rmac_err_reg); + writeq(val64, &bar0->mac_rmac_err_reg); + if (val64 & RMAC_LINK_STATE_CHANGE_INT) + schedule_work(&sp->set_link_task); + } + + /* In case of a serious error, the device will be Reset. 
*/ + if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, + &sw_stat->serious_err_cnt)) + goto reset; + + /* Check for data parity error */ + if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, + &sw_stat->parity_err_cnt)) + goto reset; + + /* Check for ring full counter */ + if (sp->device_type == XFRAME_II_DEVICE) { + val64 = readq(&bar0->ring_bump_counter1); + for (i=0; i<4; i++) { + temp64 = ( val64 & vBIT(0xFFFF,(i*16),16)); + temp64 >>= 64 - ((i+1)*16); + sw_stat->ring_full_cnt[i] += temp64; + } + + val64 = readq(&bar0->ring_bump_counter2); + for (i=0; i<4; i++) { + temp64 = ( val64 & vBIT(0xFFFF,(i*16),16)); + temp64 >>= 64 - ((i+1)*16); + sw_stat->ring_full_cnt[i+4] += temp64; + } + } + + val64 = readq(&bar0->txdma_int_status); + /*check for pfc_err*/ + if (val64 & TXDMA_PFC_INT) { + if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM| + PFC_MISC_0_ERR | PFC_MISC_1_ERR| + PFC_PCIX_ERR, &bar0->pfc_err_reg, + &sw_stat->pfc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg, + &sw_stat->pfc_err_cnt); + } + + /*check for tda_err*/ + if (val64 & TXDMA_TDA_INT) { + if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | + TDA_SM1_ERR_ALARM, &bar0->tda_err_reg, + &sw_stat->tda_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR, + &bar0->tda_err_reg, &sw_stat->tda_err_cnt); + } + /*check for pcc_err*/ + if (val64 & TXDMA_PCC_INT) { + if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM + | PCC_N_SERR | PCC_6_COF_OV_ERR + | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR + | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR + | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg, + &sw_stat->pcc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR, + &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt); + } + + /*check for tti_err*/ + if (val64 & TXDMA_TTI_INT) { + if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg, + &sw_stat->tti_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR, + &bar0->tti_err_reg, &sw_stat->tti_err_cnt); + } + + /*check for lso_err*/ + if (val64 & TXDMA_LSO_INT) { + if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT + | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM, + &bar0->lso_err_reg, &sw_stat->lso_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, + &bar0->lso_err_reg, &sw_stat->lso_err_cnt); + } + + /*check for tpa_err*/ + if (val64 & TXDMA_TPA_INT) { + if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg, + &sw_stat->tpa_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg, + &sw_stat->tpa_err_cnt); + } + + /*check for sm_err*/ + if (val64 & TXDMA_SM_INT) { + if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg, + &sw_stat->sm_err_cnt)) + goto reset; + } + + val64 = readq(&bar0->mac_int_status); + if (val64 & MAC_INT_STATUS_TMAC_INT) { + if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR, + &bar0->mac_tmac_err_reg, + &sw_stat->mac_tmac_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR + | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, + &bar0->mac_tmac_err_reg, + &sw_stat->mac_tmac_err_cnt); + } + + val64 = readq(&bar0->xgxs_int_status); + if (val64 & XGXS_INT_STATUS_TXGXS) { + if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR, + &bar0->xgxs_txgxs_err_reg, + &sw_stat->xgxs_txgxs_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, + &bar0->xgxs_txgxs_err_reg, + 
&sw_stat->xgxs_txgxs_err_cnt); + } + + val64 = readq(&bar0->rxdma_int_status); + if (val64 & RXDMA_INT_RC_INT_M) { + if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR + | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM, + &bar0->rc_err_reg, &sw_stat->rc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR + | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, + &sw_stat->rc_err_cnt); + if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn + | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg, + &sw_stat->prc_pcix_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn + | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg, + &sw_stat->prc_pcix_err_cnt); + } + + if (val64 & RXDMA_INT_RPA_INT_M) { + if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR, + &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, + &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt); + } + + if (val64 & RXDMA_INT_RDA_INT_M) { + if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR + | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM + | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR, + &bar0->rda_err_reg, &sw_stat->rda_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR + | RDA_MISC_ERR | RDA_PCIX_ERR, + &bar0->rda_err_reg, &sw_stat->rda_err_cnt); + } + + if (val64 & RXDMA_INT_RTI_INT_M) { + if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg, + &sw_stat->rti_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, + &bar0->rti_err_reg, &sw_stat->rti_err_cnt); + } + + val64 = readq(&bar0->mac_int_status); + if (val64 & MAC_INT_STATUS_RMAC_INT) { + if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR, + &bar0->mac_rmac_err_reg, + &sw_stat->mac_rmac_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR| + RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg, + &sw_stat->mac_rmac_err_cnt); + } + + val64 = readq(&bar0->xgxs_int_status); + if (val64 & XGXS_INT_STATUS_RXGXS) { + if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, + &bar0->xgxs_rxgxs_err_reg, + &sw_stat->xgxs_rxgxs_err_cnt)) + goto reset; + } + + val64 = readq(&bar0->mc_int_status); + if(val64 & MC_INT_STATUS_MC_INT) { + if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg, + &sw_stat->mc_err_cnt)) + goto reset; + + /* Handling Ecc errors */ + if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) { + writeq(val64, &bar0->mc_err_reg); + if (val64 & MC_ERR_REG_ECC_ALL_DBL) { + sw_stat->double_ecc_errs++; + if (sp->device_type != XFRAME_II_DEVICE) { + /* + * Reset XframeI only if critical error + */ + if (val64 & + (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | + MC_ERR_REG_MIRI_ECC_DB_ERR_1)) + goto reset; + } + } else + sw_stat->single_ecc_errs++; + } + } + return; + +reset: + netif_stop_queue(dev); + schedule_work(&sp->rst_timer_task); + sw_stat->soft_reset_cnt++; + return; +} + +/** * s2io_isr - ISR handler of the device . * @irq: the irq of the device. * @dev_id: a void pointer to the dev structure of the NIC. @@ -4331,7 +4533,9 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) if (pci_channel_offline(sp->pdev)) return IRQ_NONE; - atomic_inc(&sp->isr_cnt); + if (!is_s2io_card_up(sp)) + return IRQ_NONE; + mac_control = &sp->mac_control; config = &sp->config; @@ -4341,73 +4545,75 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) * 1. Rx of packet. * 2. Tx complete. * 3. Link down. - * 4. Error in any functional blocks of the NIC. 
*/ reason = readq(&bar0->general_int_status); - if (!reason) { - /* The interrupt was not raised by us. */ - atomic_dec(&sp->isr_cnt); - return IRQ_NONE; - } - else if (unlikely(reason == S2IO_MINUS_ONE) ) { - /* Disable device and get out */ - atomic_dec(&sp->isr_cnt); - return IRQ_NONE; + if (unlikely(reason == S2IO_MINUS_ONE) ) { + /* Nothing much can be done. Get out */ + return IRQ_HANDLED; } - if (napi) { - if (reason & GEN_INTR_RXTRAFFIC) { - if ( likely ( netif_rx_schedule_prep(dev)) ) { - __netif_rx_schedule(dev); - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); + if (reason & (GEN_INTR_RXTRAFFIC | + GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) + { + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); + + if (config->napi) { + if (reason & GEN_INTR_RXTRAFFIC) { + if (likely(netif_rx_schedule_prep(dev, + &sp->napi))) { + __netif_rx_schedule(dev, &sp->napi); + writeq(S2IO_MINUS_ONE, + &bar0->rx_traffic_mask); + } else + writeq(S2IO_MINUS_ONE, + &bar0->rx_traffic_int); } - else + } else { + /* + * rx_traffic_int reg is an R1 register, writing all 1's + * will ensure that the actual interrupt causing bit + * get's cleared and hence a read can be avoided. + */ + if (reason & GEN_INTR_RXTRAFFIC) writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + + for (i = 0; i < config->rx_ring_num; i++) + rx_intr_handler(&mac_control->rings[i]); } - } else { + /* - * Rx handler is called by default, without checking for the - * cause of interrupt. - * rx_traffic_int reg is an R1 register, writing all 1's + * tx_traffic_int reg is an R1 register, writing all 1's * will ensure that the actual interrupt causing bit get's * cleared and hence a read can be avoided. */ - if (reason & GEN_INTR_RXTRAFFIC) - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + if (reason & GEN_INTR_TXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); - for (i = 0; i < config->rx_ring_num; i++) { - rx_intr_handler(&mac_control->rings[i]); - } - } + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); - /* - * tx_traffic_int reg is an R1 register, writing all 1's - * will ensure that the actual interrupt causing bit get's - * cleared and hence a read can be avoided. - */ - if (reason & GEN_INTR_TXTRAFFIC) - writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); + if (reason & GEN_INTR_TXPIC) + s2io_txpic_intr_handle(sp); - for (i = 0; i < config->tx_fifo_num; i++) - tx_intr_handler(&mac_control->fifos[i]); + /* + * Reallocate the buffers from the interrupt handler itself. + */ + if (!config->napi) { + for (i = 0; i < config->rx_ring_num; i++) + s2io_chk_rx_buffers(sp, i); + } + writeq(sp->general_int_mask, &bar0->general_int_mask); + readl(&bar0->general_int_status); - if (reason & GEN_INTR_TXPIC) - s2io_txpic_intr_handle(sp); - /* - * If the Rx buffer count is below the panic threshold then - * reallocate the buffers from the interrupt handler itself, - * else schedule a tasklet to reallocate the buffers. 
- */ - if (!napi) { - for (i = 0; i < config->rx_ring_num; i++) - s2io_chk_rx_buffers(sp, i); - } + return IRQ_HANDLED; - writeq(0, &bar0->general_int_mask); - readl(&bar0->general_int_status); + } + else if (!reason) { + /* The interrupt was not raised by us */ + return IRQ_NONE; + } - atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; } @@ -4420,7 +4626,7 @@ static void s2io_updt_stats(struct s2io_nic *sp) u64 val64; int cnt = 0; - if (atomic_read(&sp->card_state) == CARD_UP) { + if (is_s2io_card_up(sp)) { /* Apprx 30us on a 133 MHz bus */ val64 = SET_UPDT_CLICKS(10) | STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN; @@ -4434,7 +4640,7 @@ static void s2io_updt_stats(struct s2io_nic *sp) if (cnt == 5) break; /* Updt failed */ } while(1); - } + } } /** @@ -4651,8 +4857,48 @@ static void s2io_set_multicast(struct net_device *dev) } } +/* add unicast MAC address to CAM */ +static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off) +{ + u64 val64; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr), + &bar0->rmac_addr_data0_mem); + + val64 = + RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(off); + writeq(val64, &bar0->rmac_addr_cmd_mem); + + /* Wait till command completes */ + if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET)) { + DBG_PRINT(INFO_DBG, "add_mac_addr failed\n"); + return FAILURE; + } + return SUCCESS; +} + /** - * s2io_set_mac_addr - Programs the Xframe mac address + * s2io_set_mac_addr driver entry point + */ +static int s2io_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + /* store the MAC address in CAM */ + return (do_s2io_prog_unicast(dev, dev->dev_addr)); +} + +/** + * do_s2io_prog_unicast - Programs the Xframe mac address * @dev : pointer to the device structure. * @addr: a uchar pointer to the new mac address which is to be set. * Description : This procedure will program the Xframe to receive @@ -4660,56 +4906,31 @@ static void s2io_set_multicast(struct net_device *dev) * Return value: SUCCESS on success and an appropriate (-)ve integer * as defined in errno.h file on failure. */ - -static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) +static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) { struct s2io_nic *sp = dev->priv; - struct XENA_dev_config __iomem *bar0 = sp->bar0; - register u64 val64, mac_addr = 0; + register u64 mac_addr = 0, perm_addr = 0; int i; - u64 old_mac_addr = 0; /* - * Set the new MAC address as the new unicast filter and reflect this - * change on the device address registered with the OS. It will be - * at offset 0. - */ + * Set the new MAC address as the new unicast filter and reflect this + * change on the device address registered with the OS. It will be + * at offset 0. 
+ */ for (i = 0; i < ETH_ALEN; i++) { mac_addr <<= 8; mac_addr |= addr[i]; - old_mac_addr <<= 8; - old_mac_addr |= sp->def_mac_addr[0].mac_addr[i]; + perm_addr <<= 8; + perm_addr |= sp->def_mac_addr[0].mac_addr[i]; } - if(0 == mac_addr) + /* check if the dev_addr is different than perm_addr */ + if (mac_addr == perm_addr) return SUCCESS; /* Update the internal structure with this new mac address */ - if(mac_addr != old_mac_addr) { - memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN)); - sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr); - sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8); - sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16); - sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24); - sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32); - sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40); - } - - writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), - &bar0->rmac_addr_data0_mem); - - val64 = - RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - RMAC_ADDR_CMD_MEM_OFFSET(0); - writeq(val64, &bar0->rmac_addr_cmd_mem); - /* Wait till command completes */ - if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, - RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) { - DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name); - return FAILURE; - } - - return SUCCESS; + do_s2io_copy_mac_addr(sp, 0, mac_addr); + return (do_s2io_add_unicast(sp, mac_addr, 0)); } /** @@ -4757,7 +4978,9 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); info->port = PORT_FIBRE; - /* info->transceiver?? TODO */ + + /* info->transceiver */ + info->transceiver = XCVR_EXTERNAL; if (netif_carrier_ok(sp->dev)) { info->speed = 10000; @@ -4794,12 +5017,6 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); info->regdump_len = XENA_REG_SPACE; info->eedump_len = XENA_EEPROM_SPACE; - info->testinfo_len = S2IO_TEST_LEN; - - if (sp->device_type == XFRAME_I_DEVICE) - info->n_stats = XFRAME_I_STAT_LEN; - else - info->n_stats = XFRAME_II_STAT_LEN; } /** @@ -4927,19 +5144,17 @@ static void s2io_ethtool_gringparam(struct net_device *dev, ering->rx_max_pending = MAX_RX_DESC_1; else if (sp->rxd_mode == RXD_MODE_3B) ering->rx_max_pending = MAX_RX_DESC_2; - else if (sp->rxd_mode == RXD_MODE_3A) - ering->rx_max_pending = MAX_RX_DESC_3; ering->tx_max_pending = MAX_TX_DESC; - for (i = 0 ; i < sp->config.tx_fifo_num ; i++) { + for (i = 0 ; i < sp->config.tx_fifo_num ; i++) tx_desc_count += sp->config.tx_cfg[i].fifo_len; - } + DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds); ering->tx_pending = tx_desc_count; rx_desc_count = 0; - for (i = 0 ; i < sp->config.rx_ring_num ; i++) { + for (i = 0 ; i < sp->config.rx_ring_num ; i++) rx_desc_count += sp->config.rx_cfg[i].num_rxd; - } + ering->rx_pending = rx_desc_count; ering->rx_mini_max_pending = 0; @@ -5695,7 +5910,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 * tmp_stats) { - int i = 0; + int i = 0, k; struct s2io_nic *sp = dev->priv; struct stat_block *stat_info = sp->mac_control.stats_info; @@ -5890,7 +6105,8 @@ static void s2io_get_ethtool_stats(struct net_device *dev, tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt; tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt; tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt; - tmp_stats[i++] = 
stat_info->sw_stat.ring_full_cnt; + for (k = 0; k < MAX_RX_RINGS; k++) + tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k]; tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high; tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low; tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high; @@ -5923,6 +6139,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, else tmp_stats[i++] = 0; tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt; + tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt; tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt; tmp_stats[i++] = stat_info->sw_stat.mem_allocated; tmp_stats[i++] = stat_info->sw_stat.mem_freed; @@ -5946,6 +6163,23 @@ static void s2io_get_ethtool_stats(struct net_device *dev, tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt; tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt; tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt; } static int s2io_ethtool_get_regs_len(struct net_device *dev) @@ -5978,9 +6212,25 @@ static int s2io_get_eeprom_len(struct net_device *dev) return (XENA_EEPROM_SPACE); } -static int s2io_ethtool_self_test_count(struct net_device *dev) +static int s2io_get_sset_count(struct net_device *dev, int sset) { - return (S2IO_TEST_LEN); + struct s2io_nic *sp = dev->priv; + + switch (sset) { + case ETH_SS_TEST: + return S2IO_TEST_LEN; + case ETH_SS_STATS: + switch(sp->device_type) { + case XFRAME_I_DEVICE: + return XFRAME_I_STAT_LEN; + case XFRAME_II_DEVICE: + return XFRAME_II_STAT_LEN; + default: + return 0; + } + default: + return -EOPNOTSUPP; + } } static void s2io_ethtool_get_strings(struct net_device *dev, @@ -6007,22 +6257,6 @@ static void s2io_ethtool_get_strings(struct net_device *dev, sizeof(ethtool_driver_stats_keys)); } } -static int s2io_ethtool_get_stats_count(struct net_device *dev) -{ - struct s2io_nic *sp = dev->priv; - int stat_count = 0; - switch(sp->device_type) { - case XFRAME_I_DEVICE: - stat_count = XFRAME_I_STAT_LEN; - break; - - case XFRAME_II_DEVICE: - stat_count = XFRAME_II_STAT_LEN; - break; - } - - return stat_count; -} static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) { @@ -6063,20 +6297,16 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_pauseparam = s2io_ethtool_setpause_data, .get_rx_csum = s2io_ethtool_get_rx_csum, .set_rx_csum = s2io_ethtool_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = s2io_ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_tso = s2io_ethtool_op_get_tso, .set_tso = s2io_ethtool_op_set_tso, - .get_ufo = ethtool_op_get_ufo, .set_ufo = 
ethtool_op_set_ufo, - .self_test_count = s2io_ethtool_self_test_count, .self_test = s2io_ethtool_test, .get_strings = s2io_ethtool_get_strings, .phys_id = s2io_ethtool_idnic, - .get_stats_count = s2io_ethtool_get_stats_count, - .get_ethtool_stats = s2io_get_ethtool_stats + .get_ethtool_stats = s2io_get_ethtool_stats, + .get_sset_count = s2io_get_sset_count, }; /** @@ -6199,7 +6429,7 @@ static void s2io_set_link(struct work_struct *work) if (!netif_running(dev)) goto out_unlock; - if (test_and_set_bit(0, &(nic->link_state))) { + if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) { /* The card is being reset, no point doing anything */ goto out_unlock; } @@ -6237,13 +6467,10 @@ static void s2io_set_link(struct work_struct *work) netif_stop_queue(dev); } } - val64 = readq(&bar0->adapter_status); - if (!LINK_IS_UP(val64)) { - DBG_PRINT(ERR_DBG, "%s:", dev->name); - DBG_PRINT(ERR_DBG, " Link down after enabling "); - DBG_PRINT(ERR_DBG, "device \n"); - } else - s2io_link(nic, LINK_UP); + val64 = readq(&bar0->adapter_control); + val64 |= ADAPTER_LED_ON; + writeq(val64, &bar0->adapter_control); + s2io_link(nic, LINK_UP); } else { if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, subid)) { @@ -6252,9 +6479,13 @@ static void s2io_set_link(struct work_struct *work) writeq(val64, &bar0->gpio_control); val64 = readq(&bar0->gpio_control); } + /* turn off LED */ + val64 = readq(&bar0->adapter_control); + val64 = val64 &(~ADAPTER_LED_ON); + writeq(val64, &bar0->adapter_control); s2io_link(nic, LINK_DOWN); } - clear_bit(0, &(nic->link_state)); + clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state)); out_unlock: rtnl_unlock(); @@ -6266,9 +6497,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, u64 *temp2, int size) { struct net_device *dev = sp->dev; - struct sk_buff *frag_list; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { + struct RxD1 *rxdp1 = (struct RxD1 *)rxdp; /* allocate skb */ if (*skb) { DBG_PRINT(INFO_DBG, "SKB is not NULL\n"); @@ -6277,7 +6509,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, * using same mapped address for the Rxd * buffer pointer */ - ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0; + rxdp1->Buffer0_ptr = *temp0; } else { *skb = dev_alloc_skb(size); if (!(*skb)) { @@ -6288,24 +6520,29 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, mem_alloc_fail_cnt++; return -ENOMEM ; } - sp->mac_control.stats_info->sw_stat.mem_allocated + sp->mac_control.stats_info->sw_stat.mem_allocated += (*skb)->truesize; /* storing the mapped addr in a temp variable * such it will be used for next rxd whose * Host Control is NULL */ - ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 = + rxdp1->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, (*skb)->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); + if( (rxdp1->Buffer0_ptr == 0) || + (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) { + goto memalloc_failed; + } rxdp->Host_Control = (unsigned long) (*skb); } } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { + struct RxD3 *rxdp3 = (struct RxD3 *)rxdp; /* Two buffer Mode */ if (*skb) { - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2; - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0; - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1; + rxdp3->Buffer2_ptr = *temp2; + rxdp3->Buffer0_ptr = *temp0; + rxdp3->Buffer1_ptr = *temp1; } else { *skb = dev_alloc_skb(size); if (!(*skb)) { @@ -6316,75 +6553,52 @@ static int 
set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, mem_alloc_fail_cnt++; return -ENOMEM; } - sp->mac_control.stats_info->sw_stat.mem_allocated + sp->mac_control.stats_info->sw_stat.mem_allocated += (*skb)->truesize; - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 = + rxdp3->Buffer2_ptr = *temp2 = pci_map_single(sp->pdev, (*skb)->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 = + if( (rxdp3->Buffer2_ptr == 0) || + (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) { + goto memalloc_failed; + } + rxdp3->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); + if( (rxdp3->Buffer0_ptr == 0) || + (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) { + pci_unmap_single (sp->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, PCI_DMA_FROMDEVICE); + goto memalloc_failed; + } rxdp->Host_Control = (unsigned long) (*skb); /* Buffer-1 will be dummy buffer not used */ - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 = + rxdp3->Buffer1_ptr = *temp1 = pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); - } - } else if ((rxdp->Host_Control == 0)) { - /* Three buffer mode */ - if (*skb) { - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0; - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1; - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2; - } else { - *skb = dev_alloc_skb(size); - if (!(*skb)) { - DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); - DBG_PRINT(INFO_DBG, "memory to allocate "); - DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n"); - sp->mac_control.stats_info->sw_stat. \ - mem_alloc_fail_cnt++; - return -ENOMEM; - } - sp->mac_control.stats_info->sw_stat.mem_allocated - += (*skb)->truesize; - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 = - pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, - PCI_DMA_FROMDEVICE); - /* Buffer-1 receives L3/L4 headers */ - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 = - pci_map_single( sp->pdev, (*skb)->data, - l3l4hdr_size + 4, PCI_DMA_FROMDEVICE); - /* - * skb_shinfo(skb)->frag_list will have L4 - * data payload - */ - skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu + - ALIGN_SIZE); - if (skb_shinfo(*skb)->frag_list == NULL) { - DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \ - failed\n ", dev->name); - sp->mac_control.stats_info->sw_stat. 
\ - mem_alloc_fail_cnt++; - return -ENOMEM ; + if( (rxdp3->Buffer1_ptr == 0) || + (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { + pci_unmap_single (sp->pdev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); + pci_unmap_single (sp->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, PCI_DMA_FROMDEVICE); + goto memalloc_failed; } - frag_list = skb_shinfo(*skb)->frag_list; - frag_list->next = NULL; - sp->mac_control.stats_info->sw_stat.mem_allocated - += frag_list->truesize; - /* - * Buffer-2 receives L4 data payload - */ - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 = - pci_map_single( sp->pdev, frag_list->data, - dev->mtu, PCI_DMA_FROMDEVICE); } } return 0; + memalloc_failed: + stats->pci_map_fail_cnt++; + stats->mem_freed += (*skb)->truesize; + dev_kfree_skb(*skb); + return -ENOMEM; } + static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, int size) { @@ -6395,10 +6609,6 @@ static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4); - } else { - rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); - rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); - rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu); } } @@ -6420,8 +6630,6 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) size += NET_IP_ALIGN; else if (sp->rxd_mode == RXD_MODE_3B) size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; - else - size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; for (i = 0; i < config->rx_ring_num; i++) { blk_cnt = config->rx_cfg[i].num_rxd / @@ -6431,7 +6639,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { rxdp = mac_control->rings[i]. 
rx_blocks[j].rxds[k].virt_addr; - if(sp->rxd_mode >= RXD_MODE_3A) + if(sp->rxd_mode == RXD_MODE_3B) ba = &mac_control->rings[i].ba[j][k]; if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,(u64 *)&temp0_64, @@ -6458,30 +6666,18 @@ static int s2io_add_isr(struct s2io_nic * sp) struct net_device *dev = sp->dev; int err = 0; - if (sp->intr_type == MSI) - ret = s2io_enable_msi(sp); - else if (sp->intr_type == MSI_X) + if (sp->config.intr_type == MSI_X) ret = s2io_enable_msi_x(sp); if (ret) { DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); - sp->intr_type = INTA; + sp->config.intr_type = INTA; } /* Store the values of the MSIX table in the struct s2io_nic structure */ store_xmsi_data(sp); /* After proper initialization of H/W, register ISR */ - if (sp->intr_type == MSI) { - err = request_irq((int) sp->pdev->irq, s2io_msi_handle, - IRQF_SHARED, sp->name, dev); - if (err) { - pci_disable_msi(sp->pdev); - DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n", - dev->name); - return -1; - } - } - if (sp->intr_type == MSI_X) { + if (sp->config.intr_type == MSI_X) { int i, msix_tx_cnt=0,msix_rx_cnt=0; for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { @@ -6533,7 +6729,7 @@ static int s2io_add_isr(struct s2io_nic * sp) printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt); printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt); } - if (sp->intr_type == INTA) { + if (sp->config.intr_type == INTA) { err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, sp->name, dev); if (err) { @@ -6546,10 +6742,10 @@ static int s2io_add_isr(struct s2io_nic * sp) } static void s2io_rem_isr(struct s2io_nic * sp) { - int cnt = 0; struct net_device *dev = sp->dev; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; - if (sp->intr_type == MSI_X) { + if (sp->config.intr_type == MSI_X) { int i; u16 msi_control; @@ -6558,32 +6754,28 @@ static void s2io_rem_isr(struct s2io_nic * sp) int vector = sp->entries[i].vector; void *arg = sp->s2io_entries[i].arg; + synchronize_irq(vector); free_irq(vector, arg); } + + kfree(sp->entries); + stats->mem_freed += + (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + kfree(sp->s2io_entries); + stats->mem_freed += + (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); + sp->entries = NULL; + sp->s2io_entries = NULL; + pci_read_config_word(sp->pdev, 0x42, &msi_control); msi_control &= 0xFFFE; /* Disable MSI */ pci_write_config_word(sp->pdev, 0x42, msi_control); pci_disable_msix(sp->pdev); } else { + synchronize_irq(sp->pdev->irq); free_irq(sp->pdev->irq, dev); - if (sp->intr_type == MSI) { - u16 val; - - pci_disable_msi(sp->pdev); - pci_read_config_word(sp->pdev, 0x4c, &val); - val ^= 0x1; - pci_write_config_word(sp->pdev, 0x4c, val); - } } - /* Waiting till all Interrupt handlers are complete */ - cnt = 0; - do { - msleep(10); - if (!atomic_read(&sp->isr_cnt)) - break; - cnt++; - } while(cnt < 5); } static void do_s2io_card_down(struct s2io_nic * sp, int do_io) @@ -6595,10 +6787,10 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) del_timer_sync(&sp->alarm_timer); /* If s2io_set_link task is executing, wait till it completes. 
*/ - while (test_and_set_bit(0, &(sp->link_state))) { + while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) { msleep(50); } - atomic_set(&sp->card_state, CARD_DOWN); + clear_bit(__S2IO_STATE_CARD_UP, &sp->state); /* disable Tx and Rx traffic on the NIC */ if (do_io) @@ -6649,7 +6841,7 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) free_rx_buffers(sp); spin_unlock_irqrestore(&sp->rx_lock, flags); - clear_bit(0, &(sp->link_state)); + clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); } static void s2io_card_down(struct s2io_nic * sp) @@ -6720,7 +6912,7 @@ static int s2io_card_up(struct s2io_nic * sp) /* Add interrupt service routine */ if (s2io_add_isr(sp) != 0) { - if (sp->intr_type == MSI_X) + if (sp->config.intr_type == MSI_X) s2io_rem_isr(sp); s2io_reset(sp); free_rx_buffers(sp); @@ -6733,17 +6925,16 @@ static int s2io_card_up(struct s2io_nic * sp) tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); /* Enable select interrupts */ - if (sp->intr_type != INTA) + en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); + if (sp->config.intr_type != INTA) en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); else { interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; - interruptible |= TX_PIC_INTR | RX_PIC_INTR; - interruptible |= TX_MAC_INTR | RX_MAC_INTR; + interruptible |= TX_PIC_INTR; en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); } - - atomic_set(&sp->card_state, CARD_UP); + set_bit(__S2IO_STATE_CARD_UP, &sp->state); return 0; } @@ -6897,7 +7088,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", dev->name, err_mask); sp->stats.rx_crc_errors++; - sp->mac_control.stats_info->sw_stat.mem_freed + sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; dev_kfree_skb(skb); atomic_dec(&sp->rx_bufs_left[ring_no]); @@ -6907,6 +7098,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) } /* Updating statistics */ + sp->stats.rx_packets++; rxdp->Host_Control = 0; if (sp->rxd_mode == RXD_MODE_1) { int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); @@ -6914,7 +7106,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) sp->stats.rx_bytes += len; skb_put(skb, len); - } else if (sp->rxd_mode >= RXD_MODE_3A) { + } else if (sp->rxd_mode == RXD_MODE_3B) { int get_block = ring_data->rx_curr_get_info.block_index; int get_off = ring_data->rx_curr_get_info.offset; int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); @@ -6924,18 +7116,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) struct buffAdd *ba = &ring_data->ba[get_block][get_off]; sp->stats.rx_bytes += buf0_len + buf2_len; memcpy(buff, ba->ba_0, buf0_len); - - if (sp->rxd_mode == RXD_MODE_3A) { - int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2); - - skb_put(skb, buf1_len); - skb->len += buf2_len; - skb->data_len += buf2_len; - skb_put(skb_shinfo(skb)->frag_list, buf2_len); - sp->stats.rx_bytes += buf1_len; - - } else - skb_put(skb, buf2_len); + skb_put(skb, buf2_len); } if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || @@ -6956,7 +7137,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) int ret = 0; ret = s2io_club_tcp_session(skb->data, &tcp, - &tcp_len, &lro, rxdp, sp); + &tcp_len, &lro, + rxdp, sp); switch (ret) { case 3: /* Begin anew */ lro->parent = skb; @@ -7061,13 +7243,13 @@ static void s2io_link(struct s2io_nic * sp, int link) DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); 
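A related clean-up visible in the hunks above: the atomic card_state/link_state variables give way to a single sp->state word manipulated with the generic bit operations (__S2IO_STATE_CARD_UP, __S2IO_STATE_LINK_TASK), so hot paths only need a cheap test_bit() via is_s2io_card_up(). A minimal sketch of the same pattern, using hypothetical names (my_adapter, MY_STATE_*) rather than the driver's:

#include <linux/bitops.h>
#include <linux/delay.h>

enum {
	MY_STATE_CARD_UP,	/* adapter initialised and passing traffic */
	MY_STATE_LINK_TASK,	/* link work or reset currently in progress */
};

struct my_adapter {
	unsigned long state;	/* one word of atomically updated flag bits */
};

static inline int my_card_is_up(struct my_adapter *ap)
{
	return test_bit(MY_STATE_CARD_UP, &ap->state);
}

static void my_card_down(struct my_adapter *ap)
{
	/* serialize against a link/reset task that already owns the bit */
	while (test_and_set_bit(MY_STATE_LINK_TASK, &ap->state))
		msleep(50);

	clear_bit(MY_STATE_CARD_UP, &ap->state);
	/* ... quiesce the hardware, free buffers ... */
	clear_bit(MY_STATE_LINK_TASK, &ap->state);
}

Interrupt and transmit paths then simply bail out when my_card_is_up() reports false, which is what the is_s2io_card_up() checks added throughout this patch do.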
netif_carrier_off(dev); if(sp->mac_control.stats_info->sw_stat.link_up_cnt) - sp->mac_control.stats_info->sw_stat.link_up_time = + sp->mac_control.stats_info->sw_stat.link_up_time = jiffies - sp->start_time; sp->mac_control.stats_info->sw_stat.link_down_cnt++; } else { DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); if (sp->mac_control.stats_info->sw_stat.link_down_cnt) - sp->mac_control.stats_info->sw_stat.link_down_time = + sp->mac_control.stats_info->sw_stat.link_down_time = jiffies - sp->start_time; sp->mac_control.stats_info->sw_stat.link_up_cnt++; netif_carrier_on(dev); @@ -7124,19 +7306,12 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) if (*dev_intr_type != INTA) napi = 0; -#ifndef CONFIG_PCI_MSI - if (*dev_intr_type != INTA) { - DBG_PRINT(ERR_DBG, "s2io: This kernel does not support" - "MSI/MSI-X. Defaulting to INTA\n"); - *dev_intr_type = INTA; - } -#else - if (*dev_intr_type > MSI_X) { + if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " "Defaulting to INTA\n"); *dev_intr_type = INTA; } -#endif + if ((*dev_intr_type == MSI_X) && ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { @@ -7145,10 +7320,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) *dev_intr_type = INTA; } - if (rx_ring_mode > 3) { + if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) { DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); - DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); - rx_ring_mode = 3; + DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n"); + rx_ring_mode = 1; } return SUCCESS; } @@ -7213,6 +7388,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) struct config_param *config; int mode; u8 dev_intr_type = intr_type; + DECLARE_MAC_BUF(mac); if ((ret = s2io_verify_parm(pdev, &dev_intr_type))) return ret; @@ -7240,28 +7416,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) pci_disable_device(pdev); return -ENOMEM; } - if (dev_intr_type != MSI_X) { - if (pci_request_regions(pdev, s2io_driver_name)) { - DBG_PRINT(ERR_DBG, "Request Regions failed\n"); - pci_disable_device(pdev); - return -ENODEV; - } - } - else { - if (!(request_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0), s2io_driver_name))) { - DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n"); - pci_disable_device(pdev); - return -ENODEV; - } - if (!(request_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2), s2io_driver_name))) { - DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n"); - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - pci_disable_device(pdev); - return -ENODEV; - } + if ((ret = pci_request_regions(pdev, s2io_driver_name))) { + DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret); + pci_disable_device(pdev); + return -ENODEV; } dev = alloc_etherdev(sizeof(struct s2io_nic)); @@ -7274,7 +7432,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) pci_set_master(pdev); pci_set_drvdata(pdev, dev); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Private member variable initialized to s2io NIC structure */ @@ -7288,10 +7445,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) sp->rxd_mode = RXD_MODE_1; if (rx_ring_mode == 2) sp->rxd_mode = RXD_MODE_3B; - if (rx_ring_mode == 3) - sp->rxd_mode = RXD_MODE_3A; - sp->intr_type = dev_intr_type; + sp->config.intr_type = 
dev_intr_type; if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || (pdev->device == PCI_DEVICE_ID_HERC_UNI)) @@ -7299,7 +7454,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) else sp->device_type = XFRAME_I_DEVICE; - sp->lro = lro; + sp->lro = lro_enable; /* Initialize some PCI/PCI-X fields of the NIC. */ s2io_init_pci(sp); @@ -7314,6 +7469,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) mac_control = &sp->mac_control; config = &sp->config; + config->napi = napi; + /* Tx side parameters. */ config->tx_fifo_num = tx_fifo_num; for (i = 0; i < MAX_TX_FIFOS; i++) { @@ -7361,9 +7518,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) for (i = 0; i < config->rx_ring_num; i++) atomic_set(&sp->rx_bufs_left[i], 0); - /* Initialize the number of ISRs currently running */ - atomic_set(&sp->isr_cnt, 0); - /* initialize the shared memory used by the NIC and the host */ if (init_shared_mem(sp)) { DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", @@ -7406,6 +7560,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) dev->get_stats = &s2io_get_stats; dev->set_multicast_list = &s2io_set_multicast; dev->do_ioctl = &s2io_ioctl; + dev->set_mac_address = &s2io_set_mac_addr; dev->change_mtu = &s2io_change_mtu; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; @@ -7415,8 +7570,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) * will use eth_mac_addr() for dev->set_mac_address * mac address will be set every time dev->open() is called */ - dev->poll = s2io_poll; - dev->weight = 32; + netif_napi_add(dev, &sp->napi, s2io_poll, 32); #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = s2io_netpoll; @@ -7492,7 +7646,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Set the factory defined MAC address initially */ dev->addr_len = ETH_ALEN; memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); + /* Store the values of the MSIX table in the s2io_nic structure */ + store_xmsi_data(sp); /* reset Nic and bring it to known state */ s2io_reset(sp); @@ -7500,9 +7657,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) * Initialize the tasklet status and link state flags * and the card state parameter */ - atomic_set(&(sp->card_state), 0); sp->tasklet_status = 0; - sp->link_state = 0; + sp->state = 0; /* Initialize spinlocks */ spin_lock_init(&sp->tx_lock); @@ -7538,14 +7694,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) sp->product_name, pdev->revision); DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, s2io_driver_version); - DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " - "%02x:%02x:%02x:%02x:%02x:%02x", dev->name, - sp->def_mac_addr[0].mac_addr[0], - sp->def_mac_addr[0].mac_addr[1], - sp->def_mac_addr[0].mac_addr[2], - sp->def_mac_addr[0].mac_addr[3], - sp->def_mac_addr[0].mac_addr[4], - sp->def_mac_addr[0].mac_addr[5]); + DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num); if (sp->device_type & XFRAME_II_DEVICE) { mode = s2io_print_pci_mode(sp); @@ -7565,21 +7715,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", dev->name); break; - case RXD_MODE_3A: - DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n", - dev->name); - break; } if (napi) DBG_PRINT(ERR_DBG, 
"%s: NAPI enabled\n", dev->name); - switch(sp->intr_type) { + switch(sp->config.intr_type) { case INTA: DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); break; - case MSI: - DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name); - break; case MSI_X: DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); break; @@ -7593,14 +7736,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Initialize device name */ sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); - /* Initialize bimodal Interrupts */ - sp->config.bimodal = bimodal; - if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) { - sp->config.bimodal = 0; - DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n", - dev->name); - } - /* * Make Link state as off at this point, when the Link change * interrupt comes the state will be automatically changed to @@ -7619,14 +7754,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) mem_alloc_failed: free_shared_mem(sp); pci_disable_device(pdev); - if (dev_intr_type != MSI_X) - pci_release_regions(pdev); - else { - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - release_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - } + pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(dev); @@ -7661,14 +7789,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev) free_shared_mem(sp); iounmap(sp->bar0); iounmap(sp->bar1); - if (sp->intr_type != MSI_X) - pci_release_regions(pdev); - else { - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - release_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - } + pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(dev); pci_disable_device(pdev); @@ -7680,7 +7801,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev) * the module loadable parameters and initializes PCI configuration space. */ -int __init s2io_starter(void) +static int __init s2io_starter(void) { return pci_register_driver(&s2io_driver); } diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 3887fe63a908..f6b45565304f 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -74,6 +74,10 @@ static int debug_level = ERR_DBG; /* DEBUG message print. */ #define DBG_PRINT(dbg_level, args...) 
if(!(debug_level<dbg_level)) printk(args) +#ifndef DMA_ERROR_CODE +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) +#endif + /* Protocol assist features of the NIC */ #define L3_CKSUM_OK 0xFFFF #define L4_CKSUM_OK 0xFFFF @@ -87,7 +91,7 @@ struct swStat { unsigned long long serious_err_cnt; unsigned long long soft_reset_cnt; unsigned long long fifo_full_cnt; - unsigned long long ring_full_cnt; + unsigned long long ring_full_cnt[8]; /* LRO statistics */ unsigned long long clubbed_frms_cnt; unsigned long long sending_both; @@ -97,6 +101,7 @@ struct swStat { unsigned long long num_aggregations; /* Other statistics */ unsigned long long mem_alloc_fail_cnt; + unsigned long long pci_map_fail_cnt; unsigned long long watchdog_timer_cnt; unsigned long long mem_allocated; unsigned long long mem_freed; @@ -121,6 +126,26 @@ struct swStat { unsigned long long rx_buf_size_err_cnt; unsigned long long rx_rxd_corrupt_cnt; unsigned long long rx_unkn_err_cnt; + + /* Error/alarm statistics*/ + unsigned long long tda_err_cnt; + unsigned long long pfc_err_cnt; + unsigned long long pcc_err_cnt; + unsigned long long tti_err_cnt; + unsigned long long lso_err_cnt; + unsigned long long tpa_err_cnt; + unsigned long long sm_err_cnt; + unsigned long long mac_tmac_err_cnt; + unsigned long long mac_rmac_err_cnt; + unsigned long long xgxs_txgxs_err_cnt; + unsigned long long xgxs_rxgxs_err_cnt; + unsigned long long rc_err_cnt; + unsigned long long prc_pcix_err_cnt; + unsigned long long rpa_err_cnt; + unsigned long long rda_err_cnt; + unsigned long long rti_err_cnt; + unsigned long long mc_err_cnt; + }; /* Xpak releated alarm and warnings */ @@ -407,6 +432,11 @@ struct config_param { struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ u64 tx_intr_type; +#define INTA 0 +#define MSI_X 2 + u8 intr_type; + u8 napi; + /* Specifies if Tx Intr is UTILZ or PER_LIST type. */ /* Rx Side */ @@ -414,7 +444,6 @@ struct config_param { #define MAX_RX_BLOCKS_PER_RING 150 struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ - u8 bimodal; /*Flag for setting bimodal interrupts*/ #define HEADER_ETHERNET_II_802_3_SIZE 14 #define HEADER_802_2_SIZE 3 @@ -575,8 +604,7 @@ struct RxD_block { #define SIZE_OF_BLOCK 4096 #define RXD_MODE_1 0 /* One Buffer mode */ -#define RXD_MODE_3A 1 /* Three Buffer mode */ -#define RXD_MODE_3B 2 /* Two Buffer mode */ +#define RXD_MODE_3B 1 /* Two Buffer mode */ /* Structure to hold virtual addresses of Buf0 and Buf1 in * 2buf mode. 
*/ @@ -773,6 +801,13 @@ struct lro { u8 saw_ts; }; +/* These flags represent the devices temporary state */ +enum s2io_device_state_t +{ + __S2IO_STATE_LINK_TASK=0, + __S2IO_STATE_CARD_UP +}; + /* Structure representing one instance of the NIC */ struct s2io_nic { int rxd_mode; @@ -782,6 +817,7 @@ struct s2io_nic { */ int pkts_to_process; struct net_device *dev; + struct napi_struct napi; struct mac_info mac_control; struct config_param config; struct pci_dev *pdev; @@ -850,13 +886,11 @@ struct s2io_nic { int task_flag; unsigned long long start_time; -#define CARD_DOWN 1 -#define CARD_UP 2 - atomic_t card_state; - volatile unsigned long link_state; struct vlan_group *vlgrp; #define MSIX_FLG 0xA5 struct msix_entry *entries; + int msi_detected; + wait_queue_head_t msi_wait; struct s2io_msix_entry *s2io_entries; char desc[MAX_REQUESTED_MSI_X][25]; @@ -874,14 +908,9 @@ struct s2io_nic { unsigned long sending_both; u8 lro; u16 lro_max_aggr_per_sess; - -#define INTA 0 -#define MSI 1 -#define MSI_X 2 - u8 intr_type; - + volatile unsigned long state; spinlock_t rx_lock; - atomic_t isr_cnt; + u64 general_int_mask; u64 *ufo_in_band_v; #define VPD_STRING_LEN 80 u8 product_name[VPD_STRING_LEN]; @@ -1006,7 +1035,7 @@ static void free_shared_mem(struct s2io_nic *sp); static int init_nic(struct s2io_nic *nic); static void rx_intr_handler(struct ring_info *ring_data); static void tx_intr_handler(struct fifo_info *fifo_data); -static void alarm_intr_handler(struct s2io_nic *sp); +static void s2io_handle_errors(void * dev_id); static int s2io_starter(void); static void s2io_closer(void); @@ -1016,12 +1045,10 @@ static void s2io_set_multicast(struct net_device *dev); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); static void s2io_link(struct s2io_nic * sp, int link); static void s2io_reset(struct s2io_nic * sp); -static int s2io_poll(struct net_device *dev, int *budget); +static int s2io_poll(struct napi_struct *napi, int budget); static void s2io_init_pci(struct s2io_nic * sp); -static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); +static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); static void s2io_alarm_handle(unsigned long data); -static int s2io_enable_msi(struct s2io_nic *nic); -static irqreturn_t s2io_msi_handle(int irq, void *dev_id); static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id); static irqreturn_t diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index 451486b32f23..14361e885415 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c @@ -151,30 +151,30 @@ static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp) printk("lp->lan_saa9730_regs->CamData = %x\n", readl(&lp->lan_saa9730_regs->CamData)); } - printk("lp->stats.tx_packets = %lx\n", lp->stats.tx_packets); - printk("lp->stats.tx_errors = %lx\n", lp->stats.tx_errors); - printk("lp->stats.tx_aborted_errors = %lx\n", - lp->stats.tx_aborted_errors); - printk("lp->stats.tx_window_errors = %lx\n", - lp->stats.tx_window_errors); - printk("lp->stats.tx_carrier_errors = %lx\n", - lp->stats.tx_carrier_errors); - printk("lp->stats.tx_fifo_errors = %lx\n", - lp->stats.tx_fifo_errors); - printk("lp->stats.tx_heartbeat_errors = %lx\n", - lp->stats.tx_heartbeat_errors); - printk("lp->stats.collisions = %lx\n", lp->stats.collisions); - - printk("lp->stats.rx_packets = %lx\n", lp->stats.rx_packets); - printk("lp->stats.rx_errors = %lx\n", lp->stats.rx_errors); - printk("lp->stats.rx_dropped = %lx\n", lp->stats.rx_dropped); - 
printk("lp->stats.rx_crc_errors = %lx\n", lp->stats.rx_crc_errors); - printk("lp->stats.rx_frame_errors = %lx\n", - lp->stats.rx_frame_errors); - printk("lp->stats.rx_fifo_errors = %lx\n", - lp->stats.rx_fifo_errors); - printk("lp->stats.rx_length_errors = %lx\n", - lp->stats.rx_length_errors); + printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets); + printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors); + printk("dev->stats.tx_aborted_errors = %lx\n", + dev->stats.tx_aborted_errors); + printk("dev->stats.tx_window_errors = %lx\n", + dev->stats.tx_window_errors); + printk("dev->stats.tx_carrier_errors = %lx\n", + dev->stats.tx_carrier_errors); + printk("dev->stats.tx_fifo_errors = %lx\n", + dev->stats.tx_fifo_errors); + printk("dev->stats.tx_heartbeat_errors = %lx\n", + dev->stats.tx_heartbeat_errors); + printk("dev->stats.collisions = %lx\n", dev->stats.collisions); + + printk("dev->stats.rx_packets = %lx\n", dev->stats.rx_packets); + printk("dev->stats.rx_errors = %lx\n", dev->stats.rx_errors); + printk("dev->stats.rx_dropped = %lx\n", dev->stats.rx_dropped); + printk("dev->stats.rx_crc_errors = %lx\n", dev->stats.rx_crc_errors); + printk("dev->stats.rx_frame_errors = %lx\n", + dev->stats.rx_frame_errors); + printk("dev->stats.rx_fifo_errors = %lx\n", + dev->stats.rx_fifo_errors); + printk("dev->stats.rx_length_errors = %lx\n", + dev->stats.rx_length_errors); printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n", readl(&lp->lan_saa9730_regs->DebugPCIMasterAddr)); @@ -605,24 +605,24 @@ static int lan_saa9730_tx(struct net_device *dev) printk("lan_saa9730_tx: tx error = %x\n", tx_status); - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (tx_status & (TX_STATUS_LATE_COLL << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (tx_status & (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (tx_status & (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; - lp->stats.collisions += + dev->stats.collisions += tx_status & TX_STATUS_TX_COLL_MSK; } @@ -684,10 +684,10 @@ static int lan_saa9730_rx(struct net_device *dev) printk ("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { - lp->stats.rx_bytes += len; - lp->stats.rx_packets++; + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ skb_copy_to_linear_data(skb, @@ -704,19 +704,19 @@ static int lan_saa9730_rx(struct net_device *dev) ("lan_saa9730_rx: We got an error packet = %x\n", rx_status); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rx_status & (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rx_status & (RX_STATUS_ALIGN_ERR << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (rx_status & (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* Indicate we have 
processed the buffer. */ @@ -853,7 +853,7 @@ static void lan_saa9730_tx_timeout(struct net_device *dev) struct lan_saa9730_private *lp = netdev_priv(dev); /* Transmitter timeout, serious problems */ - lp->stats.tx_errors++; + dev->stats.tx_errors++; printk("%s: transmit timed out, reset\n", dev->name); /*show_saa9730_regs(lp); */ lan_saa9730_restart(lp); @@ -886,8 +886,8 @@ static int lan_saa9730_start_xmit(struct sk_buff *skb, return -1; } - lp->stats.tx_bytes += len; - lp->stats.tx_packets++; + dev->stats.tx_bytes += len; + dev->stats.tx_packets++; dev->trans_start = jiffies; netif_wake_queue(dev); @@ -919,14 +919,6 @@ static int lan_saa9730_close(struct net_device *dev) return 0; } -static struct net_device_stats *lan_saa9730_get_stats(struct net_device - *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - - return &lp->stats; -} - static void lan_saa9730_set_multicast(struct net_device *dev) { struct lan_saa9730_private *lp = netdev_priv(dev); @@ -940,15 +932,14 @@ static void lan_saa9730_set_multicast(struct net_device *dev) CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC, &lp->lan_saa9730_regs->CamCtl); } else { - if (dev->flags & IFF_ALLMULTI) { + if (dev->flags & IFF_ALLMULTI || dev->mc_count) { /* accept all multicast packets */ - writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC | - CAM_CONTROL_BROAD_ACC, - &lp->lan_saa9730_regs->CamCtl); - } else { /* * Will handle the multicast stuff later. -carstenl */ + writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC | + CAM_CONTROL_BROAD_ACC, + &lp->lan_saa9730_regs->CamCtl); } } @@ -1041,7 +1032,6 @@ static int lan_saa9730_init(struct net_device *dev, struct pci_dev *pdev, dev->open = lan_saa9730_open; dev->hard_start_xmit = lan_saa9730_start_xmit; dev->stop = lan_saa9730_close; - dev->get_stats = lan_saa9730_get_stats; dev->set_multicast_list = lan_saa9730_set_multicast; dev->tx_timeout = lan_saa9730_tx_timeout; dev->watchdog_timeo = (HZ >> 1); diff --git a/drivers/net/saa9730.h b/drivers/net/saa9730.h index f656f2f40bb8..010a120ea938 100644 --- a/drivers/net/saa9730.h +++ b/drivers/net/saa9730.h @@ -378,7 +378,6 @@ struct lan_saa9730_private { unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6]; - struct net_device_stats stats; spinlock_t lock; }; diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 1de3eec1a792..487f9d2ac5b4 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -76,7 +76,6 @@ struct sb1000_private { unsigned char rx_session_id[NPIDS]; unsigned char rx_frame_id[NPIDS]; unsigned char rx_pkt_type[NPIDS]; - struct net_device_stats stats; }; /* prototypes for Linux interface */ @@ -85,7 +84,6 @@ static int sb1000_open(struct net_device *dev); static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); static int sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t sb1000_interrupt(int irq, void *dev_id); -static struct net_device_stats *sb1000_stats(struct net_device *dev); static int sb1000_close(struct net_device *dev); @@ -189,7 +187,6 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) */ dev->flags = IFF_POINTOPOINT|IFF_NOARP; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (sb1000_debug > 0) @@ -200,7 +197,6 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) dev->do_ioctl = sb1000_dev_ioctl; dev->hard_start_xmit = sb1000_start_xmit; dev->stop = sb1000_close; - dev->get_stats = sb1000_stats; /* hardware address is 0:0:serial_number */ dev->dev_addr[2] = serial_number >> 24 
& 0xff; @@ -740,7 +736,7 @@ sb1000_rx(struct net_device *dev) unsigned int skbsize; struct sk_buff *skb; struct sb1000_private *lp = netdev_priv(dev); - struct net_device_stats *stats = &lp->stats; + struct net_device_stats *stats = &dev->stats; /* SB1000 frame constants */ const int FrameSize = FRAMESIZE; @@ -1003,11 +999,11 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGCMSTATS: /* get statistics */ - stats[0] = lp->stats.rx_bytes; + stats[0] = dev->stats.rx_bytes; stats[1] = lp->rx_frames; - stats[2] = lp->stats.rx_packets; - stats[3] = lp->stats.rx_errors; - stats[4] = lp->stats.rx_dropped; + stats[2] = dev->stats.rx_packets; + stats[3] = dev->stats.rx_errors; + stats[4] = dev->stats.rx_dropped; if(copy_to_user(ifr->ifr_data, stats, sizeof(stats))) return -EFAULT; status = 0; @@ -1133,12 +1129,6 @@ static irqreturn_t sb1000_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct net_device_stats *sb1000_stats(struct net_device *dev) -{ - struct sb1000_private *lp = netdev_priv(dev); - return &lp->stats; -} - static int sb1000_close(struct net_device *dev) { int i; diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index e7fdcf15b5a7..7b53d658e337 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation + * Copyright (c) 2006, 2007 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -18,7 +19,12 @@ * * This driver is designed for the Broadcom SiByte SOC built-in * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. + * + * Updated to the driver model and the PHY abstraction layer + * by Maciej W. Rozycki. */ + +#include <linux/bug.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> @@ -32,9 +38,15 @@ #include <linux/skbuff.h> #include <linux/init.h> #include <linux/bitops.h> -#include <asm/processor.h> /* Processor type for cache alignment. */ -#include <asm/io.h> +#include <linux/err.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/platform_device.h> + #include <asm/cache.h> +#include <asm/io.h> +#include <asm/processor.h> /* Processor type for cache alignment. */ /* This is only here until the firmware is ready. In that case, the firmware leaves the ethernet address in the register for us. */ @@ -48,7 +60,7 @@ /* These identify the driver base version and may not be removed. */ #if 0 -static char version1[] __devinitdata = +static char version1[] __initdata = "sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n"; #endif @@ -57,8 +69,6 @@ static char version1[] __devinitdata = #define CONFIG_SBMAC_COALESCE -#define MAX_UNITS 4 /* More are supported, limit only on options */ - /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2*HZ) @@ -74,26 +84,6 @@ static int debug = 1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug messages"); -/* mii status msgs */ -static int noisy_mii = 1; -module_param(noisy_mii, int, S_IRUGO); -MODULE_PARM_DESC(noisy_mii, "MII status messages"); - -/* Used to pass the media type, etc. - Both 'options[]' and 'full_duplex[]' should exist for driver - interoperability. - The media type is usually passed in 'options[]'. 
-*/ -#ifdef MODULE -static int options[MAX_UNITS] = {-1, -1, -1, -1}; -module_param_array(options, int, NULL, S_IRUGO); -MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); - -static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1}; -module_param_array(full_duplex, int, NULL, S_IRUGO); -MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); -#endif - #ifdef CONFIG_SBMAC_COALESCE static int int_pktcnt_tx = 255; module_param(int_pktcnt_tx, int, S_IRUGO); @@ -112,6 +102,7 @@ module_param(int_timeout_rx, int, S_IRUGO); MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #endif +#include <asm/sibyte/board.h> #include <asm/sibyte/sb1250.h> #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) #include <asm/sibyte/bcm1480_regs.h> @@ -135,22 +126,43 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #error invalid SiByte MAC configuation #endif +#ifdef K_INT_PHY +#define SBMAC_PHY_INT K_INT_PHY +#else +#define SBMAC_PHY_INT PHY_POLL +#endif + /********************************************************************** * Simple types ********************************************************************* */ +enum sbmac_speed { + sbmac_speed_none = 0, + sbmac_speed_10 = SPEED_10, + sbmac_speed_100 = SPEED_100, + sbmac_speed_1000 = SPEED_1000, +}; -typedef enum { sbmac_speed_auto, sbmac_speed_10, - sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t; - -typedef enum { sbmac_duplex_auto, sbmac_duplex_half, - sbmac_duplex_full } sbmac_duplex_t; +enum sbmac_duplex { + sbmac_duplex_none = -1, + sbmac_duplex_half = DUPLEX_HALF, + sbmac_duplex_full = DUPLEX_FULL, +}; -typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame, - sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t; +enum sbmac_fc { + sbmac_fc_none, + sbmac_fc_disabled, + sbmac_fc_frame, + sbmac_fc_collision, + sbmac_fc_carrier, +}; -typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, - sbmac_state_broken } sbmac_state_t; +enum sbmac_state { + sbmac_state_uninit, + sbmac_state_off, + sbmac_state_on, + sbmac_state_broken, +}; /********************************************************************** @@ -176,55 +188,61 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, * DMA Descriptor structure ********************************************************************* */ -typedef struct sbdmadscr_s { +struct sbdmadscr { uint64_t dscr_a; uint64_t dscr_b; -} sbdmadscr_t; - -typedef unsigned long paddr_t; +}; /********************************************************************** * DMA Controller structure ********************************************************************* */ -typedef struct sbmacdma_s { +struct sbmacdma { /* * This stuff is used to identify the channel and the registers * associated with it. 
*/ - - struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ - int sbdma_channel; /* channel number */ - int sbdma_txdir; /* direction (1=transmit) */ - int sbdma_maxdescr; /* total # of descriptors in ring */ + struct sbmac_softc *sbdma_eth; /* back pointer to associated + MAC */ + int sbdma_channel; /* channel number */ + int sbdma_txdir; /* direction (1=transmit) */ + int sbdma_maxdescr; /* total # of descriptors + in ring */ #ifdef CONFIG_SBMAC_COALESCE - int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt*/ - int sbdma_int_timeout; /* # usec rx/tx interrupt */ + int sbdma_int_pktcnt; + /* # descriptors rx/tx + before interrupt */ + int sbdma_int_timeout; + /* # usec rx/tx interrupt */ #endif - - volatile void __iomem *sbdma_config0; /* DMA config register 0 */ - volatile void __iomem *sbdma_config1; /* DMA config register 1 */ - volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */ - volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */ - volatile void __iomem *sbdma_curdscr; /* current descriptor address */ - volatile void __iomem *sbdma_oodpktlost;/* pkt drop (rx only) */ - + void __iomem *sbdma_config0; /* DMA config register 0 */ + void __iomem *sbdma_config1; /* DMA config register 1 */ + void __iomem *sbdma_dscrbase; + /* descriptor base address */ + void __iomem *sbdma_dscrcnt; /* descriptor count register */ + void __iomem *sbdma_curdscr; /* current descriptor + address */ + void __iomem *sbdma_oodpktlost; + /* pkt drop (rx only) */ /* * This stuff is for maintenance of the ring */ - - sbdmadscr_t *sbdma_dscrtable_unaligned; - sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */ - sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */ - - struct sk_buff **sbdma_ctxtable; /* context table, one per descr */ - - paddr_t sbdma_dscrtable_phys; /* and also the phys addr */ - sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */ - sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */ -} sbmacdma_t; + void *sbdma_dscrtable_unaligned; + struct sbdmadscr *sbdma_dscrtable; + /* base of descriptor table */ + struct sbdmadscr *sbdma_dscrtable_end; + /* end of descriptor table */ + struct sk_buff **sbdma_ctxtable; + /* context table, one + per descr */ + dma_addr_t sbdma_dscrtable_phys; + /* and also the phys addr */ + struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ + struct sbdmadscr *sbdma_remptr; /* next dscr for sw + to remove */ +}; /********************************************************************** @@ -236,47 +254,43 @@ struct sbmac_softc { /* * Linux-specific things */ + struct net_device *sbm_dev; /* pointer to linux device */ + struct napi_struct napi; + struct phy_device *phy_dev; /* the associated PHY device */ + struct mii_bus mii_bus; /* the MII bus */ + int phy_irq[PHY_MAX_ADDR]; + spinlock_t sbm_lock; /* spin lock */ + int sbm_devflags; /* current device flags */ - struct net_device *sbm_dev; /* pointer to linux device */ - spinlock_t sbm_lock; /* spin lock */ - struct timer_list sbm_timer; /* for monitoring MII */ - struct net_device_stats sbm_stats; - int sbm_devflags; /* current device flags */ - - int sbm_phy_oldbmsr; - int sbm_phy_oldanlpar; - int sbm_phy_oldk1stsr; - int sbm_phy_oldlinkstat; - int sbm_buffersize; - - unsigned char sbm_phys[2]; + int sbm_buffersize; /* * Controller-specific things */ - - void __iomem *sbm_base; /* MAC's base address */ - sbmac_state_t sbm_state; /* current state */ - - volatile void __iomem *sbm_macenable; /* MAC Enable Register */ - 
volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */ - volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */ - volatile void __iomem *sbm_framecfg; /* Frame configuration register */ - volatile void __iomem *sbm_rxfilter; /* receive filter register */ - volatile void __iomem *sbm_isr; /* Interrupt status register */ - volatile void __iomem *sbm_imr; /* Interrupt mask register */ - volatile void __iomem *sbm_mdio; /* MDIO register */ - - sbmac_speed_t sbm_speed; /* current speed */ - sbmac_duplex_t sbm_duplex; /* current duplex */ - sbmac_fc_t sbm_fc; /* current flow control setting */ - - unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; - - sbmacdma_t sbm_txdma; /* for now, only use channel 0 */ - sbmacdma_t sbm_rxdma; - int rx_hw_checksum; - int sbe_idx; + void __iomem *sbm_base; /* MAC's base address */ + enum sbmac_state sbm_state; /* current state */ + + void __iomem *sbm_macenable; /* MAC Enable Register */ + void __iomem *sbm_maccfg; /* MAC Config Register */ + void __iomem *sbm_fifocfg; /* FIFO Config Register */ + void __iomem *sbm_framecfg; /* Frame Config Register */ + void __iomem *sbm_rxfilter; /* Receive Filter Register */ + void __iomem *sbm_isr; /* Interrupt Status Register */ + void __iomem *sbm_imr; /* Interrupt Mask Register */ + void __iomem *sbm_mdio; /* MDIO Register */ + + enum sbmac_speed sbm_speed; /* current speed */ + enum sbmac_duplex sbm_duplex; /* current duplex */ + enum sbmac_fc sbm_fc; /* cur. flow control setting */ + int sbm_pause; /* current pause setting */ + int sbm_link; /* current link state */ + + unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; + + struct sbmacdma sbm_txdma; /* only channel 0 for now */ + struct sbmacdma sbm_rxdma; + int rx_hw_checksum; + int sbe_idx; }; @@ -288,55 +302,58 @@ struct sbmac_softc { * Prototypes ********************************************************************* */ -static void sbdma_initctx(sbmacdma_t *d, - struct sbmac_softc *s, - int chan, - int txrx, - int maxdescr); -static void sbdma_channel_start(sbmacdma_t *d, int rxtx); -static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m); -static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m); -static void sbdma_emptyring(sbmacdma_t *d); -static void sbdma_fillring(sbmacdma_t *d); -static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, int work_to_do, int poll); -static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll); +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr); +static void sbdma_channel_start(struct sbmacdma *d, int rxtx); +static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); +static void sbdma_emptyring(struct sbmacdma *d); +static void sbdma_fillring(struct sbmacdma *d); +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll); +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll); static int sbmac_initctx(struct sbmac_softc *s); static void sbmac_channel_start(struct sbmac_softc *s); static void sbmac_channel_stop(struct sbmac_softc *s); -static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t); -static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff); +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, + enum sbmac_state); +static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t 
sbmac_addr2reg(unsigned char *ptr); -static irqreturn_t sbmac_intr(int irq,void *dev_instance); +static irqreturn_t sbmac_intr(int irq, void *dev_instance); static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); -static int sbmac_init(struct net_device *dev, int idx); -static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed); -static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc); +static int sbmac_init(struct platform_device *pldev, long long base); +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc); static int sbmac_open(struct net_device *dev); -static void sbmac_timer(unsigned long data); static void sbmac_tx_timeout (struct net_device *dev); -static struct net_device_stats *sbmac_get_stats(struct net_device *dev); static void sbmac_set_rx_mode(struct net_device *dev); static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sbmac_close(struct net_device *dev); -static int sbmac_poll(struct net_device *poll_dev, int *budget); +static int sbmac_poll(struct napi_struct *napi, int budget); -static int sbmac_mii_poll(struct sbmac_softc *s,int noisy); +static void sbmac_mii_poll(struct net_device *dev); static int sbmac_mii_probe(struct net_device *dev); -static void sbmac_mii_sync(struct sbmac_softc *s); -static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt); -static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx); -static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, - unsigned int regval); +static void sbmac_mii_sync(void __iomem *sbm_mdio); +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt); +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val); /********************************************************************** * Globals ********************************************************************* */ -static uint64_t sbmac_orig_hwaddr[MAX_UNITS]; +static char sbmac_string[] = "sb1250-mac"; +static char sbmac_pretty[] = "SB1250 MAC"; + +static char sbmac_mdio_string[] = "sb1250-mac-mdio"; /********************************************************************** @@ -348,185 +365,66 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS]; #define MII_COMMAND_WRITE 0x01 #define MII_COMMAND_ACK 0x02 -#define BMCR_RESET 0x8000 -#define BMCR_LOOPBACK 0x4000 -#define BMCR_SPEED0 0x2000 -#define BMCR_ANENABLE 0x1000 -#define BMCR_POWERDOWN 0x0800 -#define BMCR_ISOLATE 0x0400 -#define BMCR_RESTARTAN 0x0200 -#define BMCR_DUPLEX 0x0100 -#define BMCR_COLTEST 0x0080 -#define BMCR_SPEED1 0x0040 -#define BMCR_SPEED1000 BMCR_SPEED1 -#define BMCR_SPEED100 BMCR_SPEED0 -#define BMCR_SPEED10 0 - -#define BMSR_100BT4 0x8000 -#define BMSR_100BT_FDX 0x4000 -#define BMSR_100BT_HDX 0x2000 -#define BMSR_10BT_FDX 0x1000 -#define BMSR_10BT_HDX 0x0800 -#define BMSR_100BT2_FDX 0x0400 -#define BMSR_100BT2_HDX 0x0200 -#define BMSR_1000BT_XSR 0x0100 -#define BMSR_PRESUP 0x0040 -#define BMSR_ANCOMPLT 0x0020 -#define BMSR_REMFAULT 0x0010 -#define BMSR_AUTONEG 0x0008 -#define BMSR_LINKSTAT 0x0004 -#define BMSR_JABDETECT 0x0002 -#define BMSR_EXTCAPAB 0x0001 - -#define PHYIDR1 0x2000 -#define PHYIDR2 0x5C60 - -#define ANAR_NP 0x8000 -#define ANAR_RF 0x2000 
-#define ANAR_ASYPAUSE 0x0800 -#define ANAR_PAUSE 0x0400 -#define ANAR_T4 0x0200 -#define ANAR_TXFD 0x0100 -#define ANAR_TXHD 0x0080 -#define ANAR_10FD 0x0040 -#define ANAR_10HD 0x0020 -#define ANAR_PSB 0x0001 - -#define ANLPAR_NP 0x8000 -#define ANLPAR_ACK 0x4000 -#define ANLPAR_RF 0x2000 -#define ANLPAR_ASYPAUSE 0x0800 -#define ANLPAR_PAUSE 0x0400 -#define ANLPAR_T4 0x0200 -#define ANLPAR_TXFD 0x0100 -#define ANLPAR_TXHD 0x0080 -#define ANLPAR_10FD 0x0040 -#define ANLPAR_10HD 0x0020 -#define ANLPAR_PSB 0x0001 /* 802.3 */ - -#define ANER_PDF 0x0010 -#define ANER_LPNPABLE 0x0008 -#define ANER_NPABLE 0x0004 -#define ANER_PAGERX 0x0002 -#define ANER_LPANABLE 0x0001 - -#define ANNPTR_NP 0x8000 -#define ANNPTR_MP 0x2000 -#define ANNPTR_ACK2 0x1000 -#define ANNPTR_TOGTX 0x0800 -#define ANNPTR_CODE 0x0008 - -#define ANNPRR_NP 0x8000 -#define ANNPRR_MP 0x2000 -#define ANNPRR_ACK3 0x1000 -#define ANNPRR_TOGTX 0x0800 -#define ANNPRR_CODE 0x0008 - -#define K1TCR_TESTMODE 0x0000 -#define K1TCR_MSMCE 0x1000 -#define K1TCR_MSCV 0x0800 -#define K1TCR_RPTR 0x0400 -#define K1TCR_1000BT_FDX 0x200 -#define K1TCR_1000BT_HDX 0x100 - -#define K1STSR_MSMCFLT 0x8000 -#define K1STSR_MSCFGRES 0x4000 -#define K1STSR_LRSTAT 0x2000 -#define K1STSR_RRSTAT 0x1000 -#define K1STSR_LP1KFD 0x0800 -#define K1STSR_LP1KHD 0x0400 -#define K1STSR_LPASMDIR 0x0200 - -#define K1SCR_1KX_FDX 0x8000 -#define K1SCR_1KX_HDX 0x4000 -#define K1SCR_1KT_FDX 0x2000 -#define K1SCR_1KT_HDX 0x1000 - -#define STRAP_PHY1 0x0800 -#define STRAP_NCMODE 0x0400 -#define STRAP_MANMSCFG 0x0200 -#define STRAP_ANENABLE 0x0100 -#define STRAP_MSVAL 0x0080 -#define STRAP_1KHDXADV 0x0010 -#define STRAP_1KFDXADV 0x0008 -#define STRAP_100ADV 0x0004 -#define STRAP_SPEEDSEL 0x0000 -#define STRAP_SPEED100 0x0001 - -#define PHYSUP_SPEED1000 0x10 -#define PHYSUP_SPEED100 0x08 -#define PHYSUP_SPEED10 0x00 -#define PHYSUP_LINKUP 0x04 -#define PHYSUP_FDX 0x02 - -#define MII_BMCR 0x00 /* Basic mode control register (rw) */ -#define MII_BMSR 0x01 /* Basic mode status register (ro) */ -#define MII_PHYIDR1 0x02 -#define MII_PHYIDR2 0x03 - -#define MII_K1STSR 0x0A /* 1K Status Register (ro) */ -#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */ - - #define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ #define ENABLE 1 #define DISABLE 0 /********************************************************************** - * SBMAC_MII_SYNC(s) + * SBMAC_MII_SYNC(sbm_mdio) * * Synchronize with the MII - send a pattern of bits to the MII * that will guarantee that it is ready to accept a command. 
* * Input parameters: - * s - sbmac structure + * sbm_mdio - address of the MAC's MDIO register * * Return value: * nothing ********************************************************************* */ -static void sbmac_mii_sync(struct sbmac_softc *s) +static void sbmac_mii_sync(void __iomem *sbm_mdio) { int cnt; uint64_t bits; int mac_mdio_genc; - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); for (cnt = 0; cnt < 32; cnt++) { - __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); } } /********************************************************************** - * SBMAC_MII_SENDDATA(s,data,bitcnt) + * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) * * Send some bits to the MII. The bits to be sent are right- * justified in the 'data' parameter. * * Input parameters: - * s - sbmac structure - * data - data to send - * bitcnt - number of bits to send + * sbm_mdio - address of the MAC's MDIO register + * data - data to send + * bitcnt - number of bits to send ********************************************************************* */ -static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt) +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt) { int i; uint64_t bits; unsigned int curmask; int mac_mdio_genc; - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT; - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask = 1 << (bitcnt - 1); @@ -534,9 +432,9 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc if (data & curmask) bits |= M_MAC_MDIO_OUT; else bits &= ~M_MAC_MDIO_OUT; - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask >>= 1; } } @@ -544,21 +442,22 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc /********************************************************************** - * SBMAC_MII_READ(s,phyaddr,regidx) - * + * SBMAC_MII_READ(bus, phyaddr, regidx) * Read a PHY register. * * Input parameters: - * s - sbmac structure + * bus - MDIO bus handle * phyaddr - PHY's address - * regidx = index of register to read + * regnum - index of register to read * * Return value: - * value read, or 0 if an error occurred. + * value read, or 0xffff if an error occurred. 
********************************************************************* */ -static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx) { + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; int idx; int error; int regval; @@ -568,8 +467,7 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) * Synchronize ourselves so that the PHY knows the next * thing coming down is a command */ - - sbmac_mii_sync(s); + sbmac_mii_sync(sbm_mdio); /* * Send the data to the PHY. The sequence is @@ -578,37 +476,37 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) * the PHY addr (5 bits) * the register index (5 bits) */ + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); - sbmac_mii_senddata(s,MII_COMMAND_START, 2); - sbmac_mii_senddata(s,MII_COMMAND_READ, 2); - sbmac_mii_senddata(s,phyaddr, 5); - sbmac_mii_senddata(s,regidx, 5); - - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; /* * Switch the port around without a clock transition. */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * Send out a clock pulse to signal we want the status */ - - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * If an error occurred, the PHY will signal '1' back */ - error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN; + error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; /* * Issue an 'idle' clock pulse, but keep the direction * the same. */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); regval = 0; @@ -616,55 +514,60 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) regval <<= 1; if (error == 0) { - if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN) + if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) regval |= 1; } - __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); } /* Switch back to output */ - __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); if (error == 0) return regval; - return 0; + return 0xffff; } /********************************************************************** - * SBMAC_MII_WRITE(s,phyaddr,regidx,regval) + * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) * * Write a value to a PHY register. 
* * Input parameters: - * s - sbmac structure + * bus - MDIO bus handle * phyaddr - PHY to use - * regidx - register within the PHY - * regval - data to write to register + * regidx - register within the PHY + * regval - data to write to register * * Return value: - * nothing + * 0 for success ********************************************************************* */ -static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, - unsigned int regval) +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 regval) { + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; int mac_mdio_genc; - sbmac_mii_sync(s); + sbmac_mii_sync(sbm_mdio); + + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); + sbmac_mii_senddata(sbm_mdio, regval, 16); - sbmac_mii_senddata(s,MII_COMMAND_START,2); - sbmac_mii_senddata(s,MII_COMMAND_WRITE,2); - sbmac_mii_senddata(s,phyaddr, 5); - sbmac_mii_senddata(s,regidx, 5); - sbmac_mii_senddata(s,MII_COMMAND_ACK,2); - sbmac_mii_senddata(s,regval,16); + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio); + return 0; } @@ -677,8 +580,8 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, * way. * * Input parameters: - * d - sbmacdma_t structure (DMA channel context) - * s - sbmac_softc structure (pointer to a MAC) + * d - struct sbmacdma (DMA channel context) + * s - struct sbmac_softc (pointer to a MAC) * chan - channel number (0..1 right now) * txrx - Identifies DMA_TX or DMA_RX for channel direction * maxdescr - number of descriptors @@ -687,11 +590,8 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, * nothing ********************************************************************* */ -static void sbdma_initctx(sbmacdma_t *d, - struct sbmac_softc *s, - int chan, - int txrx, - int maxdescr) +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr) { #ifdef CONFIG_SBMAC_COALESCE int int_pktcnt, int_timeout; @@ -710,27 +610,27 @@ static void sbdma_initctx(sbmacdma_t *d, s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; #endif - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST))); - __raw_writeq(0, 
IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR))); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); /* * initialize register pointers @@ -758,18 +658,17 @@ static void sbdma_initctx(sbmacdma_t *d, d->sbdma_maxdescr = maxdescr; - d->sbdma_dscrtable_unaligned = - d->sbdma_dscrtable = (sbdmadscr_t *) - kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL); + d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, + sizeof(*d->sbdma_dscrtable), + GFP_KERNEL); /* * The descriptor table must be aligned to at least 16 bytes or the * MAC will corrupt it. 
*/ - d->sbdma_dscrtable = (sbdmadscr_t *) - ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t)); - - memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t)); + d->sbdma_dscrtable = (struct sbdmadscr *) + ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, + sizeof(*d->sbdma_dscrtable)); d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; @@ -779,10 +678,8 @@ static void sbdma_initctx(sbmacdma_t *d, * And context table */ - d->sbdma_ctxtable = (struct sk_buff **) - kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL); - - memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *)); + d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, + sizeof(*d->sbdma_ctxtable), GFP_KERNEL); #ifdef CONFIG_SBMAC_COALESCE /* @@ -819,7 +716,7 @@ static void sbdma_initctx(sbmacdma_t *d, * nothing ********************************************************************* */ -static void sbdma_channel_start(sbmacdma_t *d, int rxtx ) +static void sbdma_channel_start(struct sbmacdma *d, int rxtx) { /* * Turn on the DMA channel @@ -860,7 +757,7 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx ) * nothing ********************************************************************* */ -static void sbdma_channel_stop(sbmacdma_t *d) +static void sbdma_channel_stop(struct sbmacdma *d) { /* * Turn off the DMA channel @@ -909,10 +806,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) ********************************************************************* */ -static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) +static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) { - sbdmadscr_t *dsc; - sbdmadscr_t *nextdsc; + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; struct sk_buff *sb_new = NULL; int pktsize = ENET_PACKET_SIZE; @@ -953,7 +850,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) if (sb == NULL) { sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); if (sb_new == NULL) { - printk(KERN_INFO "%s: sk_buff allocation failed\n", + pr_info("%s: sk_buff allocation failed\n", d->sbdma_eth->sbm_dev->name); return -ENOBUFS; } @@ -1024,10 +921,10 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) ********************************************************************* */ -static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb) +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) { - sbdmadscr_t *dsc; - sbdmadscr_t *nextdsc; + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; uint64_t phys; uint64_t ncb; int length; @@ -1113,7 +1010,7 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb) * nothing ********************************************************************* */ -static void sbdma_emptyring(sbmacdma_t *d) +static void sbdma_emptyring(struct sbmacdma *d) { int idx; struct sk_buff *sb; @@ -1141,7 +1038,7 @@ static void sbdma_emptyring(sbmacdma_t *d) * nothing ********************************************************************* */ -static void sbdma_fillring(sbmacdma_t *d) +static void sbdma_fillring(struct sbmacdma *d) { int idx; @@ -1188,12 +1085,13 @@ static void sbmac_netpoll(struct net_device *netdev) * nothing ********************************************************************* */ -static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, - int work_to_do, int poll) +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll) { + struct net_device *dev = sc->sbm_dev; 
int curidx; int hwidx; - sbdmadscr_t *dsc; + struct sbdmadscr *dsc; struct sk_buff *sb; int len; int work_done = 0; @@ -1203,7 +1101,7 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, again: /* Check if the HW dropped any frames */ - sc->sbm_stats.rx_fifo_errors + dev->stats.rx_fifo_errors += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); @@ -1225,8 +1123,9 @@ again: prefetch(dsc); prefetch(&d->sbdma_ctxtable[curidx]); - hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - - d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / + sizeof(*d->sbdma_dscrtable); /* * If they're the same, that means we've processed all @@ -1262,7 +1161,7 @@ again: if (unlikely (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS)) { - sc->sbm_stats.rx_dropped++; + dev->stats.rx_dropped++; sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ /* No point in continuing at the moment */ printk(KERN_ERR "dropped packet (1)\n"); @@ -1298,13 +1197,13 @@ again: dropped = netif_rx(sb); if (dropped == NET_RX_DROP) { - sc->sbm_stats.rx_dropped++; + dev->stats.rx_dropped++; d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done; } else { - sc->sbm_stats.rx_bytes += len; - sc->sbm_stats.rx_packets++; + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; } } } else { @@ -1312,7 +1211,7 @@ again: * Packet was mangled somehow. Just drop it and * put it back on the receive ring. */ - sc->sbm_stats.rx_errors++; + dev->stats.rx_errors++; sbdma_add_rcvbuffer(d,sb); } @@ -1350,11 +1249,13 @@ done: * nothing ********************************************************************* */ -static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll) { + struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; - sbdmadscr_t *dsc; + struct sbdmadscr *dsc; struct sk_buff *sb; unsigned long flags; int packets_handled = 0; @@ -1364,8 +1265,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) if (d->sbdma_remptr == d->sbdma_addptr) goto end_unlock; - hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - - d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); for (;;) { /* @@ -1402,8 +1303,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) * Stats */ - sc->sbm_stats.tx_bytes += sb->len; - sc->sbm_stats.tx_packets++; + dev->stats.tx_bytes += sb->len; + dev->stats.tx_packets++; /* * for transmits, we just free buffers. @@ -1468,14 +1369,6 @@ static int sbmac_initctx(struct sbmac_softc *s) s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; s->sbm_mdio = s->sbm_base + R_MAC_MDIO; - s->sbm_phys[0] = 1; - s->sbm_phys[1] = 0; - - s->sbm_phy_oldbmsr = 0; - s->sbm_phy_oldanlpar = 0; - s->sbm_phy_oldk1stsr = 0; - s->sbm_phy_oldlinkstat = 0; - /* * Initialize the DMA channels. Right now, only one per MAC is used * Note: Only do this _once_, as it allocates memory from the kernel! 
@@ -1490,19 +1383,11 @@ static int sbmac_initctx(struct sbmac_softc *s) s->sbm_state = sbmac_state_off; - /* - * Initial speed is (XXX TEMP) 10MBit/s HDX no FC - */ - - s->sbm_speed = sbmac_speed_10; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_disabled; - return 0; } -static void sbdma_uninitctx(struct sbmacdma_s *d) +static void sbdma_uninitctx(struct sbmacdma *d) { if (d->sbdma_dscrtable_unaligned) { kfree(d->sbdma_dscrtable_unaligned); @@ -1538,7 +1423,7 @@ static void sbmac_uninitctx(struct sbmac_softc *sc) static void sbmac_channel_start(struct sbmac_softc *s) { uint64_t reg; - volatile void __iomem *port; + void __iomem *port; uint64_t cfg,fifo,framecfg; int idx, th_value; @@ -1801,10 +1686,10 @@ static void sbmac_channel_stop(struct sbmac_softc *s) * Return value: * old state ********************************************************************* */ -static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc, - sbmac_state_t state) +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, + enum sbmac_state state) { - sbmac_state_t oldstate = sc->sbm_state; + enum sbmac_state oldstate = sc->sbm_state; /* * If same as previous state, return @@ -1939,14 +1824,14 @@ static uint64_t sbmac_addr2reg(unsigned char *ptr) * * Input parameters: * s - sbmac structure - * speed - speed to set MAC to (see sbmac_speed_t enum) + * speed - speed to set MAC to (see enum sbmac_speed) * * Return value: * 1 if successful * 0 indicates invalid parameters ********************************************************************* */ -static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) { uint64_t cfg; uint64_t framecfg; @@ -2004,8 +1889,6 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; break; - case sbmac_speed_auto: /* XXX not implemented */ - /* fall through */ default: return 0; } @@ -2028,15 +1911,16 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) * * Input parameters: * s - sbmac structure - * duplex - duplex setting (see sbmac_duplex_t) - * fc - flow control setting (see sbmac_fc_t) + * duplex - duplex setting (see enum sbmac_duplex) + * fc - flow control setting (see enum sbmac_fc) * * Return value: * 1 if ok * 0 if an invalid parameter combination was specified ********************************************************************* */ -static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc) +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc) { uint64_t cfg; @@ -2078,8 +1962,6 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; break; - case sbmac_fc_auto: /* XXX not implemented */ - /* fall through */ case sbmac_fc_frame: /* not valid in half duplex */ default: /* invalid selection */ return 0; @@ -2098,15 +1980,12 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc case sbmac_fc_collision: /* not valid in full duplex */ case sbmac_fc_carrier: /* not valid in full duplex */ - case sbmac_fc_auto: /* XXX not implemented */ - /* fall through */ default: return 0; } break; - case sbmac_duplex_auto: - /* XXX not implemented */ - break; + default: + return 0; } /* @@ -2154,20 +2033,13 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) * Transmits on channel 0 */ - if (isr & 
(M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) { + if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) sbdma_tx_process(sc,&(sc->sbm_txdma), 0); -#ifdef CONFIG_NETPOLL_TRAP - if (netpoll_trap()) { - if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) - __netif_schedule(dev); - } -#endif - } if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &sc->napi)) { __raw_writeq(0, sc->sbm_imr); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &sc->napi); /* Depend on the exit from poll to reenable intr */ } else { @@ -2236,7 +2108,7 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) static void sbmac_setmulti(struct sbmac_softc *sc) { uint64_t reg; - volatile void __iomem *port; + void __iomem *port; int idx; struct dev_mc_list *mclist; struct net_device *dev = sc->sbm_dev; @@ -2392,7 +2264,7 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) if (new_mtu > ENET_PACKET_SIZE) return -EINVAL; _dev->mtu = new_mtu; - printk(KERN_INFO "changing the mtu to %d\n", new_mtu); + pr_info("changing the mtu to %d\n", new_mtu); return 0; } @@ -2408,19 +2280,17 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) * status ********************************************************************* */ -static int sbmac_init(struct net_device *dev, int idx) +static int sbmac_init(struct platform_device *pldev, long long base) { - struct sbmac_softc *sc; + struct net_device *dev = pldev->dev.driver_data; + int idx = pldev->id; + struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; uint64_t ea_reg; int i; int err; + DECLARE_MAC_BUF(mac); - sc = netdev_priv(dev); - - /* Determine controller base address */ - - sc->sbm_base = IOADDR(dev->base_addr); sc->sbm_dev = dev; sc->sbe_idx = idx; @@ -2465,58 +2335,67 @@ static int sbmac_init(struct net_device *dev, int idx) dev->open = sbmac_open; dev->hard_start_xmit = sbmac_start_tx; dev->stop = sbmac_close; - dev->get_stats = sbmac_get_stats; dev->set_multicast_list = sbmac_set_rx_mode; dev->do_ioctl = sbmac_mii_ioctl; dev->tx_timeout = sbmac_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - dev->poll = sbmac_poll; - dev->weight = 16; + + netif_napi_add(dev, &sc->napi, sbmac_poll, 16); dev->change_mtu = sb1250_change_mtu; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = sbmac_netpoll; #endif + dev->irq = UNIT_INT(idx); + /* This is needed for PASS2 for Rx H/W checksum feature */ sbmac_set_iphdr_offset(sc); err = register_netdev(dev); - if (err) - goto out_uninit; - - if (sc->rx_hw_checksum == ENABLE) { - printk(KERN_INFO "%s: enabling TCP rcv checksum\n", - sc->sbm_dev->name); + if (err) { + printk(KERN_ERR "%s.%d: unable to register netdev\n", + sbmac_string, idx); + sbmac_uninitctx(sc); + return err; } + pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); + + if (sc->rx_hw_checksum == ENABLE) + pr_info("%s: enabling TCP rcv checksum\n", dev->name); + /* * Display Ethernet address (this is called during the config * process so we need to finish off the config message that * was being displayed) */ - printk(KERN_INFO - "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n", - dev->name, dev->base_addr, - eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]); - + pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %s\n", + dev->name, base, print_mac(mac, eaddr)); - return 0; + sc->mii_bus.name = sbmac_mdio_string; + sc->mii_bus.id = idx; + sc->mii_bus.priv = sc; + sc->mii_bus.read = sbmac_mii_read; + 
sc->mii_bus.write = sbmac_mii_write; + sc->mii_bus.irq = sc->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + sc->mii_bus.irq[i] = SBMAC_PHY_INT; -out_uninit: - sbmac_uninitctx(sc); + sc->mii_bus.dev = &pldev->dev; + dev_set_drvdata(&pldev->dev, &sc->mii_bus); - return err; + return 0; } static int sbmac_open(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); + int err; - if (debug > 1) { - printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq); - } + if (debug > 1) + pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); /* * map/route interrupt (clear status first, in case something @@ -2525,23 +2404,35 @@ static int sbmac_open(struct net_device *dev) */ __raw_readq(sc->sbm_isr); - if (request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev)) - return -EBUSY; + err = request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev); + if (err) { + printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, + dev->irq); + goto out_err; + } /* - * Probe phy address + * Probe PHY address */ - - if(sbmac_mii_probe(dev) == -1) { - printk("%s: failed to probe PHY.\n", dev->name); - return -EINVAL; + err = mdiobus_register(&sc->mii_bus); + if (err) { + printk(KERN_ERR "%s: unable to register MDIO bus\n", + dev->name); + goto out_unirq; } + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_none; + sc->sbm_pause = -1; + sc->sbm_link = 0; + /* - * Configure default speed + * Attach to the PHY */ - - sbmac_mii_poll(sc,noisy_mii); + err = sbmac_mii_probe(dev); + if (err) + goto out_unregister; /* * Turn on the channel @@ -2549,200 +2440,133 @@ static int sbmac_open(struct net_device *dev) sbmac_set_channel_state(sc,sbmac_state_on); - /* - * XXX Station address is in dev->dev_addr - */ - - if (dev->if_port == 0) - dev->if_port = 0; - netif_start_queue(dev); sbmac_set_rx_mode(dev); - /* Set the timer to check for link beat. */ - init_timer(&sc->sbm_timer); - sc->sbm_timer.expires = jiffies + 2 * HZ/100; - sc->sbm_timer.data = (unsigned long)dev; - sc->sbm_timer.function = &sbmac_timer; - add_timer(&sc->sbm_timer); + phy_start(sc->phy_dev); + + napi_enable(&sc->napi); return 0; + +out_unregister: + mdiobus_unregister(&sc->mii_bus); + +out_unirq: + free_irq(dev->irq, dev); + +out_err: + return err; } static int sbmac_mii_probe(struct net_device *dev) { + struct sbmac_softc *sc = netdev_priv(dev); + struct phy_device *phy_dev; int i; - struct sbmac_softc *s = netdev_priv(dev); - u16 bmsr, id1, id2; - u32 vendor, device; - - for (i=1; i<31; i++) { - bmsr = sbmac_mii_read(s, i, MII_BMSR); - if (bmsr != 0) { - s->sbm_phys[0] = i; - id1 = sbmac_mii_read(s, i, MII_PHYIDR1); - id2 = sbmac_mii_read(s, i, MII_PHYIDR2); - vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f); - device = (id2 >> 4) & 0x3f; - - printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n", - dev->name, i, vendor, device); - return i; - } - } - return -1; -} - - -static int sbmac_mii_poll(struct sbmac_softc *s,int noisy) -{ - int bmsr,bmcr,k1stsr,anlpar; - int chg; - char buffer[100]; - char *p = buffer; - /* Read the mode status and mode control registers. 
*/ - bmsr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMSR); - bmcr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMCR); - - /* get the link partner status */ - anlpar = sbmac_mii_read(s,s->sbm_phys[0],MII_ANLPAR); - - /* if supported, read the 1000baseT register */ - if (bmsr & BMSR_1000BT_XSR) { - k1stsr = sbmac_mii_read(s,s->sbm_phys[0],MII_K1STSR); - } - else { - k1stsr = 0; + for (i = 0; i < PHY_MAX_ADDR; i++) { + phy_dev = sc->mii_bus.phy_map[i]; + if (phy_dev) + break; } - - chg = 0; - - if ((bmsr & BMSR_LINKSTAT) == 0) { - /* - * If link status is down, clear out old info so that when - * it comes back up it will force us to reconfigure speed - */ - s->sbm_phy_oldbmsr = 0; - s->sbm_phy_oldanlpar = 0; - s->sbm_phy_oldk1stsr = 0; - return 0; + if (!phy_dev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENXIO; } - if ((s->sbm_phy_oldbmsr != bmsr) || - (s->sbm_phy_oldanlpar != anlpar) || - (s->sbm_phy_oldk1stsr != k1stsr)) { - if (debug > 1) { - printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x k1stsr:%x/%x\n", - s->sbm_dev->name, - s->sbm_phy_oldbmsr,bmsr, - s->sbm_phy_oldanlpar,anlpar, - s->sbm_phy_oldk1stsr,k1stsr); - } - s->sbm_phy_oldbmsr = bmsr; - s->sbm_phy_oldanlpar = anlpar; - s->sbm_phy_oldk1stsr = k1stsr; - chg = 1; + phy_dev = phy_connect(dev, phy_dev->dev.bus_id, &sbmac_mii_poll, 0, + PHY_INTERFACE_MODE_GMII); + if (IS_ERR(phy_dev)) { + printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); + return PTR_ERR(phy_dev); } - if (chg == 0) - return 0; + /* Remove any features not supported by the controller */ + phy_dev->supported &= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + phy_dev->advertising = phy_dev->supported; - p += sprintf(p,"Link speed: "); - - if (k1stsr & K1STSR_LP1KFD) { - s->sbm_speed = sbmac_speed_1000; - s->sbm_duplex = sbmac_duplex_full; - s->sbm_fc = sbmac_fc_frame; - p += sprintf(p,"1000BaseT FDX"); - } - else if (k1stsr & K1STSR_LP1KHD) { - s->sbm_speed = sbmac_speed_1000; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_disabled; - p += sprintf(p,"1000BaseT HDX"); - } - else if (anlpar & ANLPAR_TXFD) { - s->sbm_speed = sbmac_speed_100; - s->sbm_duplex = sbmac_duplex_full; - s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? 
sbmac_fc_frame : sbmac_fc_disabled; - p += sprintf(p,"100BaseT FDX"); - } - else if (anlpar & ANLPAR_TXHD) { - s->sbm_speed = sbmac_speed_100; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_disabled; - p += sprintf(p,"100BaseT HDX"); - } - else if (anlpar & ANLPAR_10FD) { - s->sbm_speed = sbmac_speed_10; - s->sbm_duplex = sbmac_duplex_full; - s->sbm_fc = sbmac_fc_frame; - p += sprintf(p,"10BaseT FDX"); - } - else if (anlpar & ANLPAR_10HD) { - s->sbm_speed = sbmac_speed_10; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_collision; - p += sprintf(p,"10BaseT HDX"); - } - else { - p += sprintf(p,"Unknown"); - } + pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + dev->name, phy_dev->drv->name, + phy_dev->dev.bus_id, phy_dev->irq); - if (noisy) { - printk(KERN_INFO "%s: %s\n",s->sbm_dev->name,buffer); - } + sc->phy_dev = phy_dev; - return 1; + return 0; } -static void sbmac_timer(unsigned long data) +static void sbmac_mii_poll(struct net_device *dev) { - struct net_device *dev = (struct net_device *)data; struct sbmac_softc *sc = netdev_priv(dev); - int next_tick = HZ; - int mii_status; + struct phy_device *phy_dev = sc->phy_dev; + unsigned long flags; + enum sbmac_fc fc; + int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; + + link_chg = (sc->sbm_link != phy_dev->link); + speed_chg = (sc->sbm_speed != phy_dev->speed); + duplex_chg = (sc->sbm_duplex != phy_dev->duplex); + pause_chg = (sc->sbm_pause != phy_dev->pause); + + if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) + return; /* Hmmm... */ + + if (!phy_dev->link) { + if (link_chg) { + sc->sbm_link = phy_dev->link; + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_disabled; + sc->sbm_pause = -1; + pr_info("%s: link unavailable\n", dev->name); + } + return; + } - spin_lock_irq (&sc->sbm_lock); + if (phy_dev->duplex == DUPLEX_FULL) { + if (phy_dev->pause) + fc = sbmac_fc_frame; + else + fc = sbmac_fc_disabled; + } else + fc = sbmac_fc_collision; + fc_chg = (sc->sbm_fc != fc); - /* make IFF_RUNNING follow the MII status bit "Link established" */ - mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR); + pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, + phy_dev->duplex == DUPLEX_FULL ? 
'F' : 'H'); - if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) { - sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT; - if (mii_status & BMSR_LINKSTAT) { - netif_carrier_on(dev); - } - else { - netif_carrier_off(dev); - } - } + spin_lock_irqsave(&sc->sbm_lock, flags); - /* - * Poll the PHY to see what speed we should be running at - */ + sc->sbm_speed = phy_dev->speed; + sc->sbm_duplex = phy_dev->duplex; + sc->sbm_fc = fc; + sc->sbm_pause = phy_dev->pause; + sc->sbm_link = phy_dev->link; - if (sbmac_mii_poll(sc,noisy_mii)) { - if (sc->sbm_state != sbmac_state_off) { - /* - * something changed, restart the channel - */ - if (debug > 1) { - printk("%s: restarting channel because speed changed\n", - sc->sbm_dev->name); - } - sbmac_channel_stop(sc); - sbmac_channel_start(sc); - } + if ((speed_chg || duplex_chg || fc_chg) && + sc->sbm_state != sbmac_state_off) { + /* + * something changed, restart the channel + */ + if (debug > 1) + pr_debug("%s: restarting channel " + "because PHY state changed\n", dev->name); + sbmac_channel_stop(sc); + sbmac_channel_start(sc); } - spin_unlock_irq (&sc->sbm_lock); - - sc->sbm_timer.expires = jiffies + next_tick; - add_timer(&sc->sbm_timer); + spin_unlock_irqrestore(&sc->sbm_lock, flags); } @@ -2754,7 +2578,7 @@ static void sbmac_tx_timeout (struct net_device *dev) dev->trans_start = jiffies; - sc->sbm_stats.tx_errors++; + dev->stats.tx_errors++; spin_unlock_irq (&sc->sbm_lock); @@ -2764,22 +2588,6 @@ static void sbmac_tx_timeout (struct net_device *dev) -static struct net_device_stats *sbmac_get_stats(struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&sc->sbm_lock, flags); - - /* XXX update other stats here */ - - spin_unlock_irqrestore(&sc->sbm_lock, flags); - - return &sc->sbm_stats; -} - - - static void sbmac_set_rx_mode(struct net_device *dev) { unsigned long flags; @@ -2811,62 +2619,34 @@ static void sbmac_set_rx_mode(struct net_device *dev) static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct sbmac_softc *sc = netdev_priv(dev); - u16 *data = (u16 *)&rq->ifr_ifru; - unsigned long flags; - int retval; - spin_lock_irqsave(&sc->sbm_lock, flags); - retval = 0; - - switch(cmd) { - case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */ - data[0] = sc->sbm_phys[0] & 0x1f; - /* Fall Through */ - case SIOCDEVPRIVATE+1: /* Read the specified MII register. 
*/ - data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f); - break; - case SIOCDEVPRIVATE+2: /* Write the specified MII register */ - if (!capable(CAP_NET_ADMIN)) { - retval = -EPERM; - break; - } - if (debug > 1) { - printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n",dev->name, - data[0],data[1],data[2]); - } - sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]); - break; - default: - retval = -EOPNOTSUPP; - } + if (!netif_running(dev) || !sc->phy_dev) + return -EINVAL; - spin_unlock_irqrestore(&sc->sbm_lock, flags); - return retval; + return phy_mii_ioctl(sc->phy_dev, if_mii(rq), cmd); } static int sbmac_close(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); - unsigned long flags; - int irq; - sbmac_set_channel_state(sc,sbmac_state_off); + napi_disable(&sc->napi); - del_timer_sync(&sc->sbm_timer); + phy_stop(sc->phy_dev); - spin_lock_irqsave(&sc->sbm_lock, flags); + sbmac_set_channel_state(sc, sbmac_state_off); netif_stop_queue(dev); - if (debug > 1) { - printk(KERN_DEBUG "%s: Shutting down ethercard\n",dev->name); - } + if (debug > 1) + pr_debug("%s: Shutting down ethercard\n", dev->name); - spin_unlock_irqrestore(&sc->sbm_lock, flags); + phy_disconnect(sc->phy_dev); + sc->phy_dev = NULL; + + mdiobus_unregister(&sc->mii_bus); - irq = dev->irq; - synchronize_irq(irq); - free_irq(irq, dev); + free_irq(dev->irq, dev); sbdma_emptyring(&(sc->sbm_txdma)); sbdma_emptyring(&(sc->sbm_rxdma)); @@ -2874,26 +2654,17 @@ static int sbmac_close(struct net_device *dev) return 0; } -static int sbmac_poll(struct net_device *dev, int *budget) +static int sbmac_poll(struct napi_struct *napi, int budget) { - int work_to_do; + struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); + struct net_device *dev = sc->sbm_dev; int work_done; - struct sbmac_softc *sc = netdev_priv(dev); - - work_to_do = min(*budget, dev->quota); - work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1); - - if (work_done > work_to_do) - printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n", - sc->sbm_dev->name, *budget, dev->quota, work_done); + work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); sbdma_tx_process(sc, &(sc->sbm_txdma), 1); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done < work_to_do) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | @@ -2905,57 +2676,198 @@ static int sbmac_poll(struct net_device *dev, int *budget) #endif } - return (work_done >= work_to_do); + return work_done; +} + + +static int __init sbmac_probe(struct platform_device *pldev) +{ + struct net_device *dev; + struct sbmac_softc *sc; + void __iomem *sbm_base; + struct resource *res; + u64 sbmac_orig_hwaddr; + int err; + + res = platform_get_resource(pldev, IORESOURCE_MEM, 0); + BUG_ON(!res); + sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); + if (!sbm_base) { + printk(KERN_ERR "%s: unable to map device registers\n", + pldev->dev.bus_id); + err = -ENOMEM; + goto out_out; + } + + /* + * The R_MAC_ETHERNET_ADDR register will be set to some nonzero + * value for us by the firmware if we're going to use this MAC. + * If we find a zero, skip this MAC. + */ + sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); + pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", pldev->dev.bus_id, + sbmac_orig_hwaddr ? 
"" : "not ", (long long)res->start); + if (sbmac_orig_hwaddr == 0) { + err = 0; + goto out_unmap; + } + + /* + * Okay, cool. Initialize this MAC. + */ + dev = alloc_etherdev(sizeof(struct sbmac_softc)); + if (!dev) { + printk(KERN_ERR "%s: unable to allocate etherdev\n", + pldev->dev.bus_id); + err = -ENOMEM; + goto out_unmap; + } + + pldev->dev.driver_data = dev; + SET_NETDEV_DEV(dev, &pldev->dev); + + sc = netdev_priv(dev); + sc->sbm_base = sbm_base; + + err = sbmac_init(pldev, res->start); + if (err) + goto out_kfree; + + return 0; + +out_kfree: + free_netdev(dev); + __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); + +out_unmap: + iounmap(sbm_base); + +out_out: + return err; +} + +static int __exit sbmac_remove(struct platform_device *pldev) +{ + struct net_device *dev = pldev->dev.driver_data; + struct sbmac_softc *sc = netdev_priv(dev); + + unregister_netdev(dev); + sbmac_uninitctx(sc); + iounmap(sc->sbm_base); + free_netdev(dev); + + return 0; } + +static struct platform_device **sbmac_pldev; +static int sbmac_max_units; + #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) -static void -sbmac_setup_hwaddr(int chan,char *addr) +static void __init sbmac_setup_hwaddr(int idx, char *addr) { + void __iomem *sbm_base; + unsigned long start, end; uint8_t eaddr[6]; uint64_t val; - unsigned long port; - port = A_MAC_CHANNEL_BASE(chan); - sbmac_parse_hwaddr(addr,eaddr); + if (idx >= sbmac_max_units) + return; + + start = A_MAC_CHANNEL_BASE(idx); + end = A_MAC_CHANNEL_BASE(idx + 1) - 1; + + sbm_base = ioremap_nocache(start, end - start + 1); + if (!sbm_base) { + printk(KERN_ERR "%s: unable to map device registers\n", + sbmac_string); + return; + } + + sbmac_parse_hwaddr(addr, eaddr); val = sbmac_addr2reg(eaddr); - __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR)); - val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR)); + __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR); + val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); + + iounmap(sbm_base); } #endif -static struct net_device *dev_sbmac[MAX_UNITS]; +static int __init sbmac_platform_probe_one(int idx) +{ + struct platform_device *pldev; + struct { + struct resource r; + char name[strlen(sbmac_pretty) + 4]; + } *res; + int err; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + printk(KERN_ERR "%s.%d: unable to allocate memory\n", + sbmac_string, idx); + err = -ENOMEM; + goto out_err; + } + + /* + * This is the base address of the MAC. + */ + snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx); + res->r.name = res->name; + res->r.flags = IORESOURCE_MEM; + res->r.start = A_MAC_CHANNEL_BASE(idx); + res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1; + + pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1); + if (IS_ERR(pldev)) { + printk(KERN_ERR "%s.%d: unable to register platform device\n", + sbmac_string, idx); + err = PTR_ERR(pldev); + goto out_kfree; + } + + if (!pldev->dev.driver) { + err = 0; /* No hardware at this address. */ + goto out_unregister; + } + + sbmac_pldev[idx] = pldev; + return 0; + +out_unregister: + platform_device_unregister(pldev); + +out_kfree: + kfree(res); + +out_err: + return err; +} -static int __init -sbmac_init_module(void) +static void __init sbmac_platform_probe(void) { - int idx; - struct net_device *dev; - unsigned long port; - int chip_max_units; + int i; /* Set the number of available units based on the SOC type. 
*/ switch (soc_type) { case K_SYS_SOC_TYPE_BCM1250: case K_SYS_SOC_TYPE_BCM1250_ALT: - chip_max_units = 3; + sbmac_max_units = 3; break; case K_SYS_SOC_TYPE_BCM1120: case K_SYS_SOC_TYPE_BCM1125: case K_SYS_SOC_TYPE_BCM1125H: - case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ - chip_max_units = 2; + case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ + sbmac_max_units = 2; break; case K_SYS_SOC_TYPE_BCM1x55: case K_SYS_SOC_TYPE_BCM1x80: - chip_max_units = 4; + sbmac_max_units = 4; break; default: - chip_max_units = 0; - break; + return; /* none */ } - if (chip_max_units > MAX_UNITS) - chip_max_units = MAX_UNITS; /* * For bringup when not using the firmware, we can pre-fill @@ -2963,89 +2875,71 @@ sbmac_init_module(void) * specified in this file (or maybe from the config file?) */ #ifdef SBMAC_ETH0_HWADDR - if (chip_max_units > 0) - sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR); + sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR); #endif #ifdef SBMAC_ETH1_HWADDR - if (chip_max_units > 1) - sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR); + sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR); #endif #ifdef SBMAC_ETH2_HWADDR - if (chip_max_units > 2) - sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR); + sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR); #endif #ifdef SBMAC_ETH3_HWADDR - if (chip_max_units > 3) - sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR); + sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR); #endif + sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev), + GFP_KERNEL); + if (!sbmac_pldev) { + printk(KERN_ERR "%s: unable to allocate memory\n", + sbmac_string); + return; + } + /* * Walk through the Ethernet controllers and find * those who have their MAC addresses set. */ - for (idx = 0; idx < chip_max_units; idx++) { + for (i = 0; i < sbmac_max_units; i++) + if (sbmac_platform_probe_one(i)) + break; +} - /* - * This is the base address of the MAC. - */ - port = A_MAC_CHANNEL_BASE(idx); +static void __exit sbmac_platform_cleanup(void) +{ + int i; - /* - * The R_MAC_ETHERNET_ADDR register will be set to some nonzero - * value for us by the firmware if we are going to use this MAC. - * If we find a zero, skip this MAC. - */ + for (i = 0; i < sbmac_max_units; i++) + platform_device_unregister(sbmac_pldev[i]); + kfree(sbmac_pldev); +} - sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR)); - if (sbmac_orig_hwaddr[idx] == 0) { - printk(KERN_DEBUG "sbmac: not configuring MAC at " - "%lx\n", port); - continue; - } - /* - * Okay, cool. Initialize this MAC. 
- */ +static struct platform_driver sbmac_driver = { + .probe = sbmac_probe, + .remove = __exit_p(sbmac_remove), + .driver = { + .name = sbmac_string, + }, +}; - dev = alloc_etherdev(sizeof(struct sbmac_softc)); - if (!dev) - return -ENOMEM; +static int __init sbmac_init_module(void) +{ + int err; - printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); + err = platform_driver_register(&sbmac_driver); + if (err) + return err; - dev->irq = UNIT_INT(idx); - dev->base_addr = port; - dev->mem_end = 0; - if (sbmac_init(dev, idx)) { - port = A_MAC_CHANNEL_BASE(idx); - __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR)); - free_netdev(dev); - continue; - } - dev_sbmac[idx] = dev; - } - return 0; -} + sbmac_platform_probe(); + return err; +} -static void __exit -sbmac_cleanup_module(void) +static void __exit sbmac_cleanup_module(void) { - struct net_device *dev; - int idx; - - for (idx = 0; idx < MAX_UNITS; idx++) { - struct sbmac_softc *sc; - dev = dev_sbmac[idx]; - if (!dev) - continue; - - sc = netdev_priv(dev); - unregister_netdev(dev); - sbmac_uninitctx(sc); - free_netdev(dev); - } + sbmac_platform_cleanup(); + platform_driver_unregister(&sbmac_driver); } module_init(sbmac_init_module); diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 5b7284c955dc..37b42394560d 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -1372,9 +1372,14 @@ static void sc92031_ethtool_get_strings(struct net_device *dev, SILAN_STATS_NUM * ETH_GSTRING_LEN); } -static int sc92031_ethtool_get_stats_count(struct net_device *dev) +static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset) { - return SILAN_STATS_NUM; + switch (sset) { + case ETH_SS_STATS: + return SILAN_STATS_NUM; + default: + return -EOPNOTSUPP; + } } static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev, @@ -1396,14 +1401,9 @@ static struct ethtool_ops sc92031_ethtool_ops = { .set_wol = sc92031_ethtool_set_wol, .nway_reset = sc92031_ethtool_nway_reset, .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, .get_strings = sc92031_ethtool_get_strings, - .get_stats_count = sc92031_ethtool_get_stats_count, + .get_sset_count = sc92031_ethtool_get_sset_count, .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, - .get_ufo = ethtool_op_get_ufo, }; static int __devinit sc92031_probe(struct pci_dev *pdev, diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 4bce7c4f373c..48c64fb20eec 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -67,7 +67,6 @@ static unsigned int net_debug = NET_DEBUG; /* Information that need to be kept for each board. */ struct net_local { - struct net_device_stats stats; unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */ long open_time; /* Useless example local info. */ }; @@ -86,7 +85,6 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t seeq8005_interrupt(int irq, void *dev_id); static void seeq8005_rx(struct net_device *dev); static int seeq8005_close(struct net_device *dev); -static struct net_device_stats *seeq8005_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); /* Example routines you must write ;->. 
*/ @@ -160,6 +158,7 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) int old_dmaar; int old_rear; int retval; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005")) return -ENODEV; @@ -303,7 +302,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = SA_prom[i+6]); + dev->dev_addr[i] = SA_prom[i+6]; + printk("%s", print_mac(mac, dev->dev_addr)); if (dev->irq == 0xff) ; /* Do nothing: a user-level program will set it. */ @@ -338,7 +338,6 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) dev->hard_start_xmit = seeq8005_send_packet; dev->tx_timeout = seeq8005_timeout; dev->watchdog_timeo = HZ/20; - dev->get_stats = seeq8005_get_stats; dev->set_multicast_list = set_multicast_list; dev->flags &= ~IFF_MULTICAST; @@ -391,7 +390,6 @@ static void seeq8005_timeout(struct net_device *dev) static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); short length = skb->len; unsigned char *buf; @@ -407,7 +405,7 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) hardware_send_packet(dev, buf, length); dev->trans_start = jiffies; - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; dev_kfree_skb (skb); /* You might need to clean up and record Tx statistics here. */ @@ -463,7 +461,7 @@ static irqreturn_t seeq8005_interrupt(int irq, void *dev_id) if (status & SEEQSTAT_TX_INT) { handled = 1; outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - lp->stats.tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); /* Inform upper layers. */ } if (status & SEEQSTAT_RX_INT) { @@ -531,11 +529,11 @@ static void seeq8005_rx(struct net_device *dev) } if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */ - lp->stats.rx_errors++; - if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++; - if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++; - if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++; - if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++; + if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++; + if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++; + if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++; /* skip over this packet */ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA); @@ -547,7 +545,7 @@ static void seeq8005_rx(struct net_device *dev) skb = dev_alloc_skb(pkt_len); if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); /* align data on 16 byte */ @@ -567,8 +565,8 @@ static void seeq8005_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN)); @@ -599,15 +597,6 @@ static int seeq8005_close(struct net_device *dev) } -/* Get the current statistics. This may be called with the card open or - closed. 
*/ -static struct net_device_stats *seeq8005_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 384b4685e977..ff4056310356 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -75,6 +75,7 @@ struct sgiseeq_init_block { /* Note the name ;-) */ struct sgiseeq_private { struct sgiseeq_init_block *srings; + dma_addr_t srings_dma; /* Ptrs to the descriptors in uncached space. */ struct sgiseeq_rx_desc *rx_desc; @@ -92,8 +93,6 @@ struct sgiseeq_private { unsigned char control; unsigned char mode; - struct net_device_stats stats; - spinlock_t tx_lock; }; @@ -266,18 +265,17 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, return 0; } -static inline void record_rx_errors(struct sgiseeq_private *sp, - unsigned char status) +static void record_rx_errors(struct net_device *dev, unsigned char status) { if (status & SEEQ_RSTAT_OVERF || status & SEEQ_RSTAT_SFRAME) - sp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (status & SEEQ_RSTAT_CERROR) - sp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (status & SEEQ_RSTAT_DERROR) - sp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & SEEQ_RSTAT_REOF) - sp->stats.rx_errors++; + dev->stats.rx_errors++; } static inline void rx_maybe_restart(struct sgiseeq_private *sp, @@ -327,8 +325,8 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) { netif_rx(skb); dev->last_rx = jiffies; - sp->stats.rx_packets++; - sp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } else { /* Silently drop my own packets */ dev_kfree_skb_irq(skb); @@ -336,10 +334,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp } else { printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", dev->name); - sp->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else { - record_rx_errors(sp, pkt_status); + record_rx_errors(dev, pkt_status); } /* Return the entry to the ring pool. */ @@ -391,11 +389,11 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { /* Oops, HPC detected some sort of error. */ if (status & SEEQ_TSTAT_R16) - sp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & SEEQ_TSTAT_UFLOW) - sp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (status & SEEQ_TSTAT_LCLS) - sp->stats.collisions++; + dev->stats.collisions++; } /* Ack 'em... */ @@ -411,7 +409,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp } break; } - sp->stats.tx_packets++; + dev->stats.tx_packets++; sp->tx_old = NEXT_TX(sp->tx_old); td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); td->tdma.cntinfo |= HPCDMA_EOX; @@ -515,7 +513,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Setup... */ skblen = skb->len; len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; - sp->stats.tx_bytes += len; + dev->stats.tx_bytes += len; entry = sp->tx_new; td = &sp->tx_desc[entry]; @@ -568,13 +566,6 @@ static void timeout(struct net_device *dev) netif_wake_queue(dev); } -static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev) -{ - struct sgiseeq_private *sp = netdev_priv(dev); - - return &sp->stats; -} - static void sgiseeq_set_multicast(struct net_device *dev) { struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv; @@ -631,6 +622,7 @@ static int __init sgiseeq_probe(struct platform_device *pdev) struct sgiseeq_private *sp; struct net_device *dev; int err, i; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof (struct sgiseeq_private)); if (!dev) { @@ -643,13 +635,20 @@ static int __init sgiseeq_probe(struct platform_device *pdev) sp = netdev_priv(dev); /* Make private data page aligned */ - sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL); + sr = dma_alloc_coherent(&pdev->dev, sizeof(*sp->srings), + &sp->srings_dma, GFP_KERNEL); if (!sr) { printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); err = -ENOMEM; goto err_out_free_dev; } sp->srings = sr; + sp->rx_desc = sp->srings->rxvector; + sp->tx_desc = sp->srings->txvector; + + /* A couple calculations now, saves many cycles later. */ + setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS); + setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS); memcpy(dev->dev_addr, pd->mac, ETH_ALEN); @@ -662,19 +661,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev) sp->name = sgiseeqstr; sp->mode = SEEQ_RCMD_RBCAST; - sp->rx_desc = (struct sgiseeq_rx_desc *) - CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0])); - dma_cache_wback_inv((unsigned long)&sp->srings->rxvector, - sizeof(sp->srings->rxvector)); - sp->tx_desc = (struct sgiseeq_tx_desc *) - CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0])); - dma_cache_wback_inv((unsigned long)&sp->srings->txvector, - sizeof(sp->srings->txvector)); - - /* A couple calculations now, saves many cycles later. */ - setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS); - setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS); - /* Setup PIO and DMA transfer timing */ sp->hregs->pconfig = 0x161; sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | @@ -699,7 +685,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev) dev->hard_start_xmit = sgiseeq_start_xmit; dev->tx_timeout = timeout; dev->watchdog_timeo = (200 * HZ) / 1000; - dev->get_stats = sgiseeq_get_stats; dev->set_multicast_list = sgiseeq_set_multicast; dev->set_mac_address = sgiseeq_set_mac_address; dev->irq = irq; @@ -711,9 +696,8 @@ static int __init sgiseeq_probe(struct platform_device *pdev) goto err_out_free_page; } - printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
'\n' : ':'); + printk(KERN_INFO "%s: %s %s\n", + dev->name, sgiseeqstr, print_mac(mac, dev->dev_addr)); return 0; @@ -726,15 +710,18 @@ err_out: return err; } -static void __exit sgiseeq_remove(struct platform_device *pdev) +static int __exit sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); unregister_netdev(dev); - free_page((unsigned long) sp->srings); + dma_free_coherent(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma); free_netdev(dev); platform_set_drvdata(pdev, NULL); + + return 0; } static struct platform_driver sgiseeq_driver = { diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index e886e8d7cfdf..228f650250f6 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c @@ -86,6 +86,7 @@ #include <net/dst.h> #include <net/arp.h> +#include <net/net_namespace.h> struct shaper_cb { unsigned long shapeclock; /* Time it should go out */ @@ -170,7 +171,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) */ if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) { dev_kfree_skb(skb); - shaper->stats.tx_dropped++; + dev->stats.tx_dropped++; } else skb_queue_tail(&shaper->sendq, skb); } @@ -181,7 +182,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) { ptr=skb_dequeue(&shaper->sendq); dev_kfree_skb(ptr); - shaper->stats.collisions++; + dev->stats.collisions++; } shaper_kick(shaper); spin_unlock(&shaper->lock); @@ -206,8 +207,8 @@ static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb) shaper->dev->name,newskb->priority); dev_queue_xmit(newskb); - shaper->stats.tx_bytes += skb->len; - shaper->stats.tx_packets++; + shaper->dev->stats.tx_bytes += skb->len; + shaper->dev->stats.tx_packets++; if(sh_debug) printk("Kicked new frame out.\n"); @@ -329,22 +330,17 @@ static int shaper_close(struct net_device *dev) * ARP and other resolutions and not before. 
*/ -static struct net_device_stats *shaper_get_stats(struct net_device *dev) -{ - struct shaper *sh=dev->priv; - return &sh->stats; -} - static int shaper_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, unsigned len) + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { struct shaper *sh=dev->priv; int v; if(sh_debug) printk("Shaper header\n"); - skb->dev=sh->dev; - v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len); - skb->dev=dev; + skb->dev = sh->dev; + v = dev_hard_header(skb, sh->dev, type, daddr, saddr, len); + skb->dev = dev; return v; } @@ -356,7 +352,7 @@ static int shaper_rebuild_header(struct sk_buff *skb) if(sh_debug) printk("Shaper rebuild header\n"); skb->dev=sh->dev; - v=sh->rebuild_header(skb); + v = sh->dev->header_ops->rebuild(skb); skb->dev=dev; return v; } @@ -420,51 +416,17 @@ static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) #endif +static const struct header_ops shaper_ops = { + .create = shaper_header, + .rebuild = shaper_rebuild_header, +}; + static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev) { sh->dev = dev; - sh->hard_start_xmit=dev->hard_start_xmit; sh->get_stats=dev->get_stats; - if(dev->hard_header) - { - sh->hard_header=dev->hard_header; - shdev->hard_header = shaper_header; - } - else - shdev->hard_header = NULL; - - if(dev->rebuild_header) - { - sh->rebuild_header = dev->rebuild_header; - shdev->rebuild_header = shaper_rebuild_header; - } - else - shdev->rebuild_header = NULL; -#if 0 - if(dev->hard_header_cache) - { - sh->hard_header_cache = dev->hard_header_cache; - shdev->hard_header_cache= shaper_cache; - } - else - { - shdev->hard_header_cache= NULL; - } - - if(dev->header_cache_update) - { - sh->header_cache_update = dev->header_cache_update; - shdev->header_cache_update = shaper_cache_update; - } - else - shdev->header_cache_update= NULL; -#else - shdev->header_cache_update = NULL; - shdev->hard_header_cache = NULL; -#endif shdev->neigh_setup = shaper_neigh_setup_dev; - shdev->hard_header_len=dev->hard_header_len; shdev->type=dev->type; shdev->addr_len=dev->addr_len; @@ -488,7 +450,7 @@ static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { case SHAPER_SET_DEV: { - struct net_device *them=__dev_get_by_name(ss->ss_name); + struct net_device *them=__dev_get_by_name(&init_net, ss->ss_name); if(them==NULL) return -ENODEV; if(sh->dev) @@ -532,14 +494,11 @@ static void __init shaper_setup(struct net_device *dev) * Set up the shaper. */ - SET_MODULE_OWNER(dev); - shaper_init_priv(dev); dev->open = shaper_open; dev->stop = shaper_close; dev->hard_start_xmit = shaper_start_xmit; - dev->get_stats = shaper_get_stats; dev->set_multicast_list = NULL; /* @@ -550,12 +509,6 @@ static void __init shaper_setup(struct net_device *dev) * Handlers for when we attach to a device. 
*/ - dev->hard_header = shaper_header; - dev->rebuild_header = shaper_rebuild_header; -#if 0 - dev->hard_header_cache = shaper_cache; - dev->header_cache_update= shaper_cache_update; -#endif dev->neigh_setup = shaper_neigh_setup_dev; dev->do_ioctl = shaper_ioctl; dev->hard_header_len = 0; @@ -600,10 +553,9 @@ static int __init shaper_init(void) return -ENODEV; alloc_size = sizeof(*dev) * shapers; - devs = kmalloc(alloc_size, GFP_KERNEL); + devs = kzalloc(alloc_size, GFP_KERNEL); if (!devs) return -ENOMEM; - memset(devs, 0, alloc_size); for (i = 0; i < shapers; i++) { diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index ec2ad9f0efa2..720088396bb9 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -47,24 +47,13 @@ #define PHY_ID_ANY 0x1f #define MII_REG_ANY 0x1f -#ifdef CONFIG_SIS190_NAPI -#define NAPI_SUFFIX "-NAPI" -#else -#define NAPI_SUFFIX "" -#endif - -#define DRV_VERSION "1.2" NAPI_SUFFIX +#define DRV_VERSION "1.2" #define DRV_NAME "sis190" #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION #define PFX DRV_NAME ": " -#ifdef CONFIG_SIS190_NAPI -#define sis190_rx_skb netif_receive_skb -#define sis190_rx_quota(count, quota) min(count, quota) -#else #define sis190_rx_skb netif_rx #define sis190_rx_quota(count, quota) count -#endif #define MAC_ADDR_LEN 6 @@ -281,7 +270,6 @@ struct sis190_private { void __iomem *mmio_addr; struct pci_dev *pci_dev; struct net_device *dev; - struct net_device_stats stats; spinlock_t lock; u32 rx_buf_sz; u32 cur_rx; @@ -580,7 +568,7 @@ static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) static int sis190_rx_interrupt(struct net_device *dev, struct sis190_private *tp, void __iomem *ioaddr) { - struct net_device_stats *stats = &tp->stats; + struct net_device_stats *stats = &dev->stats; u32 rx_left, cur_rx = tp->cur_rx; u32 delta, count; @@ -694,8 +682,8 @@ static void sis190_tx_interrupt(struct net_device *dev, skb = tp->Tx_skbuff[entry]; - tp->stats.tx_packets++; - tp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; sis190_unmap_tx_skb(tp->pci_dev, skb, txd); tp->Tx_skbuff[entry] = NULL; @@ -1091,7 +1079,7 @@ static void sis190_tx_clear(struct sis190_private *tp) tp->Tx_skbuff[i] = NULL; dev_kfree_skb(skb); - tp->stats.tx_dropped++; + tp->dev->stats.tx_dropped++; } tp->cur_tx = tp->dirty_tx = 0; } @@ -1115,10 +1103,8 @@ static void sis190_down(struct net_device *dev) synchronize_irq(dev->irq); - if (!poll_locked) { - netif_poll_disable(dev); + if (!poll_locked) poll_locked++; - } synchronize_sched(); @@ -1137,8 +1123,6 @@ static int sis190_close(struct net_device *dev) free_irq(dev->irq, dev); - netif_poll_enable(dev); - pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); @@ -1158,7 +1142,7 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb->len < ETH_ZLEN)) { if (skb_padto(skb, ETH_ZLEN)) { - tp->stats.tx_dropped++; + dev->stats.tx_dropped++; goto out; } len = ETH_ZLEN; @@ -1211,13 +1195,6 @@ out: return NETDEV_TX_OK; } -static struct net_device_stats *sis190_get_stats(struct net_device *dev) -{ - struct sis190_private *tp = netdev_priv(dev); - - return &tp->stats; -} - static void sis190_free_phy(struct list_head *first_phy) { struct sis190_phy *cur, *next; @@ -1436,7 +1413,6 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) goto err_out_0; } - SET_MODULE_OWNER(dev); 
SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); @@ -1593,6 +1569,9 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, pci_name(pdev)); isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL); + if (!isa_bridge) + isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0966, NULL); + if (!isa_bridge) { net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n", pci_name(pdev)); @@ -1780,6 +1759,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, struct net_device *dev; void __iomem *ioaddr; int rc; + DECLARE_MAC_BUF(mac); if (!printed_version) { net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n"); @@ -1808,7 +1788,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, dev->open = sis190_open; dev->stop = sis190_close; dev->do_ioctl = sis190_ioctl; - dev->get_stats = sis190_get_stats; dev->tx_timeout = sis190_tx_timeout; dev->watchdog_timeo = SIS190_TX_TIMEOUT; dev->hard_start_xmit = sis190_start_xmit; @@ -1831,12 +1810,9 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, goto err_remove_mii; net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", - pci_name(pdev), sis_chip_info[ent->driver_data].name, - ioaddr, dev->irq, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5]); + "%s\n", + pci_name(pdev), sis_chip_info[ent->driver_data].name, + ioaddr, dev->irq, print_mac(mac, dev->dev_addr)); net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name, (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 7c6e4808399a..0857d2c88aa0 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -158,7 +158,6 @@ typedef struct _BufferDesc { } BufferDesc; struct sis900_private { - struct net_device_stats stats; struct pci_dev * pci_dev; spinlock_t lock; @@ -221,7 +220,6 @@ static void sis900_finish_xmit (struct net_device *net_dev); static irqreturn_t sis900_interrupt(int irq, void *dev_instance); static int sis900_close(struct net_device *net_dev); static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd); -static struct net_device_stats *sis900_get_stats(struct net_device *net_dev); static u16 sis900_mcast_bitnr(u8 *addr, u8 revision); static void set_rx_mode(struct net_device *net_dev); static void sis900_reset(struct net_device *net_dev); @@ -406,6 +404,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, int i, ret; const char *card_name = card_names[pci_id->driver_data]; const char *dev_name = pci_name(pci_dev); + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -430,7 +429,6 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, net_dev = alloc_etherdev(sizeof(struct sis900_private)); if (!net_dev) return -ENOMEM; - SET_MODULE_OWNER(net_dev); SET_NETDEV_DEV(net_dev, &pci_dev->dev); /* We do a request_region() to register /proc/ioports info. 
*/ @@ -467,7 +465,6 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, net_dev->open = &sis900_open; net_dev->hard_start_xmit = &sis900_start_xmit; net_dev->stop = &sis900_close; - net_dev->get_stats = &sis900_get_stats; net_dev->set_config = &sis900_set_config; net_dev->set_multicast_list = &set_rx_mode; net_dev->do_ioctl = &mii_ioctl; @@ -537,11 +534,9 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, goto err_unmap_rx; /* print some information about our NIC */ - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", net_dev->name, - card_name, ioaddr, net_dev->irq); - for (i = 0; i < 5; i++) - printk("%2.2x:", (u8)net_dev->dev_addr[i]); - printk("%2.2x.\n", net_dev->dev_addr[i]); + printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %s\n", + net_dev->name, card_name, ioaddr, net_dev->irq, + print_mac(mac, net_dev->dev_addr)); /* Detect Wake on Lan support */ ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; @@ -1543,7 +1538,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) sis_priv->tx_skbuff[i] = NULL; sis_priv->tx_ring[i].cmdsts = 0; sis_priv->tx_ring[i].bufptr = 0; - sis_priv->stats.tx_dropped++; + net_dev->stats.tx_dropped++; } } sis_priv->tx_full = 0; @@ -1740,15 +1735,15 @@ static int sis900_rx(struct net_device *net_dev) printk(KERN_DEBUG "%s: Corrupted packet " "received, buffer status = 0x%8.8x/%d.\n", net_dev->name, rx_status, data_size); - sis_priv->stats.rx_errors++; + net_dev->stats.rx_errors++; if (rx_status & OVERRUN) - sis_priv->stats.rx_over_errors++; + net_dev->stats.rx_over_errors++; if (rx_status & (TOOLONG|RUNT)) - sis_priv->stats.rx_length_errors++; + net_dev->stats.rx_length_errors++; if (rx_status & (RXISERR | FAERR)) - sis_priv->stats.rx_frame_errors++; + net_dev->stats.rx_frame_errors++; if (rx_status & CRCERR) - sis_priv->stats.rx_crc_errors++; + net_dev->stats.rx_crc_errors++; /* reset buffer descriptor state */ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; } else { @@ -1769,7 +1764,7 @@ static int sis900_rx(struct net_device *net_dev) * in the rx ring */ skb = sis_priv->rx_skbuff[entry]; - sis_priv->stats.rx_dropped++; + net_dev->stats.rx_dropped++; goto refill_rx_ring; } @@ -1794,10 +1789,10 @@ static int sis900_rx(struct net_device *net_dev) /* some network statistics */ if ((rx_status & BCAST) == MCAST) - sis_priv->stats.multicast++; + net_dev->stats.multicast++; net_dev->last_rx = jiffies; - sis_priv->stats.rx_bytes += rx_size; - sis_priv->stats.rx_packets++; + net_dev->stats.rx_bytes += rx_size; + net_dev->stats.rx_packets++; sis_priv->dirty_rx++; refill_rx_ring: sis_priv->rx_skbuff[entry] = skb; @@ -1828,7 +1823,7 @@ refill_rx_ring: printk(KERN_INFO "%s: Memory squeeze," "deferring packet.\n", net_dev->name); - sis_priv->stats.rx_dropped++; + net_dev->stats.rx_dropped++; break; } sis_priv->rx_skbuff[entry] = skb; @@ -1879,20 +1874,20 @@ static void sis900_finish_xmit (struct net_device *net_dev) printk(KERN_DEBUG "%s: Transmit " "error, Tx status %8.8x.\n", net_dev->name, tx_status); - sis_priv->stats.tx_errors++; + net_dev->stats.tx_errors++; if (tx_status & UNDERRUN) - sis_priv->stats.tx_fifo_errors++; + net_dev->stats.tx_fifo_errors++; if (tx_status & ABORT) - sis_priv->stats.tx_aborted_errors++; + net_dev->stats.tx_aborted_errors++; if (tx_status & NOCARRIER) - sis_priv->stats.tx_carrier_errors++; + net_dev->stats.tx_carrier_errors++; if (tx_status & OWCOLL) - sis_priv->stats.tx_window_errors++; + net_dev->stats.tx_window_errors++; } else { /* packet successfully transmitted */ - sis_priv->stats.collisions += (tx_status & 
COLCNT) >> 16; - sis_priv->stats.tx_bytes += tx_status & DSIZE; - sis_priv->stats.tx_packets++; + net_dev->stats.collisions += (tx_status & COLCNT) >> 16; + net_dev->stats.tx_bytes += tx_status & DSIZE; + net_dev->stats.tx_packets++; } /* Free the original skb. */ skb = sis_priv->tx_skbuff[entry]; @@ -2139,21 +2134,6 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) } /** - * sis900_get_stats - Get sis900 read/write statistics - * @net_dev: the net device to get statistics for - * - * get tx/rx statistics for sis900 - */ - -static struct net_device_stats * -sis900_get_stats(struct net_device *net_dev) -{ - struct sis900_private *sis_priv = net_dev->priv; - - return &sis_priv->stats; -} - -/** * sis900_set_config - Set media type by net_device.set_config * @dev: the net device for media type change * @map: ifmap passed by ifconfig diff --git a/drivers/net/sk98lin/Makefile b/drivers/net/sk98lin/Makefile new file mode 100644 index 000000000000..afd900d5d730 --- /dev/null +++ b/drivers/net/sk98lin/Makefile @@ -0,0 +1,87 @@ +# +# Makefile for the SysKonnect SK-98xx device driver. +# + + +# +# Standalone driver params +# SKPARAM += -DSK_KERNEL_24 +# SKPARAM += -DSK_KERNEL_24_26 +# SKPARAM += -DSK_KERNEL_26 +# SKPARAM += -DSK_KERNEL_22_24 + +obj-$(CONFIG_SK98LIN) += sk98lin.o +sk98lin-objs := \ + skge.o \ + skethtool.o \ + skdim.o \ + skaddr.o \ + skgehwt.o \ + skgeinit.o \ + skgepnmi.o \ + skgesirq.o \ + ski2c.o \ + sklm80.o \ + skqueue.o \ + skrlmt.o \ + sktimer.o \ + skvpd.o \ + skxmac2.o + +# DBGDEF = \ +# -DDEBUG + +ifdef DEBUG +DBGDEF += \ +-DSK_DEBUG_CHKMOD=0x00000000L \ +-DSK_DEBUG_CHKCAT=0x00000000L +endif + + +# **** possible debug modules for SK_DEBUG_CHKMOD ***************** +# SK_DBGMOD_MERR 0x00000001L /* general module error indication */ +# SK_DBGMOD_HWM 0x00000002L /* Hardware init module */ +# SK_DBGMOD_RLMT 0x00000004L /* RLMT module */ +# SK_DBGMOD_VPD 0x00000008L /* VPD module */ +# SK_DBGMOD_I2C 0x00000010L /* I2C module */ +# SK_DBGMOD_PNMI 0x00000020L /* PNMI module */ +# SK_DBGMOD_CSUM 0x00000040L /* CSUM module */ +# SK_DBGMOD_ADDR 0x00000080L /* ADDR module */ +# SK_DBGMOD_DRV 0x00010000L /* DRV module */ + +# **** possible debug categories for SK_DEBUG_CHKCAT ************** +# *** common modules *** +# SK_DBGCAT_INIT 0x00000001L module/driver initialization +# SK_DBGCAT_CTRL 0x00000002L controlling: add/rmv MCA/MAC and other controls (IOCTL) +# SK_DBGCAT_ERR 0x00000004L error handling paths +# SK_DBGCAT_TX 0x00000008L transmit path +# SK_DBGCAT_RX 0x00000010L receive path +# SK_DBGCAT_IRQ 0x00000020L general IRQ handling +# SK_DBGCAT_QUEUE 0x00000040L any queue management +# SK_DBGCAT_DUMP 0x00000080L large data output e.g. hex dump +# SK_DBGCAT_FATAL 0x00000100L large data output e.g. hex dump + +# *** driver (file skge.c) *** +# SK_DBGCAT_DRV_ENTRY 0x00010000 entry points +# SK_DBGCAT_DRV_??? 0x00020000 not used +# SK_DBGCAT_DRV_MCA 0x00040000 multicast +# SK_DBGCAT_DRV_TX_PROGRESS 0x00080000 tx path +# SK_DBGCAT_DRV_RX_PROGRESS 0x00100000 rx path +# SK_DBGCAT_DRV_PROGRESS 0x00200000 general runtime +# SK_DBGCAT_DRV_??? 
0x00400000 not used +# SK_DBGCAT_DRV_PROM 0x00800000 promiscuous mode +# SK_DBGCAT_DRV_TX_FRAME 0x01000000 display tx frames +# SK_DBGCAT_DRV_ERROR 0x02000000 error conditions +# SK_DBGCAT_DRV_INT_SRC 0x04000000 interrupts sources +# SK_DBGCAT_DRV_EVENT 0x08000000 driver events + +EXTRA_CFLAGS += -Idrivers/net/sk98lin -DSK_DIAG_SUPPORT -DGENESIS -DYUKON $(DBGDEF) $(SKPARAM) + +clean: + rm -f core *.o *.a *.s + + + + + + diff --git a/drivers/net/sk98lin/h/lm80.h b/drivers/net/sk98lin/h/lm80.h new file mode 100644 index 000000000000..4e2dbbf78000 --- /dev/null +++ b/drivers/net/sk98lin/h/lm80.h @@ -0,0 +1,179 @@ +/****************************************************************************** + * + * Name: lm80.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.6 $ + * Date: $Date: 2003/05/13 17:26:52 $ + * Purpose: Contains all defines for the LM80 Chip + * (National Semiconductor). + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef __INC_LM80_H +#define __INC_LM80_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* defines ********************************************************************/ + +/* + * LM80 register definition + * + * All registers are 8 bit wide + */ +#define LM80_CFG 0x00 /* Configuration Register */ +#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */ +#define LM80_ISRC_2 0x02 /* Interrupt Status Register 2 */ +#define LM80_IMSK_1 0x03 /* Interrupt Mask Register 1 */ +#define LM80_IMSK_2 0x04 /* Interrupt Mask Register 2 */ +#define LM80_FAN_CTRL 0x05 /* Fan Devisor/RST#/OS# Register */ +#define LM80_TEMP_CTRL 0x06 /* OS# Config, Temp Res. 
Reg */ + /* 0x07 - 0x1f reserved */ + /* current values */ +#define LM80_VT0_IN 0x20 /* current Voltage 0 value */ +#define LM80_VT1_IN 0x21 /* current Voltage 1 value */ +#define LM80_VT2_IN 0x22 /* current Voltage 2 value */ +#define LM80_VT3_IN 0x23 /* current Voltage 3 value */ +#define LM80_VT4_IN 0x24 /* current Voltage 4 value */ +#define LM80_VT5_IN 0x25 /* current Voltage 5 value */ +#define LM80_VT6_IN 0x26 /* current Voltage 6 value */ +#define LM80_TEMP_IN 0x27 /* current Temperature value */ +#define LM80_FAN1_IN 0x28 /* current Fan 1 count */ +#define LM80_FAN2_IN 0x29 /* current Fan 2 count */ + /* limit values */ +#define LM80_VT0_HIGH_LIM 0x2a /* high limit val for Voltage 0 */ +#define LM80_VT0_LOW_LIM 0x2b /* low limit val for Voltage 0 */ +#define LM80_VT1_HIGH_LIM 0x2c /* high limit val for Voltage 1 */ +#define LM80_VT1_LOW_LIM 0x2d /* low limit val for Voltage 1 */ +#define LM80_VT2_HIGH_LIM 0x2e /* high limit val for Voltage 2 */ +#define LM80_VT2_LOW_LIM 0x2f /* low limit val for Voltage 2 */ +#define LM80_VT3_HIGH_LIM 0x30 /* high limit val for Voltage 3 */ +#define LM80_VT3_LOW_LIM 0x31 /* low limit val for Voltage 3 */ +#define LM80_VT4_HIGH_LIM 0x32 /* high limit val for Voltage 4 */ +#define LM80_VT4_LOW_LIM 0x33 /* low limit val for Voltage 4 */ +#define LM80_VT5_HIGH_LIM 0x34 /* high limit val for Voltage 5 */ +#define LM80_VT5_LOW_LIM 0x35 /* low limit val for Voltage 5 */ +#define LM80_VT6_HIGH_LIM 0x36 /* high limit val for Voltage 6 */ +#define LM80_VT6_LOW_LIM 0x37 /* low limit val for Voltage 6 */ +#define LM80_THOT_LIM_UP 0x38 /* hot temperature limit (high) */ +#define LM80_THOT_LIM_LO 0x39 /* hot temperature limit (low) */ +#define LM80_TOS_LIM_UP 0x3a /* OS temperature limit (high) */ +#define LM80_TOS_LIM_LO 0x3b /* OS temperature limit (low) */ +#define LM80_FAN1_COUNT_LIM 0x3c /* Fan 1 count limit (high) */ +#define LM80_FAN2_COUNT_LIM 0x3d /* Fan 2 count limit (low) */ + /* 0x3e - 0x3f reserved */ + +/* + * LM80 bit definitions + */ + +/* LM80_CFG Configuration Register */ +#define LM80_CFG_START (1<<0) /* start monitoring operation */ +#define LM80_CFG_INT_ENA (1<<1) /* enables the INT# Interrupt output */ +#define LM80_CFG_INT_POL (1<<2) /* INT# pol: 0 act low, 1 act high */ +#define LM80_CFG_INT_CLR (1<<3) /* disables INT#/RST_OUT#/OS# outputs */ +#define LM80_CFG_RESET (1<<4) /* signals a reset */ +#define LM80_CFG_CHASS_CLR (1<<5) /* clears Chassis Intrusion (CI) pin */ +#define LM80_CFG_GPO (1<<6) /* drives the GPO# pin */ +#define LM80_CFG_INIT (1<<7) /* restore power on defaults */ + +/* LM80_ISRC_1 Interrupt Status Register 1 */ +/* LM80_IMSK_1 Interrupt Mask Register 1 */ +#define LM80_IS_VT0 (1<<0) /* limit exceeded for Voltage 0 */ +#define LM80_IS_VT1 (1<<1) /* limit exceeded for Voltage 1 */ +#define LM80_IS_VT2 (1<<2) /* limit exceeded for Voltage 2 */ +#define LM80_IS_VT3 (1<<3) /* limit exceeded for Voltage 3 */ +#define LM80_IS_VT4 (1<<4) /* limit exceeded for Voltage 4 */ +#define LM80_IS_VT5 (1<<5) /* limit exceeded for Voltage 5 */ +#define LM80_IS_VT6 (1<<6) /* limit exceeded for Voltage 6 */ +#define LM80_IS_INT_IN (1<<7) /* state of INT_IN# */ + +/* LM80_ISRC_2 Interrupt Status Register 2 */ +/* LM80_IMSK_2 Interrupt Mask Register 2 */ +#define LM80_IS_TEMP (1<<0) /* HOT temperature limit exceeded */ +#define LM80_IS_BTI (1<<1) /* state of BTI# pin */ +#define LM80_IS_FAN1 (1<<2) /* count limit exceeded for Fan 1 */ +#define LM80_IS_FAN2 (1<<3) /* count limit exceeded for Fan 2 */ +#define LM80_IS_CI (1<<4) /* Chassis 
Intrusion occured */ +#define LM80_IS_OS (1<<5) /* OS temperature limit exceeded */ + /* bit 6 and 7 are reserved in LM80_ISRC_2 */ +#define LM80_IS_HT_IRQ_MD (1<<6) /* Hot temperature interrupt mode */ +#define LM80_IS_OT_IRQ_MD (1<<7) /* OS temperature interrupt mode */ + +/* LM80_FAN_CTRL Fan Devisor/RST#/OS# Register */ +#define LM80_FAN1_MD_SEL (1<<0) /* Fan 1 mode select */ +#define LM80_FAN2_MD_SEL (1<<1) /* Fan 2 mode select */ +#define LM80_FAN1_PRM_CTL (3<<2) /* Fan 1 speed control */ +#define LM80_FAN2_PRM_CTL (3<<4) /* Fan 2 speed control */ +#define LM80_FAN_OS_ENA (1<<6) /* enable OS mode on RST_OUT#/OS# pins*/ +#define LM80_FAN_RST_ENA (1<<7) /* sets RST_OUT#/OS# pins in RST mode */ + +/* LM80_TEMP_CTRL OS# Config, Temp Res. Reg */ +#define LM80_TEMP_OS_STAT (1<<0) /* mirrors the state of RST_OUT#/OS# */ +#define LM80_TEMP_OS_POL (1<<1) /* select OS# polarity */ +#define LM80_TEMP_OS_MODE (1<<2) /* selects Interrupt mode */ +#define LM80_TEMP_RES (1<<3) /* selects 9 or 11 bit temp resulution*/ +#define LM80_TEMP_LSB (0xf<<4)/* 4 LSBs of 11 bit temp data */ +#define LM80_TEMP_LSB_9 (1<<7) /* LSB of 9 bit temperature data */ + + /* 0x07 - 0x1f reserved */ +/* LM80_VT0_IN current Voltage 0 value */ +/* LM80_VT1_IN current Voltage 1 value */ +/* LM80_VT2_IN current Voltage 2 value */ +/* LM80_VT3_IN current Voltage 3 value */ +/* LM80_VT4_IN current Voltage 4 value */ +/* LM80_VT5_IN current Voltage 5 value */ +/* LM80_VT6_IN current Voltage 6 value */ +/* LM80_TEMP_IN current temperature value */ +/* LM80_FAN1_IN current Fan 1 count */ +/* LM80_FAN2_IN current Fan 2 count */ +/* LM80_VT0_HIGH_LIM high limit val for Voltage 0 */ +/* LM80_VT0_LOW_LIM low limit val for Voltage 0 */ +/* LM80_VT1_HIGH_LIM high limit val for Voltage 1 */ +/* LM80_VT1_LOW_LIM low limit val for Voltage 1 */ +/* LM80_VT2_HIGH_LIM high limit val for Voltage 2 */ +/* LM80_VT2_LOW_LIM low limit val for Voltage 2 */ +/* LM80_VT3_HIGH_LIM high limit val for Voltage 3 */ +/* LM80_VT3_LOW_LIM low limit val for Voltage 3 */ +/* LM80_VT4_HIGH_LIM high limit val for Voltage 4 */ +/* LM80_VT4_LOW_LIM low limit val for Voltage 4 */ +/* LM80_VT5_HIGH_LIM high limit val for Voltage 5 */ +/* LM80_VT5_LOW_LIM low limit val for Voltage 5 */ +/* LM80_VT6_HIGH_LIM high limit val for Voltage 6 */ +/* LM80_VT6_LOW_LIM low limit val for Voltage 6 */ +/* LM80_THOT_LIM_UP hot temperature limit (high) */ +/* LM80_THOT_LIM_LO hot temperature limit (low) */ +/* LM80_TOS_LIM_UP OS temperature limit (high) */ +/* LM80_TOS_LIM_LO OS temperature limit (low) */ +/* LM80_FAN1_COUNT_LIM Fan 1 count limit (high) */ +/* LM80_FAN2_COUNT_LIM Fan 2 count limit (low) */ + /* 0x3e - 0x3f reserved */ + +#define LM80_ADDR 0x28 /* LM80 default addr */ + +/* typedefs *******************************************************************/ + + +/* function prototypes ********************************************************/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INC_LM80_H */ diff --git a/drivers/net/sk98lin/h/skaddr.h b/drivers/net/sk98lin/h/skaddr.h new file mode 100644 index 000000000000..423ad063d09b --- /dev/null +++ b/drivers/net/sk98lin/h/skaddr.h @@ -0,0 +1,285 @@ +/****************************************************************************** + * + * Name: skaddr.h + * Project: Gigabit Ethernet Adapters, ADDR-Modul + * Version: $Revision: 1.29 $ + * Date: $Date: 2003/05/13 16:57:24 $ + * Purpose: Header file for Address Management (MC, UC, Prom). 
+ * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This module is intended to manage multicast addresses and promiscuous mode + * on GEnesis adapters. + * + * Include File Hierarchy: + * + * "skdrv1st.h" + * ... + * "sktypes.h" + * "skqueue.h" + * "skaddr.h" + * ... + * "skdrv2nd.h" + * + ******************************************************************************/ + +#ifndef __INC_SKADDR_H +#define __INC_SKADDR_H + +#ifdef __cplusplus +extern "C" { +#endif /* cplusplus */ + +/* defines ********************************************************************/ + +#define SK_MAC_ADDR_LEN 6 /* Length of MAC address. */ +#define SK_MAX_ADDRS 14 /* #Addrs for exact match. */ + +/* ----- Common return values ----- */ + +#define SK_ADDR_SUCCESS 0 /* Function returned successfully. */ +#define SK_ADDR_ILLEGAL_PORT 100 /* Port number too high. */ +#define SK_ADDR_TOO_EARLY 101 /* Function called too early. */ + +/* ----- Clear/Add flag bits ----- */ + +#define SK_ADDR_PERMANENT 1 /* RLMT Address */ + +/* ----- Additional Clear flag bits ----- */ + +#define SK_MC_SW_ONLY 2 /* Do not update HW when clearing. */ + +/* ----- Override flag bits ----- */ + +#define SK_ADDR_LOGICAL_ADDRESS 0 +#define SK_ADDR_VIRTUAL_ADDRESS (SK_ADDR_LOGICAL_ADDRESS) /* old */ +#define SK_ADDR_PHYSICAL_ADDRESS 1 +#define SK_ADDR_CLEAR_LOGICAL 2 +#define SK_ADDR_SET_LOGICAL 4 + +/* ----- Override return values ----- */ + +#define SK_ADDR_OVERRIDE_SUCCESS (SK_ADDR_SUCCESS) +#define SK_ADDR_DUPLICATE_ADDRESS 1 +#define SK_ADDR_MULTICAST_ADDRESS 2 + +/* ----- Partitioning of excact match table ----- */ + +#define SK_ADDR_EXACT_MATCHES 16 /* #Exact match entries. */ + +#define SK_ADDR_FIRST_MATCH_RLMT 1 +#define SK_ADDR_LAST_MATCH_RLMT 2 +#define SK_ADDR_FIRST_MATCH_DRV 3 +#define SK_ADDR_LAST_MATCH_DRV (SK_ADDR_EXACT_MATCHES - 1) + +/* ----- SkAddrMcAdd/SkAddrMcUpdate return values ----- */ + +#define SK_MC_FILTERING_EXACT 0 /* Exact filtering. */ +#define SK_MC_FILTERING_INEXACT 1 /* Inexact filtering. */ + +/* ----- Additional SkAddrMcAdd return values ----- */ + +#define SK_MC_ILLEGAL_ADDRESS 2 /* Illegal address. */ +#define SK_MC_ILLEGAL_PORT 3 /* Illegal port (not the active one). */ +#define SK_MC_RLMT_OVERFLOW 4 /* Too many RLMT mc addresses. */ + +/* Promiscuous mode bits ----- */ + +#define SK_PROM_MODE_NONE 0 /* Normal receive. */ +#define SK_PROM_MODE_LLC 1 /* Receive all LLC frames. */ +#define SK_PROM_MODE_ALL_MC 2 /* Receive all multicast frames. */ +/* #define SK_PROM_MODE_NON_LLC 4 */ /* Receive all non-LLC frames. */ + +/* Macros */ + +#ifdef OLD_STUFF +#ifndef SK_ADDR_EQUAL +/* + * "&" instead of "&&" allows better optimization on IA-64. + * The replacement is safe here, as all bytes exist. 
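+ *
+ * (Editor's note, illustrative sketch only: with "&&" each byte compare
+ *  may compile to a separate conditional branch, while with "&" all six
+ *  results are computed and combined branch-free, roughly
+ *
+ *      equal = (A1[0] == A2[0]) & (A1[1] == A2[1]) & ... & (A1[5] == A2[5]);
+ *
+ *  This is only valid because both buffers are always SK_MAC_ADDR_LEN (6)
+ *  bytes long, so no compare has to be skipped for safety.)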
+ */ +#ifndef SK_ADDR_DWORD_COMPARE +#define SK_ADDR_EQUAL(A1,A2) ( \ + (((SK_U8 *)(A1))[5] == ((SK_U8 *)(A2))[5]) & \ + (((SK_U8 *)(A1))[4] == ((SK_U8 *)(A2))[4]) & \ + (((SK_U8 *)(A1))[3] == ((SK_U8 *)(A2))[3]) & \ + (((SK_U8 *)(A1))[2] == ((SK_U8 *)(A2))[2]) & \ + (((SK_U8 *)(A1))[1] == ((SK_U8 *)(A2))[1]) & \ + (((SK_U8 *)(A1))[0] == ((SK_U8 *)(A2))[0])) +#else /* SK_ADDR_DWORD_COMPARE */ +#define SK_ADDR_EQUAL(A1,A2) ( \ + (*(SK_U32 *)&(((SK_U8 *)(A1))[2]) == *(SK_U32 *)&(((SK_U8 *)(A2))[2])) & \ + (*(SK_U32 *)&(((SK_U8 *)(A1))[0]) == *(SK_U32 *)&(((SK_U8 *)(A2))[0]))) +#endif /* SK_ADDR_DWORD_COMPARE */ +#endif /* SK_ADDR_EQUAL */ +#endif /* 0 */ + +#ifndef SK_ADDR_EQUAL +#ifndef SK_ADDR_DWORD_COMPARE +#define SK_ADDR_EQUAL(A1,A2) ( \ + (((SK_U8 SK_FAR *)(A1))[5] == ((SK_U8 SK_FAR *)(A2))[5]) & \ + (((SK_U8 SK_FAR *)(A1))[4] == ((SK_U8 SK_FAR *)(A2))[4]) & \ + (((SK_U8 SK_FAR *)(A1))[3] == ((SK_U8 SK_FAR *)(A2))[3]) & \ + (((SK_U8 SK_FAR *)(A1))[2] == ((SK_U8 SK_FAR *)(A2))[2]) & \ + (((SK_U8 SK_FAR *)(A1))[1] == ((SK_U8 SK_FAR *)(A2))[1]) & \ + (((SK_U8 SK_FAR *)(A1))[0] == ((SK_U8 SK_FAR *)(A2))[0])) +#else /* SK_ADDR_DWORD_COMPARE */ +#define SK_ADDR_EQUAL(A1,A2) ( \ + (*(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[4]) == \ + *(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[4])) && \ + (*(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[0]) == \ + *(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[0]))) +#endif /* SK_ADDR_DWORD_COMPARE */ +#endif /* SK_ADDR_EQUAL */ + +/* typedefs *******************************************************************/ + +typedef struct s_MacAddr { + SK_U8 a[SK_MAC_ADDR_LEN]; +} SK_MAC_ADDR; + + +/* SK_FILTER is used to ensure alignment of the filter. */ +typedef union s_InexactFilter { + SK_U8 Bytes[8]; + SK_U64 Val; /* Dummy entry for alignment only. */ +} SK_FILTER64; + + +typedef struct s_AddrNet SK_ADDR_NET; + + +typedef struct s_AddrPort { + +/* ----- Public part (read-only) ----- */ + + SK_MAC_ADDR CurrentMacAddress; /* Current physical MAC Address. */ + SK_MAC_ADDR PermanentMacAddress; /* Permanent physical MAC Address. */ + int PromMode; /* Promiscuous Mode. */ + +/* ----- Private part ----- */ + + SK_MAC_ADDR PreviousMacAddress; /* Prev. phys. MAC Address. */ + SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */ + SK_U8 Align01; + + SK_U32 FirstExactMatchRlmt; + SK_U32 NextExactMatchRlmt; + SK_U32 FirstExactMatchDrv; + SK_U32 NextExactMatchDrv; + SK_MAC_ADDR Exact[SK_ADDR_EXACT_MATCHES]; + SK_FILTER64 InexactFilter; /* For 64-bit hash register. */ + SK_FILTER64 InexactRlmtFilter; /* For 64-bit hash register. */ + SK_FILTER64 InexactDrvFilter; /* For 64-bit hash register. */ +} SK_ADDR_PORT; + + +struct s_AddrNet { +/* ----- Public part (read-only) ----- */ + + SK_MAC_ADDR CurrentMacAddress; /* Logical MAC Address. */ + SK_MAC_ADDR PermanentMacAddress; /* Logical MAC Address. */ + +/* ----- Private part ----- */ + + SK_U32 ActivePort; /* View of module ADDR. */ + SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. 
*/ + SK_U8 Align01; + SK_U16 Align02; +}; + + +typedef struct s_Addr { + +/* ----- Public part (read-only) ----- */ + + SK_ADDR_NET Net[SK_MAX_NETS]; + SK_ADDR_PORT Port[SK_MAX_MACS]; + +/* ----- Private part ----- */ +} SK_ADDR; + +/* function prototypes ********************************************************/ + +#ifndef SK_KR_PROTO + +/* Functions provided by SkAddr */ + +/* ANSI/C++ compliant function prototypes */ + +extern int SkAddrInit( + SK_AC *pAC, + SK_IOC IoC, + int Level); + +extern int SkAddrMcClear( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + int Flags); + +extern int SkAddrMcAdd( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + SK_MAC_ADDR *pMc, + int Flags); + +extern int SkAddrMcUpdate( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber); + +extern int SkAddrOverride( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + SK_MAC_ADDR SK_FAR *pNewAddr, + int Flags); + +extern int SkAddrPromiscuousChange( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + int NewPromMode); + +#ifndef SK_SLIM +extern int SkAddrSwap( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 FromPortNumber, + SK_U32 ToPortNumber); +#endif + +#else /* defined(SK_KR_PROTO)) */ + +/* Non-ANSI/C++ compliant function prototypes */ + +#error KR-style prototypes are not yet provided. + +#endif /* defined(SK_KR_PROTO)) */ + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INC_SKADDR_H */ diff --git a/drivers/net/sk98lin/h/skcsum.h b/drivers/net/sk98lin/h/skcsum.h new file mode 100644 index 000000000000..6e256bd9a28c --- /dev/null +++ b/drivers/net/sk98lin/h/skcsum.h @@ -0,0 +1,213 @@ +/****************************************************************************** + * + * Name: skcsum.h + * Project: GEnesis - SysKonnect SK-NET Gigabit Ethernet (SK-98xx) + * Version: $Revision: 1.10 $ + * Date: $Date: 2003/08/20 13:59:57 $ + * Purpose: Store/verify Internet checksum in send/receive packets. + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2001 SysKonnect GmbH. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * Public header file for the "GEnesis" common module "CSUM". + * + * "GEnesis" is an abbreviation of "Gigabit Ethernet Network System in Silicon" + * and is the code name of this SysKonnect project. + * + * Compilation Options: + * + * SK_USE_CSUM - Define if CSUM is to be used. Otherwise, CSUM will be an + * empty module. + * + * SKCS_OVERWRITE_PROTO - Define to overwrite the default protocol id + * definitions. In this case, all SKCS_PROTO_xxx definitions must be made + * external. + * + * SKCS_OVERWRITE_STATUS - Define to overwrite the default return status + * definitions. In this case, all SKCS_STATUS_xxx definitions must be made + * external. 
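+ *
+ * (Editor's note, hedged example: a build that uses this module as-is
+ *  would typically just define SK_USE_CSUM on the compiler command line,
+ *  e.g.
+ *
+ *      EXTRA_CFLAGS += -DSK_USE_CSUM
+ *
+ *  A port that needs its own protocol ids or status codes would instead
+ *  also define SKCS_OVERWRITE_PROTO and/or SKCS_OVERWRITE_STATUS and
+ *  supply the corresponding SKCS_PROTO_xxx / SKCS_STATUS_xxx values
+ *  itself. The Makefile line above is an assumption for illustration,
+ *  not part of the original sources.)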
+ * + * Include File Hierarchy: + * + * "h/skcsum.h" + * "h/sktypes.h" + * "h/skqueue.h" + * + ******************************************************************************/ + +#ifndef __INC_SKCSUM_H +#define __INC_SKCSUM_H + +#include "h/sktypes.h" +#include "h/skqueue.h" + +/* defines ********************************************************************/ + +/* + * Define the default bit flags for 'SKCS_PACKET_INFO.ProtocolFlags' if no user + * overwrite. + */ +#ifndef SKCS_OVERWRITE_PROTO /* User overwrite? */ +#define SKCS_PROTO_IP 0x1 /* IP (Internet Protocol version 4) */ +#define SKCS_PROTO_TCP 0x2 /* TCP (Transmission Control Protocol) */ +#define SKCS_PROTO_UDP 0x4 /* UDP (User Datagram Protocol) */ + +/* Indices for protocol statistics. */ +#define SKCS_PROTO_STATS_IP 0 +#define SKCS_PROTO_STATS_UDP 1 +#define SKCS_PROTO_STATS_TCP 2 +#define SKCS_NUM_PROTOCOLS 3 /* Number of supported protocols. */ +#endif /* !SKCS_OVERWRITE_PROTO */ + +/* + * Define the default SKCS_STATUS type and values if no user overwrite. + * + * SKCS_STATUS_UNKNOWN_IP_VERSION - Not an IP v4 frame. + * SKCS_STATUS_IP_CSUM_ERROR - IP checksum error. + * SKCS_STATUS_IP_CSUM_ERROR_TCP - IP checksum error in TCP frame. + * SKCS_STATUS_IP_CSUM_ERROR_UDP - IP checksum error in UDP frame + * SKCS_STATUS_IP_FRAGMENT - IP fragment (IP checksum ok). + * SKCS_STATUS_IP_CSUM_OK - IP checksum ok (not a TCP or UDP frame). + * SKCS_STATUS_TCP_CSUM_ERROR - TCP checksum error (IP checksum ok). + * SKCS_STATUS_UDP_CSUM_ERROR - UDP checksum error (IP checksum ok). + * SKCS_STATUS_TCP_CSUM_OK - IP and TCP checksum ok. + * SKCS_STATUS_UDP_CSUM_OK - IP and UDP checksum ok. + * SKCS_STATUS_IP_CSUM_OK_NO_UDP - IP checksum OK and no UDP checksum. + */ +#ifndef SKCS_OVERWRITE_STATUS /* User overwrite? */ +#define SKCS_STATUS int /* Define status type. */ + +#define SKCS_STATUS_UNKNOWN_IP_VERSION 1 +#define SKCS_STATUS_IP_CSUM_ERROR 2 +#define SKCS_STATUS_IP_FRAGMENT 3 +#define SKCS_STATUS_IP_CSUM_OK 4 +#define SKCS_STATUS_TCP_CSUM_ERROR 5 +#define SKCS_STATUS_UDP_CSUM_ERROR 6 +#define SKCS_STATUS_TCP_CSUM_OK 7 +#define SKCS_STATUS_UDP_CSUM_OK 8 +/* needed for Microsoft */ +#define SKCS_STATUS_IP_CSUM_ERROR_UDP 9 +#define SKCS_STATUS_IP_CSUM_ERROR_TCP 10 +/* UDP checksum may be omitted */ +#define SKCS_STATUS_IP_CSUM_OK_NO_UDP 11 +#endif /* !SKCS_OVERWRITE_STATUS */ + +/* Clear protocol statistics event. */ +#define SK_CSUM_EVENT_CLEAR_PROTO_STATS 1 + +/* + * Add two values in one's complement. + * + * Note: One of the two input values may be "longer" than 16-bit, but then the + * resulting sum may be 17 bits long. In this case, add zero to the result using + * SKCS_OC_ADD() again. + * + * Result = Value1 + Value2 + */ +#define SKCS_OC_ADD(Result, Value1, Value2) { \ + unsigned long Sum; \ + \ + Sum = (unsigned long) (Value1) + (unsigned long) (Value2); \ + /* Add-in any carry. */ \ + (Result) = (Sum & 0xffff) + (Sum >> 16); \ +} + +/* + * Subtract two values in one's complement. + * + * Result = Value1 - Value2 + */ +#define SKCS_OC_SUB(Result, Value1, Value2) \ + SKCS_OC_ADD((Result), (Value1), ~(Value2) & 0xffff) + +/* typedefs *******************************************************************/ + +/* + * SKCS_PROTO_STATS - The CSUM protocol statistics structure. + * + * There is one instance of this structure for each protocol supported. + */ +typedef struct s_CsProtocolStatistics { + SK_U64 RxOkCts; /* Receive checksum ok. */ + SK_U64 RxUnableCts; /* Unable to verify receive checksum. 
*/ + SK_U64 RxErrCts; /* Receive checksum error. */ + SK_U64 TxOkCts; /* Transmit checksum ok. */ + SK_U64 TxUnableCts; /* Unable to calculate checksum in hw. */ +} SKCS_PROTO_STATS; + +/* + * s_Csum - The CSUM module context structure. + */ +typedef struct s_Csum { + /* Enabled receive SK_PROTO_XXX bit flags. */ + unsigned ReceiveFlags[SK_MAX_NETS]; +#ifdef TX_CSUM + unsigned TransmitFlags[SK_MAX_NETS]; +#endif /* TX_CSUM */ + + /* The protocol statistics structure; one per supported protocol. */ + SKCS_PROTO_STATS ProtoStats[SK_MAX_NETS][SKCS_NUM_PROTOCOLS]; +} SK_CSUM; + +/* + * SKCS_PACKET_INFO - The packet information structure. + */ +typedef struct s_CsPacketInfo { + /* Bit field specifiying the desired/found protocols. */ + unsigned ProtocolFlags; + + /* Length of complete IP header, including any option fields. */ + unsigned IpHeaderLength; + + /* IP header checksum. */ + unsigned IpHeaderChecksum; + + /* TCP/UDP pseudo header checksum. */ + unsigned PseudoHeaderChecksum; +} SKCS_PACKET_INFO; + +/* function prototypes ********************************************************/ + +#ifndef SK_CS_CALCULATE_CHECKSUM +extern unsigned SkCsCalculateChecksum( + void *pData, + unsigned Length); +#endif /* SK_CS_CALCULATE_CHECKSUM */ + +extern int SkCsEvent( + SK_AC *pAc, + SK_IOC Ioc, + SK_U32 Event, + SK_EVPARA Param); + +extern SKCS_STATUS SkCsGetReceiveInfo( + SK_AC *pAc, + void *pIpHeader, + unsigned Checksum1, + unsigned Checksum2, + int NetNumber); + +extern void SkCsSetReceiveFlags( + SK_AC *pAc, + unsigned ReceiveFlags, + unsigned *pChecksum1Offset, + unsigned *pChecksum2Offset, + int NetNumber); + +#endif /* __INC_SKCSUM_H */ diff --git a/drivers/net/sk98lin/h/skdebug.h b/drivers/net/sk98lin/h/skdebug.h new file mode 100644 index 000000000000..3cba171d74b2 --- /dev/null +++ b/drivers/net/sk98lin/h/skdebug.h @@ -0,0 +1,74 @@ +/****************************************************************************** + * + * Name: skdebug.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.14 $ + * Date: $Date: 2003/05/13 17:26:00 $ + * Purpose: SK specific DEBUG support + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef __INC_SKDEBUG_H +#define __INC_SKDEBUG_H + +#ifdef DEBUG +#ifndef SK_DBG_MSG +#define SK_DBG_MSG(pAC,comp,cat,arg) \ + if ( ((comp) & SK_DBG_CHKMOD(pAC)) && \ + ((cat) & SK_DBG_CHKCAT(pAC)) ) { \ + SK_DBG_PRINTF arg ; \ + } +#endif +#else +#define SK_DBG_MSG(pAC,comp,lev,arg) +#endif + +/* PLS NOTE: + * ========= + * Due to any restrictions of kernel printf routines do not use other + * format identifiers as: %x %d %c %s . + * Never use any combined format identifiers such as: %lx %ld in your + * printf - argument (arg) because some OS specific kernel printfs may + * only support some basic identifiers. 
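+ *
+ * (Editor's note, hedged usage sketch: a debug call that respects this
+ *  restriction could look like
+ *
+ *      SK_DBG_MSG(pAC, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+ *          ("SkGeOpen: port %d irq %x\n", Port, Irq));
+ *
+ *  i.e. only the basic %x %d %c %s identifiers are used, and a 64 bit
+ *  value would be printed as two 32 bit halves with %x. The function
+ *  name and arguments here are made up for illustration.)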
+ */ + +/* Debug modules */ + +#define SK_DBGMOD_MERR 0x00000001L /* general module error indication */ +#define SK_DBGMOD_HWM 0x00000002L /* Hardware init module */ +#define SK_DBGMOD_RLMT 0x00000004L /* RLMT module */ +#define SK_DBGMOD_VPD 0x00000008L /* VPD module */ +#define SK_DBGMOD_I2C 0x00000010L /* I2C module */ +#define SK_DBGMOD_PNMI 0x00000020L /* PNMI module */ +#define SK_DBGMOD_CSUM 0x00000040L /* CSUM module */ +#define SK_DBGMOD_ADDR 0x00000080L /* ADDR module */ +#define SK_DBGMOD_PECP 0x00000100L /* PECP module */ +#define SK_DBGMOD_POWM 0x00000200L /* Power Management module */ + +/* Debug events */ + +#define SK_DBGCAT_INIT 0x00000001L /* module/driver initialization */ +#define SK_DBGCAT_CTRL 0x00000002L /* controlling devices */ +#define SK_DBGCAT_ERR 0x00000004L /* error handling paths */ +#define SK_DBGCAT_TX 0x00000008L /* transmit path */ +#define SK_DBGCAT_RX 0x00000010L /* receive path */ +#define SK_DBGCAT_IRQ 0x00000020L /* general IRQ handling */ +#define SK_DBGCAT_QUEUE 0x00000040L /* any queue management */ +#define SK_DBGCAT_DUMP 0x00000080L /* large data output e.g. hex dump */ +#define SK_DBGCAT_FATAL 0x00000100L /* fatal error */ + +#endif /* __INC_SKDEBUG_H */ diff --git a/drivers/net/sk98lin/h/skdrv1st.h b/drivers/net/sk98lin/h/skdrv1st.h new file mode 100644 index 000000000000..91b8d4f45904 --- /dev/null +++ b/drivers/net/sk98lin/h/skdrv1st.h @@ -0,0 +1,188 @@ +/****************************************************************************** + * + * Name: skdrv1st.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.4 $ + * Date: $Date: 2003/11/12 14:28:14 $ + * Purpose: First header file for driver and all other modules + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This is the first include file of the driver, which includes all + * neccessary system header files and some of the GEnesis header files. + * It also defines some basic items. 
+ * + * Include File Hierarchy: + * + * see skge.c + * + ******************************************************************************/ + +#ifndef __INC_SKDRV1ST_H +#define __INC_SKDRV1ST_H + +typedef struct s_AC SK_AC; + +/* Set card versions */ +#define SK_FAR + +/* override some default functions with optimized linux functions */ + +#define SK_PNMI_STORE_U16(p,v) memcpy((char*)(p),(char*)&(v),2) +#define SK_PNMI_STORE_U32(p,v) memcpy((char*)(p),(char*)&(v),4) +#define SK_PNMI_STORE_U64(p,v) memcpy((char*)(p),(char*)&(v),8) +#define SK_PNMI_READ_U16(p,v) memcpy((char*)&(v),(char*)(p),2) +#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),4) +#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),8) + +#define SK_ADDR_EQUAL(a1,a2) (!memcmp(a1,a2,6)) + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/bitops.h> +#include <asm/byteorder.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> + +#include <linux/init.h> +#include <asm/uaccess.h> +#include <net/checksum.h> + +#define SK_CS_CALCULATE_CHECKSUM +#ifndef CONFIG_X86_64 +#define SkCsCalculateChecksum(p,l) ((~ip_compute_csum(p, l)) & 0xffff) +#else +#define SkCsCalculateChecksum(p,l) ((~ip_fast_csum(p, l)) & 0xffff) +#endif + +#include "h/sktypes.h" +#include "h/skerror.h" +#include "h/skdebug.h" +#include "h/lm80.h" +#include "h/xmac_ii.h" + +#ifdef __LITTLE_ENDIAN +#define SK_LITTLE_ENDIAN +#else +#define SK_BIG_ENDIAN +#endif + +#define SK_NET_DEVICE net_device + + +/* we use gethrtime(), return unit: nanoseconds */ +#define SK_TICKS_PER_SEC 100 + +#define SK_MEM_MAPPED_IO + +// #define SK_RLMT_SLOW_LOOKAHEAD + +#define SK_MAX_MACS 2 +#define SK_MAX_NETS 2 + +#define SK_IOC char __iomem * + +typedef struct s_DrvRlmtMbuf SK_MBUF; + +#define SK_CONST64 INT64_C +#define SK_CONSTU64 UINT64_C + +#define SK_MEMCPY(dest,src,size) memcpy(dest,src,size) +#define SK_MEMCMP(s1,s2,size) memcmp(s1,s2,size) +#define SK_MEMSET(dest,val,size) memset(dest,val,size) +#define SK_STRLEN(pStr) strlen((char*)(pStr)) +#define SK_STRNCPY(pDest,pSrc,size) strncpy((char*)(pDest),(char*)(pSrc),size) +#define SK_STRCMP(pStr1,pStr2) strcmp((char*)(pStr1),(char*)(pStr2)) + +/* macros to access the adapter */ +#define SK_OUT8(b,a,v) writeb((v), ((b)+(a))) +#define SK_OUT16(b,a,v) writew((v), ((b)+(a))) +#define SK_OUT32(b,a,v) writel((v), ((b)+(a))) +#define SK_IN8(b,a,pv) (*(pv) = readb((b)+(a))) +#define SK_IN16(b,a,pv) (*(pv) = readw((b)+(a))) +#define SK_IN32(b,a,pv) (*(pv) = readl((b)+(a))) + +#define int8_t char +#define int16_t short +#define int32_t long +#define int64_t long long +#define uint8_t u_char +#define uint16_t u_short +#define uint32_t u_long +#define uint64_t unsigned long long +#define t_scalar_t int +#define t_uscalar_t unsigned int +#define uintptr_t unsigned long + +#define __CONCAT__(A,B) A##B + +#define INT32_C(a) __CONCAT__(a,L) +#define INT64_C(a) __CONCAT__(a,LL) +#define UINT32_C(a) __CONCAT__(a,UL) +#define UINT64_C(a) __CONCAT__(a,ULL) + +#ifdef DEBUG +#define SK_DBG_PRINTF printk +#ifndef SK_DEBUG_CHKMOD +#define SK_DEBUG_CHKMOD 0 +#endif +#ifndef SK_DEBUG_CHKCAT +#define SK_DEBUG_CHKCAT 0 +#endif +/* those come from the makefile */ +#define SK_DBG_CHKMOD(pAC) (SK_DEBUG_CHKMOD) +#define SK_DBG_CHKCAT(pAC) (SK_DEBUG_CHKCAT) + +extern void SkDbgPrintf(const char 
*format,...); + +#define SK_DBGMOD_DRV 0x00010000 + +/**** possible driver debug categories ********************************/ +#define SK_DBGCAT_DRV_ENTRY 0x00010000 +#define SK_DBGCAT_DRV_SAP 0x00020000 +#define SK_DBGCAT_DRV_MCA 0x00040000 +#define SK_DBGCAT_DRV_TX_PROGRESS 0x00080000 +#define SK_DBGCAT_DRV_RX_PROGRESS 0x00100000 +#define SK_DBGCAT_DRV_PROGRESS 0x00200000 +#define SK_DBGCAT_DRV_MSG 0x00400000 +#define SK_DBGCAT_DRV_PROM 0x00800000 +#define SK_DBGCAT_DRV_TX_FRAME 0x01000000 +#define SK_DBGCAT_DRV_ERROR 0x02000000 +#define SK_DBGCAT_DRV_INT_SRC 0x04000000 +#define SK_DBGCAT_DRV_EVENT 0x08000000 + +#endif + +#define SK_ERR_LOG SkErrorLog + +extern void SkErrorLog(SK_AC*, int, int, char*); + +#endif + diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h new file mode 100644 index 000000000000..3fa67171e832 --- /dev/null +++ b/drivers/net/sk98lin/h/skdrv2nd.h @@ -0,0 +1,447 @@ +/****************************************************************************** + * + * Name: skdrv2nd.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.10 $ + * Date: $Date: 2003/12/11 16:04:45 $ + * Purpose: Second header file for driver and all other modules + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This is the second include file of the driver, which includes all other + * neccessary files and defines all structures and constants used by the + * driver and the common modules. + * + * Include File Hierarchy: + * + * see skge.c + * + ******************************************************************************/ + +#ifndef __INC_SKDRV2ND_H +#define __INC_SKDRV2ND_H + +#include "h/skqueue.h" +#include "h/skgehwt.h" +#include "h/sktimer.h" +#include "h/ski2c.h" +#include "h/skgepnmi.h" +#include "h/skvpd.h" +#include "h/skgehw.h" +#include "h/skgeinit.h" +#include "h/skaddr.h" +#include "h/skgesirq.h" +#include "h/skcsum.h" +#include "h/skrlmt.h" +#include "h/skgedrv.h" + + +extern SK_MBUF *SkDrvAllocRlmtMbuf(SK_AC*, SK_IOC, unsigned); +extern void SkDrvFreeRlmtMbuf(SK_AC*, SK_IOC, SK_MBUF*); +extern SK_U64 SkOsGetTime(SK_AC*); +extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*); +extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*); +extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*); +extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16); +extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8); +extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA); + +#ifdef SK_DIAG_SUPPORT +extern int SkDrvEnterDiagMode(SK_AC *pAc); +extern int SkDrvLeaveDiagMode(SK_AC *pAc); +#endif + +struct s_DrvRlmtMbuf { + SK_MBUF *pNext; /* Pointer to next RLMT Mbuf. */ + SK_U8 *pData; /* Data buffer (virtually contig.). */ + unsigned Size; /* Data buffer size. */ + unsigned Length; /* Length of packet (<= Size). 
*/ + SK_U32 PortIdx; /* Receiving/transmitting port. */ +#ifdef SK_RLMT_MBUF_PRIVATE + SK_RLMT_MBUF Rlmt; /* Private part for RLMT. */ +#endif /* SK_RLMT_MBUF_PRIVATE */ + struct sk_buff *pOs; /* Pointer to message block */ +}; + + +/* + * Time macros + */ +#if SK_TICKS_PER_SEC == 100 +#define SK_PNMI_HUNDREDS_SEC(t) (t) +#else +#define SK_PNMI_HUNDREDS_SEC(t) ((((unsigned long)t) * 100) / \ + (SK_TICKS_PER_SEC)) +#endif + +/* + * New SkOsGetTime + */ +#define SkOsGetTimeCurrent(pAC, pUsec) {\ + struct timeval t;\ + do_gettimeofday(&t);\ + *pUsec = ((((t.tv_sec) * 1000000L)+t.tv_usec)/10000);\ +} + + +/* + * ioctl definitions + */ +#define SK_IOCTL_BASE (SIOCDEVPRIVATE) +#define SK_IOCTL_GETMIB (SK_IOCTL_BASE + 0) +#define SK_IOCTL_SETMIB (SK_IOCTL_BASE + 1) +#define SK_IOCTL_PRESETMIB (SK_IOCTL_BASE + 2) +#define SK_IOCTL_GEN (SK_IOCTL_BASE + 3) +#define SK_IOCTL_DIAG (SK_IOCTL_BASE + 4) + +typedef struct s_IOCTL SK_GE_IOCTL; + +struct s_IOCTL { + char __user * pData; + unsigned int Len; +}; + + +/* + * define sizes of descriptor rings in bytes + */ + +#define TX_RING_SIZE (8*1024) +#define RX_RING_SIZE (24*1024) + +/* + * Buffer size for ethernet packets + */ +#define ETH_BUF_SIZE 1540 +#define ETH_MAX_MTU 1514 +#define ETH_MIN_MTU 60 +#define ETH_MULTICAST_BIT 0x01 +#define SK_JUMBO_MTU 9000 + +/* + * transmit priority selects the queue: LOW=asynchron, HIGH=synchron + */ +#define TX_PRIO_LOW 0 +#define TX_PRIO_HIGH 1 + +/* + * alignment of rx/tx descriptors + */ +#define DESCR_ALIGN 64 + +/* + * definitions for pnmi. TODO + */ +#define SK_DRIVER_RESET(pAC, IoC) 0 +#define SK_DRIVER_SENDEVENT(pAC, IoC) 0 +#define SK_DRIVER_SELFTEST(pAC, IoC) 0 +/* For get mtu you must add an own function */ +#define SK_DRIVER_GET_MTU(pAc,IoC,i) 0 +#define SK_DRIVER_SET_MTU(pAc,IoC,i,v) 0 +#define SK_DRIVER_PRESET_MTU(pAc,IoC,i,v) 0 + +/* +** Interim definition of SK_DRV_TIMER placed in this file until +** common modules have been finalized +*/ +#define SK_DRV_TIMER 11 +#define SK_DRV_MODERATION_TIMER 1 +#define SK_DRV_MODERATION_TIMER_LENGTH 1000000 /* 1 second */ +#define SK_DRV_RX_CLEANUP_TIMER 2 +#define SK_DRV_RX_CLEANUP_TIMER_LENGTH 1000000 /* 100 millisecs */ + +/* +** Definitions regarding transmitting frames +** any calculating any checksum. 
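+** (Editor's note: with the values below the complete MAC header is
+**  6 + 6 + 2 = 14 bytes, so C_OFFSET_IPHEADER is 14 and the minimum
+**  Ethernet frame C_LEN_ETHERNET_MINSIZE is 14 + 46 = 60 bytes,
+**  matching ETH_MIN_MTU above.)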
+*/ +#define C_LEN_ETHERMAC_HEADER_DEST_ADDR 6 +#define C_LEN_ETHERMAC_HEADER_SRC_ADDR 6 +#define C_LEN_ETHERMAC_HEADER_LENTYPE 2 +#define C_LEN_ETHERMAC_HEADER ( (C_LEN_ETHERMAC_HEADER_DEST_ADDR) + \ + (C_LEN_ETHERMAC_HEADER_SRC_ADDR) + \ + (C_LEN_ETHERMAC_HEADER_LENTYPE) ) + +#define C_LEN_ETHERMTU_MINSIZE 46 +#define C_LEN_ETHERMTU_MAXSIZE_STD 1500 +#define C_LEN_ETHERMTU_MAXSIZE_JUMBO 9000 + +#define C_LEN_ETHERNET_MINSIZE ( (C_LEN_ETHERMAC_HEADER) + \ + (C_LEN_ETHERMTU_MINSIZE) ) + +#define C_OFFSET_IPHEADER C_LEN_ETHERMAC_HEADER +#define C_OFFSET_IPHEADER_IPPROTO 9 +#define C_OFFSET_TCPHEADER_TCPCS 16 +#define C_OFFSET_UDPHEADER_UDPCS 6 + +#define C_OFFSET_IPPROTO ( (C_LEN_ETHERMAC_HEADER) + \ + (C_OFFSET_IPHEADER_IPPROTO) ) + +#define C_PROTO_ID_UDP 17 /* refer to RFC 790 or Stevens' */ +#define C_PROTO_ID_TCP 6 /* TCP/IP illustrated for details */ + +/* TX and RX descriptors *****************************************************/ + +typedef struct s_RxD RXD; /* the receive descriptor */ + +struct s_RxD { + volatile SK_U32 RBControl; /* Receive Buffer Control */ + SK_U32 VNextRxd; /* Next receive descriptor,low dword */ + SK_U32 VDataLow; /* Receive buffer Addr, low dword */ + SK_U32 VDataHigh; /* Receive buffer Addr, high dword */ + SK_U32 FrameStat; /* Receive Frame Status word */ + SK_U32 TimeStamp; /* Time stamp from XMAC */ + SK_U32 TcpSums; /* TCP Sum 2 / TCP Sum 1 */ + SK_U32 TcpSumStarts; /* TCP Sum Start 2 / TCP Sum Start 1 */ + RXD *pNextRxd; /* Pointer to next Rxd */ + struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */ +}; + +typedef struct s_TxD TXD; /* the transmit descriptor */ + +struct s_TxD { + volatile SK_U32 TBControl; /* Transmit Buffer Control */ + SK_U32 VNextTxd; /* Next transmit descriptor,low dword */ + SK_U32 VDataLow; /* Transmit Buffer Addr, low dword */ + SK_U32 VDataHigh; /* Transmit Buffer Addr, high dword */ + SK_U32 FrameStat; /* Transmit Frame Status Word */ + SK_U32 TcpSumOfs; /* Reserved / TCP Sum Offset */ + SK_U16 TcpSumSt; /* TCP Sum Start */ + SK_U16 TcpSumWr; /* TCP Sum Write */ + SK_U32 TcpReserved; /* not used */ + TXD *pNextTxd; /* Pointer to next Txd */ + struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */ +}; + +/* Used interrupt bits in the interrupts source register *********************/ + +#define DRIVER_IRQS ((IS_IRQ_SW) | \ + (IS_R1_F) |(IS_R2_F) | \ + (IS_XS1_F) |(IS_XA1_F) | \ + (IS_XS2_F) |(IS_XA2_F)) + +#define SPECIAL_IRQS ((IS_HW_ERR) |(IS_I2C_READY) | \ + (IS_EXT_REG) |(IS_TIMINT) | \ + (IS_PA_TO_RX1) |(IS_PA_TO_RX2) | \ + (IS_PA_TO_TX1) |(IS_PA_TO_TX2) | \ + (IS_MAC1) |(IS_LNK_SYNC_M1)| \ + (IS_MAC2) |(IS_LNK_SYNC_M2)| \ + (IS_R1_C) |(IS_R2_C) | \ + (IS_XS1_C) |(IS_XA1_C) | \ + (IS_XS2_C) |(IS_XA2_C)) + +#define IRQ_MASK ((IS_IRQ_SW) | \ + (IS_R1_B) |(IS_R1_F) |(IS_R2_B) |(IS_R2_F) | \ + (IS_XS1_B) |(IS_XS1_F) |(IS_XA1_B)|(IS_XA1_F)| \ + (IS_XS2_B) |(IS_XS2_F) |(IS_XA2_B)|(IS_XA2_F)| \ + (IS_HW_ERR) |(IS_I2C_READY)| \ + (IS_EXT_REG) |(IS_TIMINT) | \ + (IS_PA_TO_RX1) |(IS_PA_TO_RX2)| \ + (IS_PA_TO_TX1) |(IS_PA_TO_TX2)| \ + (IS_MAC1) |(IS_MAC2) | \ + (IS_R1_C) |(IS_R2_C) | \ + (IS_XS1_C) |(IS_XA1_C) | \ + (IS_XS2_C) |(IS_XA2_C)) + +#define IRQ_HWE_MASK (IS_ERR_MSK) /* enable all HW irqs */ + +typedef struct s_DevNet DEV_NET; + +struct s_DevNet { + int PortNr; + int NetNr; + SK_AC *pAC; +}; + +typedef struct s_TxPort TX_PORT; + +struct s_TxPort { + /* the transmit descriptor rings */ + caddr_t pTxDescrRing; /* descriptor area memory */ + SK_U64 VTxDescrRing; /* descr. area bus virt. addr. 
*/ + TXD *pTxdRingHead; /* Head of Tx rings */ + TXD *pTxdRingTail; /* Tail of Tx rings */ + TXD *pTxdRingPrev; /* descriptor sent previously */ + int TxdRingFree; /* # of free entrys */ + spinlock_t TxDesRingLock; /* serialize descriptor accesses */ + SK_IOC HwAddr; /* bmu registers address */ + int PortIndex; /* index number of port (0 or 1) */ +}; + +typedef struct s_RxPort RX_PORT; + +struct s_RxPort { + /* the receive descriptor rings */ + caddr_t pRxDescrRing; /* descriptor area memory */ + SK_U64 VRxDescrRing; /* descr. area bus virt. addr. */ + RXD *pRxdRingHead; /* Head of Rx rings */ + RXD *pRxdRingTail; /* Tail of Rx rings */ + RXD *pRxdRingPrev; /* descriptor given to BMU previously */ + int RxdRingFree; /* # of free entrys */ + int RxCsum; /* use receive checksum hardware */ + spinlock_t RxDesRingLock; /* serialize descriptor accesses */ + int RxFillLimit; /* limit for buffers in ring */ + SK_IOC HwAddr; /* bmu registers address */ + int PortIndex; /* index number of port (0 or 1) */ +}; + +/* Definitions needed for interrupt moderation *******************************/ + +#define IRQ_EOF_AS_TX ((IS_XA1_F) | (IS_XA2_F)) +#define IRQ_EOF_SY_TX ((IS_XS1_F) | (IS_XS2_F)) +#define IRQ_MASK_TX_ONLY ((IRQ_EOF_AS_TX)| (IRQ_EOF_SY_TX)) +#define IRQ_MASK_RX_ONLY ((IS_R1_F) | (IS_R2_F)) +#define IRQ_MASK_SP_ONLY (SPECIAL_IRQS) +#define IRQ_MASK_TX_RX ((IRQ_MASK_TX_ONLY)| (IRQ_MASK_RX_ONLY)) +#define IRQ_MASK_SP_RX ((SPECIAL_IRQS) | (IRQ_MASK_RX_ONLY)) +#define IRQ_MASK_SP_TX ((SPECIAL_IRQS) | (IRQ_MASK_TX_ONLY)) +#define IRQ_MASK_RX_TX_SP ((SPECIAL_IRQS) | (IRQ_MASK_TX_RX)) + +#define C_INT_MOD_NONE 1 +#define C_INT_MOD_STATIC 2 +#define C_INT_MOD_DYNAMIC 4 + +#define C_CLK_FREQ_GENESIS 53215000 /* shorter: 53.125 MHz */ +#define C_CLK_FREQ_YUKON 78215000 /* shorter: 78.125 MHz */ + +#define C_INTS_PER_SEC_DEFAULT 2000 +#define C_INT_MOD_ENABLE_PERCENTAGE 50 /* if higher 50% enable */ +#define C_INT_MOD_DISABLE_PERCENTAGE 50 /* if lower 50% disable */ +#define C_INT_MOD_IPS_LOWER_RANGE 30 +#define C_INT_MOD_IPS_UPPER_RANGE 40000 + + +typedef struct s_DynIrqModInfo DIM_INFO; +struct s_DynIrqModInfo { + unsigned long PrevTimeVal; + unsigned int PrevSysLoad; + unsigned int PrevUsedTime; + unsigned int PrevTotalTime; + int PrevUsedDescrRatio; + int NbrProcessedDescr; + SK_U64 PrevPort0RxIntrCts; + SK_U64 PrevPort1RxIntrCts; + SK_U64 PrevPort0TxIntrCts; + SK_U64 PrevPort1TxIntrCts; + SK_BOOL ModJustEnabled; /* Moderation just enabled yes/no */ + + int MaxModIntsPerSec; /* Moderation Threshold */ + int MaxModIntsPerSecUpperLimit; /* Upper limit for DIM */ + int MaxModIntsPerSecLowerLimit; /* Lower limit for DIM */ + + long MaskIrqModeration; /* ModIrqType (eg. 'TxRx') */ + SK_BOOL DisplayStats; /* Stats yes/no */ + SK_BOOL AutoSizing; /* Resize DIM-timer on/off */ + int IntModTypeSelect; /* EnableIntMod (eg. 'dynamic') */ + + SK_TIMER ModTimer; /* just some timer */ +}; + +typedef struct s_PerStrm PER_STRM; + +#define SK_ALLOC_IRQ 0x00000001 + +#ifdef SK_DIAG_SUPPORT +#define DIAG_ACTIVE 1 +#define DIAG_NOTACTIVE 0 +#endif + +/**************************************************************************** + * Per board structure / Adapter Context structure: + * Allocated within attach(9e) and freed within detach(9e). 
+ * Contains all 'per device' necessary handles, flags, locks etc.: + */ +struct s_AC { + SK_GEINIT GIni; /* GE init struct */ + SK_PNMI Pnmi; /* PNMI data struct */ + SK_VPD vpd; /* vpd data struct */ + SK_QUEUE Event; /* Event queue */ + SK_HWT Hwt; /* Hardware Timer control struct */ + SK_TIMCTRL Tim; /* Software Timer control struct */ + SK_I2C I2c; /* I2C relevant data structure */ + SK_ADDR Addr; /* for Address module */ + SK_CSUM Csum; /* for checksum module */ + SK_RLMT Rlmt; /* for rlmt module */ + spinlock_t SlowPathLock; /* Normal IRQ lock */ + struct timer_list BlinkTimer; /* for LED blinking */ + int LedsOn; + SK_PNMI_STRUCT_DATA PnmiStruct; /* structure to get all Pnmi-Data */ + int RlmtMode; /* link check mode to set */ + int RlmtNets; /* Number of nets */ + + SK_IOC IoBase; /* register set of adapter */ + int BoardLevel; /* level of active hw init (0-2) */ + + SK_U32 AllocFlag; /* flag allocation of resources */ + struct pci_dev *PciDev; /* for access to pci config space */ + struct SK_NET_DEVICE *dev[2]; /* pointer to device struct */ + + int RxBufSize; /* length of receive buffers */ + struct net_device_stats stats; /* linux 'netstat -i' statistics */ + int Index; /* internal board index number */ + + /* adapter RAM sizes for queues of active port */ + int RxQueueSize; /* memory used for receive queue */ + int TxSQueueSize; /* memory used for sync. tx queue */ + int TxAQueueSize; /* memory used for async. tx queue */ + + int PromiscCount; /* promiscuous mode counter */ + int AllMultiCount; /* allmulticast mode counter */ + int MulticCount; /* number of different MC */ + /* addresses for this board */ + /* (may be more than HW can)*/ + + int HWRevision; /* Hardware revision */ + int ActivePort; /* the active XMAC port */ + int MaxPorts; /* number of activated ports */ + int TxDescrPerRing; /* # of descriptors per tx ring */ + int RxDescrPerRing; /* # of descriptors per rx ring */ + + caddr_t pDescrMem; /* Pointer to the descriptor area */ + dma_addr_t pDescrMemDMA; /* PCI DMA address of area */ + + /* the port structures with descriptor rings */ + TX_PORT TxPort[SK_MAX_MACS][2]; + RX_PORT RxPort[SK_MAX_MACS]; + + SK_BOOL CheckQueue; /* check event queue soon */ + SK_TIMER DrvCleanupTimer;/* to check for pending descriptors */ + DIM_INFO DynIrqModInfo; /* all data related to DIM */ + + /* Only for tests */ + int PortDown; + int ChipsetType; /* Chipset family type + * 0 == Genesis family support + * 1 == Yukon family support + */ +#ifdef SK_DIAG_SUPPORT + SK_U32 DiagModeActive; /* is diag active? */ + SK_BOOL DiagFlowCtrl; /* for control purposes */ + SK_PNMI_STRUCT_DATA PnmiBackup; /* backup structure for all Pnmi-Data */ + SK_BOOL WasIfUp[SK_MAX_MACS]; /* for OpenClose while + * DIAG is busy with NIC + */ +#endif + +}; + + +#endif /* __INC_SKDRV2ND_H */ + diff --git a/drivers/net/sk98lin/h/skerror.h b/drivers/net/sk98lin/h/skerror.h new file mode 100644 index 000000000000..da062f766238 --- /dev/null +++ b/drivers/net/sk98lin/h/skerror.h @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Name: skerror.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.7 $ + * Date: $Date: 2003/05/13 17:25:13 $ + * Purpose: SK specific Error log support + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef _INC_SKERROR_H_ +#define _INC_SKERROR_H_ + +/* + * Define Error Classes + */ +#define SK_ERRCL_OTHER (0) /* Other error */ +#define SK_ERRCL_CONFIG (1L<<0) /* Configuration error */ +#define SK_ERRCL_INIT (1L<<1) /* Initialization error */ +#define SK_ERRCL_NORES (1L<<2) /* Out of Resources error */ +#define SK_ERRCL_SW (1L<<3) /* Internal Software error */ +#define SK_ERRCL_HW (1L<<4) /* Hardware Failure */ +#define SK_ERRCL_COMM (1L<<5) /* Communication error */ + + +/* + * Define Error Code Bases + */ +#define SK_ERRBASE_RLMT 100 /* Base Error number for RLMT */ +#define SK_ERRBASE_HWINIT 200 /* Base Error number for HWInit */ +#define SK_ERRBASE_VPD 300 /* Base Error number for VPD */ +#define SK_ERRBASE_PNMI 400 /* Base Error number for PNMI */ +#define SK_ERRBASE_CSUM 500 /* Base Error number for Checksum */ +#define SK_ERRBASE_SIRQ 600 /* Base Error number for Special IRQ */ +#define SK_ERRBASE_I2C 700 /* Base Error number for I2C module */ +#define SK_ERRBASE_QUEUE 800 /* Base Error number for Scheduler */ +#define SK_ERRBASE_ADDR 900 /* Base Error number for Address module */ +#define SK_ERRBASE_PECP 1000 /* Base Error number for PECP */ +#define SK_ERRBASE_DRV 1100 /* Base Error number for Driver */ + +#endif /* _INC_SKERROR_H_ */ diff --git a/drivers/net/sk98lin/h/skgedrv.h b/drivers/net/sk98lin/h/skgedrv.h new file mode 100644 index 000000000000..44fd4c3de818 --- /dev/null +++ b/drivers/net/sk98lin/h/skgedrv.h @@ -0,0 +1,51 @@ +/****************************************************************************** + * + * Name: skgedrv.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.10 $ + * Date: $Date: 2003/07/04 12:25:01 $ + * Purpose: Interface with the driver + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef __INC_SKGEDRV_H_ +#define __INC_SKGEDRV_H_ + +/* defines ********************************************************************/ + +/* + * Define the driver events. + * Usually the events are defined by the destination module. + * In case of the driver we put the definition of the events here. 
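+ *
+ * (Editor's note, hedged sketch: other common modules raise these events
+ *  through the event queue, and the scheduler later hands them to the
+ *  driver's SkDrvEvent() handler, roughly like
+ *
+ *      SK_EVPARA Para;
+ *      Para.Para32[0] = PortNumber;
+ *      Para.Para32[1] = (SK_U32)-1;
+ *      SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
+ *
+ *  SkEventQueue(), SKGE_DRV and the Para32 layout are assumptions taken
+ *  from the common queue module, not defined in this header.)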
+ */ +#define SK_DRV_PORT_RESET 1 /* The port needs to be reset */ +#define SK_DRV_NET_UP 2 /* The net is operational */ +#define SK_DRV_NET_DOWN 3 /* The net is down */ +#define SK_DRV_SWITCH_SOFT 4 /* Ports switch with both links connected */ +#define SK_DRV_SWITCH_HARD 5 /* Port switch due to link failure */ +#define SK_DRV_RLMT_SEND 6 /* Send a RLMT packet */ +#define SK_DRV_ADAP_FAIL 7 /* The whole adapter fails */ +#define SK_DRV_PORT_FAIL 8 /* One port fails */ +#define SK_DRV_SWITCH_INTERN 9 /* Port switch by the driver itself */ +#define SK_DRV_POWER_DOWN 10 /* Power down mode */ +#define SK_DRV_TIMER 11 /* Timer for free use */ +#ifdef SK_NO_RLMT +#define SK_DRV_LINK_UP 12 /* Link Up event for driver */ +#define SK_DRV_LINK_DOWN 13 /* Link Down event for driver */ +#endif +#define SK_DRV_DOWNSHIFT_DET 14 /* Downshift 4-Pair / 2-Pair (YUKON only) */ +#endif /* __INC_SKGEDRV_H_ */ diff --git a/drivers/net/sk98lin/h/skgehw.h b/drivers/net/sk98lin/h/skgehw.h new file mode 100644 index 000000000000..f6282b7956db --- /dev/null +++ b/drivers/net/sk98lin/h/skgehw.h @@ -0,0 +1,2126 @@ +/****************************************************************************** + * + * Name: skgehw.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.56 $ + * Date: $Date: 2003/09/23 09:01:00 $ + * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +#ifndef __INC_SKGEHW_H +#define __INC_SKGEHW_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* defines ********************************************************************/ + +#define BIT_31 (1UL << 31) +#define BIT_30 (1L << 30) +#define BIT_29 (1L << 29) +#define BIT_28 (1L << 28) +#define BIT_27 (1L << 27) +#define BIT_26 (1L << 26) +#define BIT_25 (1L << 25) +#define BIT_24 (1L << 24) +#define BIT_23 (1L << 23) +#define BIT_22 (1L << 22) +#define BIT_21 (1L << 21) +#define BIT_20 (1L << 20) +#define BIT_19 (1L << 19) +#define BIT_18 (1L << 18) +#define BIT_17 (1L << 17) +#define BIT_16 (1L << 16) +#define BIT_15 (1L << 15) +#define BIT_14 (1L << 14) +#define BIT_13 (1L << 13) +#define BIT_12 (1L << 12) +#define BIT_11 (1L << 11) +#define BIT_10 (1L << 10) +#define BIT_9 (1L << 9) +#define BIT_8 (1L << 8) +#define BIT_7 (1L << 7) +#define BIT_6 (1L << 6) +#define BIT_5 (1L << 5) +#define BIT_4 (1L << 4) +#define BIT_3 (1L << 3) +#define BIT_2 (1L << 2) +#define BIT_1 (1L << 1) +#define BIT_0 1L + +#define BIT_15S (1U << 15) +#define BIT_14S (1 << 14) +#define BIT_13S (1 << 13) +#define BIT_12S (1 << 12) +#define BIT_11S (1 << 11) +#define BIT_10S (1 << 10) +#define BIT_9S (1 << 9) +#define BIT_8S (1 << 8) +#define BIT_7S (1 << 7) +#define BIT_6S (1 << 6) +#define BIT_5S (1 << 5) +#define BIT_4S (1 << 4) +#define BIT_3S (1 << 3) +#define BIT_2S (1 << 2) +#define BIT_1S (1 << 1) +#define BIT_0S 1 + +#define SHIFT31(x) ((x) << 31) +#define SHIFT30(x) ((x) << 30) +#define SHIFT29(x) ((x) << 29) +#define SHIFT28(x) ((x) << 28) +#define SHIFT27(x) ((x) << 27) +#define SHIFT26(x) ((x) << 26) +#define SHIFT25(x) ((x) << 25) +#define SHIFT24(x) ((x) << 24) +#define SHIFT23(x) ((x) << 23) +#define SHIFT22(x) ((x) << 22) +#define SHIFT21(x) ((x) << 21) +#define SHIFT20(x) ((x) << 20) +#define SHIFT19(x) ((x) << 19) +#define SHIFT18(x) ((x) << 18) +#define SHIFT17(x) ((x) << 17) +#define SHIFT16(x) ((x) << 16) +#define SHIFT15(x) ((x) << 15) +#define SHIFT14(x) ((x) << 14) +#define SHIFT13(x) ((x) << 13) +#define SHIFT12(x) ((x) << 12) +#define SHIFT11(x) ((x) << 11) +#define SHIFT10(x) ((x) << 10) +#define SHIFT9(x) ((x) << 9) +#define SHIFT8(x) ((x) << 8) +#define SHIFT7(x) ((x) << 7) +#define SHIFT6(x) ((x) << 6) +#define SHIFT5(x) ((x) << 5) +#define SHIFT4(x) ((x) << 4) +#define SHIFT3(x) ((x) << 3) +#define SHIFT2(x) ((x) << 2) +#define SHIFT1(x) ((x) << 1) +#define SHIFT0(x) ((x) << 0) + +/* + * Configuration Space header + * Since this module is used for different OS', those may be + * duplicate on some of them (e.g. Linux). But to keep the + * common source, we have to live with this... 
+ */ +#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */ +#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */ +#define PCI_COMMAND 0x04 /* 16 bit Command */ +#define PCI_STATUS 0x06 /* 16 bit Status */ +#define PCI_REV_ID 0x08 /* 8 bit Revision ID */ +#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */ +#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */ +#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */ +#define PCI_HEADER_T 0x0e /* 8 bit Header Type */ +#define PCI_BIST 0x0f /* 8 bit Built-in selftest */ +#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */ +#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */ + /* Byte 0x18..0x2b: reserved */ +#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */ +#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */ +#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */ +#define PCI_CAP_PTR 0x34 /* 8 bit Capabilities Ptr */ + /* Byte 0x35..0x3b: reserved */ +#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */ +#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */ +#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */ +#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */ + /* Device Dependent Region */ +#define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */ +#define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */ + /* Power Management Region */ +#define PCI_PM_CAP_ID 0x48 /* 8 bit Power Management Cap. ID */ +#define PCI_PM_NITEM 0x49 /* 8 bit Next Item Ptr */ +#define PCI_PM_CAP_REG 0x4a /* 16 bit Power Management Capabilities */ +#define PCI_PM_CTL_STS 0x4c /* 16 bit Power Manag. Control/Status */ + /* Byte 0x4e: reserved */ +#define PCI_PM_DAT_REG 0x4f /* 8 bit Power Manag. Data Register */ + /* VPD Region */ +#define PCI_VPD_CAP_ID 0x50 /* 8 bit VPD Cap. ID */ +#define PCI_VPD_NITEM 0x51 /* 8 bit Next Item Ptr */ +#define PCI_VPD_ADR_REG 0x52 /* 16 bit VPD Address Register */ +#define PCI_VPD_DAT_REG 0x54 /* 32 bit VPD Data Register */ + /* Byte 0x58..0x59: reserved */ +#define PCI_SER_LD_CTRL 0x5a /* 16 bit SEEPROM Loader Ctrl (YUKON only) */ + /* Byte 0x5c..0xff: reserved */ + +/* + * I2C Address (PCI Config) + * + * Note: The temperature and voltage sensors are relocated on a different + * I2C bus. + */ +#define I2C_ADDR_VPD 0xa0 /* I2C address for the VPD EEPROM */ + +/* + * Define Bits and Values of the registers + */ +/* PCI_COMMAND 16 bit Command */ + /* Bit 15..11: reserved */ +#define PCI_INT_DIS BIT_10S /* Interrupt INTx# disable (PCI 2.3) */ +#define PCI_FBTEN BIT_9S /* Fast Back-To-Back enable */ +#define PCI_SERREN BIT_8S /* SERR enable */ +#define PCI_ADSTEP BIT_7S /* Address Stepping */ +#define PCI_PERREN BIT_6S /* Parity Report Response enable */ +#define PCI_VGA_SNOOP BIT_5S /* VGA palette snoop */ +#define PCI_MWIEN BIT_4S /* Memory write an inv cycl ena */ +#define PCI_SCYCEN BIT_3S /* Special Cycle enable */ +#define PCI_BMEN BIT_2S /* Bus Master enable */ +#define PCI_MEMEN BIT_1S /* Memory Space Access enable */ +#define PCI_IOEN BIT_0S /* I/O Space Access enable */ + +#define PCI_COMMAND_VAL (PCI_FBTEN | PCI_SERREN | PCI_PERREN | PCI_MWIEN |\ + PCI_BMEN | PCI_MEMEN | PCI_IOEN) + +/* PCI_STATUS 16 bit Status */ +#define PCI_PERR BIT_15S /* Parity Error */ +#define PCI_SERR BIT_14S /* Signaled SERR */ +#define PCI_RMABORT BIT_13S /* Received Master Abort */ +#define PCI_RTABORT BIT_12S /* Received Target Abort */ + /* Bit 11: reserved */ +#define PCI_DEVSEL (3<<9) /* Bit 10.. 
9: DEVSEL Timing */ +#define PCI_DEV_FAST (0<<9) /* fast */ +#define PCI_DEV_MEDIUM (1<<9) /* medium */ +#define PCI_DEV_SLOW (2<<9) /* slow */ +#define PCI_DATAPERR BIT_8S /* DATA Parity error detected */ +#define PCI_FB2BCAP BIT_7S /* Fast Back-to-Back Capability */ +#define PCI_UDF BIT_6S /* User Defined Features */ +#define PCI_66MHZCAP BIT_5S /* 66 MHz PCI bus clock capable */ +#define PCI_NEWCAP BIT_4S /* New cap. list implemented */ +#define PCI_INT_STAT BIT_3S /* Interrupt INTx# Status (PCI 2.3) */ + /* Bit 2.. 0: reserved */ + +#define PCI_ERRBITS (PCI_PERR | PCI_SERR | PCI_RMABORT | PCI_RTABORT |\ + PCI_DATAPERR) + +/* PCI_CLASS_CODE 24 bit Class Code */ +/* Byte 2: Base Class (02) */ +/* Byte 1: SubClass (00) */ +/* Byte 0: Programming Interface (00) */ + +/* PCI_CACHE_LSZ 8 bit Cache Line Size */ +/* Possible values: 0,2,4,8,16,32,64,128 */ + +/* PCI_HEADER_T 8 bit Header Type */ +#define PCI_HD_MF_DEV BIT_7S /* 0= single, 1= multi-func dev */ +#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */ + +/* PCI_BIST 8 bit Built-in selftest */ +/* Built-in Self test not supported (optional) */ + +/* PCI_BASE_1ST 32 bit 1st Base address */ +#define PCI_MEMSIZE 0x4000L /* use 16 kB Memory Base */ +#define PCI_MEMBASE_MSK 0xffffc000L /* Bit 31..14: Memory Base Address */ +#define PCI_MEMSIZE_MSK 0x00003ff0L /* Bit 13.. 4: Memory Size Req. */ +#define PCI_PREFEN BIT_3 /* Prefetchable */ +#define PCI_MEM_TYP (3L<<2) /* Bit 2.. 1: Memory Type */ +#define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */ +#define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */ +#define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */ +#define PCI_MEMSPACE BIT_0 /* Memory Space Indicator */ + +/* PCI_BASE_2ND 32 bit 2nd Base address */ +#define PCI_IOBASE 0xffffff00L /* Bit 31.. 8: I/O Base address */ +#define PCI_IOSIZE 0x000000fcL /* Bit 7.. 2: I/O Size Requirements */ + /* Bit 1: reserved */ +#define PCI_IOSPACE BIT_0 /* I/O Space Indicator */ + +/* PCI_BASE_ROM 32 bit Expansion ROM Base Address */ +#define PCI_ROMBASE_MSK 0xfffe0000L /* Bit 31..17: ROM Base address */ +#define PCI_ROMBASE_SIZ (0x1cL<<14) /* Bit 16..14: Treat as Base or Size */ +#define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */ + /* Bit 10.. 1: reserved */ +#define PCI_ROMEN BIT_0 /* Address Decode enable */ + +/* Device Dependent Region */ +/* PCI_OUR_REG_1 32 bit Our Register 1 */ + /* Bit 31..29: reserved */ +#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode (YUKON only) */ +#define PCI_TEST_CAL BIT_27 /* Test PCI buffer calib. (YUKON only) */ +#define PCI_EN_CAL BIT_26 /* Enable PCI buffer calib. (YUKON only) */ +#define PCI_VIO BIT_25 /* PCI I/O Voltage, 0 = 3.3V, 1 = 5V */ +#define PCI_DIS_BOOT BIT_24 /* Disable BOOT via ROM */ +#define PCI_EN_IO BIT_23 /* Mapping to I/O space */ +#define PCI_EN_FPROM BIT_22 /* Enable FLASH mapping to memory */ + /* 1 = Map Flash to memory */ + /* 0 = Disable addr. 
dec */ +#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */ +#define PCI_PAGE_16 (0L<<20) /* 16 k pages */ +#define PCI_PAGE_32K (1L<<20) /* 32 k pages */ +#define PCI_PAGE_64K (2L<<20) /* 64 k pages */ +#define PCI_PAGE_128K (3L<<20) /* 128 k pages */ + /* Bit 19: reserved */ +#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */ +#define PCI_NOTAR BIT_15 /* No turnaround cycle */ +#define PCI_FORCE_BE BIT_14 /* Assert all BEs on MR */ +#define PCI_DIS_MRL BIT_13 /* Disable Mem Read Line */ +#define PCI_DIS_MRM BIT_12 /* Disable Mem Read Multiple */ +#define PCI_DIS_MWI BIT_11 /* Disable Mem Write & Invalidate */ +#define PCI_DISC_CLS BIT_10 /* Disc: cacheLsz bound */ +#define PCI_BURST_DIS BIT_9 /* Burst Disable */ +#define PCI_DIS_PCI_CLK BIT_8 /* Disable PCI clock driving */ +#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7.. 4: Skew Ctrl, DAS Ext */ +#define PCI_SKEW_BASE 0xfL /* Bit 3.. 0: Skew Ctrl, Base */ + + +/* PCI_OUR_REG_2 32 bit Our Register 2 */ +#define PCI_VPD_WR_THR (0xffL<<24) /* Bit 31..24: VPD Write Threshold */ +#define PCI_DEV_SEL (0x7fL<<17) /* Bit 23..17: EEPROM Device Select */ +#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 16..14: VPD ROM Size */ + /* Bit 13..12: reserved */ +#define PCI_PATCH_DIR (0xfL<<8) /* Bit 11.. 8: Ext Patches dir 3..0 */ +#define PCI_PATCH_DIR_3 BIT_11 +#define PCI_PATCH_DIR_2 BIT_10 +#define PCI_PATCH_DIR_1 BIT_9 +#define PCI_PATCH_DIR_0 BIT_8 +#define PCI_EXT_PATCHS (0xfL<<4) /* Bit 7.. 4: Extended Patches 3..0 */ +#define PCI_EXT_PATCH_3 BIT_7 +#define PCI_EXT_PATCH_2 BIT_6 +#define PCI_EXT_PATCH_1 BIT_5 +#define PCI_EXT_PATCH_0 BIT_4 +#define PCI_EN_DUMMY_RD BIT_3 /* Enable Dummy Read */ +#define PCI_REV_DESC BIT_2 /* Reverse Desc. Bytes */ + /* Bit 1: reserved */ +#define PCI_USEDATA64 BIT_0 /* Use 64Bit Data bus ext */ + + +/* Power Management Region */ +/* PCI_PM_CAP_REG 16 bit Power Management Capabilities */ +#define PCI_PME_SUP_MSK (0x1f<<11) /* Bit 15..11: PM Event Support Mask */ +#define PCI_PME_D3C_SUP BIT_15S /* PME from D3cold Support (if Vaux) */ +#define PCI_PME_D3H_SUP BIT_14S /* PME from D3hot Support */ +#define PCI_PME_D2_SUP BIT_13S /* PME from D2 Support */ +#define PCI_PME_D1_SUP BIT_12S /* PME from D1 Support */ +#define PCI_PME_D0_SUP BIT_11S /* PME from D0 Support */ +#define PCI_PM_D2_SUP BIT_10S /* D2 Support in 33 MHz mode */ +#define PCI_PM_D1_SUP BIT_9S /* D1 Support */ + /* Bit 8.. 6: reserved */ +#define PCI_PM_DSI BIT_5S /* Device Specific Initialization */ +#define PCI_PM_APS BIT_4S /* Auxialiary Power Source */ +#define PCI_PME_CLOCK BIT_3S /* PM Event Clock */ +#define PCI_PM_VER_MSK 7 /* Bit 2.. 0: PM PCI Spec. version */ + +/* PCI_PM_CTL_STS 16 bit Power Management Control/Status */ +#define PCI_PME_STATUS BIT_15S /* PME Status (YUKON only) */ +#define PCI_PM_DAT_SCL (3<<13) /* Bit 14..13: Data Reg. scaling factor */ +#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 12.. 9: PM data selector field */ +#define PCI_PME_EN BIT_8S /* Enable PME# generation (YUKON only) */ + /* Bit 7.. 2: reserved */ +#define PCI_PM_STATE_MSK 3 /* Bit 1.. 0: Power Management State */ + +#define PCI_PM_STATE_D0 0 /* D0: Operational (default) */ +#define PCI_PM_STATE_D1 1 /* D1: (YUKON only) */ +#define PCI_PM_STATE_D2 2 /* D2: (YUKON only) */ +#define PCI_PM_STATE_D3 3 /* D3: HOT, Power Down and Reset */ + +/* VPD Region */ +/* PCI_VPD_ADR_REG 16 bit VPD Address Register */ +#define PCI_VPD_FLAG BIT_15S /* starts VPD rd/wr cycle */ +#define PCI_VPD_ADR_MSK 0x7fffL /* Bit 14.. 
0: VPD address mask */ + +/* Control Register File (Address Map) */ + +/* + * Bank 0 + */ +#define B0_RAP 0x0000 /* 8 bit Register Address Port */ + /* 0x0001 - 0x0003: reserved */ +#define B0_CTST 0x0004 /* 16 bit Control/Status register */ +#define B0_LED 0x0006 /* 8 Bit LED register */ +#define B0_POWER_CTRL 0x0007 /* 8 Bit Power Control reg (YUKON only) */ +#define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */ +#define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */ +#define B0_HWE_ISRC 0x0010 /* 32 bit HW Error Interrupt Src Reg */ +#define B0_HWE_IMSK 0x0014 /* 32 bit HW Error Interrupt Mask Reg */ +#define B0_SP_ISRC 0x0018 /* 32 bit Special Interrupt Source Reg */ + /* 0x001c: reserved */ + +/* B0 XMAC 1 registers (GENESIS only) */ +#define B0_XM1_IMSK 0x0020 /* 16 bit r/w XMAC 1 Interrupt Mask Register*/ + /* 0x0022 - 0x0027: reserved */ +#define B0_XM1_ISRC 0x0028 /* 16 bit ro XMAC 1 Interrupt Status Reg */ + /* 0x002a - 0x002f: reserved */ +#define B0_XM1_PHY_ADDR 0x0030 /* 16 bit r/w XMAC 1 PHY Address Register */ + /* 0x0032 - 0x0033: reserved */ +#define B0_XM1_PHY_DATA 0x0034 /* 16 bit r/w XMAC 1 PHY Data Register */ + /* 0x0036 - 0x003f: reserved */ + +/* B0 XMAC 2 registers (GENESIS only) */ +#define B0_XM2_IMSK 0x0040 /* 16 bit r/w XMAC 2 Interrupt Mask Register*/ + /* 0x0042 - 0x0047: reserved */ +#define B0_XM2_ISRC 0x0048 /* 16 bit ro XMAC 2 Interrupt Status Reg */ + /* 0x004a - 0x004f: reserved */ +#define B0_XM2_PHY_ADDR 0x0050 /* 16 bit r/w XMAC 2 PHY Address Register */ + /* 0x0052 - 0x0053: reserved */ +#define B0_XM2_PHY_DATA 0x0054 /* 16 bit r/w XMAC 2 PHY Data Register */ + /* 0x0056 - 0x005f: reserved */ + +/* BMU Control Status Registers */ +#define B0_R1_CSR 0x0060 /* 32 bit BMU Ctrl/Stat Rx Queue 1 */ +#define B0_R2_CSR 0x0064 /* 32 bit BMU Ctrl/Stat Rx Queue 2 */ +#define B0_XS1_CSR 0x0068 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +#define B0_XA1_CSR 0x006c /* 32 bit BMU Ctrl/Stat Async Tx Queue 1*/ +#define B0_XS2_CSR 0x0070 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +#define B0_XA2_CSR 0x0074 /* 32 bit BMU Ctrl/Stat Async Tx Queue 2*/ + /* 0x0078 - 0x007f: reserved */ + +/* + * Bank 1 + * - completely empty (this is the RAP Block window) + * Note: if RAP = 1 this page is reserved + */ + +/* + * Bank 2 + */ +/* NA reg = 48 bit Network Address Register, 3x16 or 8x8 bit readable */ +#define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */ + /* 0x0106 - 0x0107: reserved */ +#define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */ + /* 0x010e - 0x010f: reserved */ +#define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */ + /* 0x0116 - 0x0117: reserved */ +#define B2_CONN_TYP 0x0118 /* 8 bit Connector type */ +#define B2_PMD_TYP 0x0119 /* 8 bit PMD type */ +#define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration / Chip Revision */ +#define B2_CHIP_ID 0x011b /* 8 bit Chip Identification Number */ + /* Eprom registers are currently of no use */ +#define B2_E_0 0x011c /* 8 bit EPROM Byte 0 (ext. 
SRAM size */ +#define B2_E_1 0x011d /* 8 bit EPROM Byte 1 (PHY type) */ +#define B2_E_2 0x011e /* 8 bit EPROM Byte 2 */ +#define B2_E_3 0x011f /* 8 bit EPROM Byte 3 */ +#define B2_FAR 0x0120 /* 32 bit Flash-Prom Addr Reg/Cnt */ +#define B2_FDP 0x0124 /* 8 bit Flash-Prom Data Port */ + /* 0x0125 - 0x0127: reserved */ +#define B2_LD_CTRL 0x0128 /* 8 bit EPROM loader control register */ +#define B2_LD_TEST 0x0129 /* 8 bit EPROM loader test register */ + /* 0x012a - 0x012f: reserved */ +#define B2_TI_INI 0x0130 /* 32 bit Timer Init Value */ +#define B2_TI_VAL 0x0134 /* 32 bit Timer Value */ +#define B2_TI_CTRL 0x0138 /* 8 bit Timer Control */ +#define B2_TI_TEST 0x0139 /* 8 Bit Timer Test */ + /* 0x013a - 0x013f: reserved */ +#define B2_IRQM_INI 0x0140 /* 32 bit IRQ Moderation Timer Init Reg.*/ +#define B2_IRQM_VAL 0x0144 /* 32 bit IRQ Moderation Timer Value */ +#define B2_IRQM_CTRL 0x0148 /* 8 bit IRQ Moderation Timer Control */ +#define B2_IRQM_TEST 0x0149 /* 8 bit IRQ Moderation Timer Test */ +#define B2_IRQM_MSK 0x014c /* 32 bit IRQ Moderation Mask */ +#define B2_IRQM_HWE_MSK 0x0150 /* 32 bit IRQ Moderation HW Error Mask */ + /* 0x0154 - 0x0157: reserved */ +#define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */ +#define B2_TST_CTRL2 0x0159 /* 8 bit Test Control Register 2 */ + /* 0x015a - 0x015b: reserved */ +#define B2_GP_IO 0x015c /* 32 bit General Purpose I/O Register */ +#define B2_I2C_CTRL 0x0160 /* 32 bit I2C HW Control Register */ +#define B2_I2C_DATA 0x0164 /* 32 bit I2C HW Data Register */ +#define B2_I2C_IRQ 0x0168 /* 32 bit I2C HW IRQ Register */ +#define B2_I2C_SW 0x016c /* 32 bit I2C SW Port Register */ + +/* Blink Source Counter (GENESIS only) */ +#define B2_BSC_INI 0x0170 /* 32 bit Blink Source Counter Init Val */ +#define B2_BSC_VAL 0x0174 /* 32 bit Blink Source Counter Value */ +#define B2_BSC_CTRL 0x0178 /* 8 bit Blink Source Counter Control */ +#define B2_BSC_STAT 0x0179 /* 8 bit Blink Source Counter Status */ +#define B2_BSC_TST 0x017a /* 16 bit Blink Source Counter Test Reg */ + /* 0x017c - 0x017f: reserved */ + +/* + * Bank 3 + */ +/* RAM Random Registers */ +#define B3_RAM_ADDR 0x0180 /* 32 bit RAM Address, to read or write */ +#define B3_RAM_DATA_LO 0x0184 /* 32 bit RAM Data Word (low dWord) */ +#define B3_RAM_DATA_HI 0x0188 /* 32 bit RAM Data Word (high dWord) */ + /* 0x018c - 0x018f: reserved */ + +/* RAM Interface Registers */ +/* + * The HW-Spec. calls this registers Timeout Value 0..11. But this names are + * not usable in SW. Please notice these are NOT real timeouts, these are + * the number of qWords transferred continuously. 
+ */
+#define B3_RI_WTO_R1 0x0190 /* 8 bit WR Timeout Queue R1 (TO0) */
+#define B3_RI_WTO_XA1 0x0191 /* 8 bit WR Timeout Queue XA1 (TO1) */
+#define B3_RI_WTO_XS1 0x0192 /* 8 bit WR Timeout Queue XS1 (TO2) */
+#define B3_RI_RTO_R1 0x0193 /* 8 bit RD Timeout Queue R1 (TO3) */
+#define B3_RI_RTO_XA1 0x0194 /* 8 bit RD Timeout Queue XA1 (TO4) */
+#define B3_RI_RTO_XS1 0x0195 /* 8 bit RD Timeout Queue XS1 (TO5) */
+#define B3_RI_WTO_R2 0x0196 /* 8 bit WR Timeout Queue R2 (TO6) */
+#define B3_RI_WTO_XA2 0x0197 /* 8 bit WR Timeout Queue XA2 (TO7) */
+#define B3_RI_WTO_XS2 0x0198 /* 8 bit WR Timeout Queue XS2 (TO8) */
+#define B3_RI_RTO_R2 0x0199 /* 8 bit RD Timeout Queue R2 (TO9) */
+#define B3_RI_RTO_XA2 0x019a /* 8 bit RD Timeout Queue XA2 (TO10)*/
+#define B3_RI_RTO_XS2 0x019b /* 8 bit RD Timeout Queue XS2 (TO11)*/
+#define B3_RI_TO_VAL 0x019c /* 8 bit Current Timeout Count Val */
+ /* 0x019d - 0x019f: reserved */
+#define B3_RI_CTRL 0x01a0 /* 16 bit RAM Interface Control Register */
+#define B3_RI_TEST 0x01a2 /* 8 bit RAM Interface Test Register */
+ /* 0x01a3 - 0x01af: reserved */
+
+/* MAC Arbiter Registers (GENESIS only) */
+/* these are the number of qWords transferred continuously and NOT real timeouts */
+#define B3_MA_TOINI_RX1 0x01b0 /* 8 bit Timeout Init Val Rx Path MAC 1 */
+#define B3_MA_TOINI_RX2 0x01b1 /* 8 bit Timeout Init Val Rx Path MAC 2 */
+#define B3_MA_TOINI_TX1 0x01b2 /* 8 bit Timeout Init Val Tx Path MAC 1 */
+#define B3_MA_TOINI_TX2 0x01b3 /* 8 bit Timeout Init Val Tx Path MAC 2 */
+#define B3_MA_TOVAL_RX1 0x01b4 /* 8 bit Timeout Value Rx Path MAC 1 */
+#define B3_MA_TOVAL_RX2 0x01b5 /* 8 bit Timeout Value Rx Path MAC 2 */
+#define B3_MA_TOVAL_TX1 0x01b6 /* 8 bit Timeout Value Tx Path MAC 1 */
+#define B3_MA_TOVAL_TX2 0x01b7 /* 8 bit Timeout Value Tx Path MAC 2 */
+#define B3_MA_TO_CTRL 0x01b8 /* 16 bit MAC Arbiter Timeout Ctrl Reg */
+#define B3_MA_TO_TEST 0x01ba /* 16 bit MAC Arbiter Timeout Test Reg */
+ /* 0x01bc - 0x01bf: reserved */
+#define B3_MA_RCINI_RX1 0x01c0 /* 8 bit Recovery Init Val Rx Path MAC 1 */
+#define B3_MA_RCINI_RX2 0x01c1 /* 8 bit Recovery Init Val Rx Path MAC 2 */
+#define B3_MA_RCINI_TX1 0x01c2 /* 8 bit Recovery Init Val Tx Path MAC 1 */
+#define B3_MA_RCINI_TX2 0x01c3 /* 8 bit Recovery Init Val Tx Path MAC 2 */
+#define B3_MA_RCVAL_RX1 0x01c4 /* 8 bit Recovery Value Rx Path MAC 1 */
+#define B3_MA_RCVAL_RX2 0x01c5 /* 8 bit Recovery Value Rx Path MAC 2 */
+#define B3_MA_RCVAL_TX1 0x01c6 /* 8 bit Recovery Value Tx Path MAC 1 */
+#define B3_MA_RCVAL_TX2 0x01c7 /* 8 bit Recovery Value Tx Path MAC 2 */
+#define B3_MA_RC_CTRL 0x01c8 /* 16 bit MAC Arbiter Recovery Ctrl Reg */
+#define B3_MA_RC_TEST 0x01ca /* 16 bit MAC Arbiter Recovery Test Reg */
+ /* 0x01cc - 0x01cf: reserved */
+
+/* Packet Arbiter Registers (GENESIS only) */
+/* these are real timeouts */
+#define B3_PA_TOINI_RX1 0x01d0 /* 16 bit Timeout Init Val Rx Path MAC 1 */
+ /* 0x01d2 - 0x01d3: reserved */
+#define B3_PA_TOINI_RX2 0x01d4 /* 16 bit Timeout Init Val Rx Path MAC 2 */
+ /* 0x01d6 - 0x01d7: reserved */
+#define B3_PA_TOINI_TX1 0x01d8 /* 16 bit Timeout Init Val Tx Path MAC 1 */
+ /* 0x01da - 0x01db: reserved */
+#define B3_PA_TOINI_TX2 0x01dc /* 16 bit Timeout Init Val Tx Path MAC 2 */
+ /* 0x01de - 0x01df: reserved */
+#define B3_PA_TOVAL_RX1 0x01e0 /* 16 bit Timeout Val Rx Path MAC 1 */
+ /* 0x01e2 - 0x01e3: reserved */
+#define B3_PA_TOVAL_RX2 0x01e4 /* 16 bit Timeout Val Rx Path MAC 2 */
+ /* 0x01e6 - 0x01e7: reserved */
+#define B3_PA_TOVAL_TX1 0x01e8 /* 16 bit Timeout Val Tx
Path MAC 1 */ + /* 0x01ea - 0x01eb: reserved */ +#define B3_PA_TOVAL_TX2 0x01ec /* 16 bit Timeout Val Tx Path MAC 2 */ + /* 0x01ee - 0x01ef: reserved */ +#define B3_PA_CTRL 0x01f0 /* 16 bit Packet Arbiter Ctrl Register */ +#define B3_PA_TEST 0x01f2 /* 16 bit Packet Arbiter Test Register */ + /* 0x01f4 - 0x01ff: reserved */ + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ +#define TXA_ITI_INI 0x0200 /* 32 bit Tx Arb Interval Timer Init Val*/ +#define TXA_ITI_VAL 0x0204 /* 32 bit Tx Arb Interval Timer Value */ +#define TXA_LIM_INI 0x0208 /* 32 bit Tx Arb Limit Counter Init Val */ +#define TXA_LIM_VAL 0x020c /* 32 bit Tx Arb Limit Counter Value */ +#define TXA_CTRL 0x0210 /* 8 bit Tx Arbiter Control Register */ +#define TXA_TEST 0x0211 /* 8 bit Tx Arbiter Test Register */ +#define TXA_STAT 0x0212 /* 8 bit Tx Arbiter Status Register */ + /* 0x0213 - 0x027f: reserved */ + /* 0x0280 - 0x0292: MAC 2 */ + /* 0x0213 - 0x027f: reserved */ + +/* + * Bank 6 + */ +/* External registers (GENESIS only) */ +#define B6_EXT_REG 0x0300 + +/* + * Bank 7 + */ +/* This is a copy of the Configuration register file (lower half) */ +#define B7_CFG_SPC 0x0380 + +/* + * Bank 8 - 15 + */ +/* Receive and Transmit Queue Registers, use Q_ADDR() to access */ +#define B8_Q_REGS 0x0400 + +/* Queue Register Offsets, use Q_ADDR() to access */ +#define Q_D 0x00 /* 8*32 bit Current Descriptor */ +#define Q_DA_L 0x20 /* 32 bit Current Descriptor Address Low dWord */ +#define Q_DA_H 0x24 /* 32 bit Current Descriptor Address High dWord */ +#define Q_AC_L 0x28 /* 32 bit Current Address Counter Low dWord */ +#define Q_AC_H 0x2c /* 32 bit Current Address Counter High dWord */ +#define Q_BC 0x30 /* 32 bit Current Byte Counter */ +#define Q_CSR 0x34 /* 32 bit BMU Control/Status Register */ +#define Q_F 0x38 /* 32 bit Flag Register */ +#define Q_T1 0x3c /* 32 bit Test Register 1 */ +#define Q_T1_TR 0x3c /* 8 bit Test Register 1 Transfer SM */ +#define Q_T1_WR 0x3d /* 8 bit Test Register 1 Write Descriptor SM */ +#define Q_T1_RD 0x3e /* 8 bit Test Register 1 Read Descriptor SM */ +#define Q_T1_SV 0x3f /* 8 bit Test Register 1 Supervisor SM */ +#define Q_T2 0x40 /* 32 bit Test Register 2 */ +#define Q_T3 0x44 /* 32 bit Test Register 3 */ + /* 0x48 - 0x7f: reserved */ + +/* + * Bank 16 - 23 + */ +/* RAM Buffer Registers */ +#define B16_RAM_REGS 0x0800 + +/* RAM Buffer Register Offsets, use RB_ADDR() to access */ +#define RB_START 0x00 /* 32 bit RAM Buffer Start Address */ +#define RB_END 0x04 /* 32 bit RAM Buffer End Address */ +#define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */ +#define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */ +#define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Pack */ +#define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Pack */ +#define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */ +#define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ +#define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */ +#define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */ +#define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */ +#define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */ +#define RB_TST2 0x2A /* 8 bit RAM Buffer Test Register 2 */ + /* 0x2c - 0x7f: reserved */ + +/* + * Bank 24 + */ +/* + * Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) + * use MR_ADDR() to access + */ +#define RX_MFF_EA 0x0c00 /* 32 bit Receive MAC FIFO End Address */ 
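The "use MR_ADDR()/Q_ADDR()/RB_ADDR() to access" remarks above refer to helper macros that are not defined in this header. Purely for orientation, and assuming only the offsets listed here (per-MAC register blocks are spaced 0x80 apart, queue and RAM buffer blocks hang off B8_Q_REGS and B16_RAM_REGS in 0x80 steps), such helpers could look like the hypothetical sketch below; the EX_* names and the queue-index mapping are illustrative only and are not defined by this header:

	/* Hypothetical access helpers, for orientation only */
	#define EX_MR_ADDR(Mac, Offs)	((((Mac) & 1) << 7) + (Offs))	/* MAC 0/1 blocks, 0x80 apart */
	#define EX_Q_ADDR(Queue, Offs)	(B8_Q_REGS + ((Queue) << 7) + (Offs))
	#define EX_RB_ADDR(Queue, Offs)	(B16_RAM_REGS + ((Queue) << 7) + (Offs))

	/* e.g. the Tx Arbiter control register of the second MAC:
	 * EX_MR_ADDR(1, TXA_CTRL) == 0x0290, inside the 0x0280 - 0x0292 MAC 2 window noted above.
	 */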
+#define RX_MFF_WP 0x0c04 /* 32 bit Receive MAC FIFO Write Pointer */ + /* 0x0c08 - 0x0c0b: reserved */ +#define RX_MFF_RP 0x0c0c /* 32 bit Receive MAC FIFO Read Pointer */ +#define RX_MFF_PC 0x0c10 /* 32 bit Receive MAC FIFO Packet Cnt */ +#define RX_MFF_LEV 0x0c14 /* 32 bit Receive MAC FIFO Level */ +#define RX_MFF_CTRL1 0x0c18 /* 16 bit Receive MAC FIFO Control Reg 1*/ +#define RX_MFF_STAT_TO 0x0c1a /* 8 bit Receive MAC Status Timeout */ +#define RX_MFF_TIST_TO 0x0c1b /* 8 bit Receive MAC Time Stamp Timeout */ +#define RX_MFF_CTRL2 0x0c1c /* 8 bit Receive MAC FIFO Control Reg 2*/ +#define RX_MFF_TST1 0x0c1d /* 8 bit Receive MAC FIFO Test Reg 1 */ +#define RX_MFF_TST2 0x0c1e /* 8 bit Receive MAC FIFO Test Reg 2 */ + /* 0x0c1f: reserved */ +#define RX_LED_INI 0x0c20 /* 32 bit Receive LED Cnt Init Value */ +#define RX_LED_VAL 0x0c24 /* 32 bit Receive LED Cnt Current Value */ +#define RX_LED_CTRL 0x0c28 /* 8 bit Receive LED Cnt Control Reg */ +#define RX_LED_TST 0x0c29 /* 8 bit Receive LED Cnt Test Register */ + /* 0x0c2a - 0x0c2f: reserved */ +#define LNK_SYNC_INI 0x0c30 /* 32 bit Link Sync Cnt Init Value */ +#define LNK_SYNC_VAL 0x0c34 /* 32 bit Link Sync Cnt Current Value */ +#define LNK_SYNC_CTRL 0x0c38 /* 8 bit Link Sync Cnt Control Register */ +#define LNK_SYNC_TST 0x0c39 /* 8 bit Link Sync Cnt Test Register */ + /* 0x0c3a - 0x0c3b: reserved */ +#define LNK_LED_REG 0x0c3c /* 8 bit Link LED Register */ + /* 0x0c3d - 0x0c3f: reserved */ + +/* Receive GMAC FIFO (YUKON only), use MR_ADDR() to access */ +#define RX_GMF_EA 0x0c40 /* 32 bit Rx GMAC FIFO End Address */ +#define RX_GMF_AF_THR 0x0c44 /* 32 bit Rx GMAC FIFO Almost Full Thresh. */ +#define RX_GMF_CTRL_T 0x0c48 /* 32 bit Rx GMAC FIFO Control/Test */ +#define RX_GMF_FL_MSK 0x0c4c /* 32 bit Rx GMAC FIFO Flush Mask */ +#define RX_GMF_FL_THR 0x0c50 /* 32 bit Rx GMAC FIFO Flush Threshold */ + /* 0x0c54 - 0x0c5f: reserved */ +#define RX_GMF_WP 0x0c60 /* 32 bit Rx GMAC FIFO Write Pointer */ + /* 0x0c64 - 0x0c67: reserved */ +#define RX_GMF_WLEV 0x0c68 /* 32 bit Rx GMAC FIFO Write Level */ + /* 0x0c6c - 0x0c6f: reserved */ +#define RX_GMF_RP 0x0c70 /* 32 bit Rx GMAC FIFO Read Pointer */ + /* 0x0c74 - 0x0c77: reserved */ +#define RX_GMF_RLEV 0x0c78 /* 32 bit Rx GMAC FIFO Read Level */ + /* 0x0c7c - 0x0c7f: reserved */ + +/* + * Bank 25 + */ + /* 0x0c80 - 0x0cbf: MAC 2 */ + /* 0x0cc0 - 0x0cff: reserved */ + +/* + * Bank 26 + */ +/* + * Transmit MAC FIFO and Transmit LED Registers (GENESIS only), + * use MR_ADDR() to access + */ +#define TX_MFF_EA 0x0d00 /* 32 bit Transmit MAC FIFO End Address */ +#define TX_MFF_WP 0x0d04 /* 32 bit Transmit MAC FIFO WR Pointer */ +#define TX_MFF_WSP 0x0d08 /* 32 bit Transmit MAC FIFO WR Shadow Ptr */ +#define TX_MFF_RP 0x0d0c /* 32 bit Transmit MAC FIFO RD Pointer */ +#define TX_MFF_PC 0x0d10 /* 32 bit Transmit MAC FIFO Packet Cnt */ +#define TX_MFF_LEV 0x0d14 /* 32 bit Transmit MAC FIFO Level */ +#define TX_MFF_CTRL1 0x0d18 /* 16 bit Transmit MAC FIFO Ctrl Reg 1 */ +#define TX_MFF_WAF 0x0d1a /* 8 bit Transmit MAC Wait after flush */ + /* 0x0c1b: reserved */ +#define TX_MFF_CTRL2 0x0d1c /* 8 bit Transmit MAC FIFO Ctrl Reg 2 */ +#define TX_MFF_TST1 0x0d1d /* 8 bit Transmit MAC FIFO Test Reg 1 */ +#define TX_MFF_TST2 0x0d1e /* 8 bit Transmit MAC FIFO Test Reg 2 */ + /* 0x0d1f: reserved */ +#define TX_LED_INI 0x0d20 /* 32 bit Transmit LED Cnt Init Value */ +#define TX_LED_VAL 0x0d24 /* 32 bit Transmit LED Cnt Current Val */ +#define TX_LED_CTRL 0x0d28 /* 8 bit Transmit LED Cnt Control Reg */ +#define TX_LED_TST 
0x0d29 /* 8 bit Transmit LED Cnt Test Reg */ + /* 0x0d2a - 0x0d3f: reserved */ + +/* Transmit GMAC FIFO (YUKON only), use MR_ADDR() to access */ +#define TX_GMF_EA 0x0d40 /* 32 bit Tx GMAC FIFO End Address */ +#define TX_GMF_AE_THR 0x0d44 /* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ +#define TX_GMF_CTRL_T 0x0d48 /* 32 bit Tx GMAC FIFO Control/Test */ + /* 0x0d4c - 0x0d5f: reserved */ +#define TX_GMF_WP 0x0d60 /* 32 bit Tx GMAC FIFO Write Pointer */ +#define TX_GMF_WSP 0x0d64 /* 32 bit Tx GMAC FIFO Write Shadow Ptr. */ +#define TX_GMF_WLEV 0x0d68 /* 32 bit Tx GMAC FIFO Write Level */ + /* 0x0d6c - 0x0d6f: reserved */ +#define TX_GMF_RP 0x0d70 /* 32 bit Tx GMAC FIFO Read Pointer */ +#define TX_GMF_RSTP 0x0d74 /* 32 bit Tx GMAC FIFO Restart Pointer */ +#define TX_GMF_RLEV 0x0d78 /* 32 bit Tx GMAC FIFO Read Level */ + /* 0x0d7c - 0x0d7f: reserved */ + +/* + * Bank 27 + */ + /* 0x0d80 - 0x0dbf: MAC 2 */ + /* 0x0daa - 0x0dff: reserved */ + +/* + * Bank 28 + */ +/* Descriptor Poll Timer Registers */ +#define B28_DPT_INI 0x0e00 /* 24 bit Descriptor Poll Timer Init Val */ +#define B28_DPT_VAL 0x0e04 /* 24 bit Descriptor Poll Timer Curr Val */ +#define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg */ + /* 0x0e09: reserved */ +#define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg */ + /* 0x0e0b: reserved */ + +/* Time Stamp Timer Registers (YUKON only) */ + /* 0x0e10: reserved */ +#define GMAC_TI_ST_VAL 0x0e14 /* 32 bit Time Stamp Timer Curr Val */ +#define GMAC_TI_ST_CTRL 0x0e18 /* 8 bit Time Stamp Timer Ctrl Reg */ + /* 0x0e19: reserved */ +#define GMAC_TI_ST_TST 0x0e1a /* 8 bit Time Stamp Timer Test Reg */ + /* 0x0e1b - 0x0e7f: reserved */ + +/* + * Bank 29 + */ + /* 0x0e80 - 0x0efc: reserved */ + +/* + * Bank 30 + */ +/* GMAC and GPHY Control Registers (YUKON only) */ +#define GMAC_CTRL 0x0f00 /* 32 bit GMAC Control Reg */ +#define GPHY_CTRL 0x0f04 /* 32 bit GPHY Control Reg */ +#define GMAC_IRQ_SRC 0x0f08 /* 8 bit GMAC Interrupt Source Reg */ + /* 0x0f09 - 0x0f0b: reserved */ +#define GMAC_IRQ_MSK 0x0f0c /* 8 bit GMAC Interrupt Mask Reg */ + /* 0x0f0d - 0x0f0f: reserved */ +#define GMAC_LINK_CTRL 0x0f10 /* 16 bit Link Control Reg */ + /* 0x0f14 - 0x0f1f: reserved */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + +#define WOL_REG_OFFS 0x20 /* HW-Bug: Address is + 0x20 against spec. 
*/ + +#define WOL_CTRL_STAT 0x0f20 /* 16 bit WOL Control/Status Reg */ +#define WOL_MATCH_CTL 0x0f22 /* 8 bit WOL Match Control Reg */ +#define WOL_MATCH_RES 0x0f23 /* 8 bit WOL Match Result Reg */ +#define WOL_MAC_ADDR_LO 0x0f24 /* 32 bit WOL MAC Address Low */ +#define WOL_MAC_ADDR_HI 0x0f28 /* 16 bit WOL MAC Address High */ +#define WOL_PATT_RPTR 0x0f2c /* 8 bit WOL Pattern Read Ptr */ + +/* use this macro to access above registers */ +#define WOL_REG(Reg) ((Reg) + (pAC->GIni.GIWolOffs)) + + +/* WOL Pattern Length Registers (YUKON only) */ + +#define WOL_PATT_LEN_LO 0x0f30 /* 32 bit WOL Pattern Length 3..0 */ +#define WOL_PATT_LEN_HI 0x0f34 /* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + +#define WOL_PATT_CNT_0 0x0f38 /* 32 bit WOL Pattern Counter 3..0 */ +#define WOL_PATT_CNT_4 0x0f3c /* 24 bit WOL Pattern Counter 6..4 */ + /* 0x0f40 - 0x0f7f: reserved */ + +/* + * Bank 31 + */ +/* 0x0f80 - 0x0fff: reserved */ + +/* + * Bank 32 - 33 + */ +#define WOL_PATT_RAM_1 0x1000 /* WOL Pattern RAM Link 1 */ + +/* + * Bank 0x22 - 0x3f + */ +/* 0x1100 - 0x1fff: reserved */ + +/* + * Bank 0x40 - 0x4f + */ +#define BASE_XMAC_1 0x2000 /* XMAC 1 registers */ + +/* + * Bank 0x50 - 0x5f + */ + +#define BASE_GMAC_1 0x2800 /* GMAC 1 registers */ + +/* + * Bank 0x60 - 0x6f + */ +#define BASE_XMAC_2 0x3000 /* XMAC 2 registers */ + +/* + * Bank 0x70 - 0x7f + */ +#define BASE_GMAC_2 0x3800 /* GMAC 2 registers */ + +/* + * Control Register Bit Definitions: + */ +/* B0_RAP 8 bit Register Address Port */ + /* Bit 7: reserved */ +#define RAP_RAP 0x3f /* Bit 6..0: 0 = block 0,..,6f = block 6f */ + +/* B0_CTST 16 bit Control/Status register */ + /* Bit 15..14: reserved */ +#define CS_CLK_RUN_HOT BIT_13S /* CLK_RUN hot m. (YUKON-Lite only) */ +#define CS_CLK_RUN_RST BIT_12S /* CLK_RUN reset (YUKON-Lite only) */ +#define CS_CLK_RUN_ENA BIT_11S /* CLK_RUN enable (YUKON-Lite only) */ +#define CS_VAUX_AVAIL BIT_10S /* VAUX available (YUKON only) */ +#define CS_BUS_CLOCK BIT_9S /* Bus Clock 0/1 = 33/66 MHz */ +#define CS_BUS_SLOT_SZ BIT_8S /* Slot Size 0/1 = 32/64 bit slot */ +#define CS_ST_SW_IRQ BIT_7S /* Set IRQ SW Request */ +#define CS_CL_SW_IRQ BIT_6S /* Clear IRQ SW Request */ +#define CS_STOP_DONE BIT_5S /* Stop Master is finished */ +#define CS_STOP_MAST BIT_4S /* Command Bit to stop the master */ +#define CS_MRST_CLR BIT_3S /* Clear Master reset */ +#define CS_MRST_SET BIT_2S /* Set Master reset */ +#define CS_RST_CLR BIT_1S /* Clear Software reset */ +#define CS_RST_SET BIT_0S /* Set Software reset */ + +/* B0_LED 8 Bit LED register */ + /* Bit 7.. 
2: reserved */ +#define LED_STAT_ON BIT_1S /* Status LED on */ +#define LED_STAT_OFF BIT_0S /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ +#define PC_VAUX_ENA BIT_7 /* Switch VAUX Enable */ +#define PC_VAUX_DIS BIT_6 /* Switch VAUX Disable */ +#define PC_VCC_ENA BIT_5 /* Switch VCC Enable */ +#define PC_VCC_DIS BIT_4 /* Switch VCC Disable */ +#define PC_VAUX_ON BIT_3 /* Switch VAUX On */ +#define PC_VAUX_OFF BIT_2 /* Switch VAUX Off */ +#define PC_VCC_ON BIT_1 /* Switch VCC On */ +#define PC_VCC_OFF BIT_0 /* Switch VCC Off */ + +/* B0_ISRC 32 bit Interrupt Source Register */ +/* B0_IMSK 32 bit Interrupt Mask Register */ +/* B0_SP_ISRC 32 bit Special Interrupt Source Reg */ +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ +#define IS_ALL_MSK 0xbfffffffUL /* All Interrupt bits */ +#define IS_HW_ERR BIT_31 /* Interrupt HW Error */ + /* Bit 30: reserved */ +#define IS_PA_TO_RX1 BIT_29 /* Packet Arb Timeout Rx1 */ +#define IS_PA_TO_RX2 BIT_28 /* Packet Arb Timeout Rx2 */ +#define IS_PA_TO_TX1 BIT_27 /* Packet Arb Timeout Tx1 */ +#define IS_PA_TO_TX2 BIT_26 /* Packet Arb Timeout Tx2 */ +#define IS_I2C_READY BIT_25 /* IRQ on end of I2C Tx */ +#define IS_IRQ_SW BIT_24 /* SW forced IRQ */ +#define IS_EXT_REG BIT_23 /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ +#define IS_TIMINT BIT_22 /* IRQ from Timer */ +#define IS_MAC1 BIT_21 /* IRQ from MAC 1 */ +#define IS_LNK_SYNC_M1 BIT_20 /* Link Sync Cnt wrap MAC 1 */ +#define IS_MAC2 BIT_19 /* IRQ from MAC 2 */ +#define IS_LNK_SYNC_M2 BIT_18 /* Link Sync Cnt wrap MAC 2 */ +/* Receive Queue 1 */ +#define IS_R1_B BIT_17 /* Q_R1 End of Buffer */ +#define IS_R1_F BIT_16 /* Q_R1 End of Frame */ +#define IS_R1_C BIT_15 /* Q_R1 Encoding Error */ +/* Receive Queue 2 */ +#define IS_R2_B BIT_14 /* Q_R2 End of Buffer */ +#define IS_R2_F BIT_13 /* Q_R2 End of Frame */ +#define IS_R2_C BIT_12 /* Q_R2 Encoding Error */ +/* Synchronous Transmit Queue 1 */ +#define IS_XS1_B BIT_11 /* Q_XS1 End of Buffer */ +#define IS_XS1_F BIT_10 /* Q_XS1 End of Frame */ +#define IS_XS1_C BIT_9 /* Q_XS1 Encoding Error */ +/* Asynchronous Transmit Queue 1 */ +#define IS_XA1_B BIT_8 /* Q_XA1 End of Buffer */ +#define IS_XA1_F BIT_7 /* Q_XA1 End of Frame */ +#define IS_XA1_C BIT_6 /* Q_XA1 Encoding Error */ +/* Synchronous Transmit Queue 2 */ +#define IS_XS2_B BIT_5 /* Q_XS2 End of Buffer */ +#define IS_XS2_F BIT_4 /* Q_XS2 End of Frame */ +#define IS_XS2_C BIT_3 /* Q_XS2 Encoding Error */ +/* Asynchronous Transmit Queue 2 */ +#define IS_XA2_B BIT_2 /* Q_XA2 End of Buffer */ +#define IS_XA2_F BIT_1 /* Q_XA2 End of Frame */ +#define IS_XA2_C BIT_0 /* Q_XA2 Encoding Error */ + + +/* B0_HWE_ISRC 32 bit HW Error Interrupt Src Reg */ +/* B0_HWE_IMSK 32 bit HW Error Interrupt Mask Reg */ +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +#define IS_ERR_MSK 0x00000fffL /* All Error bits */ + /* Bit 31..14: reserved */ +#define IS_IRQ_TIST_OV BIT_13 /* Time Stamp Timer Overflow (YUKON only) */ +#define IS_IRQ_SENSOR BIT_12 /* IRQ from Sensor (YUKON only) */ +#define IS_IRQ_MST_ERR BIT_11 /* IRQ master error detected */ +#define IS_IRQ_STAT BIT_10 /* IRQ status exception */ +#define IS_NO_STAT_M1 BIT_9 /* No Rx Status from MAC 1 */ +#define IS_NO_STAT_M2 BIT_8 /* No Rx Status from MAC 2 */ +#define IS_NO_TIST_M1 BIT_7 /* No Time Stamp from MAC 1 */ +#define IS_NO_TIST_M2 BIT_6 /* No Time Stamp from MAC 2 */ +#define IS_RAM_RD_PAR BIT_5 /* RAM Read Parity Error */ +#define IS_RAM_WR_PAR BIT_4 /* RAM Write Parity Error */ 
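The IS_* flags in this block are shared between the matching source, mask and moderation-mask registers (B0_ISRC/B0_IMSK/B2_IRQM_MSK for the general bits, B0_HWE_ISRC/B0_HWE_IMSK/B2_IRQM_HWE_MSK for the error bits), so a driver arms interrupts by writing a combination of these flags to the mask register and decodes the source register with the same flags. A minimal sketch, assuming the usual readl()/writel() MMIO helpers; EX_IRQ_MASK and the particular bit selection are assumptions, not taken from the driver:

	/* Illustrative only: HW errors, MAC 1, plus end-of-frame IRQs of
	 * receive queue 1 and asynchronous transmit queue 1.
	 */
	#define EX_IRQ_MASK	(IS_HW_ERR | IS_MAC1 | IS_R1_F | IS_XA1_F)

	static inline void ex_irq_arm(void __iomem *iobase)
	{
		writel(EX_IRQ_MASK, iobase + B0_IMSK);	/* unmask the selected sources */
	}

	static inline u32 ex_irq_pending(void __iomem *iobase)
	{
		return readl(iobase + B0_ISRC) & EX_IRQ_MASK;	/* pending, enabled sources */
	}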
+#define IS_M1_PAR_ERR BIT_3 /* MAC 1 Parity Error */ +#define IS_M2_PAR_ERR BIT_2 /* MAC 2 Parity Error */ +#define IS_R1_PAR_ERR BIT_1 /* Queue R1 Parity Error */ +#define IS_R2_PAR_ERR BIT_0 /* Queue R2 Parity Error */ + +/* B2_CONN_TYP 8 bit Connector type */ +/* B2_PMD_TYP 8 bit PMD type */ +/* Values of connector and PMD type comply to SysKonnect internal std */ + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +#define CFG_CHIP_R_MSK (0xf<<4) /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ +#define CFG_DIS_M2_CLK BIT_1S /* Disable Clock for 2nd MAC */ +#define CFG_SNG_MAC BIT_0S /* MAC Config: 0=2 MACs / 1=1 MAC*/ + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +#define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */ +#define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */ +#define CHIP_ID_YUKON_LITE 0xb1 /* Chip ID for YUKON-Lite (Rev. A1-A3) */ +#define CHIP_ID_YUKON_LP 0xb2 /* Chip ID for YUKON-LP */ + +#define CHIP_REV_YU_LITE_A1 3 /* Chip Rev. for YUKON-Lite A1,A2 */ +#define CHIP_REV_YU_LITE_A3 7 /* Chip Rev. for YUKON-Lite A3 */ + +/* B2_FAR 32 bit Flash-Prom Addr Reg/Cnt */ +#define FAR_ADDR 0x1ffffL /* Bit 16.. 0: FPROM Address mask */ + +/* B2_LD_CTRL 8 bit EPROM loader control register */ +/* Bits are currently reserved */ + +/* B2_LD_TEST 8 bit EPROM loader test register */ + /* Bit 7.. 4: reserved */ +#define LD_T_ON BIT_3S /* Loader Test mode on */ +#define LD_T_OFF BIT_2S /* Loader Test mode off */ +#define LD_T_STEP BIT_1S /* Decrement FPROM addr. Counter */ +#define LD_START BIT_0S /* Start loading FPROM */ + +/* + * Timer Section + */ +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ + /* Bit 7.. 3: reserved */ +#define TIM_START BIT_2S /* Start Timer */ +#define TIM_STOP BIT_1S /* Stop Timer */ +#define TIM_CLR_IRQ BIT_0S /* Clear Timer IRQ (!IRQM) */ + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ + /* Bit 7.. 3: reserved */ +#define TIM_T_ON BIT_2S /* Test mode on */ +#define TIM_T_OFF BIT_1S /* Test mode off */ +#define TIM_T_STEP BIT_0S /* Test step */ + +/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */ +/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */ + /* Bit 31..24: reserved */ +#define DPT_MSK 0x00ffffffL /* Bit 23.. 0: Desc Poll Timer Bits */ + +/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ + /* Bit 7.. 2: reserved */ +#define DPT_START BIT_1S /* Start Descriptor Poll Timer */ +#define DPT_STOP BIT_0S /* Stop Descriptor Poll Timer */ + +/* B2_E_3 8 bit lower 4 bits used for HW self test result */ +#define B2_E3_RES_MASK 0x0f + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +#define TST_FRC_DPERR_MR BIT_7S /* force DATAPERR on MST RD */ +#define TST_FRC_DPERR_MW BIT_6S /* force DATAPERR on MST WR */ +#define TST_FRC_DPERR_TR BIT_5S /* force DATAPERR on TRG RD */ +#define TST_FRC_DPERR_TW BIT_4S /* force DATAPERR on TRG WR */ +#define TST_FRC_APERR_M BIT_3S /* force ADDRPERR on MST */ +#define TST_FRC_APERR_T BIT_2S /* force ADDRPERR on TRG */ +#define TST_CFG_WRITE_ON BIT_1S /* Enable Config Reg WR */ +#define TST_CFG_WRITE_OFF BIT_0S /* Disable Config Reg WR */ + +/* B2_TST_CTRL2 8 bit Test Control Register 2 */ + /* Bit 7.. 4: reserved */ + /* force the following error on the next master read/write */ +#define TST_FRC_DPERR_MR64 BIT_3S /* DataPERR RD 64 */ +#define TST_FRC_DPERR_MW64 BIT_2S /* DataPERR WR 64 */ +#define TST_FRC_APERR_1M64 BIT_1S /* AddrPERR on 1. 
phase */ +#define TST_FRC_APERR_2M64 BIT_0S /* AddrPERR on 2. phase */ + +/* B2_GP_IO 32 bit General Purpose I/O Register */ + /* Bit 31..26: reserved */ +#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=In/1=Out */ +#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=In/1=Out */ +#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=In/1=Out */ +#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=In/1=Out */ +#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=In/1=Out */ +#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=In/1=Out */ +#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=In/1=Out */ +#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=In/1=Out */ +#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=In/1=Out */ +#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=In/1=Out */ + /* Bit 15..10: reserved */ +#define GP_IO_9 BIT_9 /* IO_9 pin */ +#define GP_IO_8 BIT_8 /* IO_8 pin */ +#define GP_IO_7 BIT_7 /* IO_7 pin */ +#define GP_IO_6 BIT_6 /* IO_6 pin */ +#define GP_IO_5 BIT_5 /* IO_5 pin */ +#define GP_IO_4 BIT_4 /* IO_4 pin */ +#define GP_IO_3 BIT_3 /* IO_3 pin */ +#define GP_IO_2 BIT_2 /* IO_2 pin */ +#define GP_IO_1 BIT_1 /* IO_1 pin */ +#define GP_IO_0 BIT_0 /* IO_0 pin */ + +/* B2_I2C_CTRL 32 bit I2C HW Control Register */ +#define I2C_FLAG BIT_31 /* Start read/write if WR */ +#define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be RD/WR */ +#define I2C_DEV_SEL (0x7fL<<9) /* Bit 15.. 9: I2C Device Select */ + /* Bit 8.. 5: reserved */ +#define I2C_BURST_LEN BIT_4 /* Burst Len, 1/4 bytes */ +#define I2C_DEV_SIZE (7<<1) /* Bit 3.. 1: I2C Device Size */ +#define I2C_025K_DEV (0<<1) /* 0: 256 Bytes or smal. */ +#define I2C_05K_DEV (1<<1) /* 1: 512 Bytes */ +#define I2C_1K_DEV (2<<1) /* 2: 1024 Bytes */ +#define I2C_2K_DEV (3<<1) /* 3: 2048 Bytes */ +#define I2C_4K_DEV (4<<1) /* 4: 4096 Bytes */ +#define I2C_8K_DEV (5<<1) /* 5: 8192 Bytes */ +#define I2C_16K_DEV (6<<1) /* 6: 16384 Bytes */ +#define I2C_32K_DEV (7<<1) /* 7: 32768 Bytes */ +#define I2C_STOP BIT_0 /* Interrupt I2C transfer */ + +/* B2_I2C_IRQ 32 bit I2C HW IRQ Register */ + /* Bit 31.. 1 reserved */ +#define I2C_CLR_IRQ BIT_0 /* Clear I2C IRQ */ + +/* B2_I2C_SW 32 bit (8 bit access) I2C HW SW Port Register */ + /* Bit 7.. 3: reserved */ +#define I2C_DATA_DIR BIT_2S /* direction of I2C_DATA */ +#define I2C_DATA BIT_1S /* I2C Data Port */ +#define I2C_CLK BIT_0S /* I2C Clock Port */ + +/* + * I2C Address + */ +#define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address, (Volt and Temp)*/ + + +/* B2_BSC_CTRL 8 bit Blink Source Counter Control */ + /* Bit 7.. 2: reserved */ +#define BSC_START BIT_1S /* Start Blink Source Counter */ +#define BSC_STOP BIT_0S /* Stop Blink Source Counter */ + +/* B2_BSC_STAT 8 bit Blink Source Counter Status */ + /* Bit 7.. 1: reserved */ +#define BSC_SRC BIT_0S /* Blink Source, 0=Off / 1=On */ + +/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ +#define BSC_T_ON BIT_2S /* Test mode on */ +#define BSC_T_OFF BIT_1S /* Test mode off */ +#define BSC_T_STEP BIT_0S /* Test step */ + + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */ + +/* RAM Interface Registers */ +/* B3_RI_CTRL 16 bit RAM Iface Control Register */ + /* Bit 15..10: reserved */ +#define RI_CLR_RD_PERR BIT_9S /* Clear IRQ RAM Read Parity Err */ +#define RI_CLR_WR_PERR BIT_8S /* Clear IRQ RAM Write Parity Err*/ + /* Bit 7.. 2: reserved */ +#define RI_RST_CLR BIT_1S /* Clear RAM Interface Reset */ +#define RI_RST_SET BIT_0S /* Set RAM Interface Reset */ + +/* B3_RI_TEST 8 bit RAM Iface Test Register */ + /* Bit 15.. 
4: reserved */ +#define RI_T_EV BIT_3S /* Timeout Event occured */ +#define RI_T_ON BIT_2S /* Timeout Timer Test On */ +#define RI_T_OFF BIT_1S /* Timeout Timer Test Off */ +#define RI_T_STEP BIT_0S /* Timeout Timer Step */ + +/* MAC Arbiter Registers */ +/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ + /* Bit 15.. 4: reserved */ +#define MA_FOE_ON BIT_3S /* XMAC Fast Output Enable ON */ +#define MA_FOE_OFF BIT_2S /* XMAC Fast Output Enable OFF */ +#define MA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */ +#define MA_RST_SET BIT_0S /* Set MAC Arbiter Reset */ + +/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */ + /* Bit 15.. 8: reserved */ +#define MA_ENA_REC_TX2 BIT_7S /* Enable Recovery Timer TX2 */ +#define MA_DIS_REC_TX2 BIT_6S /* Disable Recovery Timer TX2 */ +#define MA_ENA_REC_TX1 BIT_5S /* Enable Recovery Timer TX1 */ +#define MA_DIS_REC_TX1 BIT_4S /* Disable Recovery Timer TX1 */ +#define MA_ENA_REC_RX2 BIT_3S /* Enable Recovery Timer RX2 */ +#define MA_DIS_REC_RX2 BIT_2S /* Disable Recovery Timer RX2 */ +#define MA_ENA_REC_RX1 BIT_1S /* Enable Recovery Timer RX1 */ +#define MA_DIS_REC_RX1 BIT_0S /* Disable Recovery Timer RX1 */ + +/* Packet Arbiter Registers */ +/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ + /* Bit 15..14: reserved */ +#define PA_CLR_TO_TX2 BIT_13S /* Clear IRQ Packet Timeout TX2 */ +#define PA_CLR_TO_TX1 BIT_12S /* Clear IRQ Packet Timeout TX1 */ +#define PA_CLR_TO_RX2 BIT_11S /* Clear IRQ Packet Timeout RX2 */ +#define PA_CLR_TO_RX1 BIT_10S /* Clear IRQ Packet Timeout RX1 */ +#define PA_ENA_TO_TX2 BIT_9S /* Enable Timeout Timer TX2 */ +#define PA_DIS_TO_TX2 BIT_8S /* Disable Timeout Timer TX2 */ +#define PA_ENA_TO_TX1 BIT_7S /* Enable Timeout Timer TX1 */ +#define PA_DIS_TO_TX1 BIT_6S /* Disable Timeout Timer TX1 */ +#define PA_ENA_TO_RX2 BIT_5S /* Enable Timeout Timer RX2 */ +#define PA_DIS_TO_RX2 BIT_4S /* Disable Timeout Timer RX2 */ +#define PA_ENA_TO_RX1 BIT_3S /* Enable Timeout Timer RX1 */ +#define PA_DIS_TO_RX1 BIT_2S /* Disable Timeout Timer RX1 */ +#define PA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */ +#define PA_RST_SET BIT_0S /* Set MAC Arbiter Reset */ + +#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ + PA_ENA_TO_TX1 | PA_ENA_TO_TX2) + +/* Rx/Tx Path related Arbiter Test Registers */ +/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */ +/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */ +/* B3_PA_TEST 16 bit Packet Arbiter Test Register */ +/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */ +#define TX2_T_EV BIT_15S /* TX2 Timeout/Recv Event occured */ +#define TX2_T_ON BIT_14S /* TX2 Timeout/Recv Timer Test On */ +#define TX2_T_OFF BIT_13S /* TX2 Timeout/Recv Timer Tst Off */ +#define TX2_T_STEP BIT_12S /* TX2 Timeout/Recv Timer Step */ +#define TX1_T_EV BIT_11S /* TX1 Timeout/Recv Event occured */ +#define TX1_T_ON BIT_10S /* TX1 Timeout/Recv Timer Test On */ +#define TX1_T_OFF BIT_9S /* TX1 Timeout/Recv Timer Tst Off */ +#define TX1_T_STEP BIT_8S /* TX1 Timeout/Recv Timer Step */ +#define RX2_T_EV BIT_7S /* RX2 Timeout/Recv Event occured */ +#define RX2_T_ON BIT_6S /* RX2 Timeout/Recv Timer Test On */ +#define RX2_T_OFF BIT_5S /* RX2 Timeout/Recv Timer Tst Off */ +#define RX2_T_STEP BIT_4S /* RX2 Timeout/Recv Timer Step */ +#define RX1_T_EV BIT_3S /* RX1 Timeout/Recv Event occured */ +#define RX1_T_ON BIT_2S /* RX1 Timeout/Recv Timer Test On */ +#define RX1_T_OFF BIT_1S /* RX1 Timeout/Recv Timer Tst Off */ +#define RX1_T_STEP BIT_0S /* RX1 Timeout/Recv Timer Step */ + + +/* Transmit Arbiter Registers 
MAC 1 and 2, use MR_ADDR() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+ /* Bit 31..24: reserved */
+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+#define TXA_ENA_FSYNC BIT_7S /* Enable force of sync Tx queue */
+#define TXA_DIS_FSYNC BIT_6S /* Disable force of sync Tx queue */
+#define TXA_ENA_ALLOC BIT_5S /* Enable alloc of free bandwidth */
+#define TXA_DIS_ALLOC BIT_4S /* Disable alloc of free bandwidth */
+#define TXA_START_RC BIT_3S /* Start sync Rate Control */
+#define TXA_STOP_RC BIT_2S /* Stop sync Rate Control */
+#define TXA_ENA_ARB BIT_1S /* Enable Tx Arbiter */
+#define TXA_DIS_ARB BIT_0S /* Disable Tx Arbiter */
+
+/* TXA_TEST 8 bit Tx Arbiter Test Register */
+ /* Bit 7.. 6: reserved */
+#define TXA_INT_T_ON BIT_5S /* Tx Arb Interval Timer Test On */
+#define TXA_INT_T_OFF BIT_4S /* Tx Arb Interval Timer Test Off */
+#define TXA_INT_T_STEP BIT_3S /* Tx Arb Interval Timer Step */
+#define TXA_LIM_T_ON BIT_2S /* Tx Arb Limit Timer Test On */
+#define TXA_LIM_T_OFF BIT_1S /* Tx Arb Limit Timer Test Off */
+#define TXA_LIM_T_STEP BIT_0S /* Tx Arb Limit Timer Step */
+
+/* TXA_STAT 8 bit Tx Arbiter Status Register */
+ /* Bit 7.. 1: reserved */
+#define TXA_PRIO_XS BIT_0S /* sync queue has prio to send */
+
+/* Q_BC 32 bit Current Byte Counter */
+ /* Bit 31..16: reserved */
+#define BC_MAX 0xffff /* Bit 15.. 0: Byte counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+ /* Bit 31..25: reserved */
+#define CSR_SV_IDLE BIT_24 /* BMU SM Idle */
+ /* Bit 23..22: reserved */
+#define CSR_DESC_CLR BIT_21 /* Clear Reset for Descr */
+#define CSR_DESC_SET BIT_20 /* Set Reset for Descr */
+#define CSR_FIFO_CLR BIT_19 /* Clear Reset for FIFO */
+#define CSR_FIFO_SET BIT_18 /* Set Reset for FIFO */
+#define CSR_HPI_RUN BIT_17 /* Release HPI SM */
+#define CSR_HPI_RST BIT_16 /* Reset HPI SM to Idle */
+#define CSR_SV_RUN BIT_15 /* Release Supervisor SM */
+#define CSR_SV_RST BIT_14 /* Reset Supervisor SM */
+#define CSR_DREAD_RUN BIT_13 /* Release Descr Read SM */
+#define CSR_DREAD_RST BIT_12 /* Reset Descr Read SM */
+#define CSR_DWRITE_RUN BIT_11 /* Release Descr Write SM */
+#define CSR_DWRITE_RST BIT_10 /* Reset Descr Write SM */
+#define CSR_TRANS_RUN BIT_9 /* Release Transfer SM */
+#define CSR_TRANS_RST BIT_8 /* Reset Transfer SM */
+#define CSR_ENA_POL BIT_7 /* Enable Descr Polling */
+#define CSR_DIS_POL BIT_6 /* Disable Descr Polling */
+#define CSR_STOP BIT_5 /* Stop Rx/Tx Queue */
+#define CSR_START BIT_4 /* Start Rx/Tx Queue */
+#define CSR_IRQ_CL_P BIT_3 /* (Rx) Clear Parity IRQ */
+#define CSR_IRQ_CL_B BIT_2 /* Clear EOB IRQ */
+#define CSR_IRQ_CL_F BIT_1 /* Clear EOF IRQ */
+#define CSR_IRQ_CL_C BIT_0 /* Clear ERR IRQ */
+
+#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
+ CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
+ CSR_TRANS_RST)
+#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
+ CSR_SV_RUN | CSR_DREAD_RUN |
CSR_DWRITE_RUN |\ + CSR_TRANS_RUN) + +/* Q_F 32 bit Flag Register */ + /* Bit 31..28: reserved */ +#define F_ALM_FULL BIT_27 /* Rx FIFO: almost full */ +#define F_EMPTY BIT_27 /* Tx FIFO: empty flag */ +#define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */ +#define F_WM_REACHED BIT_25 /* Watermark reached */ + /* reserved */ +#define F_FIFO_LEVEL (0x1fL<<16) /* Bit 23..16: # of Qwords in FIFO */ + /* Bit 15..11: reserved */ +#define F_WATER_MARK 0x0007ffL /* Bit 10.. 0: Watermark */ + +/* Q_T1 32 bit Test Register 1 */ +/* Holds four State Machine control Bytes */ +#define SM_CTRL_SV_MSK (0xffL<<24) /* Bit 31..24: Control Supervisor SM */ +#define SM_CTRL_RD_MSK (0xffL<<16) /* Bit 23..16: Control Read Desc SM */ +#define SM_CTRL_WR_MSK (0xffL<<8) /* Bit 15.. 8: Control Write Desc SM */ +#define SM_CTRL_TR_MSK 0xffL /* Bit 7.. 0: Control Transfer SM */ + +/* Q_T1_TR 8 bit Test Register 1 Transfer SM */ +/* Q_T1_WR 8 bit Test Register 1 Write Descriptor SM */ +/* Q_T1_RD 8 bit Test Register 1 Read Descriptor SM */ +/* Q_T1_SV 8 bit Test Register 1 Supervisor SM */ + +/* The control status byte of each machine looks like ... */ +#define SM_STATE 0xf0 /* Bit 7.. 4: State which shall be loaded */ +#define SM_LOAD BIT_3S /* Load the SM with SM_STATE */ +#define SM_TEST_ON BIT_2S /* Switch on SM Test Mode */ +#define SM_TEST_OFF BIT_1S /* Go off the Test Mode */ +#define SM_STEP BIT_0S /* Step the State Machine */ +/* The encoding of the states is not supported by the Diagnostics Tool */ + +/* Q_T2 32 bit Test Register 2 */ + /* Bit 31.. 8: reserved */ +#define T2_AC_T_ON BIT_7 /* Address Counter Test Mode on */ +#define T2_AC_T_OFF BIT_6 /* Address Counter Test Mode off */ +#define T2_BC_T_ON BIT_5 /* Byte Counter Test Mode on */ +#define T2_BC_T_OFF BIT_4 /* Byte Counter Test Mode off */ +#define T2_STEP04 BIT_3 /* Inc AC/Dec BC by 4 */ +#define T2_STEP03 BIT_2 /* Inc AC/Dec BC by 3 */ +#define T2_STEP02 BIT_1 /* Inc AC/Dec BC by 2 */ +#define T2_STEP01 BIT_0 /* Inc AC/Dec BC by 1 */ + +/* Q_T3 32 bit Test Register 3 */ + /* Bit 31.. 7: reserved */ +#define T3_MUX_MSK (7<<4) /* Bit 6.. 4: Mux Position */ + /* Bit 3: reserved */ +#define T3_VRAM_MSK 7 /* Bit 2.. 0: Virtual RAM Buffer Address */ + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + /* Bit 31..19: reserved */ +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ + +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ + /* Bit 7.. 
4: reserved */ +#define RB_PC_DEC BIT_3S /* Packet Counter Decrem */ +#define RB_PC_T_ON BIT_2S /* Packet Counter Test On */ +#define RB_PC_T_OFF BIT_1S /* Packet Counter Tst Off */ +#define RB_PC_INC BIT_0S /* Packet Counter Increm */ + +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + /* Bit 7: reserved */ +#define RB_WP_T_ON BIT_6S /* Write Pointer Test On */ +#define RB_WP_T_OFF BIT_5S /* Write Pointer Test Off */ +#define RB_WP_INC BIT_4S /* Write Pointer Increm */ + /* Bit 3: reserved */ +#define RB_RP_T_ON BIT_2S /* Read Pointer Test On */ +#define RB_RP_T_OFF BIT_1S /* Read Pointer Test Off */ +#define RB_RP_DEC BIT_0S /* Read Pointer Decrement */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ + /* Bit 7.. 6: reserved */ +#define RB_ENA_STFWD BIT_5S /* Enable Store & Forward */ +#define RB_DIS_STFWD BIT_4S /* Disable Store & Forward */ +#define RB_ENA_OP_MD BIT_3S /* Enable Operation Mode */ +#define RB_DIS_OP_MD BIT_2S /* Disable Operation Mode */ +#define RB_RST_CLR BIT_1S /* Clear RAM Buf STM Reset */ +#define RB_RST_SET BIT_0S /* Set RAM Buf STM Reset */ + + +/* Receive and Transmit MAC FIFO Registers (GENESIS only) */ + +/* RX_MFF_EA 32 bit Receive MAC FIFO End Address */ +/* RX_MFF_WP 32 bit Receive MAC FIFO Write Pointer */ +/* RX_MFF_RP 32 bit Receive MAC FIFO Read Pointer */ +/* RX_MFF_PC 32 bit Receive MAC FIFO Packet Counter */ +/* RX_MFF_LEV 32 bit Receive MAC FIFO Level */ +/* TX_MFF_EA 32 bit Transmit MAC FIFO End Address */ +/* TX_MFF_WP 32 bit Transmit MAC FIFO Write Pointer */ +/* TX_MFF_WSP 32 bit Transmit MAC FIFO WR Shadow Pointer */ +/* TX_MFF_RP 32 bit Transmit MAC FIFO Read Pointer */ +/* TX_MFF_PC 32 bit Transmit MAC FIFO Packet Cnt */ +/* TX_MFF_LEV 32 bit Transmit MAC FIFO Level */ + /* Bit 31.. 6: reserved */ +#define MFF_MSK 0x007fL /* Bit 5.. 
0: MAC FIFO Address/Ptr Bits */ + +/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */ + /* Bit 15..14: reserved */ +#define MFF_ENA_RDY_PAT BIT_13S /* Enable Ready Patch */ +#define MFF_DIS_RDY_PAT BIT_12S /* Disable Ready Patch */ +#define MFF_ENA_TIM_PAT BIT_11S /* Enable Timing Patch */ +#define MFF_DIS_TIM_PAT BIT_10S /* Disable Timing Patch */ +#define MFF_ENA_ALM_FUL BIT_9S /* Enable AlmostFull Sign */ +#define MFF_DIS_ALM_FUL BIT_8S /* Disable AlmostFull Sign */ +#define MFF_ENA_PAUSE BIT_7S /* Enable Pause Signaling */ +#define MFF_DIS_PAUSE BIT_6S /* Disable Pause Signaling */ +#define MFF_ENA_FLUSH BIT_5S /* Enable Frame Flushing */ +#define MFF_DIS_FLUSH BIT_4S /* Disable Frame Flushing */ +#define MFF_ENA_TIST BIT_3S /* Enable Time Stamp Gener */ +#define MFF_DIS_TIST BIT_2S /* Disable Time Stamp Gener */ +#define MFF_CLR_INTIST BIT_1S /* Clear IRQ No Time Stamp */ +#define MFF_CLR_INSTAT BIT_0S /* Clear IRQ No Status */ + +#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT + +/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */ +#define MFF_CLR_PERR BIT_15S /* Clear Parity Error IRQ */ + /* Bit 14: reserved */ +#define MFF_ENA_PKT_REC BIT_13S /* Enable Packet Recovery */ +#define MFF_DIS_PKT_REC BIT_12S /* Disable Packet Recovery */ +/* MFF_ENA_TIM_PAT (see RX_MFF_CTRL1) Bit 11: Enable Timing Patch */ +/* MFF_DIS_TIM_PAT (see RX_MFF_CTRL1) Bit 10: Disable Timing Patch */ +/* MFF_ENA_ALM_FUL (see RX_MFF_CTRL1) Bit 9: Enable Almost Full Sign */ +/* MFF_DIS_ALM_FUL (see RX_MFF_CTRL1) Bit 8: Disable Almost Full Sign */ +#define MFF_ENA_W4E BIT_7S /* Enable Wait for Empty */ +#define MFF_DIS_W4E BIT_6S /* Disable Wait for Empty */ +/* MFF_ENA_FLUSH (see RX_MFF_CTRL1) Bit 5: Enable Frame Flushing */ +/* MFF_DIS_FLUSH (see RX_MFF_CTRL1) Bit 4: Disable Frame Flushing */ +#define MFF_ENA_LOOPB BIT_3S /* Enable Loopback */ +#define MFF_DIS_LOOPB BIT_2S /* Disable Loopback */ +#define MFF_CLR_MAC_RST BIT_1S /* Clear XMAC Reset */ +#define MFF_SET_MAC_RST BIT_0S /* Set XMAC Reset */ + +#define MFF_TX_CTRL_DEF (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH) + +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ + /* Bit 7: reserved */ +#define MFF_WSP_T_ON BIT_6S /* Tx: Write Shadow Ptr TestOn */ +#define MFF_WSP_T_OFF BIT_5S /* Tx: Write Shadow Ptr TstOff */ +#define MFF_WSP_INC BIT_4S /* Tx: Write Shadow Ptr Increment */ +#define MFF_PC_DEC BIT_3S /* Packet Counter Decrement */ +#define MFF_PC_T_ON BIT_2S /* Packet Counter Test On */ +#define MFF_PC_T_OFF BIT_1S /* Packet Counter Test Off */ +#define MFF_PC_INC BIT_0S /* Packet Counter Increment */ + +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ + /* Bit 7: reserved */ +#define MFF_WP_T_ON BIT_6S /* Write Pointer Test On */ +#define MFF_WP_T_OFF BIT_5S /* Write Pointer Test Off */ +#define MFF_WP_INC BIT_4S /* Write Pointer Increm */ + /* Bit 3: reserved */ +#define MFF_RP_T_ON BIT_2S /* Read Pointer Test On */ +#define MFF_RP_T_OFF BIT_1S /* Read Pointer Test Off */ +#define MFF_RP_DEC BIT_0S /* Read Pointer Decrement */ + +/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ +/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */ + /* Bit 7..4: reserved */ +#define MFF_ENA_OP_MD BIT_3S /* Enable Operation Mode */ +#define MFF_DIS_OP_MD BIT_2S /* Disable Operation Mode */ +#define MFF_RST_CLR BIT_1S /* Clear MAC FIFO Reset */ +#define MFF_RST_SET BIT_0S /* Set MAC FIFO Reset */ + + +/* Link LED 
Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ + /* Bit 7.. 3: reserved */ +#define LED_START BIT_2S /* Start Timer */ +#define LED_STOP BIT_1S /* Stop Timer */ +#define LED_STATE BIT_0S /* Rx/Tx: LED State, 1=LED on */ +#define LED_CLR_IRQ BIT_0S /* Lnk: Clear Link IRQ */ + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ + /* Bit 7.. 3: reserved */ +#define LED_T_ON BIT_2S /* LED Counter Test mode On */ +#define LED_T_OFF BIT_1S /* LED Counter Test mode Off */ +#define LED_T_STEP BIT_0S /* LED Counter Step */ + +/* LNK_LED_REG 8 bit Link LED Register */ + /* Bit 7.. 6: reserved */ +#define LED_BLK_ON BIT_5S /* Link LED Blinking On */ +#define LED_BLK_OFF BIT_4S /* Link LED Blinking Off */ +#define LED_SYNC_ON BIT_3S /* Use Sync Wire to switch LED */ +#define LED_SYNC_OFF BIT_2S /* Disable Sync Wire Input */ +#define LED_ON BIT_1S /* switch LED on */ +#define LED_OFF BIT_0S /* switch LED off */ + +/* Receive and Transmit GMAC FIFO Registers (YUKON only) */ + +/* RX_GMF_EA 32 bit Rx GMAC FIFO End Address */ +/* RX_GMF_AF_THR 32 bit Rx GMAC FIFO Almost Full Thresh. */ +/* RX_GMF_WP 32 bit Rx GMAC FIFO Write Pointer */ +/* RX_GMF_WLEV 32 bit Rx GMAC FIFO Write Level */ +/* RX_GMF_RP 32 bit Rx GMAC FIFO Read Pointer */ +/* RX_GMF_RLEV 32 bit Rx GMAC FIFO Read Level */ +/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ +/* TX_GMF_AE_THR 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ +/* TX_GMF_WP 32 bit Tx GMAC FIFO Write Pointer */ +/* TX_GMF_WSP 32 bit Tx GMAC FIFO Write Shadow Ptr. */ +/* TX_GMF_WLEV 32 bit Tx GMAC FIFO Write Level */ +/* TX_GMF_RP 32 bit Tx GMAC FIFO Read Pointer */ +/* TX_GMF_RSTP 32 bit Tx GMAC FIFO Restart Pointer */ +/* TX_GMF_RLEV 32 bit Tx GMAC FIFO Read Level */ + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ + /* Bits 31..15: reserved */ +#define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */ +#define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */ +#define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */ + /* Bit 11: reserved */ +#define GMF_RP_TST_ON BIT_10 /* Read Pointer Test On */ +#define GMF_RP_TST_OFF BIT_9 /* Read Pointer Test Off */ +#define GMF_RP_STEP BIT_8 /* Read Pointer Step/Increment */ +#define GMF_RX_F_FL_ON BIT_7 /* Rx FIFO Flush Mode On */ +#define GMF_RX_F_FL_OFF BIT_6 /* Rx FIFO Flush Mode Off */ +#define GMF_CLI_RX_FO BIT_5 /* Clear IRQ Rx FIFO Overrun */ +#define GMF_CLI_RX_FC BIT_4 /* Clear IRQ Rx Frame Complete */ +#define GMF_OPER_ON BIT_3 /* Operational Mode On */ +#define GMF_OPER_OFF BIT_2 /* Operational Mode Off */ +#define GMF_RST_CLR BIT_1 /* Clear GMAC FIFO Reset */ +#define GMF_RST_SET BIT_0 /* Set GMAC FIFO Reset */ + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ + /* Bits 31..19: reserved */ +#define GMF_WSP_TST_ON BIT_18 /* Write Shadow Pointer Test On */ +#define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */ +#define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */ + /* Bits 15..7: same as for RX_GMF_CTRL_T */ +#define GMF_CLI_TX_FU BIT_6 /* Clear IRQ Tx FIFO Underrun */ +#define GMF_CLI_TX_FC BIT_5 /* Clear IRQ Tx Frame Complete */ +#define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */ + /* Bits 3..0: same as for RX_GMF_CTRL_T */ + +#define GMF_RX_CTRL_DEF (GMF_OPER_ON | GMF_RX_F_FL_ON) +#define 
GMF_TX_CTRL_DEF GMF_OPER_ON + +#define RX_GMF_FL_THR_DEF 0x0a /* Rx GMAC FIFO Flush Threshold default */ + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ + /* Bit 7.. 3: reserved */ +#define GMT_ST_START BIT_2S /* Start Time Stamp Timer */ +#define GMT_ST_STOP BIT_1S /* Stop Time Stamp Timer */ +#define GMT_ST_CLR_IRQ BIT_0S /* Clear Time Stamp Timer IRQ */ + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ + /* Bits 31.. 8: reserved */ +#define GMC_H_BURST_ON BIT_7 /* Half Duplex Burst Mode On */ +#define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */ +#define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */ +#define GMC_F_LOOPB_OFF BIT_4 /* FIFO Loopback Off */ +#define GMC_PAUSE_ON BIT_3 /* Pause On */ +#define GMC_PAUSE_OFF BIT_2 /* Pause Off */ +#define GMC_RST_CLR BIT_1 /* Clear GMAC Reset */ +#define GMC_RST_SET BIT_0 /* Set GMAC Reset */ + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ + /* Bits 31..29: reserved */ +#define GPC_SEL_BDT BIT_28 /* Select Bi-Dir. Transfer for MDC/MDIO */ +#define GPC_INT_POL_HI BIT_27 /* IRQ Polarity is Active HIGH */ +#define GPC_75_OHM BIT_26 /* Use 75 Ohm Termination instead of 50 */ +#define GPC_DIS_FC BIT_25 /* Disable Automatic Fiber/Copper Detection */ +#define GPC_DIS_SLEEP BIT_24 /* Disable Energy Detect */ +#define GPC_HWCFG_M_3 BIT_23 /* HWCFG_MODE[3] */ +#define GPC_HWCFG_M_2 BIT_22 /* HWCFG_MODE[2] */ +#define GPC_HWCFG_M_1 BIT_21 /* HWCFG_MODE[1] */ +#define GPC_HWCFG_M_0 BIT_20 /* HWCFG_MODE[0] */ +#define GPC_ANEG_0 BIT_19 /* ANEG[0] */ +#define GPC_ENA_XC BIT_18 /* Enable MDI crossover */ +#define GPC_DIS_125 BIT_17 /* Disable 125 MHz clock */ +#define GPC_ANEG_3 BIT_16 /* ANEG[3] */ +#define GPC_ANEG_2 BIT_15 /* ANEG[2] */ +#define GPC_ANEG_1 BIT_14 /* ANEG[1] */ +#define GPC_ENA_PAUSE BIT_13 /* Enable Pause (SYM_OR_REM) */ +#define GPC_PHYADDR_4 BIT_12 /* Bit 4 of Phy Addr */ +#define GPC_PHYADDR_3 BIT_11 /* Bit 3 of Phy Addr */ +#define GPC_PHYADDR_2 BIT_10 /* Bit 2 of Phy Addr */ +#define GPC_PHYADDR_1 BIT_9 /* Bit 1 of Phy Addr */ +#define GPC_PHYADDR_0 BIT_8 /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ +#define GPC_RST_CLR BIT_1 /* Clear GPHY Reset */ +#define GPC_RST_SET BIT_0 /* Set GPHY Reset */ + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | \ + GPC_HWCFG_M_1 | GPC_HWCFG_M_0) + +#define GPC_HWCFG_GMII_FIB ( GPC_HWCFG_M_2 | \ + GPC_HWCFG_M_1 | GPC_HWCFG_M_0) + +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | \ + GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +#define GM_IS_TX_CO_OV BIT_5 /* Transmit Counter Overflow IRQ */ +#define GM_IS_RX_CO_OV BIT_4 /* Receive Counter Overflow IRQ */ +#define GM_IS_TX_FF_UR BIT_3 /* Transmit FIFO Underrun */ +#define GM_IS_TX_COMPL BIT_2 /* 
Frame Transmission Complete */ +#define GM_IS_RX_FF_OR BIT_1 /* Receive FIFO Overrun */ +#define GM_IS_RX_COMPL BIT_0 /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | \ + GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 2: reserved */ +#define GMLC_RST_CLR BIT_1S /* Clear GMAC Link Reset */ +#define GMLC_RST_SET BIT_0S /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ +#define WOL_CTL_LINK_CHG_OCC BIT_15S +#define WOL_CTL_MAGIC_PKT_OCC BIT_14S +#define WOL_CTL_PATTERN_OCC BIT_13S + +#define WOL_CTL_CLEAR_RESULT BIT_12S + +#define WOL_CTL_ENA_PME_ON_LINK_CHG BIT_11S +#define WOL_CTL_DIS_PME_ON_LINK_CHG BIT_10S +#define WOL_CTL_ENA_PME_ON_MAGIC_PKT BIT_9S +#define WOL_CTL_DIS_PME_ON_MAGIC_PKT BIT_8S +#define WOL_CTL_ENA_PME_ON_PATTERN BIT_7S +#define WOL_CTL_DIS_PME_ON_PATTERN BIT_6S + +#define WOL_CTL_ENA_LINK_CHG_UNIT BIT_5S +#define WOL_CTL_DIS_LINK_CHG_UNIT BIT_4S +#define WOL_CTL_ENA_MAGIC_PKT_UNIT BIT_3S +#define WOL_CTL_DIS_MAGIC_PKT_UNIT BIT_2S +#define WOL_CTL_ENA_PATTERN_UNIT BIT_1S +#define WOL_CTL_DIS_PATTERN_UNIT BIT_0S + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (BIT_0 << (x)) + +#define SK_NUM_WOL_PATTERN 7 +#define SK_PATTERN_PER_WORD 4 +#define SK_BITMASK_PATTERN 7 +#define SK_POW_PATTERN_LENGTH 128 + +#define WOL_LENGTH_MSK 0x7f +#define WOL_LENGTH_SHIFT 8 + + +/* Receive and Transmit Descriptors ******************************************/ + +/* Transmit Descriptor struct */ +typedef struct s_HwTxd { + SK_U32 volatile TxCtrl; /* Transmit Buffer Control Field */ + SK_U32 TxNext; /* Physical Address Pointer to the next TxD */ + SK_U32 TxAdrLo; /* Physical Tx Buffer Address lower dword */ + SK_U32 TxAdrHi; /* Physical Tx Buffer Address upper dword */ + SK_U32 TxStat; /* Transmit Frame Status Word */ +#ifndef SK_USE_REV_DESC + SK_U16 TxTcpOffs; /* TCP Checksum Calculation Start Value */ + SK_U16 TxRes1; /* 16 bit reserved field */ + SK_U16 TxTcpWp; /* TCP Checksum Write Position */ + SK_U16 TxTcpSp; /* TCP Checksum Calculation Start Position */ +#else /* SK_USE_REV_DESC */ + SK_U16 TxRes1; /* 16 bit reserved field */ + SK_U16 TxTcpOffs; /* TCP Checksum Calculation Start Value */ + SK_U16 TxTcpSp; /* TCP Checksum Calculation Start Position */ + SK_U16 TxTcpWp; /* TCP Checksum Write Position */ +#endif /* SK_USE_REV_DESC */ + SK_U32 TxRes2; /* 32 bit reserved field */ +} SK_HWTXD; + +/* Receive Descriptor struct */ +typedef struct s_HwRxd { + SK_U32 volatile RxCtrl; /* Receive Buffer Control Field */ + SK_U32 RxNext; /* Physical Address Pointer to the next RxD */ + SK_U32 RxAdrLo; /* Physical Rx Buffer Address lower dword */ + SK_U32 RxAdrHi; /* Physical Rx Buffer Address upper dword */ + SK_U32 RxStat; /* Receive Frame Status Word */ + SK_U32 RxTiSt; /* Receive Time Stamp (from XMAC on GENESIS) */ +#ifndef SK_USE_REV_DESC + SK_U16 RxTcpSum1; /* TCP Checksum 1 */ + SK_U16 RxTcpSum2; /* TCP Checksum 2 */ + SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ + SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */ +#else /* SK_USE_REV_DESC */ + SK_U16 RxTcpSum2; /* TCP Checksum 2 */ + SK_U16 RxTcpSum1; /* TCP Checksum 1 */ + SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 
*/ + SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ +#endif /* SK_USE_REV_DESC */ +} SK_HWRXD; + +/* + * Drivers which use the reverse descriptor feature (PCI_OUR_REG_2) + * should set the define SK_USE_REV_DESC. + * Structures are 'normally' not endianness dependent. But in + * this case the SK_U16 fields are bound to bit positions inside the + * descriptor. RxTcpSum1 e.g. must start at bit 0 within the 6th DWord. + * The bit positions inside a DWord are of course endianness dependent and + * swap if the DWord is swapped by the hardware. + */ + + +/* Descriptor Bit Definition */ +/* TxCtrl Transmit Buffer Control Field */ +/* RxCtrl Receive Buffer Control Field */ +#define BMU_OWN BIT_31 /* OWN bit: 0=host/1=BMU */ +#define BMU_STF BIT_30 /* Start of Frame */ +#define BMU_EOF BIT_29 /* End of Frame */ +#define BMU_IRQ_EOB BIT_28 /* Req "End of Buffer" IRQ */ +#define BMU_IRQ_EOF BIT_27 /* Req "End of Frame" IRQ */ +/* TxCtrl specific bits */ +#define BMU_STFWD BIT_26 /* (Tx) Store & Forward Frame */ +#define BMU_NO_FCS BIT_25 /* (Tx) Disable MAC FCS (CRC) generation */ +#define BMU_SW BIT_24 /* (Tx) 1 bit res. for SW use */ +/* RxCtrl specific bits */ +#define BMU_DEV_0 BIT_26 /* (Rx) Transfer data to Dev0 */ +#define BMU_STAT_VAL BIT_25 /* (Rx) Rx Status Valid */ +#define BMU_TIST_VAL BIT_24 /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ +#define BMU_CHECK (0x55L<<16) /* Default BMU check */ +#define BMU_TCP_CHECK (0x56L<<16) /* Descr with TCP ext */ +#define BMU_UDP_CHECK (0x57L<<16) /* Descr with UDP ext (YUKON only) */ +#define BMU_BBC 0xffffL /* Bit 15.. 0: Buffer Byte Counter */ + +/* TxStat Transmit Frame Status Word */ +/* RxStat Receive Frame Status Word */ +/* + * Note: TxStat is reserved for ASIC loopback mode only + * + * The Bits of the Status words are defined in xmac_ii.h + * (see XMR_FS bits) + */ + +/* macros ********************************************************************/ + +/* Receive and Transmit Queues */ +#define Q_R1 0x0000 /* Receive Queue 1 */ +#define Q_R2 0x0080 /* Receive Queue 2 */ +#define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */ +#define Q_XA1 0x0280 /* Asynchronous Transmit Queue 1 */ +#define Q_XS2 0x0300 /* Synchronous Transmit Queue 2 */ +#define Q_XA2 0x0380 /* Asynchronous Transmit Queue 2 */ + +/* + * Macro Q_ADDR() + * + * Use this macro to access the Receive and Transmit Queue Registers. + * + * para: + * Queue Queue to access. + * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 + * Offs Queue register offset. + * Values: Q_D, Q_DA_L ... Q_T2, Q_T3 + * + * usage SK_IN32(pAC, Q_ADDR(Q_R2, Q_BC), pVal) + */ +#define Q_ADDR(Queue, Offs) (B8_Q_REGS + (Queue) + (Offs)) + +/* + * Macro RB_ADDR() + * + * Use this macro to access the RAM Buffer Registers. + * + * para: + * Queue Queue to access. + * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 + * Offs Queue register offset. + * Values: RB_START, RB_END ... RB_LEV, RB_CTRL + * + * usage SK_IN32(pAC, RB_ADDR(Q_R2, RB_RP), pVal) + */ +#define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs)) + + +/* MAC Related Registers */ +#define MAC_1 0 /* belongs to the port near the slot */ +#define MAC_2 1 /* belongs to the port far away from the slot */ + +/* + * Macro MR_ADDR() + * + * Use this macro to access a MAC Related Register inside the ASIC. + * + * para: + * Mac MAC to access. + * Values: MAC_1, MAC_2 + * Offs MAC register offset. + * Values: RX_MFF_EA, RX_MFF_WP ... LNK_LED_REG, + * TX_MFF_EA, TX_MFF_WP ... 
TX_LED_TST + * + * usage SK_IN32(pAC, MR_ADDR(MAC_1, TX_MFF_EA), pVal) + */ +#define MR_ADDR(Mac, Offs) (((Mac) << 7) + (Offs)) + +#ifdef SK_LITTLE_ENDIAN +#define XM_WORD_LO 0 +#define XM_WORD_HI 1 +#else /* !SK_LITTLE_ENDIAN */ +#define XM_WORD_LO 1 +#define XM_WORD_HI 0 +#endif /* !SK_LITTLE_ENDIAN */ + + +/* + * macros to access the XMAC (GENESIS only) + * + * XM_IN16(), to read a 16 bit register (e.g. XM_MMU_CMD) + * XM_OUT16(), to write a 16 bit register (e.g. XM_MMU_CMD) + * XM_IN32(), to read a 32 bit register (e.g. XM_TX_EV_CNT) + * XM_OUT32(), to write a 32 bit register (e.g. XM_TX_EV_CNT) + * XM_INADDR(), to read a network address register (e.g. XM_SRC_CHK) + * XM_OUTADDR(), to write a network address register (e.g. XM_SRC_CHK) + * XM_INHASH(), to read the XM_HSM_CHK register + * XM_OUTHASH() to write the XM_HSM_CHK register + * + * para: + * Mac XMAC to access values: MAC_1 or MAC_2 + * IoC I/O context needed for SK I/O macros + * Reg XMAC Register to read or write + * (p)Val Value or pointer to the value which should be read or written + * + * usage: XM_OUT16(IoC, MAC_1, XM_MMU_CMD, Value); + */ + +#define XMA(Mac, Reg) \ + ((BASE_XMAC_1 + (Mac) * (BASE_XMAC_2 - BASE_XMAC_1)) | ((Reg) << 1)) + +#define XM_IN16(IoC, Mac, Reg, pVal) \ + SK_IN16((IoC), XMA((Mac), (Reg)), (pVal)) + +#define XM_OUT16(IoC, Mac, Reg, Val) \ + SK_OUT16((IoC), XMA((Mac), (Reg)), (Val)) + +#define XM_IN32(IoC, Mac, Reg, pVal) { \ + SK_IN16((IoC), XMA((Mac), (Reg)), \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \ + SK_IN16((IoC), XMA((Mac), (Reg+2)), \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \ +} + +#define XM_OUT32(IoC, Mac, Reg, Val) { \ + SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \ + SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16)(((Val) >> 16) & 0xffffL));\ +} + +/* Remember: we are always writing to / reading from LITTLE ENDIAN memory */ + +#define XM_INADDR(IoC, Mac, Reg, pVal) { \ + SK_U16 Word; \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \ + pByte[0] = (SK_U8)(Word & 0x00ff); \ + pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), XMA((Mac), (Reg+2)), &Word); \ + pByte[2] = (SK_U8)(Word & 0x00ff); \ + pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), XMA((Mac), (Reg+4)), &Word); \ + pByte[4] = (SK_U8)(Word & 0x00ff); \ + pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \ +} + +#define XM_OUTADDR(IoC, Mac, Reg, pVal) { \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ + SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \ + (((SK_U16)(pByte[0]) & 0x00ff) | \ + (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ + SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16) \ + (((SK_U16)(pByte[2]) & 0x00ff) | \ + (((SK_U16)(pByte[3]) << 8) & 0xff00))); \ + SK_OUT16((IoC), XMA((Mac), (Reg+4)), (SK_U16) \ + (((SK_U16)(pByte[4]) & 0x00ff) | \ + (((SK_U16)(pByte[5]) << 8) & 0xff00))); \ +} + +#define XM_INHASH(IoC, Mac, Reg, pVal) { \ + SK_U16 Word; \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ + SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \ + pByte[0] = (SK_U8)(Word & 0x00ff); \ + pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), XMA((Mac), (Reg+2)), &Word); \ + pByte[2] = (SK_U8)(Word & 0x00ff); \ + pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), XMA((Mac), (Reg+4)), &Word); \ + pByte[4] = (SK_U8)(Word & 0x00ff); \ + pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), XMA((Mac), (Reg+6)), &Word); \ + 
pByte[6] = (SK_U8)(Word & 0x00ff); \ + pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \ +} + +#define XM_OUTHASH(IoC, Mac, Reg, pVal) { \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ + SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \ + (((SK_U16)(pByte[0]) & 0x00ff)| \ + (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ + SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16) \ + (((SK_U16)(pByte[2]) & 0x00ff)| \ + (((SK_U16)(pByte[3]) << 8) & 0xff00))); \ + SK_OUT16((IoC), XMA((Mac), (Reg+4)), (SK_U16) \ + (((SK_U16)(pByte[4]) & 0x00ff)| \ + (((SK_U16)(pByte[5]) << 8) & 0xff00))); \ + SK_OUT16((IoC), XMA((Mac), (Reg+6)), (SK_U16) \ + (((SK_U16)(pByte[6]) & 0x00ff)| \ + (((SK_U16)(pByte[7]) << 8) & 0xff00))); \ +} + +/* + * macros to access the GMAC (YUKON only) + * + * GM_IN16(), to read a 16 bit register (e.g. GM_GP_STAT) + * GM_OUT16(), to write a 16 bit register (e.g. GM_GP_CTRL) + * GM_IN32(), to read a 32 bit register (e.g. GM_) + * GM_OUT32(), to write a 32 bit register (e.g. GM_) + * GM_INADDR(), to read a network address register (e.g. GM_SRC_ADDR_1L) + * GM_OUTADDR(), to write a network address register (e.g. GM_SRC_ADDR_2L) + * GM_INHASH(), to read the GM_MC_ADDR_H1 register + * GM_OUTHASH() to write the GM_MC_ADDR_H1 register + * + * para: + * Mac GMAC to access values: MAC_1 or MAC_2 + * IoC I/O context needed for SK I/O macros + * Reg GMAC Register to read or write + * (p)Val Value or pointer to the value which should be read or written + * + * usage: GM_OUT16(IoC, MAC_1, GM_GP_CTRL, Value); + */ + +#define GMA(Mac, Reg) \ + ((BASE_GMAC_1 + (Mac) * (BASE_GMAC_2 - BASE_GMAC_1)) | (Reg)) + +#define GM_IN16(IoC, Mac, Reg, pVal) \ + SK_IN16((IoC), GMA((Mac), (Reg)), (pVal)) + +#define GM_OUT16(IoC, Mac, Reg, Val) \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (Val)) + +#define GM_IN32(IoC, Mac, Reg, pVal) { \ + SK_IN16((IoC), GMA((Mac), (Reg)), \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \ + SK_IN16((IoC), GMA((Mac), (Reg+4)), \ + (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \ +} + +#define GM_OUT32(IoC, Mac, Reg, Val) { \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \ + SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16)(((Val) >> 16) & 0xffffL));\ +} + +#define GM_INADDR(IoC, Mac, Reg, pVal) { \ + SK_U16 Word; \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \ + pByte[0] = (SK_U8)(Word & 0x00ff); \ + pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \ + pByte[2] = (SK_U8)(Word & 0x00ff); \ + pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \ + pByte[4] = (SK_U8)(Word & 0x00ff); \ + pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \ +} + +#define GM_OUTADDR(IoC, Mac, Reg, pVal) { \ + SK_U8 SK_FAR *pByte; \ + pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \ + (((SK_U16)(pByte[0]) & 0x00ff) | \ + (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \ + (((SK_U16)(pByte[2]) & 0x00ff) | \ + (((SK_U16)(pByte[3]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \ + (((SK_U16)(pByte[4]) & 0x00ff) | \ + (((SK_U16)(pByte[5]) << 8) & 0xff00))); \ +} + +#define GM_INHASH(IoC, Mac, Reg, pVal) { \ + SK_U16 Word; \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \ + pByte[0] = (SK_U8)(Word & 0x00ff); \ + pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ + 
SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \ + pByte[2] = (SK_U8)(Word & 0x00ff); \ + pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \ + pByte[4] = (SK_U8)(Word & 0x00ff); \ + pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+12)), &Word); \ + pByte[6] = (SK_U8)(Word & 0x00ff); \ + pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \ +} + +#define GM_OUTHASH(IoC, Mac, Reg, pVal) { \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \ + (((SK_U16)(pByte[0]) & 0x00ff)| \ + (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \ + (((SK_U16)(pByte[2]) & 0x00ff)| \ + (((SK_U16)(pByte[3]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \ + (((SK_U16)(pByte[4]) & 0x00ff)| \ + (((SK_U16)(pByte[5]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+12)), (SK_U16) \ + (((SK_U16)(pByte[6]) & 0x00ff)| \ + (((SK_U16)(pByte[7]) << 8) & 0xff00))); \ +} + +/* + * Different MAC Types + */ +#define SK_MAC_XMAC 0 /* Xaqti XMAC II */ +#define SK_MAC_GMAC 1 /* Marvell GMAC */ + +/* + * Different PHY Types + */ +#define SK_PHY_XMAC 0 /* integrated in XMAC II */ +#define SK_PHY_BCOM 1 /* Broadcom BCM5400 */ +#define SK_PHY_LONE 2 /* Level One LXT1000 */ +#define SK_PHY_NAT 3 /* National DP83891 */ +#define SK_PHY_MARV_COPPER 4 /* Marvell 88E1011S */ +#define SK_PHY_MARV_FIBER 5 /* Marvell 88E1011S working on fiber */ + +/* + * PHY addresses (bits 12..8 of PHY address reg) + */ +#define PHY_ADDR_XMAC (0<<8) +#define PHY_ADDR_BCOM (1<<8) +#define PHY_ADDR_LONE (3<<8) +#define PHY_ADDR_NAT (0<<8) + +/* GPHY address (bits 15..11 of SMI control reg) */ +#define PHY_ADDR_MARV 0 + +/* + * macros to access the PHY + * + * PHY_READ() read a 16 bit value from the PHY + * PHY_WRITE() write a 16 bit value to the PHY + * + * para: + * IoC I/O context needed for SK I/O macros + * pPort Pointer to port struct for PhyAddr + * Mac XMAC to access values: MAC_1 or MAC_2 + * PhyReg PHY Register to read or write + * (p)Val Value or pointer to the value which should be read or + * written. + * + * usage: PHY_READ(IoC, pPort, MAC_1, PHY_CTRL, Value); + * Warning: a PHY_READ on an uninitialized PHY (PHY still in reset) never + * comes back. This is checked in DEBUG mode. 
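+ *
+ * A slightly fuller, hedged sketch (Ctrl is an illustrative local variable;
+ * PHY_CT_ANE is assumed to be the Auto-Negotiation Enable bit from the PHY
+ * definitions in xmac_ii.h; pPort must already be initialized, see the
+ * warning above):
+ *
+ *	SK_U16 Ctrl;
+ *
+ *	PHY_READ(IoC, pPort, MAC_1, PHY_CTRL, &Ctrl);
+ *	Ctrl |= PHY_CT_ANE;
+ *	PHY_WRITE(IoC, pPort, MAC_1, PHY_CTRL, Ctrl);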
+ */ +#ifndef DEBUG +#define PHY_READ(IoC, pPort, Mac, PhyReg, pVal) { \ + SK_U16 Mmu; \ + \ + XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \ + XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \ + if ((pPort)->PhyType != SK_PHY_XMAC) { \ + do { \ + XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \ + } while ((Mmu & XM_MMU_PHY_RDY) == 0); \ + XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \ + } \ +} +#else +#define PHY_READ(IoC, pPort, Mac, PhyReg, pVal) { \ + SK_U16 Mmu; \ + int __i = 0; \ + \ + XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \ + XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \ + if ((pPort)->PhyType != SK_PHY_XMAC) { \ + do { \ + XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \ + __i++; \ + if (__i > 100000) { \ + SK_DBG_PRINTF("*****************************\n"); \ + SK_DBG_PRINTF("PHY_READ on uninitialized PHY\n"); \ + SK_DBG_PRINTF("*****************************\n"); \ + break; \ + } \ + } while ((Mmu & XM_MMU_PHY_RDY) == 0); \ + XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \ + } \ +} +#endif /* DEBUG */ + +#define PHY_WRITE(IoC, pPort, Mac, PhyReg, Val) { \ + SK_U16 Mmu; \ + \ + if ((pPort)->PhyType != SK_PHY_XMAC) { \ + do { \ + XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \ + } while ((Mmu & XM_MMU_PHY_BUSY) != 0); \ + } \ + XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \ + XM_OUT16((IoC), (Mac), XM_PHY_DATA, (Val)); \ + if ((pPort)->PhyType != SK_PHY_XMAC) { \ + do { \ + XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \ + } while ((Mmu & XM_MMU_PHY_BUSY) != 0); \ + } \ +} + +/* + * Macro PCI_C() + * + * Use this macro to access PCI config register from the I/O space. + * + * para: + * Addr PCI configuration register to access. + * Values: PCI_VENDOR_ID ... PCI_VPD_ADR_REG, + * + * usage SK_IN16(pAC, PCI_C(PCI_VENDOR_ID), pVal); + */ +#define PCI_C(Addr) (B7_CFG_SPC + (Addr)) /* PCI Config Space */ + +/* + * Macro SK_HW_ADDR(Base, Addr) + * + * Calculates the effective HW address + * + * para: + * Base I/O or memory base address + * Addr Address offset + * + * usage: May be used in SK_INxx and SK_OUTxx macros + * #define SK_IN8(pAC, Addr, pVal) ...\ + * *pVal = (SK_U8)inp(SK_HW_ADDR(pAC->Hw.Iop, Addr))) + */ +#ifdef SK_MEM_MAPPED_IO +#define SK_HW_ADDR(Base, Addr) ((Base) + (Addr)) +#else /* SK_MEM_MAPPED_IO */ +#define SK_HW_ADDR(Base, Addr) \ + ((Base) + (((Addr) & 0x7f) | (((Addr) >> 7 > 0) ? 0x80 : 0))) +#endif /* SK_MEM_MAPPED_IO */ + +#define SZ_LONG (sizeof(SK_U32)) + +/* + * Macro SK_HWAC_LINK_LED() + * + * Use this macro to set the link LED mode. 
+ * para: + * pAC Pointer to adapter context struct + * IoC I/O context needed for SK I/O macros + * Port Port number + * Mode Mode to set for this LED + */ +#define SK_HWAC_LINK_LED(pAC, IoC, Port, Mode) \ + SK_OUT8(IoC, MR_ADDR(Port, LNK_LED_REG), Mode); + + +/* typedefs *******************************************************************/ + + +/* function prototypes ********************************************************/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INC_SKGEHW_H */ diff --git a/drivers/net/sk98lin/h/skgehwt.h b/drivers/net/sk98lin/h/skgehwt.h new file mode 100644 index 000000000000..e6b0016a695c --- /dev/null +++ b/drivers/net/sk98lin/h/skgehwt.h @@ -0,0 +1,48 @@ +/****************************************************************************** + * + * Name: skhwt.h + * Project: Gigabit Ethernet Adapters, Event Scheduler Module + * Version: $Revision: 1.7 $ + * Date: $Date: 2003/09/16 12:55:08 $ + * Purpose: Defines for the hardware timer functions + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/* + * SKGEHWT.H contains all defines and types for the timer functions + */ + +#ifndef _SKGEHWT_H_ +#define _SKGEHWT_H_ + +/* + * SK Hardware Timer + * - needed wherever the HWT module is used + * - use in Adapters context name pAC->Hwt + */ +typedef struct s_Hwt { + SK_U32 TStart; /* HWT start */ + SK_U32 TStop; /* HWT stop */ + int TActive; /* HWT: flag : active/inactive */ +} SK_HWT; + +extern void SkHwtInit(SK_AC *pAC, SK_IOC Ioc); +extern void SkHwtStart(SK_AC *pAC, SK_IOC Ioc, SK_U32 Time); +extern void SkHwtStop(SK_AC *pAC, SK_IOC Ioc); +extern SK_U32 SkHwtRead(SK_AC *pAC, SK_IOC Ioc); +extern void SkHwtIsr(SK_AC *pAC, SK_IOC Ioc); +#endif /* _SKGEHWT_H_ */ diff --git a/drivers/net/sk98lin/h/skgei2c.h b/drivers/net/sk98lin/h/skgei2c.h new file mode 100644 index 000000000000..d9b6f6d8dfe2 --- /dev/null +++ b/drivers/net/sk98lin/h/skgei2c.h @@ -0,0 +1,210 @@ +/****************************************************************************** + * + * Name: skgei2c.h + * Project: Gigabit Ethernet Adapters, TWSI-Module + * Version: $Revision: 1.25 $ + * Date: $Date: 2003/10/20 09:06:05 $ + * Purpose: Special defines for TWSI + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +/* + * SKGEI2C.H contains all SK-98xx specific defines for the TWSI handling + */ + +#ifndef _INC_SKGEI2C_H_ +#define _INC_SKGEI2C_H_ + +/* + * Macros to access the B2_I2C_CTRL + */ +#define SK_I2C_CTL(IoC, flag, dev, dev_size, reg, burst) \ + SK_OUT32(IoC, B2_I2C_CTRL,\ + (flag ? 0x80000000UL : 0x0L) | \ + (((SK_U32)reg << 16) & I2C_ADDR) | \ + (((SK_U32)dev << 9) & I2C_DEV_SEL) | \ + (dev_size & I2C_DEV_SIZE) | \ + ((burst << 4) & I2C_BURST_LEN)) + +#define SK_I2C_STOP(IoC) { \ + SK_U32 I2cCtrl; \ + SK_IN32(IoC, B2_I2C_CTRL, &I2cCtrl); \ + SK_OUT32(IoC, B2_I2C_CTRL, I2cCtrl | I2C_STOP); \ +} + +#define SK_I2C_GET_CTL(IoC, pI2cCtrl) SK_IN32(IoC, B2_I2C_CTRL, pI2cCtrl) + +/* + * Macros to access the TWSI SW Registers + */ +#define SK_I2C_SET_BIT(IoC, SetBits) { \ + SK_U8 OrgBits; \ + SK_IN8(IoC, B2_I2C_SW, &OrgBits); \ + SK_OUT8(IoC, B2_I2C_SW, OrgBits | (SK_U8)(SetBits)); \ +} + +#define SK_I2C_CLR_BIT(IoC, ClrBits) { \ + SK_U8 OrgBits; \ + SK_IN8(IoC, B2_I2C_SW, &OrgBits); \ + SK_OUT8(IoC, B2_I2C_SW, OrgBits & ~((SK_U8)(ClrBits))); \ +} + +#define SK_I2C_GET_SW(IoC, pI2cSw) SK_IN8(IoC, B2_I2C_SW, pI2cSw) + +/* + * define the possible sensor states + */ +#define SK_SEN_IDLE 0 /* Idle: sensor not read */ +#define SK_SEN_VALUE 1 /* Value Read cycle */ +#define SK_SEN_VALEXT 2 /* Extended Value Read cycle */ + +/* + * Conversion factor to convert read Voltage sensor to milli Volt + * Conversion factor to convert read Temperature sensor to 10th degree Celsius + */ +#define SK_LM80_VT_LSB 22 /* 22mV LSB resolution */ +#define SK_LM80_TEMP_LSB 10 /* 1 degree LSB resolution */ +#define SK_LM80_TEMPEXT_LSB 5 /* 0.5 degree LSB resolution for ext. val. */ + +/* + * formula: counter = (22500*60)/(rpm * divisor * pulses/2) + * assuming: 6500rpm, 4 pulses, divisor 1 + */ +#define SK_LM80_FAN_FAKTOR ((22500L*60)/(1*2)) + +/* + * Define sensor management data + * Maximum is reached on Genesis copper dual port and Yukon-64 + * Board specific maximum is in pAC->I2c.MaxSens + */ +#define SK_MAX_SENSORS 8 /* maximal no. of installed sensors */ +#define SK_MIN_SENSORS 5 /* minimal no. of installed sensors */ + +/* + * To watch the state machine (SM) use the timer in two ways + * instead of one as hitherto + */ +#define SK_TIMER_WATCH_SM 0 /* Watch the SM to finish in a spec. time */ +#define SK_TIMER_NEW_GAUGING 1 /* Start a new gauging when timer expires */ + +/* + * Defines for the individual thresholds + */ + +/* Temperature sensor */ +#define SK_SEN_TEMP_HIGH_ERR 800 /* Temperature High Err Threshold */ +#define SK_SEN_TEMP_HIGH_WARN 700 /* Temperature High Warn Threshold */ +#define SK_SEN_TEMP_LOW_WARN 100 /* Temperature Low Warn Threshold */ +#define SK_SEN_TEMP_LOW_ERR 0 /* Temperature Low Err Threshold */ + +/* VCC which should be 5 V */ +#define SK_SEN_PCI_5V_HIGH_ERR 5588 /* Voltage PCI High Err Threshold */ +#define SK_SEN_PCI_5V_HIGH_WARN 5346 /* Voltage PCI High Warn Threshold */ +#define SK_SEN_PCI_5V_LOW_WARN 4664 /* Voltage PCI Low Warn Threshold */ +#define SK_SEN_PCI_5V_LOW_ERR 4422 /* Voltage PCI Low Err Threshold */ + +/* + * VIO may be 5 V or 3.3 V. Initialization takes two parts: + * 1. Initialize lowest lower limit and highest higher limit. + * 2. After the first value is read, correct the upper or the lower limit to + * the appropriate C constant. + * + * Warning limits are +-5% of the expected voltage. + * Error limits are +-10% of the expected voltage.
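+ *
+ * A hedged sketch of this second step (SenValue, LowWarn, LowErr, HighWarn
+ * and HighErr are illustrative names only, not the driver's actual code;
+ * the threshold constants are the ones defined below):
+ *
+ *	if (SenValue > SK_SEN_PCI_IO_RANGE_LIMITER) {
+ *		LowWarn = SK_SEN_PCI_IO_5V_LOW_WARN;	(VIO turned out to be 5.0 V)
+ *		LowErr = SK_SEN_PCI_IO_5V_LOW_ERR;
+ *	} else {
+ *		HighWarn = SK_SEN_PCI_IO_3V3_HIGH_WARN;	(VIO turned out to be 3.3 V)
+ *		HighErr = SK_SEN_PCI_IO_3V3_HIGH_ERR;
+ *	}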
+ */ + +/* Bug fix AF: 16.Aug.2001: Correct the init base of LM80 sensor */ + +#define SK_SEN_PCI_IO_5V_HIGH_ERR 5566 /* + 10% V PCI-IO High Err Threshold */ +#define SK_SEN_PCI_IO_5V_HIGH_WARN 5324 /* + 5% V PCI-IO High Warn Threshold */ + /* 5000 mVolt */ +#define SK_SEN_PCI_IO_5V_LOW_WARN 4686 /* - 5% V PCI-IO Low Warn Threshold */ +#define SK_SEN_PCI_IO_5V_LOW_ERR 4444 /* - 10% V PCI-IO Low Err Threshold */ + +#define SK_SEN_PCI_IO_RANGE_LIMITER 4000 /* 4000 mV range delimiter */ + +/* correction values for the second pass */ +#define SK_SEN_PCI_IO_3V3_HIGH_ERR 3850 /* + 15% V PCI-IO High Err Threshold */ +#define SK_SEN_PCI_IO_3V3_HIGH_WARN 3674 /* + 10% V PCI-IO High Warn Threshold */ + /* 3300 mVolt */ +#define SK_SEN_PCI_IO_3V3_LOW_WARN 2926 /* - 10% V PCI-IO Low Warn Threshold */ +#define SK_SEN_PCI_IO_3V3_LOW_ERR 2772 /* - 15% V PCI-IO Low Err Threshold */ + +/* + * VDD voltage + */ +#define SK_SEN_VDD_HIGH_ERR 3630 /* Voltage ASIC High Err Threshold */ +#define SK_SEN_VDD_HIGH_WARN 3476 /* Voltage ASIC High Warn Threshold */ +#define SK_SEN_VDD_LOW_WARN 3146 /* Voltage ASIC Low Warn Threshold */ +#define SK_SEN_VDD_LOW_ERR 2970 /* Voltage ASIC Low Err Threshold */ + +/* + * PHY PLL 3V3 voltage + */ +#define SK_SEN_PLL_3V3_HIGH_ERR 3630 /* Voltage PMA High Err Threshold */ +#define SK_SEN_PLL_3V3_HIGH_WARN 3476 /* Voltage PMA High Warn Threshold */ +#define SK_SEN_PLL_3V3_LOW_WARN 3146 /* Voltage PMA Low Warn Threshold */ +#define SK_SEN_PLL_3V3_LOW_ERR 2970 /* Voltage PMA Low Err Threshold */ + +/* + * VAUX (YUKON only) + */ +#define SK_SEN_VAUX_3V3_HIGH_ERR 3630 /* Voltage VAUX High Err Threshold */ +#define SK_SEN_VAUX_3V3_HIGH_WARN 3476 /* Voltage VAUX High Warn Threshold */ +#define SK_SEN_VAUX_3V3_LOW_WARN 3146 /* Voltage VAUX Low Warn Threshold */ +#define SK_SEN_VAUX_3V3_LOW_ERR 2970 /* Voltage VAUX Low Err Threshold */ +#define SK_SEN_VAUX_0V_WARN_ERR 0 /* if VAUX not present */ +#define SK_SEN_VAUX_RANGE_LIMITER 1000 /* 1000 mV range delimiter */ + +/* + * PHY 2V5 voltage + */ +#define SK_SEN_PHY_2V5_HIGH_ERR 2750 /* Voltage PHY High Err Threshold */ +#define SK_SEN_PHY_2V5_HIGH_WARN 2640 /* Voltage PHY High Warn Threshold */ +#define SK_SEN_PHY_2V5_LOW_WARN 2376 /* Voltage PHY Low Warn Threshold */ +#define SK_SEN_PHY_2V5_LOW_ERR 2222 /* Voltage PHY Low Err Threshold */ + +/* + * ASIC Core 1V5 voltage (YUKON only) + */ +#define SK_SEN_CORE_1V5_HIGH_ERR 1650 /* Voltage ASIC Core High Err Threshold */ +#define SK_SEN_CORE_1V5_HIGH_WARN 1575 /* Voltage ASIC Core High Warn Threshold */ +#define SK_SEN_CORE_1V5_LOW_WARN 1425 /* Voltage ASIC Core Low Warn Threshold */ +#define SK_SEN_CORE_1V5_LOW_ERR 1350 /* Voltage ASIC Core Low Err Threshold */ + +/* + * FAN 1 speed + */ +/* assuming: 6500rpm +-15%, 4 pulses, + * warning at: 80 % + * error at: 70 % + * no upper limit + */ +#define SK_SEN_FAN_HIGH_ERR 20000 /* FAN Speed High Err Threshold */ +#define SK_SEN_FAN_HIGH_WARN 20000 /* FAN Speed High Warn Threshold */ +#define SK_SEN_FAN_LOW_WARN 5200 /* FAN Speed Low Warn Threshold */ +#define SK_SEN_FAN_LOW_ERR 4550 /* FAN Speed Low Err Threshold */ + +/* + * Some Voltages need dynamic thresholds + */ +#define SK_SEN_DYN_INIT_NONE 0 /* No dynamic init of thresholds */ +#define SK_SEN_DYN_INIT_PCI_IO 10 /* Init PCI-IO with new thresholds */ +#define SK_SEN_DYN_INIT_VAUX 11 /* Init VAUX with new thresholds */ + +extern int SkLm80ReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen); +#endif /* n_INC_SKGEI2C_H */ diff --git a/drivers/net/sk98lin/h/skgeinit.h 
b/drivers/net/sk98lin/h/skgeinit.h new file mode 100644 index 000000000000..143e635ec24d --- /dev/null +++ b/drivers/net/sk98lin/h/skgeinit.h @@ -0,0 +1,797 @@ +/****************************************************************************** + * + * Name: skgeinit.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.83 $ + * Date: $Date: 2003/09/16 14:07:37 $ + * Purpose: Structures and prototypes for the GE Init Module + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef __INC_SKGEINIT_H_ +#define __INC_SKGEINIT_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* defines ********************************************************************/ + +#define SK_TEST_VAL 0x11335577UL + +/* modifying Link LED behaviour (used with SkGeLinkLED()) */ +#define SK_LNK_OFF LED_OFF +#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) +#define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON) +#define SK_LNK_PERM (LED_ON | LED_BLK_OFF | LED_SYNC_ON) +#define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF) + +/* parameter 'Mode' when calling SK_HWAC_LINK_LED() */ +#define SK_LED_OFF LED_OFF +#define SK_LED_ACTIVE (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) +#define SK_LED_STANDBY (LED_ON | LED_BLK_ON | LED_SYNC_OFF) + +/* addressing LED Registers in SkGeXmitLED() */ +#define XMIT_LED_INI 0 +#define XMIT_LED_CNT (RX_LED_VAL - RX_LED_INI) +#define XMIT_LED_CTRL (RX_LED_CTRL- RX_LED_INI) +#define XMIT_LED_TST (RX_LED_TST - RX_LED_INI) + +/* parameter 'Mode' when calling SkGeXmitLED() */ +#define SK_LED_DIS 0 +#define SK_LED_ENA 1 +#define SK_LED_TST 2 + +/* Counter and Timer constants, for a host clock of 62.5 MHz */ +#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */ + +#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */ + +#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */ + /* 215 ms at 78.12 MHz */ + +#define SK_FACT_62 100 /* is given in percent */ +#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */ +#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */ + +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ +#define SK_PKT_TO_MAX 0xffff /* Maximum value */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ + +#define SK_PHY_ACC_TO 600000 /* PHY access timeout */ + +/* RAM Buffer High Pause Threshold values */ +#define SK_RB_ULPP ( 8 * 1024) /* Upper Level in kB/8 */ +#define SK_RB_LLPP_S (10 * 1024) /* Lower Level for small Queues */ +#define SK_RB_LLPP_B (16 * 1024) /* Lower Level for big Queues */ + +#ifndef SK_BMU_RX_WM +#define SK_BMU_RX_WM 0x600 /* BMU Rx Watermark */ +#endif +#ifndef SK_BMU_TX_WM +#define SK_BMU_TX_WM 0x600 /* BMU Tx Watermark */ +#endif + +/* XMAC II Rx High Watermark */ +#define SK_XM_RX_HI_WM 0x05aa /* 1450 */ + +/* XMAC II Tx Threshold */ +#define SK_XM_THR_REDL 0x01fb /* .. 
for redundant link usage */ +#define SK_XM_THR_SL 0x01fb /* .. for single link adapters */ +#define SK_XM_THR_MULL 0x01fb /* .. for multiple link usage */ +#define SK_XM_THR_JUMBO 0x03fc /* .. for jumbo frame usage */ + +/* values for GIPortUsage */ +#define SK_RED_LINK 1 /* redundant link usage */ +#define SK_MUL_LINK 2 /* multiple link usage */ +#define SK_JUMBO_LINK 3 /* driver uses jumbo frames */ + +/* Minimum RAM Buffer Rx Queue Size */ +#define SK_MIN_RXQ_SIZE 16 /* 16 kB */ + +/* Minimum RAM Buffer Tx Queue Size */ +#define SK_MIN_TXQ_SIZE 16 /* 16 kB */ + +/* Queue Size units */ +#define QZ_UNITS 0x7 +#define QZ_STEP 8 + +/* Percentage of queue size from whole memory */ +/* 80 % for receive */ +#define RAM_QUOTA_RX 80L +/* 0% for sync transfer */ +#define RAM_QUOTA_SYNC 0L +/* the rest (20%) is taken for async transfer */ + +/* Get the rounded queue size in Bytes in 8k steps */ +#define ROUND_QUEUE_SIZE(SizeInBytes) \ + ((((unsigned long) (SizeInBytes) + (QZ_STEP*1024L)-1) / 1024) & \ + ~(QZ_STEP-1)) + +/* Get the rounded queue size in KBytes in 8k steps */ +#define ROUND_QUEUE_SIZE_KB(Kilobytes) \ + ROUND_QUEUE_SIZE((Kilobytes) * 1024L) + +/* Types of RAM Buffer Queues */ +#define SK_RX_SRAM_Q 1 /* small receive queue */ +#define SK_RX_BRAM_Q 2 /* big receive queue */ +#define SK_TX_RAM_Q 3 /* small or big transmit queue */ + +/* parameter 'Dir' when calling SkGeStopPort() */ +#define SK_STOP_TX 1 /* Stops the transmit path, resets the XMAC */ +#define SK_STOP_RX 2 /* Stops the receive path */ +#define SK_STOP_ALL 3 /* Stops Rx and Tx path, resets the XMAC */ + +/* parameter 'RstMode' when calling SkGeStopPort() */ +#define SK_SOFT_RST 1 /* perform a software reset */ +#define SK_HARD_RST 2 /* perform a hardware reset */ + +/* Init Levels */ +#define SK_INIT_DATA 0 /* Init level 0: init data structures */ +#define SK_INIT_IO 1 /* Init level 1: init with IOs */ +#define SK_INIT_RUN 2 /* Init level 2: init for run time */ + +/* Link Mode Parameter */ +#define SK_LMODE_HALF 1 /* Half Duplex Mode */ +#define SK_LMODE_FULL 2 /* Full Duplex Mode */ +#define SK_LMODE_AUTOHALF 3 /* AutoHalf Duplex Mode */ +#define SK_LMODE_AUTOFULL 4 /* AutoFull Duplex Mode */ +#define SK_LMODE_AUTOBOTH 5 /* AutoBoth Duplex Mode */ +#define SK_LMODE_AUTOSENSE 6 /* configured mode auto sensing */ +#define SK_LMODE_INDETERMINATED 7 /* indeterminated */ + +/* Auto-negotiation timeout in 100ms granularity */ +#define SK_AND_MAX_TO 6 /* Wait 600 msec before link comes up */ + +/* Auto-negotiation error codes */ +#define SK_AND_OK 0 /* no error */ +#define SK_AND_OTHER 1 /* other error than below */ +#define SK_AND_DUP_CAP 2 /* Duplex capabilities error */ + + +/* Link Speed Capabilities */ +#define SK_LSPEED_CAP_AUTO (1<<0) /* Automatic resolution */ +#define SK_LSPEED_CAP_10MBPS (1<<1) /* 10 Mbps */ +#define SK_LSPEED_CAP_100MBPS (1<<2) /* 100 Mbps */ +#define SK_LSPEED_CAP_1000MBPS (1<<3) /* 1000 Mbps */ +#define SK_LSPEED_CAP_INDETERMINATED (1<<4) /* indeterminated */ + +/* Link Speed Parameter */ +#define SK_LSPEED_AUTO 1 /* Automatic resolution */ +#define SK_LSPEED_10MBPS 2 /* 10 Mbps */ +#define SK_LSPEED_100MBPS 3 /* 100 Mbps */ +#define SK_LSPEED_1000MBPS 4 /* 1000 Mbps */ +#define SK_LSPEED_INDETERMINATED 5 /* indeterminated */ + +/* Link Speed Current State */ +#define SK_LSPEED_STAT_UNKNOWN 1 +#define SK_LSPEED_STAT_10MBPS 2 +#define SK_LSPEED_STAT_100MBPS 3 +#define SK_LSPEED_STAT_1000MBPS 4 +#define SK_LSPEED_STAT_INDETERMINATED 5 + + +/* Link Capability Parameter */ +#define SK_LMODE_CAP_HALF 
(1<<0) /* Half Duplex Mode */ +#define SK_LMODE_CAP_FULL (1<<1) /* Full Duplex Mode */ +#define SK_LMODE_CAP_AUTOHALF (1<<2) /* AutoHalf Duplex Mode */ +#define SK_LMODE_CAP_AUTOFULL (1<<3) /* AutoFull Duplex Mode */ +#define SK_LMODE_CAP_INDETERMINATED (1<<4) /* indeterminated */ + +/* Link Mode Current State */ +#define SK_LMODE_STAT_UNKNOWN 1 /* Unknown Duplex Mode */ +#define SK_LMODE_STAT_HALF 2 /* Half Duplex Mode */ +#define SK_LMODE_STAT_FULL 3 /* Full Duplex Mode */ +#define SK_LMODE_STAT_AUTOHALF 4 /* Half Duplex Mode obtained by Auto-Neg */ +#define SK_LMODE_STAT_AUTOFULL 5 /* Full Duplex Mode obtained by Auto-Neg */ +#define SK_LMODE_STAT_INDETERMINATED 6 /* indeterminated */ + +/* Flow Control Mode Parameter (and capabilities) */ +#define SK_FLOW_MODE_NONE 1 /* No Flow-Control */ +#define SK_FLOW_MODE_LOC_SEND 2 /* Local station sends PAUSE */ +#define SK_FLOW_MODE_SYMMETRIC 3 /* Both stations may send PAUSE */ +#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both stations may send PAUSE or + * just the remote station may send PAUSE + */ +#define SK_FLOW_MODE_INDETERMINATED 5 /* indeterminated */ + +/* Flow Control Status Parameter */ +#define SK_FLOW_STAT_NONE 1 /* No Flow Control */ +#define SK_FLOW_STAT_REM_SEND 2 /* Remote Station sends PAUSE */ +#define SK_FLOW_STAT_LOC_SEND 3 /* Local station sends PAUSE */ +#define SK_FLOW_STAT_SYMMETRIC 4 /* Both station may send PAUSE */ +#define SK_FLOW_STAT_INDETERMINATED 5 /* indeterminated */ + +/* Master/Slave Mode Capabilities */ +#define SK_MS_CAP_AUTO (1<<0) /* Automatic resolution */ +#define SK_MS_CAP_MASTER (1<<1) /* This station is master */ +#define SK_MS_CAP_SLAVE (1<<2) /* This station is slave */ +#define SK_MS_CAP_INDETERMINATED (1<<3) /* indeterminated */ + +/* Set Master/Slave Mode Parameter (and capabilities) */ +#define SK_MS_MODE_AUTO 1 /* Automatic resolution */ +#define SK_MS_MODE_MASTER 2 /* This station is master */ +#define SK_MS_MODE_SLAVE 3 /* This station is slave */ +#define SK_MS_MODE_INDETERMINATED 4 /* indeterminated */ + +/* Master/Slave Status Parameter */ +#define SK_MS_STAT_UNSET 1 /* The M/S status is not set */ +#define SK_MS_STAT_MASTER 2 /* This station is master */ +#define SK_MS_STAT_SLAVE 3 /* This station is slave */ +#define SK_MS_STAT_FAULT 4 /* M/S resolution failed */ +#define SK_MS_STAT_INDETERMINATED 5 /* indeterminated */ + +/* parameter 'Mode' when calling SkXmSetRxCmd() */ +#define SK_STRIP_FCS_ON (1<<0) /* Enable FCS stripping of Rx frames */ +#define SK_STRIP_FCS_OFF (1<<1) /* Disable FCS stripping of Rx frames */ +#define SK_STRIP_PAD_ON (1<<2) /* Enable pad byte stripping of Rx fr */ +#define SK_STRIP_PAD_OFF (1<<3) /* Disable pad byte stripping of Rx fr */ +#define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error */ +#define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error */ +#define SK_BIG_PK_OK_ON (1<<6) /* Don't set Rx Error bit for big frames */ +#define SK_BIG_PK_OK_OFF (1<<7) /* Set Rx Error bit for big frames */ +#define SK_SELF_RX_ON (1<<8) /* Enable Rx of own packets */ +#define SK_SELF_RX_OFF (1<<9) /* Disable Rx of own packets */ + +/* parameter 'Para' when calling SkMacSetRxTxEn() */ +#define SK_MAC_LOOPB_ON (1<<0) /* Enable MAC Loopback Mode */ +#define SK_MAC_LOOPB_OFF (1<<1) /* Disable MAC Loopback Mode */ +#define SK_PHY_LOOPB_ON (1<<2) /* Enable PHY Loopback Mode */ +#define SK_PHY_LOOPB_OFF (1<<3) /* Disable PHY Loopback Mode */ +#define SK_PHY_FULLD_ON (1<<4) /* Enable GMII Full Duplex */ +#define SK_PHY_FULLD_OFF (1<<5) /* Disable GMII 
Full Duplex */ + +/* States of PState */ +#define SK_PRT_RESET 0 /* the port is reset */ +#define SK_PRT_STOP 1 /* the port is stopped (similar to SW reset) */ +#define SK_PRT_INIT 2 /* the port is initialized */ +#define SK_PRT_RUN 3 /* the port has an active link */ + +/* PHY power down modes */ +#define PHY_PM_OPERATIONAL_MODE 0 /* PHY operational mode */ +#define PHY_PM_DEEP_SLEEP 1 /* coma mode --> minimal power */ +#define PHY_PM_IEEE_POWER_DOWN 2 /* IEEE 22.2.4.1.5 compl. power down */ +#define PHY_PM_ENERGY_DETECT 3 /* energy detect */ +#define PHY_PM_ENERGY_DETECT_PLUS 4 /* energy detect plus */ + +/* Default receive frame limit for Workaround of XMAC Errata */ +#define SK_DEF_RX_WA_LIM SK_CONSTU64(100) + +/* values for GILedBlinkCtrl (LED Blink Control) */ +#define SK_ACT_LED_BLINK (1<<0) /* Active LED blinking */ +#define SK_DUP_LED_NORMAL (1<<1) /* Duplex LED normal */ +#define SK_LED_LINK100_ON (1<<2) /* Link 100M LED on */ + +/* Link Partner Status */ +#define SK_LIPA_UNKNOWN 0 /* Link partner is in unknown state */ +#define SK_LIPA_MANUAL 1 /* Link partner is in detected manual state */ +#define SK_LIPA_AUTO 2 /* Link partner is in auto-negotiation state */ + +/* Maximum Restarts before restart is ignored (3Com WA) */ +#define SK_MAX_LRESTART 3 /* Max. 3 times the link is restarted */ + +/* Max. Auto-neg. timeouts before link detection in sense mode is reset */ +#define SK_MAX_ANEG_TO 10 /* Max. 10 times the sense mode is reset */ + +/* structures *****************************************************************/ + +/* + * MAC specific functions + */ +typedef struct s_GeMacFunc { + int (*pFnMacUpdateStats)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); + int (*pFnMacStatistic)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, + SK_U16 StatAddr, SK_U32 SK_FAR *pVal); + int (*pFnMacResetCounter)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); + int (*pFnMacOverflow)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, + SK_U16 IStatus, SK_U64 SK_FAR *pVal); +} SK_GEMACFUNC; + +/* + * Port Structure + */ +typedef struct s_GePort { +#ifndef SK_DIAG + SK_TIMER PWaTimer; /* Workaround Timer */ + SK_TIMER HalfDupChkTimer; +#endif /* SK_DIAG */ + SK_U32 PPrevShorts; /* Previous Short Counter checking */ + SK_U32 PPrevFcs; /* Previous FCS Error Counter checking */ + SK_U64 PPrevRx; /* Previous RxOk Counter checking */ + SK_U64 PRxLim; /* Previous RxOk Counter checking */ + SK_U64 LastOctets; /* For half duplex hang check */ + int PLinkResCt; /* Link Restart Counter */ + int PAutoNegTimeOut;/* Auto-negotiation timeout current value */ + int PAutoNegTOCt; /* Auto-negotiation Timeout Counter */ + int PRxQSize; /* Port Rx Queue Size in kB */ + int PXSQSize; /* Port Synchronous Transmit Queue Size in kB */ + int PXAQSize; /* Port Asynchronous Transmit Queue Size in kB */ + SK_U32 PRxQRamStart; /* Receive Queue RAM Buffer Start Address */ + SK_U32 PRxQRamEnd; /* Receive Queue RAM Buffer End Address */ + SK_U32 PXsQRamStart; /* Sync Tx Queue RAM Buffer Start Address */ + SK_U32 PXsQRamEnd; /* Sync Tx Queue RAM Buffer End Address */ + SK_U32 PXaQRamStart; /* Async Tx Queue RAM Buffer Start Address */ + SK_U32 PXaQRamEnd; /* Async Tx Queue RAM Buffer End Address */ + SK_U32 PRxOverCnt; /* Receive Overflow Counter */ + int PRxQOff; /* Rx Queue Address Offset */ + int PXsQOff; /* Synchronous Tx Queue Address Offset */ + int PXaQOff; /* Asynchronous Tx Queue Address Offset */ + int PhyType; /* PHY used on this port */ + int PState; /* Port status (reset, stop, init, run) */ + SK_U16 PhyId1; /* PHY Id1 on this port */ + 
SK_U16 PhyAddr; /* MDIO/MDC PHY address */ + SK_U16 PIsave; /* Saved Interrupt status word */ + SK_U16 PSsave; /* Saved PHY status word */ + SK_U16 PGmANegAdv; /* Saved GPhy AutoNegAdvertisment register */ + SK_BOOL PHWLinkUp; /* The hardware Link is up (wiring) */ + SK_BOOL PLinkBroken; /* Is Link broken ? */ + SK_BOOL PCheckPar; /* Do we check for parity errors ? */ + SK_BOOL HalfDupTimerActive; + SK_U8 PLinkCap; /* Link Capabilities */ + SK_U8 PLinkModeConf; /* Link Mode configured */ + SK_U8 PLinkMode; /* Link Mode currently used */ + SK_U8 PLinkModeStatus;/* Link Mode Status */ + SK_U8 PLinkSpeedCap; /* Link Speed Capabilities(10/100/1000 Mbps) */ + SK_U8 PLinkSpeed; /* configured Link Speed (10/100/1000 Mbps) */ + SK_U8 PLinkSpeedUsed; /* current Link Speed (10/100/1000 Mbps) */ + SK_U8 PFlowCtrlCap; /* Flow Control Capabilities */ + SK_U8 PFlowCtrlMode; /* Flow Control Mode */ + SK_U8 PFlowCtrlStatus;/* Flow Control Status */ + SK_U8 PMSCap; /* Master/Slave Capabilities */ + SK_U8 PMSMode; /* Master/Slave Mode */ + SK_U8 PMSStatus; /* Master/Slave Status */ + SK_BOOL PAutoNegFail; /* Auto-negotiation fail flag */ + SK_U8 PLipaAutoNeg; /* Auto-negotiation possible with Link Partner */ + SK_U8 PCableLen; /* Cable Length */ + SK_U8 PMdiPairLen[4]; /* MDI[0..3] Pair Length */ + SK_U8 PMdiPairSts[4]; /* MDI[0..3] Pair Diagnostic Status */ + SK_U8 PPhyPowerState; /* PHY current power state */ + int PMacColThres; /* MAC Collision Threshold */ + int PMacJamLen; /* MAC Jam length */ + int PMacJamIpgVal; /* MAC Jam IPG */ + int PMacJamIpgData; /* MAC IPG Jam to Data */ + int PMacIpgData; /* MAC Data IPG */ + SK_BOOL PMacLimit4; /* reset collision counter and backoff algorithm */ +} SK_GEPORT; + +/* + * Gigabit Ethernet Initialization Struct + * (has to be included in the adapter context) + */ +typedef struct s_GeInit { + int GIChipId; /* Chip Identification Number */ + int GIChipRev; /* Chip Revision Number */ + SK_U8 GIPciHwRev; /* PCI HW Revision Number */ + SK_BOOL GIGenesis; /* Genesis adapter ? */ + SK_BOOL GIYukon; /* YUKON-A1/Bx chip */ + SK_BOOL GIYukonLite; /* YUKON-Lite chip */ + SK_BOOL GICopperType; /* Copper Type adapter ? */ + SK_BOOL GIPciSlot64; /* 64-bit PCI Slot */ + SK_BOOL GIPciClock66; /* 66 MHz PCI Clock */ + SK_BOOL GIVauxAvail; /* VAUX available (YUKON) */ + SK_BOOL GIYukon32Bit; /* 32-Bit YUKON adapter */ + SK_U16 GILedBlinkCtrl; /* LED Blink Control */ + int GIMacsFound; /* Number of MACs found on this adapter */ + int GIMacType; /* MAC Type used on this adapter */ + int GIHstClkFact; /* Host Clock Factor (62.5 / HstClk * 100) */ + int GIPortUsage; /* Driver Port Usage */ + int GILevel; /* Initialization Level completed */ + int GIRamSize; /* The RAM size of the adapter in kB */ + int GIWolOffs; /* WOL Register Offset (HW-Bug in Rev. A) */ + SK_U32 GIRamOffs; /* RAM Address Offset for addr calculation */ + SK_U32 GIPollTimerVal; /* Descr. 
Poll Timer Init Val (HstClk ticks) */ + SK_U32 GIValIrqMask; /* Value for Interrupt Mask */ + SK_U32 GITimeStampCnt; /* Time Stamp High Counter (YUKON only) */ + SK_GEPORT GP[SK_MAX_MACS];/* Port Dependent Information */ + SK_GEMACFUNC GIFunc; /* MAC depedent functions */ +} SK_GEINIT; + +/* + * Error numbers and messages for skxmac2.c and skgeinit.c + */ +#define SKERR_HWI_E001 (SK_ERRBASE_HWINIT) +#define SKERR_HWI_E001MSG "SkXmClrExactAddr() has got illegal parameters" +#define SKERR_HWI_E002 (SKERR_HWI_E001+1) +#define SKERR_HWI_E002MSG "SkGeInit(): Level 1 call missing" +#define SKERR_HWI_E003 (SKERR_HWI_E002+1) +#define SKERR_HWI_E003MSG "SkGeInit() called with illegal init Level" +#define SKERR_HWI_E004 (SKERR_HWI_E003+1) +#define SKERR_HWI_E004MSG "SkGeInitPort(): Queue Size illegal configured" +#define SKERR_HWI_E005 (SKERR_HWI_E004+1) +#define SKERR_HWI_E005MSG "SkGeInitPort(): cannot init running ports" +#define SKERR_HWI_E006 (SKERR_HWI_E005+1) +#define SKERR_HWI_E006MSG "SkGeMacInit(): PState does not match HW state" +#define SKERR_HWI_E007 (SKERR_HWI_E006+1) +#define SKERR_HWI_E007MSG "SkXmInitDupMd() called with invalid Dup Mode" +#define SKERR_HWI_E008 (SKERR_HWI_E007+1) +#define SKERR_HWI_E008MSG "SkXmSetRxCmd() called with invalid Mode" +#define SKERR_HWI_E009 (SKERR_HWI_E008+1) +#define SKERR_HWI_E009MSG "SkGeCfgSync() called although PXSQSize zero" +#define SKERR_HWI_E010 (SKERR_HWI_E009+1) +#define SKERR_HWI_E010MSG "SkGeCfgSync() called with invalid parameters" +#define SKERR_HWI_E011 (SKERR_HWI_E010+1) +#define SKERR_HWI_E011MSG "SkGeInitPort(): Receive Queue Size too small" +#define SKERR_HWI_E012 (SKERR_HWI_E011+1) +#define SKERR_HWI_E012MSG "SkGeInitPort(): invalid Queue Size specified" +#define SKERR_HWI_E013 (SKERR_HWI_E012+1) +#define SKERR_HWI_E013MSG "SkGeInitPort(): cfg changed for running queue" +#define SKERR_HWI_E014 (SKERR_HWI_E013+1) +#define SKERR_HWI_E014MSG "SkGeInitPort(): unknown GIPortUsage specified" +#define SKERR_HWI_E015 (SKERR_HWI_E014+1) +#define SKERR_HWI_E015MSG "Illegal Link mode parameter" +#define SKERR_HWI_E016 (SKERR_HWI_E015+1) +#define SKERR_HWI_E016MSG "Illegal Flow control mode parameter" +#define SKERR_HWI_E017 (SKERR_HWI_E016+1) +#define SKERR_HWI_E017MSG "Illegal value specified for GIPollTimerVal" +#define SKERR_HWI_E018 (SKERR_HWI_E017+1) +#define SKERR_HWI_E018MSG "FATAL: SkGeStopPort() does not terminate (Tx)" +#define SKERR_HWI_E019 (SKERR_HWI_E018+1) +#define SKERR_HWI_E019MSG "Illegal Speed parameter" +#define SKERR_HWI_E020 (SKERR_HWI_E019+1) +#define SKERR_HWI_E020MSG "Illegal Master/Slave parameter" +#define SKERR_HWI_E021 (SKERR_HWI_E020+1) +#define SKERR_HWI_E021MSG "MacUpdateStats(): cannot update statistic counter" +#define SKERR_HWI_E022 (SKERR_HWI_E021+1) +#define SKERR_HWI_E022MSG "MacStatistic(): illegal statistic base address" +#define SKERR_HWI_E023 (SKERR_HWI_E022+1) +#define SKERR_HWI_E023MSG "SkGeInitPort(): Transmit Queue Size too small" +#define SKERR_HWI_E024 (SKERR_HWI_E023+1) +#define SKERR_HWI_E024MSG "FATAL: SkGeStopPort() does not terminate (Rx)" +#define SKERR_HWI_E025 (SKERR_HWI_E024+1) +#define SKERR_HWI_E025MSG "" + +/* function prototypes ********************************************************/ + +#ifndef SK_KR_PROTO + +/* + * public functions in skgeinit.c + */ +extern void SkGePollTxD( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL PollTxD); + +extern void SkGeYellowLED( + SK_AC *pAC, + SK_IOC IoC, + int State); + +extern int SkGeCfgSync( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_U32 
IntTime, + SK_U32 LimCount, + int SyncMode); + +extern void SkGeLoadLnkSyncCnt( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_U32 CntVal); + +extern void SkGeStopPort( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Dir, + int RstMode); + +extern int SkGeInit( + SK_AC *pAC, + SK_IOC IoC, + int Level); + +extern void SkGeDeInit( + SK_AC *pAC, + SK_IOC IoC); + +extern int SkGeInitPort( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkGeXmitLED( + SK_AC *pAC, + SK_IOC IoC, + int Led, + int Mode); + +extern int SkGeInitAssignRamToQueues( + SK_AC *pAC, + int ActivePort, + SK_BOOL DualNet); + +/* + * public functions in skxmac2.c + */ +extern void SkMacRxTxDisable( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacSoftRst( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacHardRst( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkXmInitMac( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkGmInitMac( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacInitPhy( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL DoLoop); + +extern void SkMacIrqDisable( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacFlushTxFifo( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacIrq( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern int SkMacAutoNegDone( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacAutoNegLipaPhy( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_U16 IStatus); + +extern int SkMacRxTxEnable( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacPromiscMode( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); + +extern void SkMacHashing( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); + +extern void SkXmPhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 SK_FAR *pVal); + +extern void SkXmPhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + +extern void SkGmPhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 SK_FAR *pVal); + +extern void SkGmPhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + +extern void SkXmClrExactAddr( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int StartNum, + int StopNum); + +extern void SkXmAutoNegLipaXmac( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_U16 IStatus); + +extern int SkXmUpdateStats( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkGmUpdateStats( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkXmMacStatistic( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 StatAddr, + SK_U32 SK_FAR *pVal); + +extern int SkGmMacStatistic( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 StatAddr, + SK_U32 SK_FAR *pVal); + +extern int SkXmResetCounter( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkGmResetCounter( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkXmOverflowStatus( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 IStatus, + SK_U64 SK_FAR *pStatus); + +extern int SkGmOverflowStatus( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 MacStatus, + SK_U64 SK_FAR *pStatus); + +extern int SkGmCableDiagStatus( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL StartTest); + +#ifdef SK_DIAG +extern void SkGePhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 *pVal); + +extern void SkGePhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + +extern void SkMacSetRxCmd( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Mode); +extern void 
SkMacCrcGener( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); +extern void SkMacTimeStamp( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); +extern void SkXmSendCont( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); +#endif /* SK_DIAG */ + +#else /* SK_KR_PROTO */ + +/* + * public functions in skgeinit.c + */ +extern void SkGePollTxD(); +extern void SkGeYellowLED(); +extern int SkGeCfgSync(); +extern void SkGeLoadLnkSyncCnt(); +extern void SkGeStopPort(); +extern int SkGeInit(); +extern void SkGeDeInit(); +extern int SkGeInitPort(); +extern void SkGeXmitLED(); +extern int SkGeInitAssignRamToQueues(); + +/* + * public functions in skxmac2.c + */ +extern void SkMacRxTxDisable(); +extern void SkMacSoftRst(); +extern void SkMacHardRst(); +extern void SkMacInitPhy(); +extern int SkMacRxTxEnable(); +extern void SkMacPromiscMode(); +extern void SkMacHashing(); +extern void SkMacIrqDisable(); +extern void SkMacFlushTxFifo(); +extern void SkMacIrq(); +extern int SkMacAutoNegDone(); +extern void SkMacAutoNegLipaPhy(); +extern void SkXmInitMac(); +extern void SkXmPhyRead(); +extern void SkXmPhyWrite(); +extern void SkGmInitMac(); +extern void SkGmPhyRead(); +extern void SkGmPhyWrite(); +extern void SkXmClrExactAddr(); +extern void SkXmAutoNegLipaXmac(); +extern int SkXmUpdateStats(); +extern int SkGmUpdateStats(); +extern int SkXmMacStatistic(); +extern int SkGmMacStatistic(); +extern int SkXmResetCounter(); +extern int SkGmResetCounter(); +extern int SkXmOverflowStatus(); +extern int SkGmOverflowStatus(); +extern int SkGmCableDiagStatus(); + +#ifdef SK_DIAG +extern void SkGePhyRead(); +extern void SkGePhyWrite(); +extern void SkMacSetRxCmd(); +extern void SkMacCrcGener(); +extern void SkMacTimeStamp(); +extern void SkXmSendCont(); +#endif /* SK_DIAG */ + +#endif /* SK_KR_PROTO */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INC_SKGEINIT_H_ */ diff --git a/drivers/net/sk98lin/h/skgepnm2.h b/drivers/net/sk98lin/h/skgepnm2.h new file mode 100644 index 000000000000..ddd304f1a48b --- /dev/null +++ b/drivers/net/sk98lin/h/skgepnm2.h @@ -0,0 +1,334 @@ +/***************************************************************************** + * + * Name: skgepnm2.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.36 $ + * Date: $Date: 2003/05/23 12:45:13 $ + * Purpose: Defines for Private Network Management Interface + * + ****************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +#ifndef _SKGEPNM2_H_ +#define _SKGEPNM2_H_ + +/* + * General definitions + */ +#define SK_PNMI_CHIPSET_XMAC 1 /* XMAC11800FP */ +#define SK_PNMI_CHIPSET_YUKON 2 /* YUKON */ + +#define SK_PNMI_BUS_PCI 1 /* PCI bus*/ + +/* + * Actions + */ +#define SK_PNMI_ACT_IDLE 1 +#define SK_PNMI_ACT_RESET 2 +#define SK_PNMI_ACT_SELFTEST 3 +#define SK_PNMI_ACT_RESETCNT 4 + +/* + * VPD releated defines + */ + +#define SK_PNMI_VPD_RW 1 +#define SK_PNMI_VPD_RO 2 + +#define SK_PNMI_VPD_OK 0 +#define SK_PNMI_VPD_NOTFOUND 1 +#define SK_PNMI_VPD_CUT 2 +#define SK_PNMI_VPD_TIMEOUT 3 +#define SK_PNMI_VPD_FULL 4 +#define SK_PNMI_VPD_NOWRITE 5 +#define SK_PNMI_VPD_FATAL 6 + +#define SK_PNMI_VPD_IGNORE 0 +#define SK_PNMI_VPD_CREATE 1 +#define SK_PNMI_VPD_DELETE 2 + + +/* + * RLMT related defines + */ +#define SK_PNMI_DEF_RLMT_CHG_THRES 240 /* 4 changes per minute */ + + +/* + * VCT internal status values + */ +#define SK_PNMI_VCT_PENDING 32 +#define SK_PNMI_VCT_TEST_DONE 64 +#define SK_PNMI_VCT_LINK 128 + +/* + * Internal table definitions + */ +#define SK_PNMI_GET 0 +#define SK_PNMI_PRESET 1 +#define SK_PNMI_SET 2 + +#define SK_PNMI_RO 0 +#define SK_PNMI_RW 1 +#define SK_PNMI_WO 2 + +typedef struct s_OidTabEntry { + SK_U32 Id; + SK_U32 InstanceNo; + unsigned int StructSize; + unsigned int Offset; + int Access; + int (* Func)(SK_AC *pAc, SK_IOC pIo, int action, + SK_U32 Id, char* pBuf, unsigned int* pLen, + SK_U32 Instance, unsigned int TableIndex, + SK_U32 NetNumber); + SK_U16 Param; +} SK_PNMI_TAB_ENTRY; + + +/* + * Trap lengths + */ +#define SK_PNMI_TRAP_SIMPLE_LEN 17 +#define SK_PNMI_TRAP_SENSOR_LEN_BASE 46 +#define SK_PNMI_TRAP_RLMT_CHANGE_LEN 23 +#define SK_PNMI_TRAP_RLMT_PORT_LEN 23 + +/* + * Number of MAC types supported + */ +#define SK_PNMI_MAC_TYPES (SK_MAC_GMAC + 1) + +/* + * MAC statistic data list (overall set for MAC types used) + */ +enum SK_MACSTATS { + SK_PNMI_HTX = 0, + SK_PNMI_HTX_OCTET, + SK_PNMI_HTX_OCTETHIGH = SK_PNMI_HTX_OCTET, + SK_PNMI_HTX_OCTETLOW, + SK_PNMI_HTX_BROADCAST, + SK_PNMI_HTX_MULTICAST, + SK_PNMI_HTX_UNICAST, + SK_PNMI_HTX_BURST, + SK_PNMI_HTX_PMACC, + SK_PNMI_HTX_MACC, + SK_PNMI_HTX_COL, + SK_PNMI_HTX_SINGLE_COL, + SK_PNMI_HTX_MULTI_COL, + SK_PNMI_HTX_EXCESS_COL, + SK_PNMI_HTX_LATE_COL, + SK_PNMI_HTX_DEFFERAL, + SK_PNMI_HTX_EXCESS_DEF, + SK_PNMI_HTX_UNDERRUN, + SK_PNMI_HTX_CARRIER, + SK_PNMI_HTX_UTILUNDER, + SK_PNMI_HTX_UTILOVER, + SK_PNMI_HTX_64, + SK_PNMI_HTX_127, + SK_PNMI_HTX_255, + SK_PNMI_HTX_511, + SK_PNMI_HTX_1023, + SK_PNMI_HTX_MAX, + SK_PNMI_HTX_LONGFRAMES, + SK_PNMI_HTX_SYNC, + SK_PNMI_HTX_SYNC_OCTET, + SK_PNMI_HTX_RESERVED, + + SK_PNMI_HRX, + SK_PNMI_HRX_OCTET, + SK_PNMI_HRX_OCTETHIGH = SK_PNMI_HRX_OCTET, + SK_PNMI_HRX_OCTETLOW, + SK_PNMI_HRX_BADOCTET, + SK_PNMI_HRX_BADOCTETHIGH = SK_PNMI_HRX_BADOCTET, + SK_PNMI_HRX_BADOCTETLOW, + SK_PNMI_HRX_BROADCAST, + SK_PNMI_HRX_MULTICAST, + SK_PNMI_HRX_UNICAST, + SK_PNMI_HRX_PMACC, + SK_PNMI_HRX_MACC, + SK_PNMI_HRX_PMACC_ERR, + SK_PNMI_HRX_MACC_UNKWN, + SK_PNMI_HRX_BURST, + SK_PNMI_HRX_MISSED, + SK_PNMI_HRX_FRAMING, + SK_PNMI_HRX_UNDERSIZE, + SK_PNMI_HRX_OVERFLOW, + SK_PNMI_HRX_JABBER, + SK_PNMI_HRX_CARRIER, + SK_PNMI_HRX_IRLENGTH, + SK_PNMI_HRX_SYMBOL, + SK_PNMI_HRX_SHORTS, + SK_PNMI_HRX_RUNT, + SK_PNMI_HRX_TOO_LONG, + SK_PNMI_HRX_FCS, + SK_PNMI_HRX_CEXT, + SK_PNMI_HRX_UTILUNDER, + SK_PNMI_HRX_UTILOVER, + SK_PNMI_HRX_64, + SK_PNMI_HRX_127, + SK_PNMI_HRX_255, + SK_PNMI_HRX_511, + SK_PNMI_HRX_1023, + SK_PNMI_HRX_MAX, + 
SK_PNMI_HRX_LONGFRAMES, + + SK_PNMI_HRX_RESERVED, + + SK_PNMI_MAX_IDX /* NOTE: Ensure SK_PNMI_CNT_NO is set to this value */ +}; + +/* + * MAC specific data + */ +typedef struct s_PnmiStatAddr { + SK_U16 Reg; /* MAC register containing the value */ + SK_BOOL GetOffset; /* TRUE: Offset managed by PNMI (call GetStatVal())*/ +} SK_PNMI_STATADDR; + + +/* + * SK_PNMI_STRUCT_DATA copy offset evaluation macros + */ +#define SK_PNMI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e)) +#define SK_PNMI_MAI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e)) +#define SK_PNMI_VPD_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_VPD *)0)->e)) +#define SK_PNMI_SEN_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_SENSOR *)0)->e)) +#define SK_PNMI_CHK_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CHECKSUM *)0)->e)) +#define SK_PNMI_STA_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STAT *)0)->e)) +#define SK_PNMI_CNF_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CONF *)0)->e)) +#define SK_PNMI_RLM_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT *)0)->e)) +#define SK_PNMI_MON_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT_MONITOR *)0)->e)) +#define SK_PNMI_TRP_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_TRAP *)0)->e)) + +#define SK_PNMI_SET_STAT(b,s,o) {SK_U32 Val32; char *pVal; \ + Val32 = (s); \ + pVal = (char *)(b) + ((SK_U32)(SK_UPTR) \ + &(((SK_PNMI_STRUCT_DATA *)0)-> \ + ReturnStatus.ErrorStatus)); \ + SK_PNMI_STORE_U32(pVal, Val32); \ + Val32 = (o); \ + pVal = (char *)(b) + ((SK_U32)(SK_UPTR) \ + &(((SK_PNMI_STRUCT_DATA *)0)-> \ + ReturnStatus.ErrorOffset)); \ + SK_PNMI_STORE_U32(pVal, Val32);} + +/* + * Time macros + */ +#ifndef SK_PNMI_HUNDREDS_SEC +#if SK_TICKS_PER_SEC == 100 +#define SK_PNMI_HUNDREDS_SEC(t) (t) +#else +#define SK_PNMI_HUNDREDS_SEC(t) (((t) * 100) / (SK_TICKS_PER_SEC)) +#endif /* !SK_TICKS_PER_SEC */ +#endif /* !SK_PNMI_HUNDREDS_SEC */ + +/* + * Macros to work around alignment problems + */ +#ifndef SK_PNMI_STORE_U16 +#define SK_PNMI_STORE_U16(p,v) {*(char *)(p) = *((char *)&(v)); \ + *((char *)(p) + 1) = \ + *(((char *)&(v)) + 1);} +#endif + +#ifndef SK_PNMI_STORE_U32 +#define SK_PNMI_STORE_U32(p,v) {*(char *)(p) = *((char *)&(v)); \ + *((char *)(p) + 1) = \ + *(((char *)&(v)) + 1); \ + *((char *)(p) + 2) = \ + *(((char *)&(v)) + 2); \ + *((char *)(p) + 3) = \ + *(((char *)&(v)) + 3);} +#endif + +#ifndef SK_PNMI_STORE_U64 +#define SK_PNMI_STORE_U64(p,v) {*(char *)(p) = *((char *)&(v)); \ + *((char *)(p) + 1) = \ + *(((char *)&(v)) + 1); \ + *((char *)(p) + 2) = \ + *(((char *)&(v)) + 2); \ + *((char *)(p) + 3) = \ + *(((char *)&(v)) + 3); \ + *((char *)(p) + 4) = \ + *(((char *)&(v)) + 4); \ + *((char *)(p) + 5) = \ + *(((char *)&(v)) + 5); \ + *((char *)(p) + 6) = \ + *(((char *)&(v)) + 6); \ + *((char *)(p) + 7) = \ + *(((char *)&(v)) + 7);} +#endif + +#ifndef SK_PNMI_READ_U16 +#define SK_PNMI_READ_U16(p,v) {*((char *)&(v)) = *(char *)(p); \ + *(((char *)&(v)) + 1) = \ + *((char *)(p) + 1);} +#endif + +#ifndef SK_PNMI_READ_U32 +#define SK_PNMI_READ_U32(p,v) {*((char *)&(v)) = *(char *)(p); \ + *(((char *)&(v)) + 1) = \ + *((char *)(p) + 1); \ + *(((char *)&(v)) + 2) = \ + *((char *)(p) + 2); \ + *(((char *)&(v)) + 3) = \ + *((char *)(p) + 3);} +#endif + +#ifndef SK_PNMI_READ_U64 +#define SK_PNMI_READ_U64(p,v) {*((char *)&(v)) = *(char *)(p); \ + *(((char *)&(v)) + 1) = \ + *((char *)(p) + 1); \ + *(((char *)&(v)) + 2) = \ + *((char *)(p) + 2); \ + *(((char *)&(v)) + 3) = \ + *((char *)(p) + 3); \ + *(((char *)&(v)) + 4) = \ + *((char *)(p) + 4); \ + *(((char *)&(v)) + 5) = \ + *((char *)(p) + 5); \ + *(((char *)&(v)) + 6) = \ + 
*((char *)(p) + 6); \ + *(((char *)&(v)) + 7) = \ + *((char *)(p) + 7);} +#endif + +/* + * Macros for Debug + */ +#ifdef DEBUG + +#define SK_PNMI_CHECKFLAGS(vSt) {if (pAC->Pnmi.MacUpdatedFlag > 0 || \ + pAC->Pnmi.RlmtUpdatedFlag > 0 || \ + pAC->Pnmi.SirqUpdatedFlag > 0) { \ + SK_DBG_MSG(pAC, \ + SK_DBGMOD_PNMI, \ + SK_DBGCAT_CTRL, \ + ("PNMI: ERR: %s MacUFlag=%d, RlmtUFlag=%d, SirqUFlag=%d\n", \ + vSt, \ + pAC->Pnmi.MacUpdatedFlag, \ + pAC->Pnmi.RlmtUpdatedFlag, \ + pAC->Pnmi.SirqUpdatedFlag))}} + +#else /* !DEBUG */ + +#define SK_PNMI_CHECKFLAGS(vSt) /* Nothing */ + +#endif /* !DEBUG */ + +#endif /* _SKGEPNM2_H_ */ diff --git a/drivers/net/sk98lin/h/skgepnmi.h b/drivers/net/sk98lin/h/skgepnmi.h new file mode 100644 index 000000000000..1ed214ccb253 --- /dev/null +++ b/drivers/net/sk98lin/h/skgepnmi.h @@ -0,0 +1,962 @@ +/***************************************************************************** + * + * Name: skgepnmi.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.62 $ + * Date: $Date: 2003/08/15 12:31:52 $ + * Purpose: Defines for Private Network Management Interface + * + ****************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef _SKGEPNMI_H_ +#define _SKGEPNMI_H_ + +/* + * Include dependencies + */ +#include "h/sktypes.h" +#include "h/skerror.h" +#include "h/sktimer.h" +#include "h/ski2c.h" +#include "h/skaddr.h" +#include "h/skrlmt.h" +#include "h/skvpd.h" + +/* + * Management Database Version + */ +#define SK_PNMI_MDB_VERSION 0x00030001 /* 3.1 */ + + +/* + * Event definitions + */ +#define SK_PNMI_EVT_SIRQ_OVERFLOW 1 /* Counter overflow */ +#define SK_PNMI_EVT_SEN_WAR_LOW 2 /* Lower war thres exceeded */ +#define SK_PNMI_EVT_SEN_WAR_UPP 3 /* Upper war thres exceeded */ +#define SK_PNMI_EVT_SEN_ERR_LOW 4 /* Lower err thres exceeded */ +#define SK_PNMI_EVT_SEN_ERR_UPP 5 /* Upper err thres exceeded */ +#define SK_PNMI_EVT_CHG_EST_TIMER 6 /* Timer event for RLMT Chg */ +#define SK_PNMI_EVT_UTILIZATION_TIMER 7 /* Timer event for Utiliza. */ +#define SK_PNMI_EVT_CLEAR_COUNTER 8 /* Clear statistic counters */ +#define SK_PNMI_EVT_XMAC_RESET 9 /* XMAC will be reset */ + +#define SK_PNMI_EVT_RLMT_PORT_UP 10 /* Port came logically up */ +#define SK_PNMI_EVT_RLMT_PORT_DOWN 11 /* Port went logically down */ +#define SK_PNMI_EVT_RLMT_SEGMENTATION 13 /* Two SP root bridges found */ +#define SK_PNMI_EVT_RLMT_ACTIVE_DOWN 14 /* Port went logically down */ +#define SK_PNMI_EVT_RLMT_ACTIVE_UP 15 /* Port came logically up */ +#define SK_PNMI_EVT_RLMT_SET_NETS 16 /* 1. Parameter is number of nets + 1 = single net; 2 = dual net */ +#define SK_PNMI_EVT_VCT_RESET 17 /* VCT port reset timer event started with SET. 
*/ + + +/* + * Return values + */ +#define SK_PNMI_ERR_OK 0 +#define SK_PNMI_ERR_GENERAL 1 +#define SK_PNMI_ERR_TOO_SHORT 2 +#define SK_PNMI_ERR_BAD_VALUE 3 +#define SK_PNMI_ERR_READ_ONLY 4 +#define SK_PNMI_ERR_UNKNOWN_OID 5 +#define SK_PNMI_ERR_UNKNOWN_INST 6 +#define SK_PNMI_ERR_UNKNOWN_NET 7 +#define SK_PNMI_ERR_NOT_SUPPORTED 10 + + +/* + * Return values of driver reset function SK_DRIVER_RESET() and + * driver event function SK_DRIVER_EVENT() + */ +#define SK_PNMI_ERR_OK 0 +#define SK_PNMI_ERR_FAIL 1 + + +/* + * Return values of driver test function SK_DRIVER_SELFTEST() + */ +#define SK_PNMI_TST_UNKNOWN (1 << 0) +#define SK_PNMI_TST_TRANCEIVER (1 << 1) +#define SK_PNMI_TST_ASIC (1 << 2) +#define SK_PNMI_TST_SENSOR (1 << 3) +#define SK_PNMI_TST_POWERMGMT (1 << 4) +#define SK_PNMI_TST_PCI (1 << 5) +#define SK_PNMI_TST_MAC (1 << 6) + + +/* + * RLMT specific definitions + */ +#define SK_PNMI_RLMT_STATUS_STANDBY 1 +#define SK_PNMI_RLMT_STATUS_ACTIVE 2 +#define SK_PNMI_RLMT_STATUS_ERROR 3 + +#define SK_PNMI_RLMT_LSTAT_PHY_DOWN 1 +#define SK_PNMI_RLMT_LSTAT_AUTONEG 2 +#define SK_PNMI_RLMT_LSTAT_LOG_DOWN 3 +#define SK_PNMI_RLMT_LSTAT_LOG_UP 4 +#define SK_PNMI_RLMT_LSTAT_INDETERMINATED 5 + +#define SK_PNMI_RLMT_MODE_CHK_LINK (SK_RLMT_CHECK_LINK) +#define SK_PNMI_RLMT_MODE_CHK_RX (SK_RLMT_CHECK_LOC_LINK) +#define SK_PNMI_RLMT_MODE_CHK_SPT (SK_RLMT_CHECK_SEG) +/* #define SK_PNMI_RLMT_MODE_CHK_EX */ + +/* + * OID definition + */ +#ifndef _NDIS_ /* Check, whether NDIS already included OIDs */ + +#define OID_GEN_XMIT_OK 0x00020101 +#define OID_GEN_RCV_OK 0x00020102 +#define OID_GEN_XMIT_ERROR 0x00020103 +#define OID_GEN_RCV_ERROR 0x00020104 +#define OID_GEN_RCV_NO_BUFFER 0x00020105 + +/* #define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 */ +#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 +/* #define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 */ +#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204 +/* #define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 */ +#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 +/* #define OID_GEN_DIRECTED_BYTES_RCV 0x00020207 */ +#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 +/* #define OID_GEN_MULTICAST_BYTES_RCV 0x00020209 */ +#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A +/* #define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B */ +#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C +#define OID_GEN_RCV_CRC_ERROR 0x0002020D +#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E + +#define OID_802_3_PERMANENT_ADDRESS 0x01010101 +#define OID_802_3_CURRENT_ADDRESS 0x01010102 +/* #define OID_802_3_MULTICAST_LIST 0x01010103 */ +/* #define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 */ +/* #define OID_802_3_MAC_OPTIONS 0x01010105 */ + +#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 +#define OID_802_3_XMIT_ONE_COLLISION 0x01020102 +#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 +#define OID_802_3_XMIT_DEFERRED 0x01020201 +#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 +#define OID_802_3_RCV_OVERRUN 0x01020203 +#define OID_802_3_XMIT_UNDERRUN 0x01020204 +#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 +#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 + +/* + * PnP and PM OIDs + */ +#ifdef SK_POWER_MGMT +#define OID_PNP_CAPABILITIES 0xFD010100 +#define OID_PNP_SET_POWER 0xFD010101 +#define OID_PNP_QUERY_POWER 0xFD010102 +#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103 +#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104 +#define OID_PNP_ENABLE_WAKE_UP 0xFD010106 +#endif /* SK_POWER_MGMT */ + +#endif /* _NDIS_ */ + +#define OID_SKGE_MDB_VERSION 0xFF010100 +#define OID_SKGE_SUPPORTED_LIST 0xFF010101 
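/*
 * Editor's note -- hedged usage sketch, not part of the original diff.
 * It only illustrates the calling convention behind the OID_SKGE_*
 * identifiers in this header: a single read-write OID such as
 * OID_SKGE_RLMT_MODE (defined a few lines further down) is written
 * through SkPnmiSetVar(), whose prototype appears at the end of this
 * file, and the result is one of the SK_PNMI_ERR_* codes defined above.
 * The helper name, the instance value 1 and the net index 0 are
 * illustrative assumptions only.
 */
static int SkPnmiExampleSetRlmtMode(SK_AC *pAC, SK_IOC IoC, SK_U8 Mode)
{
	unsigned int Len = sizeof(SK_U8);	/* length of the value buffer */
	int Ret;

	Ret = SkPnmiSetVar(pAC, IoC, OID_SKGE_RLMT_MODE, (void *)&Mode,
		&Len, (SK_U32)1, (SK_U32)0);

	return (Ret == SK_PNMI_ERR_OK) ? 0 : 1;
}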
+#define OID_SKGE_VPD_FREE_BYTES 0xFF010102 +#define OID_SKGE_VPD_ENTRIES_LIST 0xFF010103 +#define OID_SKGE_VPD_ENTRIES_NUMBER 0xFF010104 +#define OID_SKGE_VPD_KEY 0xFF010105 +#define OID_SKGE_VPD_VALUE 0xFF010106 +#define OID_SKGE_VPD_ACCESS 0xFF010107 +#define OID_SKGE_VPD_ACTION 0xFF010108 + +#define OID_SKGE_PORT_NUMBER 0xFF010110 +#define OID_SKGE_DEVICE_TYPE 0xFF010111 +#define OID_SKGE_DRIVER_DESCR 0xFF010112 +#define OID_SKGE_DRIVER_VERSION 0xFF010113 +#define OID_SKGE_HW_DESCR 0xFF010114 +#define OID_SKGE_HW_VERSION 0xFF010115 +#define OID_SKGE_CHIPSET 0xFF010116 +#define OID_SKGE_ACTION 0xFF010117 +#define OID_SKGE_RESULT 0xFF010118 +#define OID_SKGE_BUS_TYPE 0xFF010119 +#define OID_SKGE_BUS_SPEED 0xFF01011A +#define OID_SKGE_BUS_WIDTH 0xFF01011B +/* 0xFF01011C unused */ +#define OID_SKGE_DIAG_ACTION 0xFF01011D +#define OID_SKGE_DIAG_RESULT 0xFF01011E +#define OID_SKGE_MTU 0xFF01011F +#define OID_SKGE_PHYS_CUR_ADDR 0xFF010120 +#define OID_SKGE_PHYS_FAC_ADDR 0xFF010121 +#define OID_SKGE_PMD 0xFF010122 +#define OID_SKGE_CONNECTOR 0xFF010123 +#define OID_SKGE_LINK_CAP 0xFF010124 +#define OID_SKGE_LINK_MODE 0xFF010125 +#define OID_SKGE_LINK_MODE_STATUS 0xFF010126 +#define OID_SKGE_LINK_STATUS 0xFF010127 +#define OID_SKGE_FLOWCTRL_CAP 0xFF010128 +#define OID_SKGE_FLOWCTRL_MODE 0xFF010129 +#define OID_SKGE_FLOWCTRL_STATUS 0xFF01012A +#define OID_SKGE_PHY_OPERATION_CAP 0xFF01012B +#define OID_SKGE_PHY_OPERATION_MODE 0xFF01012C +#define OID_SKGE_PHY_OPERATION_STATUS 0xFF01012D +#define OID_SKGE_MULTICAST_LIST 0xFF01012E +#define OID_SKGE_CURRENT_PACKET_FILTER 0xFF01012F + +#define OID_SKGE_TRAP 0xFF010130 +#define OID_SKGE_TRAP_NUMBER 0xFF010131 + +#define OID_SKGE_RLMT_MODE 0xFF010140 +#define OID_SKGE_RLMT_PORT_NUMBER 0xFF010141 +#define OID_SKGE_RLMT_PORT_ACTIVE 0xFF010142 +#define OID_SKGE_RLMT_PORT_PREFERRED 0xFF010143 +#define OID_SKGE_INTERMEDIATE_SUPPORT 0xFF010160 + +#define OID_SKGE_SPEED_CAP 0xFF010170 +#define OID_SKGE_SPEED_MODE 0xFF010171 +#define OID_SKGE_SPEED_STATUS 0xFF010172 + +#define OID_SKGE_BOARDLEVEL 0xFF010180 + +#define OID_SKGE_SENSOR_NUMBER 0xFF020100 +#define OID_SKGE_SENSOR_INDEX 0xFF020101 +#define OID_SKGE_SENSOR_DESCR 0xFF020102 +#define OID_SKGE_SENSOR_TYPE 0xFF020103 +#define OID_SKGE_SENSOR_VALUE 0xFF020104 +#define OID_SKGE_SENSOR_WAR_THRES_LOW 0xFF020105 +#define OID_SKGE_SENSOR_WAR_THRES_UPP 0xFF020106 +#define OID_SKGE_SENSOR_ERR_THRES_LOW 0xFF020107 +#define OID_SKGE_SENSOR_ERR_THRES_UPP 0xFF020108 +#define OID_SKGE_SENSOR_STATUS 0xFF020109 +#define OID_SKGE_SENSOR_WAR_CTS 0xFF02010A +#define OID_SKGE_SENSOR_ERR_CTS 0xFF02010B +#define OID_SKGE_SENSOR_WAR_TIME 0xFF02010C +#define OID_SKGE_SENSOR_ERR_TIME 0xFF02010D + +#define OID_SKGE_CHKSM_NUMBER 0xFF020110 +#define OID_SKGE_CHKSM_RX_OK_CTS 0xFF020111 +#define OID_SKGE_CHKSM_RX_UNABLE_CTS 0xFF020112 +#define OID_SKGE_CHKSM_RX_ERR_CTS 0xFF020113 +#define OID_SKGE_CHKSM_TX_OK_CTS 0xFF020114 +#define OID_SKGE_CHKSM_TX_UNABLE_CTS 0xFF020115 + +#define OID_SKGE_STAT_TX 0xFF020120 +#define OID_SKGE_STAT_TX_OCTETS 0xFF020121 +#define OID_SKGE_STAT_TX_BROADCAST 0xFF020122 +#define OID_SKGE_STAT_TX_MULTICAST 0xFF020123 +#define OID_SKGE_STAT_TX_UNICAST 0xFF020124 +#define OID_SKGE_STAT_TX_LONGFRAMES 0xFF020125 +#define OID_SKGE_STAT_TX_BURST 0xFF020126 +#define OID_SKGE_STAT_TX_PFLOWC 0xFF020127 +#define OID_SKGE_STAT_TX_FLOWC 0xFF020128 +#define OID_SKGE_STAT_TX_SINGLE_COL 0xFF020129 +#define OID_SKGE_STAT_TX_MULTI_COL 0xFF02012A +#define OID_SKGE_STAT_TX_EXCESS_COL 0xFF02012B +#define 
OID_SKGE_STAT_TX_LATE_COL 0xFF02012C +#define OID_SKGE_STAT_TX_DEFFERAL 0xFF02012D +#define OID_SKGE_STAT_TX_EXCESS_DEF 0xFF02012E +#define OID_SKGE_STAT_TX_UNDERRUN 0xFF02012F +#define OID_SKGE_STAT_TX_CARRIER 0xFF020130 +/* #define OID_SKGE_STAT_TX_UTIL 0xFF020131 */ +#define OID_SKGE_STAT_TX_64 0xFF020132 +#define OID_SKGE_STAT_TX_127 0xFF020133 +#define OID_SKGE_STAT_TX_255 0xFF020134 +#define OID_SKGE_STAT_TX_511 0xFF020135 +#define OID_SKGE_STAT_TX_1023 0xFF020136 +#define OID_SKGE_STAT_TX_MAX 0xFF020137 +#define OID_SKGE_STAT_TX_SYNC 0xFF020138 +#define OID_SKGE_STAT_TX_SYNC_OCTETS 0xFF020139 +#define OID_SKGE_STAT_RX 0xFF02013A +#define OID_SKGE_STAT_RX_OCTETS 0xFF02013B +#define OID_SKGE_STAT_RX_BROADCAST 0xFF02013C +#define OID_SKGE_STAT_RX_MULTICAST 0xFF02013D +#define OID_SKGE_STAT_RX_UNICAST 0xFF02013E +#define OID_SKGE_STAT_RX_PFLOWC 0xFF02013F +#define OID_SKGE_STAT_RX_FLOWC 0xFF020140 +#define OID_SKGE_STAT_RX_PFLOWC_ERR 0xFF020141 +#define OID_SKGE_STAT_RX_FLOWC_UNKWN 0xFF020142 +#define OID_SKGE_STAT_RX_BURST 0xFF020143 +#define OID_SKGE_STAT_RX_MISSED 0xFF020144 +#define OID_SKGE_STAT_RX_FRAMING 0xFF020145 +#define OID_SKGE_STAT_RX_OVERFLOW 0xFF020146 +#define OID_SKGE_STAT_RX_JABBER 0xFF020147 +#define OID_SKGE_STAT_RX_CARRIER 0xFF020148 +#define OID_SKGE_STAT_RX_IR_LENGTH 0xFF020149 +#define OID_SKGE_STAT_RX_SYMBOL 0xFF02014A +#define OID_SKGE_STAT_RX_SHORTS 0xFF02014B +#define OID_SKGE_STAT_RX_RUNT 0xFF02014C +#define OID_SKGE_STAT_RX_CEXT 0xFF02014D +#define OID_SKGE_STAT_RX_TOO_LONG 0xFF02014E +#define OID_SKGE_STAT_RX_FCS 0xFF02014F +/* #define OID_SKGE_STAT_RX_UTIL 0xFF020150 */ +#define OID_SKGE_STAT_RX_64 0xFF020151 +#define OID_SKGE_STAT_RX_127 0xFF020152 +#define OID_SKGE_STAT_RX_255 0xFF020153 +#define OID_SKGE_STAT_RX_511 0xFF020154 +#define OID_SKGE_STAT_RX_1023 0xFF020155 +#define OID_SKGE_STAT_RX_MAX 0xFF020156 +#define OID_SKGE_STAT_RX_LONGFRAMES 0xFF020157 + +#define OID_SKGE_RLMT_CHANGE_CTS 0xFF020160 +#define OID_SKGE_RLMT_CHANGE_TIME 0xFF020161 +#define OID_SKGE_RLMT_CHANGE_ESTIM 0xFF020162 +#define OID_SKGE_RLMT_CHANGE_THRES 0xFF020163 + +#define OID_SKGE_RLMT_PORT_INDEX 0xFF020164 +#define OID_SKGE_RLMT_STATUS 0xFF020165 +#define OID_SKGE_RLMT_TX_HELLO_CTS 0xFF020166 +#define OID_SKGE_RLMT_RX_HELLO_CTS 0xFF020167 +#define OID_SKGE_RLMT_TX_SP_REQ_CTS 0xFF020168 +#define OID_SKGE_RLMT_RX_SP_CTS 0xFF020169 + +#define OID_SKGE_RLMT_MONITOR_NUMBER 0xFF010150 +#define OID_SKGE_RLMT_MONITOR_INDEX 0xFF010151 +#define OID_SKGE_RLMT_MONITOR_ADDR 0xFF010152 +#define OID_SKGE_RLMT_MONITOR_ERRS 0xFF010153 +#define OID_SKGE_RLMT_MONITOR_TIMESTAMP 0xFF010154 +#define OID_SKGE_RLMT_MONITOR_ADMIN 0xFF010155 + +#define OID_SKGE_TX_SW_QUEUE_LEN 0xFF020170 +#define OID_SKGE_TX_SW_QUEUE_MAX 0xFF020171 +#define OID_SKGE_TX_RETRY 0xFF020172 +#define OID_SKGE_RX_INTR_CTS 0xFF020173 +#define OID_SKGE_TX_INTR_CTS 0xFF020174 +#define OID_SKGE_RX_NO_BUF_CTS 0xFF020175 +#define OID_SKGE_TX_NO_BUF_CTS 0xFF020176 +#define OID_SKGE_TX_USED_DESCR_NO 0xFF020177 +#define OID_SKGE_RX_DELIVERED_CTS 0xFF020178 +#define OID_SKGE_RX_OCTETS_DELIV_CTS 0xFF020179 +#define OID_SKGE_RX_HW_ERROR_CTS 0xFF02017A +#define OID_SKGE_TX_HW_ERROR_CTS 0xFF02017B +#define OID_SKGE_IN_ERRORS_CTS 0xFF02017C +#define OID_SKGE_OUT_ERROR_CTS 0xFF02017D +#define OID_SKGE_ERR_RECOVERY_CTS 0xFF02017E +#define OID_SKGE_SYSUPTIME 0xFF02017F + +#define OID_SKGE_ALL_DATA 0xFF020190 + +/* Defines for VCT. 
*/ +#define OID_SKGE_VCT_GET 0xFF020200 +#define OID_SKGE_VCT_SET 0xFF020201 +#define OID_SKGE_VCT_STATUS 0xFF020202 + +#ifdef SK_DIAG_SUPPORT +/* Defines for driver DIAG mode. */ +#define OID_SKGE_DIAG_MODE 0xFF020204 +#endif /* SK_DIAG_SUPPORT */ + +/* New OIDs */ +#define OID_SKGE_DRIVER_RELDATE 0xFF020210 +#define OID_SKGE_DRIVER_FILENAME 0xFF020211 +#define OID_SKGE_CHIPID 0xFF020212 +#define OID_SKGE_RAMSIZE 0xFF020213 +#define OID_SKGE_VAUXAVAIL 0xFF020214 +#define OID_SKGE_PHY_TYPE 0xFF020215 +#define OID_SKGE_PHY_LP_MODE 0xFF020216 + +/* VCT struct to store a backup copy of VCT data after a port reset. */ +typedef struct s_PnmiVct { + SK_U8 VctStatus; + SK_U8 PCableLen; + SK_U32 PMdiPairLen[4]; + SK_U8 PMdiPairSts[4]; +} SK_PNMI_VCT; + + +/* VCT status values (to be given to CPA via OID_SKGE_VCT_STATUS). */ +#define SK_PNMI_VCT_NONE 0 +#define SK_PNMI_VCT_OLD_VCT_DATA 1 +#define SK_PNMI_VCT_NEW_VCT_DATA 2 +#define SK_PNMI_VCT_OLD_DSP_DATA 4 +#define SK_PNMI_VCT_NEW_DSP_DATA 8 +#define SK_PNMI_VCT_RUNNING 16 + + +/* VCT cable test status. */ +#define SK_PNMI_VCT_NORMAL_CABLE 0 +#define SK_PNMI_VCT_SHORT_CABLE 1 +#define SK_PNMI_VCT_OPEN_CABLE 2 +#define SK_PNMI_VCT_TEST_FAIL 3 +#define SK_PNMI_VCT_IMPEDANCE_MISMATCH 4 + +#define OID_SKGE_TRAP_SEN_WAR_LOW 500 +#define OID_SKGE_TRAP_SEN_WAR_UPP 501 +#define OID_SKGE_TRAP_SEN_ERR_LOW 502 +#define OID_SKGE_TRAP_SEN_ERR_UPP 503 +#define OID_SKGE_TRAP_RLMT_CHANGE_THRES 520 +#define OID_SKGE_TRAP_RLMT_CHANGE_PORT 521 +#define OID_SKGE_TRAP_RLMT_PORT_DOWN 522 +#define OID_SKGE_TRAP_RLMT_PORT_UP 523 +#define OID_SKGE_TRAP_RLMT_SEGMENTATION 524 + +#ifdef SK_DIAG_SUPPORT +/* Defines for driver DIAG mode. */ +#define SK_DIAG_ATTACHED 2 +#define SK_DIAG_RUNNING 1 +#define SK_DIAG_IDLE 0 +#endif /* SK_DIAG_SUPPORT */ + +/* + * Generic PNMI IOCTL subcommand definitions. 
+ */ +#define SK_GET_SINGLE_VAR 1 +#define SK_SET_SINGLE_VAR 2 +#define SK_PRESET_SINGLE_VAR 3 +#define SK_GET_FULL_MIB 4 +#define SK_SET_FULL_MIB 5 +#define SK_PRESET_FULL_MIB 6 + + +/* + * Define error numbers and messages for syslog + */ +#define SK_PNMI_ERR001 (SK_ERRBASE_PNMI + 1) +#define SK_PNMI_ERR001MSG "SkPnmiGetStruct: Unknown OID" +#define SK_PNMI_ERR002 (SK_ERRBASE_PNMI + 2) +#define SK_PNMI_ERR002MSG "SkPnmiGetStruct: Cannot read VPD keys" +#define SK_PNMI_ERR003 (SK_ERRBASE_PNMI + 3) +#define SK_PNMI_ERR003MSG "OidStruct: Called with wrong OID" +#define SK_PNMI_ERR004 (SK_ERRBASE_PNMI + 4) +#define SK_PNMI_ERR004MSG "OidStruct: Called with wrong action" +#define SK_PNMI_ERR005 (SK_ERRBASE_PNMI + 5) +#define SK_PNMI_ERR005MSG "Perform: Cannot reset driver" +#define SK_PNMI_ERR006 (SK_ERRBASE_PNMI + 6) +#define SK_PNMI_ERR006MSG "Perform: Unknown OID action command" +#define SK_PNMI_ERR007 (SK_ERRBASE_PNMI + 7) +#define SK_PNMI_ERR007MSG "General: Driver description not initialized" +#define SK_PNMI_ERR008 (SK_ERRBASE_PNMI + 8) +#define SK_PNMI_ERR008MSG "Addr: Tried to get unknown OID" +#define SK_PNMI_ERR009 (SK_ERRBASE_PNMI + 9) +#define SK_PNMI_ERR009MSG "Addr: Unknown OID" +#define SK_PNMI_ERR010 (SK_ERRBASE_PNMI + 10) +#define SK_PNMI_ERR010MSG "CsumStat: Unknown OID" +#define SK_PNMI_ERR011 (SK_ERRBASE_PNMI + 11) +#define SK_PNMI_ERR011MSG "SensorStat: Sensor descr string too long" +#define SK_PNMI_ERR012 (SK_ERRBASE_PNMI + 12) +#define SK_PNMI_ERR012MSG "SensorStat: Unknown OID" +#define SK_PNMI_ERR013 (SK_ERRBASE_PNMI + 13) +#define SK_PNMI_ERR013MSG "" +#define SK_PNMI_ERR014 (SK_ERRBASE_PNMI + 14) +#define SK_PNMI_ERR014MSG "Vpd: Cannot read VPD keys" +#define SK_PNMI_ERR015 (SK_ERRBASE_PNMI + 15) +#define SK_PNMI_ERR015MSG "Vpd: Internal array for VPD keys to small" +#define SK_PNMI_ERR016 (SK_ERRBASE_PNMI + 16) +#define SK_PNMI_ERR016MSG "Vpd: Key string too long" +#define SK_PNMI_ERR017 (SK_ERRBASE_PNMI + 17) +#define SK_PNMI_ERR017MSG "Vpd: Invalid VPD status pointer" +#define SK_PNMI_ERR018 (SK_ERRBASE_PNMI + 18) +#define SK_PNMI_ERR018MSG "Vpd: VPD data not valid" +#define SK_PNMI_ERR019 (SK_ERRBASE_PNMI + 19) +#define SK_PNMI_ERR019MSG "Vpd: VPD entries list string too long" +#define SK_PNMI_ERR021 (SK_ERRBASE_PNMI + 21) +#define SK_PNMI_ERR021MSG "Vpd: VPD data string too long" +#define SK_PNMI_ERR022 (SK_ERRBASE_PNMI + 22) +#define SK_PNMI_ERR022MSG "Vpd: VPD data string too long should be errored before" +#define SK_PNMI_ERR023 (SK_ERRBASE_PNMI + 23) +#define SK_PNMI_ERR023MSG "Vpd: Unknown OID in get action" +#define SK_PNMI_ERR024 (SK_ERRBASE_PNMI + 24) +#define SK_PNMI_ERR024MSG "Vpd: Unknown OID in preset/set action" +#define SK_PNMI_ERR025 (SK_ERRBASE_PNMI + 25) +#define SK_PNMI_ERR025MSG "Vpd: Cannot write VPD after modify entry" +#define SK_PNMI_ERR026 (SK_ERRBASE_PNMI + 26) +#define SK_PNMI_ERR026MSG "Vpd: Cannot update VPD" +#define SK_PNMI_ERR027 (SK_ERRBASE_PNMI + 27) +#define SK_PNMI_ERR027MSG "Vpd: Cannot delete VPD entry" +#define SK_PNMI_ERR028 (SK_ERRBASE_PNMI + 28) +#define SK_PNMI_ERR028MSG "Vpd: Cannot update VPD after delete entry" +#define SK_PNMI_ERR029 (SK_ERRBASE_PNMI + 29) +#define SK_PNMI_ERR029MSG "General: Driver description string too long" +#define SK_PNMI_ERR030 (SK_ERRBASE_PNMI + 30) +#define SK_PNMI_ERR030MSG "General: Driver version not initialized" +#define SK_PNMI_ERR031 (SK_ERRBASE_PNMI + 31) +#define SK_PNMI_ERR031MSG "General: Driver version string too long" +#define SK_PNMI_ERR032 (SK_ERRBASE_PNMI + 32) +#define 
SK_PNMI_ERR032MSG "General: Cannot read VPD Name for HW descr" +#define SK_PNMI_ERR033 (SK_ERRBASE_PNMI + 33) +#define SK_PNMI_ERR033MSG "General: HW description string too long" +#define SK_PNMI_ERR034 (SK_ERRBASE_PNMI + 34) +#define SK_PNMI_ERR034MSG "General: Unknown OID" +#define SK_PNMI_ERR035 (SK_ERRBASE_PNMI + 35) +#define SK_PNMI_ERR035MSG "Rlmt: Unknown OID" +#define SK_PNMI_ERR036 (SK_ERRBASE_PNMI + 36) +#define SK_PNMI_ERR036MSG "" +#define SK_PNMI_ERR037 (SK_ERRBASE_PNMI + 37) +#define SK_PNMI_ERR037MSG "Rlmt: SK_RLMT_MODE_CHANGE event return not 0" +#define SK_PNMI_ERR038 (SK_ERRBASE_PNMI + 38) +#define SK_PNMI_ERR038MSG "Rlmt: SK_RLMT_PREFPORT_CHANGE event return not 0" +#define SK_PNMI_ERR039 (SK_ERRBASE_PNMI + 39) +#define SK_PNMI_ERR039MSG "RlmtStat: Unknown OID" +#define SK_PNMI_ERR040 (SK_ERRBASE_PNMI + 40) +#define SK_PNMI_ERR040MSG "PowerManagement: Unknown OID" +#define SK_PNMI_ERR041 (SK_ERRBASE_PNMI + 41) +#define SK_PNMI_ERR041MSG "MacPrivateConf: Unknown OID" +#define SK_PNMI_ERR042 (SK_ERRBASE_PNMI + 42) +#define SK_PNMI_ERR042MSG "MacPrivateConf: SK_HWEV_SET_ROLE returned not 0" +#define SK_PNMI_ERR043 (SK_ERRBASE_PNMI + 43) +#define SK_PNMI_ERR043MSG "MacPrivateConf: SK_HWEV_SET_LMODE returned not 0" +#define SK_PNMI_ERR044 (SK_ERRBASE_PNMI + 44) +#define SK_PNMI_ERR044MSG "MacPrivateConf: SK_HWEV_SET_FLOWMODE returned not 0" +#define SK_PNMI_ERR045 (SK_ERRBASE_PNMI + 45) +#define SK_PNMI_ERR045MSG "MacPrivateConf: SK_HWEV_SET_SPEED returned not 0" +#define SK_PNMI_ERR046 (SK_ERRBASE_PNMI + 46) +#define SK_PNMI_ERR046MSG "Monitor: Unknown OID" +#define SK_PNMI_ERR047 (SK_ERRBASE_PNMI + 47) +#define SK_PNMI_ERR047MSG "SirqUpdate: Event function returns not 0" +#define SK_PNMI_ERR048 (SK_ERRBASE_PNMI + 48) +#define SK_PNMI_ERR048MSG "RlmtUpdate: Event function returns not 0" +#define SK_PNMI_ERR049 (SK_ERRBASE_PNMI + 49) +#define SK_PNMI_ERR049MSG "SkPnmiInit: Invalid size of 'CounterOffset' struct!!" +#define SK_PNMI_ERR050 (SK_ERRBASE_PNMI + 50) +#define SK_PNMI_ERR050MSG "SkPnmiInit: Invalid size of 'StatAddr' table!!" 
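/*
 * Editor's note -- hedged sketch, not part of the original diff.  Each
 * SK_PNMI_ERRxxx number above is paired with an SK_PNMI_ERRxxxMSG string,
 * and the pair is normally handed to the central error-log macro from
 * skerror.h.  SK_ERR_LOG() and the SK_ERRCL_SW error class are assumed to
 * be available as in the other sk98lin modules; the helper itself is
 * purely illustrative.
 */
static void SkPnmiExampleLogUnknownOid(SK_AC *pAC)
{
	/* Report "General: Unknown OID" (SK_PNMI_ERR034) to the syslog. */
	SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR034, SK_PNMI_ERR034MSG);
}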
+#define SK_PNMI_ERR051 (SK_ERRBASE_PNMI + 51) +#define SK_PNMI_ERR051MSG "SkPnmiEvent: Port switch suspicious" +#define SK_PNMI_ERR052 (SK_ERRBASE_PNMI + 52) +#define SK_PNMI_ERR052MSG "" +#define SK_PNMI_ERR053 (SK_ERRBASE_PNMI + 53) +#define SK_PNMI_ERR053MSG "General: Driver release date not initialized" +#define SK_PNMI_ERR054 (SK_ERRBASE_PNMI + 54) +#define SK_PNMI_ERR054MSG "General: Driver release date string too long" +#define SK_PNMI_ERR055 (SK_ERRBASE_PNMI + 55) +#define SK_PNMI_ERR055MSG "General: Driver file name not initialized" +#define SK_PNMI_ERR056 (SK_ERRBASE_PNMI + 56) +#define SK_PNMI_ERR056MSG "General: Driver file name string too long" + +/* + * Management counter macros called by the driver + */ +#define SK_PNMI_SET_DRIVER_DESCR(pAC,v) ((pAC)->Pnmi.pDriverDescription = \ + (char *)(v)) + +#define SK_PNMI_SET_DRIVER_VER(pAC,v) ((pAC)->Pnmi.pDriverVersion = \ + (char *)(v)) + +#define SK_PNMI_SET_DRIVER_RELDATE(pAC,v) ((pAC)->Pnmi.pDriverReleaseDate = \ + (char *)(v)) + +#define SK_PNMI_SET_DRIVER_FILENAME(pAC,v) ((pAC)->Pnmi.pDriverFileName = \ + (char *)(v)) + +#define SK_PNMI_CNT_TX_QUEUE_LEN(pAC,v,p) \ + { \ + (pAC)->Pnmi.Port[p].TxSwQueueLen = (SK_U64)(v); \ + if ((pAC)->Pnmi.Port[p].TxSwQueueLen > (pAC)->Pnmi.Port[p].TxSwQueueMax) { \ + (pAC)->Pnmi.Port[p].TxSwQueueMax = (pAC)->Pnmi.Port[p].TxSwQueueLen; \ + } \ + } +#define SK_PNMI_CNT_TX_RETRY(pAC,p) (((pAC)->Pnmi.Port[p].TxRetryCts)++) +#define SK_PNMI_CNT_RX_INTR(pAC,p) (((pAC)->Pnmi.Port[p].RxIntrCts)++) +#define SK_PNMI_CNT_TX_INTR(pAC,p) (((pAC)->Pnmi.Port[p].TxIntrCts)++) +#define SK_PNMI_CNT_NO_RX_BUF(pAC,p) (((pAC)->Pnmi.Port[p].RxNoBufCts)++) +#define SK_PNMI_CNT_NO_TX_BUF(pAC,p) (((pAC)->Pnmi.Port[p].TxNoBufCts)++) +#define SK_PNMI_CNT_USED_TX_DESCR(pAC,v,p) \ + ((pAC)->Pnmi.Port[p].TxUsedDescrNo=(SK_U64)(v)); +#define SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,v,p) \ + { \ + ((pAC)->Pnmi.Port[p].RxDeliveredCts)++; \ + (pAC)->Pnmi.Port[p].RxOctetsDeliveredCts += (SK_U64)(v); \ + } +#define SK_PNMI_CNT_ERR_RECOVERY(pAC,p) (((pAC)->Pnmi.Port[p].ErrRecoveryCts)++); + +#define SK_PNMI_CNT_SYNC_OCTETS(pAC,p,v) \ + { \ + if ((p) < SK_MAX_MACS) { \ + ((pAC)->Pnmi.Port[p].StatSyncCts)++; \ + (pAC)->Pnmi.Port[p].StatSyncOctetsCts += (SK_U64)(v); \ + } \ + } + +#define SK_PNMI_CNT_RX_LONGFRAMES(pAC,p) \ + { \ + if ((p) < SK_MAX_MACS) { \ + ((pAC)->Pnmi.Port[p].StatRxLongFrameCts++); \ + } \ + } + +#define SK_PNMI_CNT_RX_FRAMETOOLONG(pAC,p) \ + { \ + if ((p) < SK_MAX_MACS) { \ + ((pAC)->Pnmi.Port[p].StatRxFrameTooLongCts++); \ + } \ + } + +#define SK_PNMI_CNT_RX_PMACC_ERR(pAC,p) \ + { \ + if ((p) < SK_MAX_MACS) { \ + ((pAC)->Pnmi.Port[p].StatRxPMaccErr++); \ + } \ + } + +/* + * Conversion Macros + */ +#define SK_PNMI_PORT_INST2LOG(i) ((unsigned int)(i) - 1) +#define SK_PNMI_PORT_LOG2INST(l) ((unsigned int)(l) + 1) +#define SK_PNMI_PORT_PHYS2LOG(p) ((unsigned int)(p) + 1) +#define SK_PNMI_PORT_LOG2PHYS(pAC,l) ((unsigned int)(l) - 1) +#define SK_PNMI_PORT_PHYS2INST(pAC,p) \ + (pAC->Pnmi.DualNetActiveFlag ? 
2 : ((unsigned int)(p) + 2)) +#define SK_PNMI_PORT_INST2PHYS(pAC,i) ((unsigned int)(i) - 2) + +/* + * Structure definition for SkPnmiGetStruct and SkPnmiSetStruct + */ +#define SK_PNMI_VPD_KEY_SIZE 5 +#define SK_PNMI_VPD_BUFSIZE (VPD_SIZE) +#define SK_PNMI_VPD_ENTRIES (VPD_SIZE / 4) +#define SK_PNMI_VPD_DATALEN 128 /* Number of data bytes */ + +#define SK_PNMI_MULTICAST_LISTLEN 64 +#define SK_PNMI_SENSOR_ENTRIES (SK_MAX_SENSORS) +#define SK_PNMI_CHECKSUM_ENTRIES 3 +#define SK_PNMI_MAC_ENTRIES (SK_MAX_MACS + 1) +#define SK_PNMI_MONITOR_ENTRIES 20 +#define SK_PNMI_TRAP_ENTRIES 10 +#define SK_PNMI_TRAPLEN 128 +#define SK_PNMI_STRINGLEN1 80 +#define SK_PNMI_STRINGLEN2 25 +#define SK_PNMI_TRAP_QUEUE_LEN 512 + +typedef struct s_PnmiVpd { + char VpdKey[SK_PNMI_VPD_KEY_SIZE]; + char VpdValue[SK_PNMI_VPD_DATALEN]; + SK_U8 VpdAccess; + SK_U8 VpdAction; +} SK_PNMI_VPD; + +typedef struct s_PnmiSensor { + SK_U8 SensorIndex; + char SensorDescr[SK_PNMI_STRINGLEN2]; + SK_U8 SensorType; + SK_U32 SensorValue; + SK_U32 SensorWarningThresholdLow; + SK_U32 SensorWarningThresholdHigh; + SK_U32 SensorErrorThresholdLow; + SK_U32 SensorErrorThresholdHigh; + SK_U8 SensorStatus; + SK_U64 SensorWarningCts; + SK_U64 SensorErrorCts; + SK_U64 SensorWarningTimestamp; + SK_U64 SensorErrorTimestamp; +} SK_PNMI_SENSOR; + +typedef struct s_PnmiChecksum { + SK_U64 ChecksumRxOkCts; + SK_U64 ChecksumRxUnableCts; + SK_U64 ChecksumRxErrCts; + SK_U64 ChecksumTxOkCts; + SK_U64 ChecksumTxUnableCts; +} SK_PNMI_CHECKSUM; + +typedef struct s_PnmiStat { + SK_U64 StatTxOkCts; + SK_U64 StatTxOctetsOkCts; + SK_U64 StatTxBroadcastOkCts; + SK_U64 StatTxMulticastOkCts; + SK_U64 StatTxUnicastOkCts; + SK_U64 StatTxLongFramesCts; + SK_U64 StatTxBurstCts; + SK_U64 StatTxPauseMacCtrlCts; + SK_U64 StatTxMacCtrlCts; + SK_U64 StatTxSingleCollisionCts; + SK_U64 StatTxMultipleCollisionCts; + SK_U64 StatTxExcessiveCollisionCts; + SK_U64 StatTxLateCollisionCts; + SK_U64 StatTxDeferralCts; + SK_U64 StatTxExcessiveDeferralCts; + SK_U64 StatTxFifoUnderrunCts; + SK_U64 StatTxCarrierCts; + SK_U64 Dummy1; /* StatTxUtilization */ + SK_U64 StatTx64Cts; + SK_U64 StatTx127Cts; + SK_U64 StatTx255Cts; + SK_U64 StatTx511Cts; + SK_U64 StatTx1023Cts; + SK_U64 StatTxMaxCts; + SK_U64 StatTxSyncCts; + SK_U64 StatTxSyncOctetsCts; + SK_U64 StatRxOkCts; + SK_U64 StatRxOctetsOkCts; + SK_U64 StatRxBroadcastOkCts; + SK_U64 StatRxMulticastOkCts; + SK_U64 StatRxUnicastOkCts; + SK_U64 StatRxLongFramesCts; + SK_U64 StatRxPauseMacCtrlCts; + SK_U64 StatRxMacCtrlCts; + SK_U64 StatRxPauseMacCtrlErrorCts; + SK_U64 StatRxMacCtrlUnknownCts; + SK_U64 StatRxBurstCts; + SK_U64 StatRxMissedCts; + SK_U64 StatRxFramingCts; + SK_U64 StatRxFifoOverflowCts; + SK_U64 StatRxJabberCts; + SK_U64 StatRxCarrierCts; + SK_U64 StatRxIRLengthCts; + SK_U64 StatRxSymbolCts; + SK_U64 StatRxShortsCts; + SK_U64 StatRxRuntCts; + SK_U64 StatRxCextCts; + SK_U64 StatRxTooLongCts; + SK_U64 StatRxFcsCts; + SK_U64 Dummy2; /* StatRxUtilization */ + SK_U64 StatRx64Cts; + SK_U64 StatRx127Cts; + SK_U64 StatRx255Cts; + SK_U64 StatRx511Cts; + SK_U64 StatRx1023Cts; + SK_U64 StatRxMaxCts; +} SK_PNMI_STAT; + +typedef struct s_PnmiConf { + char ConfMacCurrentAddr[6]; + char ConfMacFactoryAddr[6]; + SK_U8 ConfPMD; + SK_U8 ConfConnector; + SK_U32 ConfPhyType; + SK_U32 ConfPhyMode; + SK_U8 ConfLinkCapability; + SK_U8 ConfLinkMode; + SK_U8 ConfLinkModeStatus; + SK_U8 ConfLinkStatus; + SK_U8 ConfFlowCtrlCapability; + SK_U8 ConfFlowCtrlMode; + SK_U8 ConfFlowCtrlStatus; + SK_U8 ConfPhyOperationCapability; + SK_U8 ConfPhyOperationMode; 
+ SK_U8 ConfPhyOperationStatus; + SK_U8 ConfSpeedCapability; + SK_U8 ConfSpeedMode; + SK_U8 ConfSpeedStatus; +} SK_PNMI_CONF; + +typedef struct s_PnmiRlmt { + SK_U32 RlmtIndex; + SK_U32 RlmtStatus; + SK_U64 RlmtTxHelloCts; + SK_U64 RlmtRxHelloCts; + SK_U64 RlmtTxSpHelloReqCts; + SK_U64 RlmtRxSpHelloCts; +} SK_PNMI_RLMT; + +typedef struct s_PnmiRlmtMonitor { + SK_U32 RlmtMonitorIndex; + char RlmtMonitorAddr[6]; + SK_U64 RlmtMonitorErrorCts; + SK_U64 RlmtMonitorTimestamp; + SK_U8 RlmtMonitorAdmin; +} SK_PNMI_RLMT_MONITOR; + +typedef struct s_PnmiRequestStatus { + SK_U32 ErrorStatus; + SK_U32 ErrorOffset; +} SK_PNMI_REQUEST_STATUS; + +typedef struct s_PnmiStrucData { + SK_U32 MgmtDBVersion; + SK_PNMI_REQUEST_STATUS ReturnStatus; + SK_U32 VpdFreeBytes; + char VpdEntriesList[SK_PNMI_VPD_ENTRIES * SK_PNMI_VPD_KEY_SIZE]; + SK_U32 VpdEntriesNumber; + SK_PNMI_VPD Vpd[SK_PNMI_VPD_ENTRIES]; + SK_U32 PortNumber; + SK_U32 DeviceType; + char DriverDescr[SK_PNMI_STRINGLEN1]; + char DriverVersion[SK_PNMI_STRINGLEN2]; + char DriverReleaseDate[SK_PNMI_STRINGLEN1]; + char DriverFileName[SK_PNMI_STRINGLEN1]; + char HwDescr[SK_PNMI_STRINGLEN1]; + char HwVersion[SK_PNMI_STRINGLEN2]; + SK_U16 Chipset; + SK_U32 ChipId; + SK_U8 VauxAvail; + SK_U32 RamSize; + SK_U32 MtuSize; + SK_U32 Action; + SK_U32 TestResult; + SK_U8 BusType; + SK_U8 BusSpeed; + SK_U8 BusWidth; + SK_U8 SensorNumber; + SK_PNMI_SENSOR Sensor[SK_PNMI_SENSOR_ENTRIES]; + SK_U8 ChecksumNumber; + SK_PNMI_CHECKSUM Checksum[SK_PNMI_CHECKSUM_ENTRIES]; + SK_PNMI_STAT Stat[SK_PNMI_MAC_ENTRIES]; + SK_PNMI_CONF Conf[SK_PNMI_MAC_ENTRIES]; + SK_U8 RlmtMode; + SK_U32 RlmtPortNumber; + SK_U8 RlmtPortActive; + SK_U8 RlmtPortPreferred; + SK_U64 RlmtChangeCts; + SK_U64 RlmtChangeTime; + SK_U64 RlmtChangeEstimate; + SK_U64 RlmtChangeThreshold; + SK_PNMI_RLMT Rlmt[SK_MAX_MACS]; + SK_U32 RlmtMonitorNumber; + SK_PNMI_RLMT_MONITOR RlmtMonitor[SK_PNMI_MONITOR_ENTRIES]; + SK_U32 TrapNumber; + SK_U8 Trap[SK_PNMI_TRAP_QUEUE_LEN]; + SK_U64 TxSwQueueLen; + SK_U64 TxSwQueueMax; + SK_U64 TxRetryCts; + SK_U64 RxIntrCts; + SK_U64 TxIntrCts; + SK_U64 RxNoBufCts; + SK_U64 TxNoBufCts; + SK_U64 TxUsedDescrNo; + SK_U64 RxDeliveredCts; + SK_U64 RxOctetsDeliveredCts; + SK_U64 RxHwErrorsCts; + SK_U64 TxHwErrorsCts; + SK_U64 InErrorsCts; + SK_U64 OutErrorsCts; + SK_U64 ErrRecoveryCts; + SK_U64 SysUpTime; +} SK_PNMI_STRUCT_DATA; + +#define SK_PNMI_STRUCT_SIZE (sizeof(SK_PNMI_STRUCT_DATA)) +#define SK_PNMI_MIN_STRUCT_SIZE ((unsigned int)(SK_UPTR)\ + &(((SK_PNMI_STRUCT_DATA *)0)->VpdFreeBytes)) + /* + * ReturnStatus field + * must be located + * before VpdFreeBytes + */ + +/* + * Various definitions + */ +#define SK_PNMI_MAX_PROTOS 3 + +#define SK_PNMI_CNT_NO 66 /* Must have the value of the enum + * SK_PNMI_MAX_IDX. 
Define SK_PNMI_CHECK + * for check while init phase 1 + */ + +/* + * Estimate data structure + */ +typedef struct s_PnmiEstimate { + unsigned int EstValueIndex; + SK_U64 EstValue[7]; + SK_U64 Estimate; + SK_TIMER EstTimer; +} SK_PNMI_ESTIMATE; + + +/* + * VCT timer data structure + */ +typedef struct s_VctTimer { + SK_TIMER VctTimer; +} SK_PNMI_VCT_TIMER; + + +/* + * PNMI specific adapter context structure + */ +typedef struct s_PnmiPort { + SK_U64 StatSyncCts; + SK_U64 StatSyncOctetsCts; + SK_U64 StatRxLongFrameCts; + SK_U64 StatRxFrameTooLongCts; + SK_U64 StatRxPMaccErr; + SK_U64 TxSwQueueLen; + SK_U64 TxSwQueueMax; + SK_U64 TxRetryCts; + SK_U64 RxIntrCts; + SK_U64 TxIntrCts; + SK_U64 RxNoBufCts; + SK_U64 TxNoBufCts; + SK_U64 TxUsedDescrNo; + SK_U64 RxDeliveredCts; + SK_U64 RxOctetsDeliveredCts; + SK_U64 RxHwErrorsCts; + SK_U64 TxHwErrorsCts; + SK_U64 InErrorsCts; + SK_U64 OutErrorsCts; + SK_U64 ErrRecoveryCts; + SK_U64 RxShortZeroMark; + SK_U64 CounterOffset[SK_PNMI_CNT_NO]; + SK_U32 CounterHigh[SK_PNMI_CNT_NO]; + SK_BOOL ActiveFlag; + SK_U8 Align[3]; +} SK_PNMI_PORT; + + +typedef struct s_PnmiData { + SK_PNMI_PORT Port [SK_MAX_MACS]; + SK_PNMI_PORT BufPort [SK_MAX_MACS]; /* 2002-09-13 pweber */ + SK_U64 VirtualCounterOffset[SK_PNMI_CNT_NO]; + SK_U32 TestResult; + char HwVersion[10]; + SK_U16 Align01; + + char *pDriverDescription; + char *pDriverVersion; + char *pDriverReleaseDate; + char *pDriverFileName; + + int MacUpdatedFlag; + int RlmtUpdatedFlag; + int SirqUpdatedFlag; + + SK_U64 RlmtChangeCts; + SK_U64 RlmtChangeTime; + SK_PNMI_ESTIMATE RlmtChangeEstimate; + SK_U64 RlmtChangeThreshold; + + SK_U64 StartUpTime; + SK_U32 DeviceType; + char PciBusSpeed; + char PciBusWidth; + char Chipset; + char PMD; + char Connector; + SK_BOOL DualNetActiveFlag; + SK_U16 Align02; + + char TrapBuf[SK_PNMI_TRAP_QUEUE_LEN]; + unsigned int TrapBufFree; + unsigned int TrapQueueBeg; + unsigned int TrapQueueEnd; + unsigned int TrapBufPad; + unsigned int TrapUnique; + SK_U8 VctStatus[SK_MAX_MACS]; + SK_PNMI_VCT VctBackup[SK_MAX_MACS]; + SK_PNMI_VCT_TIMER VctTimeout[SK_MAX_MACS]; +#ifdef SK_DIAG_SUPPORT + SK_U32 DiagAttached; +#endif /* SK_DIAG_SUPPORT */ +} SK_PNMI; + + +/* + * Function prototypes + */ +extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level); +extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf, + unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); +extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, + unsigned int *pLen, SK_U32 NetIndex); +extern int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, + unsigned int *pLen, SK_U32 NetIndex); +extern int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf, + unsigned int *pLen, SK_U32 NetIndex); +extern int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, + SK_EVPARA Param); +extern int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf, + unsigned int * pLen, SK_U32 NetIndex); + +#endif diff --git a/drivers/net/sk98lin/h/skgesirq.h b/drivers/net/sk98lin/h/skgesirq.h new file mode 100644 index 000000000000..3eec6274e413 --- /dev/null +++ b/drivers/net/sk98lin/h/skgesirq.h @@ -0,0 +1,110 @@ +/****************************************************************************** + * + * Name: skgesirq.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.30 $ + * Date: $Date: 2003/07/04 12:34:13 $ + * Purpose: SK specific Gigabit Ethernet special IRQ functions + * + ******************************************************************************/ + 
+/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef _INC_SKGESIRQ_H_ +#define _INC_SKGESIRQ_H_ + +/* Define return codes of SkGePortCheckUp and CheckShort */ +#define SK_HW_PS_NONE 0 /* No action needed */ +#define SK_HW_PS_RESTART 1 /* Restart needed */ +#define SK_HW_PS_LINK 2 /* Link Up actions needed */ + +/* + * Define the Event the special IRQ/INI module can handle + */ +#define SK_HWEV_WATIM 1 /* Timeout for WA Errata #2 XMAC */ +#define SK_HWEV_PORT_START 2 /* Port Start Event by RLMT */ +#define SK_HWEV_PORT_STOP 3 /* Port Stop Event by RLMT */ +#define SK_HWEV_CLEAR_STAT 4 /* Clear Statistics by PNMI */ +#define SK_HWEV_UPDATE_STAT 5 /* Update Statistics by PNMI */ +#define SK_HWEV_SET_LMODE 6 /* Set Link Mode by PNMI */ +#define SK_HWEV_SET_FLOWMODE 7 /* Set Flow Control Mode by PNMI */ +#define SK_HWEV_SET_ROLE 8 /* Set Master/Slave (Role) by PNMI */ +#define SK_HWEV_SET_SPEED 9 /* Set Link Speed by PNMI */ +#define SK_HWEV_HALFDUP_CHK 10 /* Half Duplex Hangup Workaround */ + +#define SK_WA_ACT_TIME (5000000UL) /* 5 sec */ +#define SK_WA_INA_TIME (100000UL) /* 100 msec */ + +#define SK_HALFDUP_CHK_TIME (10000UL) /* 10 msec */ + +/* + * Define the error numbers and messages + */ +#define SKERR_SIRQ_E001 (SK_ERRBASE_SIRQ+0) +#define SKERR_SIRQ_E001MSG "Unknown event" +#define SKERR_SIRQ_E002 (SKERR_SIRQ_E001+1) +#define SKERR_SIRQ_E002MSG "Packet timeout RX1" +#define SKERR_SIRQ_E003 (SKERR_SIRQ_E002+1) +#define SKERR_SIRQ_E003MSG "Packet timeout RX2" +#define SKERR_SIRQ_E004 (SKERR_SIRQ_E003+1) +#define SKERR_SIRQ_E004MSG "MAC 1 not correctly initialized" +#define SKERR_SIRQ_E005 (SKERR_SIRQ_E004+1) +#define SKERR_SIRQ_E005MSG "MAC 2 not correctly initialized" +#define SKERR_SIRQ_E006 (SKERR_SIRQ_E005+1) +#define SKERR_SIRQ_E006MSG "CHECK failure R1" +#define SKERR_SIRQ_E007 (SKERR_SIRQ_E006+1) +#define SKERR_SIRQ_E007MSG "CHECK failure R2" +#define SKERR_SIRQ_E008 (SKERR_SIRQ_E007+1) +#define SKERR_SIRQ_E008MSG "CHECK failure XS1" +#define SKERR_SIRQ_E009 (SKERR_SIRQ_E008+1) +#define SKERR_SIRQ_E009MSG "CHECK failure XA1" +#define SKERR_SIRQ_E010 (SKERR_SIRQ_E009+1) +#define SKERR_SIRQ_E010MSG "CHECK failure XS2" +#define SKERR_SIRQ_E011 (SKERR_SIRQ_E010+1) +#define SKERR_SIRQ_E011MSG "CHECK failure XA2" +#define SKERR_SIRQ_E012 (SKERR_SIRQ_E011+1) +#define SKERR_SIRQ_E012MSG "unexpected IRQ Master error" +#define SKERR_SIRQ_E013 (SKERR_SIRQ_E012+1) +#define SKERR_SIRQ_E013MSG "unexpected IRQ Status error" +#define SKERR_SIRQ_E014 (SKERR_SIRQ_E013+1) +#define SKERR_SIRQ_E014MSG "Parity error on RAM (read)" +#define SKERR_SIRQ_E015 (SKERR_SIRQ_E014+1) +#define SKERR_SIRQ_E015MSG "Parity error on RAM (write)" +#define SKERR_SIRQ_E016 (SKERR_SIRQ_E015+1) +#define SKERR_SIRQ_E016MSG "Parity error MAC 1" +#define SKERR_SIRQ_E017 (SKERR_SIRQ_E016+1) +#define SKERR_SIRQ_E017MSG "Parity error MAC 2" +#define SKERR_SIRQ_E018 (SKERR_SIRQ_E017+1) +#define SKERR_SIRQ_E018MSG "Parity error RX 1" +#define SKERR_SIRQ_E019 (SKERR_SIRQ_E018+1) +#define 
SKERR_SIRQ_E019MSG "Parity error RX 2" +#define SKERR_SIRQ_E020 (SKERR_SIRQ_E019+1) +#define SKERR_SIRQ_E020MSG "MAC transmit FIFO underrun" +#define SKERR_SIRQ_E021 (SKERR_SIRQ_E020+1) +#define SKERR_SIRQ_E021MSG "Spurious TWSI interrupt" +#define SKERR_SIRQ_E022 (SKERR_SIRQ_E021+1) +#define SKERR_SIRQ_E022MSG "Cable pair swap error" +#define SKERR_SIRQ_E023 (SKERR_SIRQ_E022+1) +#define SKERR_SIRQ_E023MSG "Auto-negotiation error" +#define SKERR_SIRQ_E024 (SKERR_SIRQ_E023+1) +#define SKERR_SIRQ_E024MSG "FIFO overflow error" +#define SKERR_SIRQ_E025 (SKERR_SIRQ_E024+1) +#define SKERR_SIRQ_E025MSG "2 Pair Downshift detected" + +extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); +extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); +extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); + +#endif /* _INC_SKGESIRQ_H_ */ diff --git a/drivers/net/sk98lin/h/ski2c.h b/drivers/net/sk98lin/h/ski2c.h new file mode 100644 index 000000000000..6a63f4a15de6 --- /dev/null +++ b/drivers/net/sk98lin/h/ski2c.h @@ -0,0 +1,174 @@ +/****************************************************************************** + * + * Name: ski2c.h + * Project: Gigabit Ethernet Adapters, TWSI-Module + * Version: $Revision: 1.35 $ + * Date: $Date: 2003/10/20 09:06:30 $ + * Purpose: Defines to access Voltage and Temperature Sensor + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/* + * SKI2C.H contains all I2C specific defines + */ + +#ifndef _SKI2C_H_ +#define _SKI2C_H_ + +typedef struct s_Sensor SK_SENSOR; + +#include "h/skgei2c.h" + +/* + * Define the I2C events. + */ +#define SK_I2CEV_IRQ 1 /* IRQ happened Event */ +#define SK_I2CEV_TIM 2 /* Timeout event */ +#define SK_I2CEV_CLEAR 3 /* Clear MIB Values */ + +/* + * Define READ and WRITE Constants. 
+ */ +#define I2C_READ 0 +#define I2C_WRITE 1 +#define I2C_BURST 1 +#define I2C_SINGLE 0 + +#define SKERR_I2C_E001 (SK_ERRBASE_I2C+0) +#define SKERR_I2C_E001MSG "Sensor index unknown" +#define SKERR_I2C_E002 (SKERR_I2C_E001+1) +#define SKERR_I2C_E002MSG "TWSI: transfer does not complete" +#define SKERR_I2C_E003 (SKERR_I2C_E002+1) +#define SKERR_I2C_E003MSG "LM80: NAK on device send" +#define SKERR_I2C_E004 (SKERR_I2C_E003+1) +#define SKERR_I2C_E004MSG "LM80: NAK on register send" +#define SKERR_I2C_E005 (SKERR_I2C_E004+1) +#define SKERR_I2C_E005MSG "LM80: NAK on device (2) send" +#define SKERR_I2C_E006 (SKERR_I2C_E005+1) +#define SKERR_I2C_E006MSG "Unknown event" +#define SKERR_I2C_E007 (SKERR_I2C_E006+1) +#define SKERR_I2C_E007MSG "LM80 read out of state" +#define SKERR_I2C_E008 (SKERR_I2C_E007+1) +#define SKERR_I2C_E008MSG "Unexpected sensor read completed" +#define SKERR_I2C_E009 (SKERR_I2C_E008+1) +#define SKERR_I2C_E009MSG "WARNING: temperature sensor out of range" +#define SKERR_I2C_E010 (SKERR_I2C_E009+1) +#define SKERR_I2C_E010MSG "WARNING: voltage sensor out of range" +#define SKERR_I2C_E011 (SKERR_I2C_E010+1) +#define SKERR_I2C_E011MSG "ERROR: temperature sensor out of range" +#define SKERR_I2C_E012 (SKERR_I2C_E011+1) +#define SKERR_I2C_E012MSG "ERROR: voltage sensor out of range" +#define SKERR_I2C_E013 (SKERR_I2C_E012+1) +#define SKERR_I2C_E013MSG "ERROR: couldn't init sensor" +#define SKERR_I2C_E014 (SKERR_I2C_E013+1) +#define SKERR_I2C_E014MSG "WARNING: fan sensor out of range" +#define SKERR_I2C_E015 (SKERR_I2C_E014+1) +#define SKERR_I2C_E015MSG "ERROR: fan sensor out of range" +#define SKERR_I2C_E016 (SKERR_I2C_E015+1) +#define SKERR_I2C_E016MSG "TWSI: active transfer does not complete" + +/* + * Define Timeout values + */ +#define SK_I2C_TIM_LONG 2000000L /* 2 seconds */ +#define SK_I2C_TIM_SHORT 100000L /* 100 milliseconds */ +#define SK_I2C_TIM_WATCH 1000000L /* 1 second */ + +/* + * Define trap and error log hold times + */ +#ifndef SK_SEN_ERR_TR_HOLD +#define SK_SEN_ERR_TR_HOLD (4*SK_TICKS_PER_SEC) +#endif +#ifndef SK_SEN_ERR_LOG_HOLD +#define SK_SEN_ERR_LOG_HOLD (60*SK_TICKS_PER_SEC) +#endif +#ifndef SK_SEN_WARN_TR_HOLD +#define SK_SEN_WARN_TR_HOLD (15*SK_TICKS_PER_SEC) +#endif +#ifndef SK_SEN_WARN_LOG_HOLD +#define SK_SEN_WARN_LOG_HOLD (15*60*SK_TICKS_PER_SEC) +#endif + +/* + * Defines for SenType + */ +#define SK_SEN_UNKNOWN 0 +#define SK_SEN_TEMP 1 +#define SK_SEN_VOLT 2 +#define SK_SEN_FAN 3 + +/* + * Define for the SenErrorFlag + */ +#define SK_SEN_ERR_NOT_PRESENT 0 /* Error Flag: Sensor not present */ +#define SK_SEN_ERR_OK 1 /* Error Flag: O.K. */ +#define SK_SEN_ERR_WARN 2 /* Error Flag: Warning */ +#define SK_SEN_ERR_ERR 3 /* Error Flag: Error */ +#define SK_SEN_ERR_FAULTY 4 /* Error Flag: Faulty */ + +/* + * Define the Sensor struct + */ +struct s_Sensor { + char *SenDesc; /* Description */ + int SenType; /* Voltage or Temperature */ + SK_I32 SenValue; /* Current value of the sensor */ + SK_I32 SenThreErrHigh; /* High error Threshhold of this sensor */ + SK_I32 SenThreWarnHigh; /* High warning Threshhold of this sensor */ + SK_I32 SenThreErrLow; /* Lower error Threshold of the sensor */ + SK_I32 SenThreWarnLow; /* Lower warning Threshold of the sensor */ + int SenErrFlag; /* Sensor indicated an error */ + SK_BOOL SenInit; /* Is sensor initialized ? 
*/ + SK_U64 SenErrCts; /* Error trap counter */ + SK_U64 SenWarnCts; /* Warning trap counter */ + SK_U64 SenBegErrTS; /* Begin error timestamp */ + SK_U64 SenBegWarnTS; /* Begin warning timestamp */ + SK_U64 SenLastErrTrapTS; /* Last error trap timestamp */ + SK_U64 SenLastErrLogTS; /* Last error log timestamp */ + SK_U64 SenLastWarnTrapTS; /* Last warning trap timestamp */ + SK_U64 SenLastWarnLogTS; /* Last warning log timestamp */ + int SenState; /* Sensor State (see HW specific include) */ + int (*SenRead)(SK_AC *pAC, SK_IOC IoC, struct s_Sensor *pSen); + /* Sensors read function */ + SK_U16 SenReg; /* Register Address for this sensor */ + SK_U8 SenDev; /* Device Selection for this sensor */ +}; + +typedef struct s_I2c { + SK_SENSOR SenTable[SK_MAX_SENSORS]; /* Sensor Table */ + int CurrSens; /* Which sensor is currently queried */ + int MaxSens; /* Max. number of sensors */ + int TimerMode; /* Use the timer also to watch the state machine */ + int InitLevel; /* Initialized Level */ +#ifndef SK_DIAG + int DummyReads; /* Number of non-checked dummy reads */ + SK_TIMER SenTimer; /* Sensors timer */ +#endif /* !SK_DIAG */ +} SK_I2C; + +extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level); +#ifdef SK_DIAG +extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg, + int Burst); +#else /* !SK_DIAG */ +extern int SkI2cEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); +extern void SkI2cWaitIrq(SK_AC *pAC, SK_IOC IoC); +extern void SkI2cIsr(SK_AC *pAC, SK_IOC IoC); +#endif /* !SK_DIAG */ +#endif /* n_SKI2C_H */ + diff --git a/drivers/net/sk98lin/h/skqueue.h b/drivers/net/sk98lin/h/skqueue.h new file mode 100644 index 000000000000..2ec40d4fdf60 --- /dev/null +++ b/drivers/net/sk98lin/h/skqueue.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * Name: skqueue.h + * Project: Gigabit Ethernet Adapters, Event Scheduler Module + * Version: $Revision: 1.16 $ + * Date: $Date: 2003/09/16 12:50:32 $ + * Purpose: Defines for the Event queue + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +/* + * SKQUEUE.H contains all defines and types for the event queue + */ + +#ifndef _SKQUEUE_H_ +#define _SKQUEUE_H_ + + +/* + * define the event classes to be served + */ +#define SKGE_DRV 1 /* Driver Event Class */ +#define SKGE_RLMT 2 /* RLMT Event Class */ +#define SKGE_I2C 3 /* I2C Event Class */ +#define SKGE_PNMI 4 /* PNMI Event Class */ +#define SKGE_CSUM 5 /* Checksum Event Class */ +#define SKGE_HWAC 6 /* Hardware Access Event Class */ + +#define SKGE_SWT 9 /* Software Timer Event Class */ +#define SKGE_LACP 10 /* LACP Aggregation Event Class */ +#define SKGE_RSF 11 /* RSF Aggregation Event Class */ +#define SKGE_MARKER 12 /* MARKER Aggregation Event Class */ +#define SKGE_FD 13 /* FD Distributor Event Class */ + +/* + * define event queue as circular buffer + */ +#define SK_MAX_EVENT 64 + +/* + * Parameter union for the Para stuff + */ +typedef union u_EvPara { + void *pParaPtr; /* Parameter Pointer */ + SK_U64 Para64; /* Parameter 64bit version */ + SK_U32 Para32[2]; /* Parameter Array of 32bit parameters */ +} SK_EVPARA; + +/* + * Event Queue + * skqueue.c + * events are class/value pairs + * class is addressee, e.g. RLMT, PNMI etc. + * value is command, e.g. line state change, ring op change etc. + */ +typedef struct s_EventElem { + SK_U32 Class; /* Event class */ + SK_U32 Event; /* Event value */ + SK_EVPARA Para; /* Event parameter */ +} SK_EVENTELEM; + +typedef struct s_Queue { + SK_EVENTELEM EvQueue[SK_MAX_EVENT]; + SK_EVENTELEM *EvPut; + SK_EVENTELEM *EvGet; +} SK_QUEUE; + +extern void SkEventInit(SK_AC *pAC, SK_IOC Ioc, int Level); +extern void SkEventQueue(SK_AC *pAC, SK_U32 Class, SK_U32 Event, + SK_EVPARA Para); +extern int SkEventDispatcher(SK_AC *pAC, SK_IOC Ioc); + + +/* Define Error Numbers and messages */ +#define SKERR_Q_E001 (SK_ERRBASE_QUEUE+0) +#define SKERR_Q_E001MSG "Event queue overflow" +#define SKERR_Q_E002 (SKERR_Q_E001+1) +#define SKERR_Q_E002MSG "Undefined event class" +#endif /* _SKQUEUE_H_ */ + diff --git a/drivers/net/sk98lin/h/skrlmt.h b/drivers/net/sk98lin/h/skrlmt.h new file mode 100644 index 000000000000..ca75dfdcf2d6 --- /dev/null +++ b/drivers/net/sk98lin/h/skrlmt.h @@ -0,0 +1,438 @@ +/****************************************************************************** + * + * Name: skrlmt.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.37 $ + * Date: $Date: 2003/04/15 09:43:43 $ + * Purpose: Header file for Redundant Link ManagemenT. + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This is the header file for Redundant Link ManagemenT. + * + * Include File Hierarchy: + * + * "skdrv1st.h" + * ... + * "sktypes.h" + * "skqueue.h" + * "skaddr.h" + * "skrlmt.h" + * ... 
+ * "skdrv2nd.h" + * + ******************************************************************************/ + +#ifndef __INC_SKRLMT_H +#define __INC_SKRLMT_H + +#ifdef __cplusplus +extern "C" { +#endif /* cplusplus */ + +/* defines ********************************************************************/ + +#define SK_RLMT_NET_DOWN_TEMP 1 /* NET_DOWN due to last port down. */ +#define SK_RLMT_NET_DOWN_FINAL 2 /* NET_DOWN due to RLMT_STOP. */ + +/* ----- Default queue sizes - must be multiples of 8 KB ----- */ + +/* Less than 8 KB free in RX queue => pause frames. */ +#define SK_RLMT_STANDBY_QRXSIZE 128 /* Size of rx standby queue in KB. */ +#define SK_RLMT_STANDBY_QXASIZE 32 /* Size of async standby queue in KB. */ +#define SK_RLMT_STANDBY_QXSSIZE 0 /* Size of sync standby queue in KB. */ + +#define SK_RLMT_MAX_TX_BUF_SIZE 60 /* Maximum RLMT transmit size. */ + +/* ----- PORT states ----- */ + +#define SK_RLMT_PS_INIT 0 /* Port state: Init. */ +#define SK_RLMT_PS_LINK_DOWN 1 /* Port state: Link down. */ +#define SK_RLMT_PS_DOWN 2 /* Port state: Port down. */ +#define SK_RLMT_PS_GOING_UP 3 /* Port state: Going up. */ +#define SK_RLMT_PS_UP 4 /* Port state: Up. */ + +/* ----- RLMT states ----- */ + +#define SK_RLMT_RS_INIT 0 /* RLMT state: Init. */ +#define SK_RLMT_RS_NET_DOWN 1 /* RLMT state: Net down. */ +#define SK_RLMT_RS_NET_UP 2 /* RLMT state: Net up. */ + +/* ----- PORT events ----- */ + +#define SK_RLMT_LINK_UP 1001 /* Link came up. */ +#define SK_RLMT_LINK_DOWN 1002 /* Link went down. */ +#define SK_RLMT_PORT_ADDR 1003 /* Port address changed. */ + +/* ----- RLMT events ----- */ + +#define SK_RLMT_START 2001 /* Start RLMT. */ +#define SK_RLMT_STOP 2002 /* Stop RLMT. */ +#define SK_RLMT_PACKET_RECEIVED 2003 /* Packet was received for RLMT. */ +#define SK_RLMT_STATS_CLEAR 2004 /* Clear statistics. */ +#define SK_RLMT_STATS_UPDATE 2005 /* Update statistics. */ +#define SK_RLMT_PREFPORT_CHANGE 2006 /* Change preferred port. */ +#define SK_RLMT_MODE_CHANGE 2007 /* New RlmtMode. */ +#define SK_RLMT_SET_NETS 2008 /* Number of Nets (1 or 2). */ + +/* ----- RLMT mode bits ----- */ + +/* + * CAUTION: These defines are private to RLMT. + * Please use the RLMT mode defines below. + */ + +#define SK_RLMT_CHECK_LINK 1 /* Check Link. */ +#define SK_RLMT_CHECK_LOC_LINK 2 /* Check other link on same adapter. */ +#define SK_RLMT_CHECK_SEG 4 /* Check segmentation. */ + +#ifndef RLMT_CHECK_REMOTE +#define SK_RLMT_CHECK_OTHERS SK_RLMT_CHECK_LOC_LINK +#else /* RLMT_CHECK_REMOTE */ +#define SK_RLMT_CHECK_REM_LINK 8 /* Check link(s) on other adapter(s). */ +#define SK_RLMT_MAX_REMOTE_PORTS_CHECKED 3 +#define SK_RLMT_CHECK_OTHERS \ + (SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_REM_LINK) +#endif /* RLMT_CHECK_REMOTE */ + +#ifndef SK_RLMT_ENABLE_TRANSPARENT +#define SK_RLMT_TRANSPARENT 0 /* RLMT transparent - inactive. */ +#else /* SK_RLMT_ENABLE_TRANSPARENT */ +#define SK_RLMT_TRANSPARENT 128 /* RLMT transparent. */ +#endif /* SK_RLMT_ENABLE_TRANSPARENT */ + +/* ----- RLMT modes ----- */ + +/* Check Link State. */ +#define SK_RLMT_MODE_CLS (SK_RLMT_CHECK_LINK) + +/* Check Local Ports: check other links on the same adapter. */ +#define SK_RLMT_MODE_CLP (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK) + +/* Check Local Ports and Segmentation Status. */ +#define SK_RLMT_MODE_CLPSS \ + (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_SEG) + +#ifdef RLMT_CHECK_REMOTE +/* Check Local and Remote Ports: check links (local or remote). */ + Name of define TBD! 
+#define SK_RLMT_MODE_CRP \ + (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_REM_LINK) + +/* Check Local and Remote Ports and Segmentation Status. */ + Name of define TBD! +#define SK_RLMT_MODE_CRPSS \ + (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | \ + SK_RLMT_CHECK_REM_LINK | SK_RLMT_CHECK_SEG) +#endif /* RLMT_CHECK_REMOTE */ + +/* ----- RLMT lookahead result bits ----- */ + +#define SK_RLMT_RX_RLMT 1 /* Give packet to RLMT. */ +#define SK_RLMT_RX_PROTOCOL 2 /* Give packet to protocol. */ + +/* Macros */ + +#if 0 +SK_AC *pAC /* adapter context */ +SK_U32 PortNum /* receiving port */ +unsigned PktLen /* received packet's length */ +SK_BOOL IsBc /* Flag: packet is broadcast */ +unsigned *pOffset /* offs. of bytes to present to SK_RLMT_LOOKAHEAD */ +unsigned *pNumBytes /* #Bytes to present to SK_RLMT_LOOKAHEAD */ +#endif /* 0 */ + +#define SK_RLMT_PRE_LOOKAHEAD(pAC,PortNum,PktLen,IsBc,pOffset,pNumBytes) { \ + SK_AC *_pAC; \ + SK_U32 _PortNum; \ + _pAC = (pAC); \ + _PortNum = (SK_U32)(PortNum); \ + /* _pAC->Rlmt.Port[_PortNum].PacketsRx++; */ \ + _pAC->Rlmt.Port[_PortNum].PacketsPerTimeSlot++; \ + if (_pAC->Rlmt.RlmtOff) { \ + *(pNumBytes) = 0; \ + } \ + else {\ + if ((_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_TRANSPARENT) != 0) { \ + *(pNumBytes) = 0; \ + } \ + else if (IsBc) { \ + if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode != SK_RLMT_MODE_CLS) { \ + *(pNumBytes) = 6; \ + *(pOffset) = 6; \ + } \ + else { \ + *(pNumBytes) = 0; \ + } \ + } \ + else { \ + if ((PktLen) > SK_RLMT_MAX_TX_BUF_SIZE) { \ + /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \ + *(pNumBytes) = 0; \ + } \ + else { \ + *(pNumBytes) = 6; \ + *(pOffset) = 0; \ + } \ + } \ + } \ +} + +#if 0 +SK_AC *pAC /* adapter context */ +SK_U32 PortNum /* receiving port */ +SK_U8 *pLaPacket, /* received packet's data (points to pOffset) */ +SK_BOOL IsBc /* Flag: packet is broadcast */ +SK_BOOL IsMc /* Flag: packet is multicast */ +unsigned *pForRlmt /* Result: bits SK_RLMT_RX_RLMT, SK_RLMT_RX_PROTOCOL */ +SK_RLMT_LOOKAHEAD() expects *pNumBytes from +packet offset *pOffset (s.a.) at *pLaPacket. + +If you use SK_RLMT_LOOKAHEAD in a path where you already know if the packet is +BC, MC, or UC, you should use constants for IsBc and IsMc, so that your compiler +can trash unneeded parts of the if construction. 
+#endif /* 0 */ + +#define SK_RLMT_LOOKAHEAD(pAC,PortNum,pLaPacket,IsBc,IsMc,pForRlmt) { \ + SK_AC *_pAC; \ + SK_U32 _PortNum; \ + SK_U8 *_pLaPacket; \ + _pAC = (pAC); \ + _PortNum = (SK_U32)(PortNum); \ + _pLaPacket = (SK_U8 *)(pLaPacket); \ + if (IsBc) {\ + if (!SK_ADDR_EQUAL(_pLaPacket, _pAC->Addr.Net[_pAC->Rlmt.Port[ \ + _PortNum].Net->NetNumber].CurrentMacAddress.a)) { \ + _pAC->Rlmt.Port[_PortNum].BcTimeStamp = SkOsGetTime(_pAC); \ + _pAC->Rlmt.CheckSwitch = SK_TRUE; \ + } \ + /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \ + *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \ + } \ + else if (IsMc) { \ + if (SK_ADDR_EQUAL(_pLaPacket, BridgeMcAddr.a)) { \ + _pAC->Rlmt.Port[_PortNum].BpduPacketsPerTimeSlot++; \ + if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_CHECK_SEG) { \ + *(pForRlmt) = SK_RLMT_RX_RLMT | SK_RLMT_RX_PROTOCOL; \ + } \ + else { \ + *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \ + } \ + } \ + else if (SK_ADDR_EQUAL(_pLaPacket, SkRlmtMcAddr.a)) { \ + *(pForRlmt) = SK_RLMT_RX_RLMT; \ + } \ + else { \ + /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \ + *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \ + } \ + } \ + else { \ + if (SK_ADDR_EQUAL( \ + _pLaPacket, \ + _pAC->Addr.Port[_PortNum].CurrentMacAddress.a)) { \ + *(pForRlmt) = SK_RLMT_RX_RLMT; \ + } \ + else { \ + /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \ + *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \ + } \ + } \ +} + +#ifdef SK_RLMT_FAST_LOOKAHEAD +Error: SK_RLMT_FAST_LOOKAHEAD no longer used. Use new macros for lookahead. +#endif /* SK_RLMT_FAST_LOOKAHEAD */ +#ifdef SK_RLMT_SLOW_LOOKAHEAD +Error: SK_RLMT_SLOW_LOOKAHEAD no longer used. Use new macros for lookahead. +#endif /* SK_RLMT_SLOW_LOOKAHEAD */ + +/* typedefs *******************************************************************/ + +#ifdef SK_RLMT_MBUF_PRIVATE +typedef struct s_RlmtMbuf { + some content +} SK_RLMT_MBUF; +#endif /* SK_RLMT_MBUF_PRIVATE */ + + +#ifdef SK_LA_INFO +typedef struct s_Rlmt_PacketInfo { + unsigned PacketLength; /* Length of packet. */ + unsigned PacketType; /* Directed/Multicast/Broadcast. */ +} SK_RLMT_PINFO; +#endif /* SK_LA_INFO */ + + +typedef struct s_RootId { + SK_U8 Id[8]; /* Root Bridge Id. */ +} SK_RLMT_ROOT_ID; + + +typedef struct s_port { + SK_MAC_ADDR CheckAddr; + SK_BOOL SuspectTx; +} SK_PORT_CHECK; + + +typedef struct s_RlmtNet SK_RLMT_NET; + + +typedef struct s_RlmtPort { + +/* ----- Public part (read-only) ----- */ + + SK_U8 PortState; /* Current state of this port. */ + + /* For PNMI */ + SK_BOOL LinkDown; + SK_BOOL PortDown; + SK_U8 Align01; + + SK_U32 PortNumber; /* Number of port on adapter. */ + SK_RLMT_NET * Net; /* Net port belongs to. */ + + SK_U64 TxHelloCts; + SK_U64 RxHelloCts; + SK_U64 TxSpHelloReqCts; + SK_U64 RxSpHelloCts; + +/* ----- Private part ----- */ + +/* SK_U64 PacketsRx; */ /* Total packets received. */ + SK_U32 PacketsPerTimeSlot; /* Packets rxed between TOs. */ +/* SK_U32 DataPacketsPerTimeSlot; */ /* Data packets ... */ + SK_U32 BpduPacketsPerTimeSlot; /* BPDU packets rxed in TS. */ + SK_U64 BcTimeStamp; /* Time of last BC receive. */ + SK_U64 GuTimeStamp; /* Time of entering GOING_UP. */ + + SK_TIMER UpTimer; /* Timer struct Link/Port up. */ + SK_TIMER DownRxTimer; /* Timer struct down rx. */ + SK_TIMER DownTxTimer; /* Timer struct down tx. */ + + SK_U32 CheckingState; /* Checking State. */ + + SK_ADDR_PORT * AddrPort; + + SK_U8 Random[4]; /* Random value. */ + unsigned PortsChecked; /* #ports checked. */ + unsigned PortsSuspect; /* #ports checked that are s. 
*/ + SK_PORT_CHECK PortCheck[1]; +/* SK_PORT_CHECK PortCheck[SK_MAX_MACS - 1]; */ + + SK_BOOL PortStarted; /* Port is started. */ + SK_BOOL PortNoRx; /* NoRx for >= 1 time slot. */ + SK_BOOL RootIdSet; + SK_RLMT_ROOT_ID Root; /* Root Bridge Id. */ +} SK_RLMT_PORT; + + +struct s_RlmtNet { + +/* ----- Public part (read-only) ----- */ + + SK_U32 NetNumber; /* Number of net. */ + + SK_RLMT_PORT * Port[SK_MAX_MACS]; /* Ports that belong to this net. */ + SK_U32 NumPorts; /* Number of ports. */ + SK_U32 PrefPort; /* Preferred port. */ + + /* For PNMI */ + + SK_U32 ChgBcPrio; /* Change Priority of last broadcast received */ + SK_U32 RlmtMode; /* Check ... */ + SK_U32 ActivePort; /* Active port. */ + SK_U32 Preference; /* 0xFFFFFFFF: Automatic. */ + + SK_U8 RlmtState; /* Current RLMT state. */ + +/* ----- Private part ----- */ + SK_BOOL RootIdSet; + SK_U16 Align01; + + int LinksUp; /* #Links up. */ + int PortsUp; /* #Ports up. */ + SK_U32 TimeoutValue; /* RLMT timeout value. */ + + SK_U32 CheckingState; /* Checking State. */ + SK_RLMT_ROOT_ID Root; /* Root Bridge Id. */ + + SK_TIMER LocTimer; /* Timer struct. */ + SK_TIMER SegTimer; /* Timer struct. */ +}; + + +typedef struct s_Rlmt { + +/* ----- Public part (read-only) ----- */ + + SK_U32 NumNets; /* Number of nets. */ + SK_U32 NetsStarted; /* Number of nets started. */ + SK_RLMT_NET Net[SK_MAX_NETS]; /* Array of available nets. */ + SK_RLMT_PORT Port[SK_MAX_MACS]; /* Array of available ports. */ + +/* ----- Private part ----- */ + SK_BOOL CheckSwitch; + SK_BOOL RlmtOff; /* set to zero if the Mac addresses + are equal or the second one + is zero */ + SK_U16 Align01; + +} SK_RLMT; + + +extern SK_MAC_ADDR BridgeMcAddr; +extern SK_MAC_ADDR SkRlmtMcAddr; + +/* function prototypes ********************************************************/ + + +#ifndef SK_KR_PROTO + +/* Functions provided by SkRlmt */ + +/* ANSI/C++ compliant function prototypes */ + +extern void SkRlmtInit( + SK_AC *pAC, + SK_IOC IoC, + int Level); + +extern int SkRlmtEvent( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 Event, + SK_EVPARA Para); + +#else /* defined(SK_KR_PROTO) */ + +/* Non-ANSI/C++ compliant function prototypes */ + +#error KR-style function prototypes are not yet provided. + +#endif /* defined(SK_KR_PROTO)) */ + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INC_SKRLMT_H */ diff --git a/drivers/net/sk98lin/h/sktimer.h b/drivers/net/sk98lin/h/sktimer.h new file mode 100644 index 000000000000..04e6d7c1ec33 --- /dev/null +++ b/drivers/net/sk98lin/h/sktimer.h @@ -0,0 +1,63 @@ +/****************************************************************************** + * + * Name: sktimer.h + * Project: Gigabit Ethernet Adapters, Event Scheduler Module + * Version: $Revision: 1.11 $ + * Date: $Date: 2003/09/16 12:58:18 $ + * Purpose: Defines for the timer functions + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
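
The lookahead macros in skrlmt.h above decide, per received frame, whether the data goes to RLMT, to the protocol stack, or to both, and their in-source documentation recommends passing constant IsBc/IsMc flags where the caller already knows the frame type. The following receive-path fragment is only a sketch of that flow; the delivery of RLMT frames through SkEventQueue() with SK_RLMT_PACKET_RECEIVED and a pointer parameter is an assumption about how the driver hands the frame over, not something this header specifies.

/* Sketch: classify a received frame with the RLMT lookahead macros.
 * pData points to the start of the frame, FrameLen is its length,
 * IsBc/IsMc were classified by the caller. */
static int sk_rx_lookahead(SK_AC *pAC, SK_U32 Port, SK_U8 *pData,
                           unsigned FrameLen, SK_BOOL IsBc, SK_BOOL IsMc)
{
        unsigned Offset = 0;
        unsigned NumBytes = 0;
        unsigned ForRlmt = SK_RLMT_RX_PROTOCOL; /* default if no lookahead */
        SK_EVPARA Para;

        SK_RLMT_PRE_LOOKAHEAD(pAC, Port, FrameLen, IsBc, &Offset, &NumBytes);

        if (NumBytes != 0) {
                /* present NumBytes of data starting at Offset, as the
                 * macro documentation above describes */
                SK_RLMT_LOOKAHEAD(pAC, Port, pData + Offset, IsBc, IsMc,
                                  &ForRlmt);
        }

        if (ForRlmt & SK_RLMT_RX_RLMT) {
                /* hand the frame to RLMT via the event queue
                 * (parameter encoding here is illustrative only) */
                Para.pParaPtr = pData;
                SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PACKET_RECEIVED, Para);
        }

        /* non-zero: the protocol stack should also see this frame */
        return (ForRlmt & SK_RLMT_RX_PROTOCOL) != 0;
}
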
+ * + ******************************************************************************/ + +/* + * SKTIMER.H contains all defines and types for the timer functions + */ + +#ifndef _SKTIMER_H_ +#define _SKTIMER_H_ + +#include "h/skqueue.h" + +/* + * SK timer + * - needed wherever a timer is used. Put this in your data structure + * wherever you want. + */ +typedef struct s_Timer SK_TIMER; + +struct s_Timer { + SK_TIMER *TmNext; /* linked list */ + SK_U32 TmClass; /* Timer Event class */ + SK_U32 TmEvent; /* Timer Event value */ + SK_EVPARA TmPara; /* Timer Event parameter */ + SK_U32 TmDelta; /* delta time */ + int TmActive; /* flag: active/inactive */ +}; + +/* + * Timer control struct. + * - use in Adapters context name pAC->Tim + */ +typedef struct s_TimCtrl { + SK_TIMER *StQueue; /* Head of Timer queue */ +} SK_TIMCTRL; + +extern void SkTimerInit(SK_AC *pAC, SK_IOC Ioc, int Level); +extern void SkTimerStop(SK_AC *pAC, SK_IOC Ioc, SK_TIMER *pTimer); +extern void SkTimerStart(SK_AC *pAC, SK_IOC Ioc, SK_TIMER *pTimer, + SK_U32 Time, SK_U32 Class, SK_U32 Event, SK_EVPARA Para); +extern void SkTimerDone(SK_AC *pAC, SK_IOC Ioc); +#endif /* _SKTIMER_H_ */ diff --git a/drivers/net/sk98lin/h/sktypes.h b/drivers/net/sk98lin/h/sktypes.h new file mode 100644 index 000000000000..40edc96e1055 --- /dev/null +++ b/drivers/net/sk98lin/h/sktypes.h @@ -0,0 +1,69 @@ +/****************************************************************************** + * + * Name: sktypes.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.2 $ + * Date: $Date: 2003/10/07 08:16:51 $ + * Purpose: Define data types for Linux + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * In this file, all data types that are needed by the common modules + * are mapped to Linux data types. + * + * + * Include File Hierarchy: + * + * + ******************************************************************************/ + +#ifndef __INC_SKTYPES_H +#define __INC_SKTYPES_H + + +/* defines *******************************************************************/ + +/* + * Data types with a specific size. 'I' = signed, 'U' = unsigned. + */ +#define SK_I8 s8 +#define SK_U8 u8 +#define SK_I16 s16 +#define SK_U16 u16 +#define SK_I32 s32 +#define SK_U32 u32 +#define SK_I64 s64 +#define SK_U64 u64 + +#define SK_UPTR ulong /* casting pointer <-> integral */ + +/* +* Boolean type. 
+*/ +#define SK_BOOL SK_U8 +#define SK_FALSE 0 +#define SK_TRUE (!SK_FALSE) + +/* typedefs *******************************************************************/ + +/* function prototypes ********************************************************/ + +#endif /* __INC_SKTYPES_H */ diff --git a/drivers/net/sk98lin/h/skversion.h b/drivers/net/sk98lin/h/skversion.h new file mode 100644 index 000000000000..a1a7294828e5 --- /dev/null +++ b/drivers/net/sk98lin/h/skversion.h @@ -0,0 +1,38 @@ +/****************************************************************************** + * + * Name: version.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.5 $ + * Date: $Date: 2003/10/07 08:16:51 $ + * Purpose: SK specific Error log support + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifdef lint +static const char SysKonnectFileId[] = "@(#) (C) SysKonnect GmbH."; +static const char SysKonnectBuildNumber[] = + "@(#)SK-BUILD: 6.23 PL: 01"; +#endif /* !defined(lint) */ + +#define BOOT_STRING "sk98lin: Network Device Driver v6.23\n" \ + "(C)Copyright 1999-2004 Marvell(R)." + +#define VER_STRING "6.23" +#define DRIVER_FILE_NAME "sk98lin" +#define DRIVER_REL_DATE "Feb-13-2004" + + diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h new file mode 100644 index 000000000000..fdd9e48e8040 --- /dev/null +++ b/drivers/net/sk98lin/h/skvpd.h @@ -0,0 +1,248 @@ +/****************************************************************************** + * + * Name: skvpd.h + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.15 $ + * Date: $Date: 2003/01/13 10:39:38 $ + * Purpose: Defines and Macros for VPD handling + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2003 SysKonnect GmbH. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
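
The timer API declared in sktimer.h above ties back into the event queue: a timer is armed with a class/event/parameter triple and, on expiry, that triple is delivered like any other event. The fragment below is a small usage sketch; MY_SWT_EVENT is a made-up event value for the SKGE_SWT class, and the unit of the Time argument is defined by the OS-specific timer layer, not by this header.

/* Sketch: (re)arm a one-shot timer that posts an event on expiry. */
#define MY_SWT_EVENT    1       /* hypothetical software-timer event code */

static void sk_arm_watch_timer(SK_AC *pAC, SK_IOC IoC,
                               SK_TIMER *pTimer, SK_U32 Time)
{
        SK_EVPARA Para;

        Para.Para64 = 0;

        /* make sure the timer is not already pending, then start it;
         * on expiry SKGE_SWT / MY_SWT_EVENT is queued with Para */
        SkTimerStop(pAC, IoC, pTimer);
        SkTimerStart(pAC, IoC, pTimer, Time, SKGE_SWT, MY_SWT_EVENT, Para);
}
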
+ * + ******************************************************************************/ + +/* + * skvpd.h contains Diagnostic specific defines for VPD handling + */ + +#ifndef __INC_SKVPD_H_ +#define __INC_SKVPD_H_ + +/* + * Define Resource Type Identifiers and VPD keywords + */ +#define RES_ID 0x82 /* Resource Type ID String (Product Name) */ +#define RES_VPD_R 0x90 /* start of VPD read only area */ +#define RES_VPD_W 0x91 /* start of VPD read/write area */ +#define RES_END 0x78 /* Resource Type End Tag */ + +#ifndef VPD_NAME +#define VPD_NAME "Name" /* Product Name, VPD name of RES_ID */ +#endif /* VPD_NAME */ +#define VPD_PN "PN" /* Adapter Part Number */ +#define VPD_EC "EC" /* Adapter Engineering Level */ +#define VPD_MN "MN" /* Manufacture ID */ +#define VPD_SN "SN" /* Serial Number */ +#define VPD_CP "CP" /* Extended Capability */ +#define VPD_RV "RV" /* Checksum and Reserved */ +#define VPD_YA "YA" /* Asset Tag Identifier */ +#define VPD_VL "VL" /* First Error Log Message (SK specific) */ +#define VPD_VF "VF" /* Second Error Log Message (SK specific) */ +#define VPD_RW "RW" /* Remaining Read / Write Area */ + +/* 'type' values for vpd_setup_para() */ +#define VPD_RO_KEY 1 /* RO keys are "PN", "EC", "MN", "SN", "RV" */ +#define VPD_RW_KEY 2 /* RW keys are "Yx", "Vx", and "RW" */ + +/* 'op' values for vpd_setup_para() */ +#define ADD_KEY 1 /* add the key at the pos "RV" or "RW" */ +#define OWR_KEY 2 /* overwrite key if already exists */ + +/* + * Define READ and WRITE Constants. + */ + +#define VPD_DEV_ID_GENESIS 0x4300 + +#define VPD_SIZE_YUKON 256 +#define VPD_SIZE_GENESIS 512 +#define VPD_SIZE 512 +#define VPD_READ 0x0000 +#define VPD_WRITE 0x8000 + +#define VPD_STOP(pAC,IoC) VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG,VPD_WRITE) + +#define VPD_GET_RES_LEN(p) ((unsigned int) \ + (* (SK_U8 *)&(p)[1]) |\ + ((* (SK_U8 *)&(p)[2]) << 8)) +#define VPD_GET_VPD_LEN(p) ((unsigned int)(* (SK_U8 *)&(p)[2])) +#define VPD_GET_VAL(p) ((char *)&(p)[3]) + +#define VPD_MAX_LEN 50 + +/* VPD status */ + /* bit 7..1 reserved */ +#define VPD_VALID (1<<0) /* VPD data buffer, vpd_free_ro, */ + /* and vpd_free_rw valid */ + +/* + * VPD structs + */ +typedef struct s_vpd_status { + unsigned short Align01; /* Alignment */ + unsigned short vpd_status; /* VPD status, description see above */ + int vpd_free_ro; /* unused bytes in read only area */ + int vpd_free_rw; /* bytes available in read/write area */ +} SK_VPD_STATUS; + +typedef struct s_vpd { + SK_VPD_STATUS v; /* VPD status structure */ + char vpd_buf[VPD_SIZE]; /* VPD buffer */ + int rom_size; /* VPD ROM Size from PCI_OUR_REG_2 */ + int vpd_size; /* saved VPD-size */ +} SK_VPD; + +typedef struct s_vpd_para { + unsigned int p_len; /* parameter length */ + char *p_val; /* points to the value */ +} SK_VPD_PARA; + +/* + * structure of Large Resource Type Identifiers + */ + +/* was removed because of alignment problems */ + +/* + * structure of VPD keywords + */ +typedef struct s_vpd_key { + char p_key[2]; /* 2 bytes ID string */ + unsigned char p_len; /* 1 byte length */ + char p_val; /* start of the value string */ +} SK_VPD_KEY; + + +/* + * System specific VPD macros + */ +#ifndef SKDIAG +#ifndef VPD_DO_IO +#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val) +#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val) +#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal) +#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal) +#define VPD_IN32(pAC,IoC,Addr,pVal) 
(void)SkPciReadCfgDWord(pAC,Addr,pVal) +#else /* VPD_DO_IO */ +#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val) +#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val) +#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal) +#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal) +#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal) +#endif /* VPD_DO_IO */ +#else /* SKDIAG */ +#define VPD_OUT8(pAC,Ioc,Addr,Val) { \ + if ((pAC)->DgT.DgUseCfgCycle) \ + SkPciWriteCfgByte(pAC,Addr,Val); \ + else \ + SK_OUT8(pAC,PCI_C(Addr),Val); \ + } +#define VPD_OUT16(pAC,Ioc,Addr,Val) { \ + if ((pAC)->DgT.DgUseCfgCycle) \ + SkPciWriteCfgWord(pAC,Addr,Val); \ + else \ + SK_OUT16(pAC,PCI_C(Addr),Val); \ + } +#define VPD_IN8(pAC,Ioc,Addr,pVal) { \ + if ((pAC)->DgT.DgUseCfgCycle) \ + SkPciReadCfgByte(pAC,Addr,pVal); \ + else \ + SK_IN8(pAC,PCI_C(Addr),pVal); \ + } +#define VPD_IN16(pAC,Ioc,Addr,pVal) { \ + if ((pAC)->DgT.DgUseCfgCycle) \ + SkPciReadCfgWord(pAC,Addr,pVal); \ + else \ + SK_IN16(pAC,PCI_C(Addr),pVal); \ + } +#define VPD_IN32(pAC,Ioc,Addr,pVal) { \ + if ((pAC)->DgT.DgUseCfgCycle) \ + SkPciReadCfgDWord(pAC,Addr,pVal); \ + else \ + SK_IN32(pAC,PCI_C(Addr),pVal); \ + } +#endif /* nSKDIAG */ + +/* function prototypes ********************************************************/ + +#ifndef SK_KR_PROTO +#ifdef SKDIAG +extern SK_U32 VpdReadDWord( + SK_AC *pAC, + SK_IOC IoC, + int addr); +#endif /* SKDIAG */ + +extern SK_VPD_STATUS *VpdStat( + SK_AC *pAC, + SK_IOC IoC); + +extern int VpdKeys( + SK_AC *pAC, + SK_IOC IoC, + char *buf, + int *len, + int *elements); + +extern int VpdRead( + SK_AC *pAC, + SK_IOC IoC, + const char *key, + char *buf, + int *len); + +extern SK_BOOL VpdMayWrite( + char *key); + +extern int VpdWrite( + SK_AC *pAC, + SK_IOC IoC, + const char *key, + const char *buf); + +extern int VpdDelete( + SK_AC *pAC, + SK_IOC IoC, + char *key); + +extern int VpdUpdate( + SK_AC *pAC, + SK_IOC IoC); + +#ifdef SKDIAG +extern int VpdReadBlock( + SK_AC *pAC, + SK_IOC IoC, + char *buf, + int addr, + int len); + +extern int VpdWriteBlock( + SK_AC *pAC, + SK_IOC IoC, + char *buf, + int addr, + int len); +#endif /* SKDIAG */ +#else /* SK_KR_PROTO */ +extern SK_U32 VpdReadDWord(); +extern SK_VPD_STATUS *VpdStat(); +extern int VpdKeys(); +extern int VpdRead(); +extern SK_BOOL VpdMayWrite(); +extern int VpdWrite(); +extern int VpdDelete(); +extern int VpdUpdate(); +#endif /* SK_KR_PROTO */ + +#endif /* __INC_SKVPD_H_ */ diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h new file mode 100644 index 000000000000..7f8e6d0084c7 --- /dev/null +++ b/drivers/net/sk98lin/h/xmac_ii.h @@ -0,0 +1,1579 @@ +/****************************************************************************** + * + * Name: xmac_ii.h + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.52 $ + * Date: $Date: 2003/10/02 16:35:50 $ + * Purpose: Defines and Macros for Gigabit Ethernet Controller + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
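
The VPD interface in skvpd.h above is keyword based: callers pass one of the two-character keys ("PN", "SN", ...) to VpdRead()/VpdWrite(). The sketch below fetches the adapter serial number; the assumptions that VpdRead() returns 0 on success and that the returned value is length-delimited rather than NUL-terminated are not spelled out in the header itself.

/* Sketch: read the "SN" (Serial Number) keyword from the VPD. */
static int sk_print_serial(SK_AC *pAC, SK_IOC IoC)
{
        char buf[VPD_MAX_LEN];
        int len = sizeof(buf) - 1;
        int ret;

        ret = VpdRead(pAC, IoC, VPD_SN, buf, &len);
        if (ret == 0) {
                buf[len] = '\0';        /* treat value as length-delimited */
                printk(KERN_INFO "sk98lin: serial number %s\n", buf);
        }
        return ret;     /* 0 on success (assumed) */
}
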
+ * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#ifndef __INC_XMAC_H +#define __INC_XMAC_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* defines ********************************************************************/ + +/* + * XMAC II registers + * + * The XMAC registers are 16 or 32 bits wide. + * The XMACs host processor interface is set to 16 bit mode, + * therefore ALL registers will be addressed with 16 bit accesses. + * + * The following macros are provided to access the XMAC registers + * XM_IN16(), XM_OUT16, XM_IN32(), XM_OUT32(), XM_INADR(), XM_OUTADR(), + * XM_INHASH(), and XM_OUTHASH(). + * The macros are defined in SkGeHw.h. + * + * Note: NA reg = Network Address e.g DA, SA etc. + * + */ +#define XM_MMU_CMD 0x0000 /* 16 bit r/w MMU Command Register */ + /* 0x0004: reserved */ +#define XM_POFF 0x0008 /* 32 bit r/w Packet Offset Register */ +#define XM_BURST 0x000c /* 32 bit r/w Burst Register for half duplex*/ +#define XM_1L_VLAN_TAG 0x0010 /* 16 bit r/w One Level VLAN Tag ID */ +#define XM_2L_VLAN_TAG 0x0014 /* 16 bit r/w Two Level VLAN Tag ID */ + /* 0x0018 - 0x001e: reserved */ +#define XM_TX_CMD 0x0020 /* 16 bit r/w Transmit Command Register */ +#define XM_TX_RT_LIM 0x0024 /* 16 bit r/w Transmit Retry Limit Register */ +#define XM_TX_STIME 0x0028 /* 16 bit r/w Transmit Slottime Register */ +#define XM_TX_IPG 0x002c /* 16 bit r/w Transmit Inter Packet Gap */ +#define XM_RX_CMD 0x0030 /* 16 bit r/w Receive Command Register */ +#define XM_PHY_ADDR 0x0034 /* 16 bit r/w PHY Address Register */ +#define XM_PHY_DATA 0x0038 /* 16 bit r/w PHY Data Register */ + /* 0x003c: reserved */ +#define XM_GP_PORT 0x0040 /* 32 bit r/w General Purpose Port Register */ +#define XM_IMSK 0x0044 /* 16 bit r/w Interrupt Mask Register */ +#define XM_ISRC 0x0048 /* 16 bit r/o Interrupt Status Register */ +#define XM_HW_CFG 0x004c /* 16 bit r/w Hardware Config Register */ + /* 0x0050 - 0x005e: reserved */ +#define XM_TX_LO_WM 0x0060 /* 16 bit r/w Tx FIFO Low Water Mark */ +#define XM_TX_HI_WM 0x0062 /* 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_THR 0x0064 /* 16 bit r/w Tx Request Threshold */ +#define XM_HT_THR 0x0066 /* 16 bit r/w Host Request Threshold */ +#define XM_PAUSE_DA 0x0068 /* NA reg r/w Pause Destination Address */ + /* 0x006e: reserved */ +#define XM_CTL_PARA 0x0070 /* 32 bit r/w Control Parameter Register */ +#define XM_MAC_OPCODE 0x0074 /* 16 bit r/w Opcode for MAC control frames */ +#define XM_MAC_PTIME 0x0076 /* 16 bit r/w Pause time for MAC ctrl frames*/ +#define XM_TX_STAT 0x0078 /* 32 bit r/o Tx Status LIFO Register */ + + /* 0x0080 - 0x00fc: 16 NA reg r/w Exact Match Address Registers */ + /* use the XM_EXM() macro to address */ +#define XM_EXM_START 0x0080 /* r/w Start Address of the EXM Regs */ + + /* + * XM_EXM(Reg) + * + * returns the XMAC address offset of specified Exact Match Addr Reg + * + * para: Reg EXM register to addr (0 .. 
15) + * + * usage: XM_INADDR(IoC, MAC_1, XM_EXM(i), &val[i]); + */ +#define XM_EXM(Reg) (XM_EXM_START + ((Reg) << 3)) + +#define XM_SRC_CHK 0x0100 /* NA reg r/w Source Check Address Register */ +#define XM_SA 0x0108 /* NA reg r/w Station Address Register */ +#define XM_HSM 0x0110 /* 64 bit r/w Hash Match Address Registers */ +#define XM_RX_LO_WM 0x0118 /* 16 bit r/w Receive Low Water Mark */ +#define XM_RX_HI_WM 0x011a /* 16 bit r/w Receive High Water Mark */ +#define XM_RX_THR 0x011c /* 32 bit r/w Receive Request Threshold */ +#define XM_DEV_ID 0x0120 /* 32 bit r/o Device ID Register */ +#define XM_MODE 0x0124 /* 32 bit r/w Mode Register */ +#define XM_LSA 0x0128 /* NA reg r/o Last Source Register */ + /* 0x012e: reserved */ +#define XM_TS_READ 0x0130 /* 32 bit r/o Time Stamp Read Register */ +#define XM_TS_LOAD 0x0134 /* 32 bit r/o Time Stamp Load Value */ + /* 0x0138 - 0x01fe: reserved */ +#define XM_STAT_CMD 0x0200 /* 16 bit r/w Statistics Command Register */ +#define XM_RX_CNT_EV 0x0204 /* 32 bit r/o Rx Counter Event Register */ +#define XM_TX_CNT_EV 0x0208 /* 32 bit r/o Tx Counter Event Register */ +#define XM_RX_EV_MSK 0x020c /* 32 bit r/w Rx Counter Event Mask */ +#define XM_TX_EV_MSK 0x0210 /* 32 bit r/w Tx Counter Event Mask */ + /* 0x0204 - 0x027e: reserved */ +#define XM_TXF_OK 0x0280 /* 32 bit r/o Frames Transmitted OK Conuter */ +#define XM_TXO_OK_HI 0x0284 /* 32 bit r/o Octets Transmitted OK High Cnt*/ +#define XM_TXO_OK_LO 0x0288 /* 32 bit r/o Octets Transmitted OK Low Cnt */ +#define XM_TXF_BC_OK 0x028c /* 32 bit r/o Broadcast Frames Xmitted OK */ +#define XM_TXF_MC_OK 0x0290 /* 32 bit r/o Multicast Frames Xmitted OK */ +#define XM_TXF_UC_OK 0x0294 /* 32 bit r/o Unicast Frames Xmitted OK */ +#define XM_TXF_LONG 0x0298 /* 32 bit r/o Tx Long Frame Counter */ +#define XM_TXE_BURST 0x029c /* 32 bit r/o Tx Burst Event Counter */ +#define XM_TXF_MPAUSE 0x02a0 /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ +#define XM_TXF_MCTRL 0x02a4 /* 32 bit r/o Tx MAC Ctrl Frame Counter */ +#define XM_TXF_SNG_COL 0x02a8 /* 32 bit r/o Tx Single Collision Counter */ +#define XM_TXF_MUL_COL 0x02ac /* 32 bit r/o Tx Multiple Collision Counter */ +#define XM_TXF_ABO_COL 0x02b0 /* 32 bit r/o Tx aborted due to Exces. Col. 
*/ +#define XM_TXF_LAT_COL 0x02b4 /* 32 bit r/o Tx Late Collision Counter */ +#define XM_TXF_DEF 0x02b8 /* 32 bit r/o Tx Deferred Frame Counter */ +#define XM_TXF_EX_DEF 0x02bc /* 32 bit r/o Tx Excessive Deferall Counter */ +#define XM_TXE_FIFO_UR 0x02c0 /* 32 bit r/o Tx FIFO Underrun Event Cnt */ +#define XM_TXE_CS_ERR 0x02c4 /* 32 bit r/o Tx Carrier Sense Error Cnt */ +#define XM_TXP_UTIL 0x02c8 /* 32 bit r/o Tx Utilization in % */ + /* 0x02cc - 0x02ce: reserved */ +#define XM_TXF_64B 0x02d0 /* 32 bit r/o 64 Byte Tx Frame Counter */ +#define XM_TXF_127B 0x02d4 /* 32 bit r/o 65-127 Byte Tx Frame Counter */ +#define XM_TXF_255B 0x02d8 /* 32 bit r/o 128-255 Byte Tx Frame Counter */ +#define XM_TXF_511B 0x02dc /* 32 bit r/o 256-511 Byte Tx Frame Counter */ +#define XM_TXF_1023B 0x02e0 /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ +#define XM_TXF_MAX_SZ 0x02e4 /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ + /* 0x02e8 - 0x02fe: reserved */ +#define XM_RXF_OK 0x0300 /* 32 bit r/o Frames Received OK */ +#define XM_RXO_OK_HI 0x0304 /* 32 bit r/o Octets Received OK High Cnt */ +#define XM_RXO_OK_LO 0x0308 /* 32 bit r/o Octets Received OK Low Counter*/ +#define XM_RXF_BC_OK 0x030c /* 32 bit r/o Broadcast Frames Received OK */ +#define XM_RXF_MC_OK 0x0310 /* 32 bit r/o Multicast Frames Received OK */ +#define XM_RXF_UC_OK 0x0314 /* 32 bit r/o Unicast Frames Received OK */ +#define XM_RXF_MPAUSE 0x0318 /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ +#define XM_RXF_MCTRL 0x031c /* 32 bit r/o Rx MAC Ctrl Frame Counter */ +#define XM_RXF_INV_MP 0x0320 /* 32 bit r/o Rx invalid Pause Frame Cnt */ +#define XM_RXF_INV_MOC 0x0324 /* 32 bit r/o Rx Frames with inv. MAC Opcode*/ +#define XM_RXE_BURST 0x0328 /* 32 bit r/o Rx Burst Event Counter */ +#define XM_RXE_FMISS 0x032c /* 32 bit r/o Rx Missed Frames Event Cnt */ +#define XM_RXF_FRA_ERR 0x0330 /* 32 bit r/o Rx Framing Error Counter */ +#define XM_RXE_FIFO_OV 0x0334 /* 32 bit r/o Rx FIFO overflow Event Cnt */ +#define XM_RXF_JAB_PKT 0x0338 /* 32 bit r/o Rx Jabber Packet Frame Cnt */ +#define XM_RXE_CAR_ERR 0x033c /* 32 bit r/o Rx Carrier Event Error Cnt */ +#define XM_RXF_LEN_ERR 0x0340 /* 32 bit r/o Rx in Range Length Error */ +#define XM_RXE_SYM_ERR 0x0344 /* 32 bit r/o Rx Symbol Error Counter */ +#define XM_RXE_SHT_ERR 0x0348 /* 32 bit r/o Rx Short Event Error Cnt */ +#define XM_RXE_RUNT 0x034c /* 32 bit r/o Rx Runt Event Counter */ +#define XM_RXF_LNG_ERR 0x0350 /* 32 bit r/o Rx Frame too Long Error Cnt */ +#define XM_RXF_FCS_ERR 0x0354 /* 32 bit r/o Rx Frame Check Seq. Error Cnt */ + /* 0x0358 - 0x035a: reserved */ +#define XM_RXF_CEX_ERR 0x035c /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ +#define XM_RXP_UTIL 0x0360 /* 32 bit r/o Rx Utilization in % */ + /* 0x0364 - 0x0366: reserved */ +#define XM_RXF_64B 0x0368 /* 32 bit r/o 64 Byte Rx Frame Counter */ +#define XM_RXF_127B 0x036c /* 32 bit r/o 65-127 Byte Rx Frame Counter */ +#define XM_RXF_255B 0x0370 /* 32 bit r/o 128-255 Byte Rx Frame Counter */ +#define XM_RXF_511B 0x0374 /* 32 bit r/o 256-511 Byte Rx Frame Counter */ +#define XM_RXF_1023B 0x0378 /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ +#define XM_RXF_MAX_SZ 0x037c /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ + /* 0x02e8 - 0x02fe: reserved */ + + +/*----------------------------------------------------------------------------*/ +/* + * XMAC Bit Definitions + * + * If the bit access behaviour differs from the register access behaviour + * (r/w, r/o) this is documented after the bit number. 
+ * The following bit access behaviours are used: + * (sc) self clearing + * (ro) read only + */ + +/* XM_MMU_CMD 16 bit r/w MMU Command Register */ + /* Bit 15..13: reserved */ +#define XM_MMU_PHY_RDY (1<<12) /* Bit 12: PHY Read Ready */ +#define XM_MMU_PHY_BUSY (1<<11) /* Bit 11: PHY Busy */ +#define XM_MMU_IGN_PF (1<<10) /* Bit 10: Ignore Pause Frame */ +#define XM_MMU_MAC_LB (1<<9) /* Bit 9: Enable MAC Loopback */ + /* Bit 8: reserved */ +#define XM_MMU_FRC_COL (1<<7) /* Bit 7: Force Collision */ +#define XM_MMU_SIM_COL (1<<6) /* Bit 6: Simulate Collision */ +#define XM_MMU_NO_PRE (1<<5) /* Bit 5: No MDIO Preamble */ +#define XM_MMU_GMII_FD (1<<4) /* Bit 4: GMII uses Full Duplex */ +#define XM_MMU_RAT_CTRL (1<<3) /* Bit 3: Enable Rate Control */ +#define XM_MMU_GMII_LOOP (1<<2) /* Bit 2: PHY is in Loopback Mode */ +#define XM_MMU_ENA_RX (1<<1) /* Bit 1: Enable Receiver */ +#define XM_MMU_ENA_TX (1<<0) /* Bit 0: Enable Transmitter */ + + +/* XM_TX_CMD 16 bit r/w Transmit Command Register */ + /* Bit 15..7: reserved */ +#define XM_TX_BK2BK (1<<6) /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ +#define XM_TX_ENC_BYP (1<<5) /* Bit 5: Set Encoder in Bypass Mode */ +#define XM_TX_SAM_LINE (1<<4) /* Bit 4: (sc) Start utilization calculation */ +#define XM_TX_NO_GIG_MD (1<<3) /* Bit 3: Disable Carrier Extension */ +#define XM_TX_NO_PRE (1<<2) /* Bit 2: Disable Preamble Generation */ +#define XM_TX_NO_CRC (1<<1) /* Bit 1: Disable CRC Generation */ +#define XM_TX_AUTO_PAD (1<<0) /* Bit 0: Enable Automatic Padding */ + + +/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ + /* Bit 15..5: reserved */ +#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ + + +/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ + /* Bit 15..7: reserved */ +#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ + + +/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ + /* Bit 15..8: reserved */ +#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ + + +/* XM_RX_CMD 16 bit r/w Receive Command Register */ + /* Bit 15..9: reserved */ +#define XM_RX_LENERR_OK (1<<8) /* Bit 8 don't set Rx Err bit for */ + /* inrange error packets */ +#define XM_RX_BIG_PK_OK (1<<7) /* Bit 7 don't set Rx Err bit for */ + /* jumbo packets */ +#define XM_RX_IPG_CAP (1<<6) /* Bit 6 repl. type field with IPG */ +#define XM_RX_TP_MD (1<<5) /* Bit 5: Enable transparent Mode */ +#define XM_RX_STRIP_FCS (1<<4) /* Bit 4: Enable FCS Stripping */ +#define XM_RX_SELF_RX (1<<3) /* Bit 3: Enable Rx of own packets */ +#define XM_RX_SAM_LINE (1<<2) /* Bit 2: (sc) Start utilization calculation */ +#define XM_RX_STRIP_PAD (1<<1) /* Bit 1: Strip pad bytes of Rx frames */ +#define XM_RX_DIS_CEXT (1<<0) /* Bit 0: Disable carrier ext. check */ + + +/* XM_PHY_ADDR 16 bit r/w PHY Address Register */ + /* Bit 15..5: reserved */ +#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */ + + +/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ + /* Bit 31..7: reserved */ +#define XM_GP_ANIP (1L<<6) /* Bit 6: (ro) Auto-Neg. 
in progress */ +#define XM_GP_FRC_INT (1L<<5) /* Bit 5: (sc) Force Interrupt */ + /* Bit 4: reserved */ +#define XM_GP_RES_MAC (1L<<3) /* Bit 3: (sc) Reset MAC and FIFOs */ +#define XM_GP_RES_STAT (1L<<2) /* Bit 2: (sc) Reset the statistics module */ + /* Bit 1: reserved */ +#define XM_GP_INP_ASS (1L<<0) /* Bit 0: (ro) GP Input Pin asserted */ + + +/* XM_IMSK 16 bit r/w Interrupt Mask Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ + /* Bit 15: reserved */ +#define XM_IS_LNK_AE (1<<14) /* Bit 14: Link Asynchronous Event */ +#define XM_IS_TX_ABORT (1<<13) /* Bit 13: Transmit Abort, late Col. etc */ +#define XM_IS_FRC_INT (1<<12) /* Bit 12: Force INT bit set in GP */ +#define XM_IS_INP_ASS (1<<11) /* Bit 11: Input Asserted, GP bit 0 set */ +#define XM_IS_LIPA_RC (1<<10) /* Bit 10: Link Partner requests config */ +#define XM_IS_RX_PAGE (1<<9) /* Bit 9: Page Received */ +#define XM_IS_TX_PAGE (1<<8) /* Bit 8: Next Page Loaded for Transmit */ +#define XM_IS_AND (1<<7) /* Bit 7: Auto-Negotiation Done */ +#define XM_IS_TSC_OV (1<<6) /* Bit 6: Time Stamp Counter Overflow */ +#define XM_IS_RXC_OV (1<<5) /* Bit 5: Rx Counter Event Overflow */ +#define XM_IS_TXC_OV (1<<4) /* Bit 4: Tx Counter Event Overflow */ +#define XM_IS_RXF_OV (1<<3) /* Bit 3: Receive FIFO Overflow */ +#define XM_IS_TXF_UR (1<<2) /* Bit 2: Transmit FIFO Underrun */ +#define XM_IS_TX_COMP (1<<1) /* Bit 1: Frame Tx Complete */ +#define XM_IS_RX_COMP (1<<0) /* Bit 0: Frame Rx Complete */ + +#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE |\ + XM_IS_AND | XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_TXF_UR)) + + +/* XM_HW_CFG 16 bit r/w Hardware Config Register */ + /* Bit 15.. 4: reserved */ +#define XM_HW_GEN_EOP (1<<3) /* Bit 3: generate End of Packet pulse */ +#define XM_HW_COM4SIG (1<<2) /* Bit 2: use Comma Detect for Sig. Det.*/ + /* Bit 1: reserved */ +#define XM_HW_GMII_MD (1<<0) /* Bit 0: GMII Interface selected */ + + +/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ +/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ + /* Bit 15..10 reserved */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ + +/* XM_TX_THR 16 bit r/w Tx Request Threshold */ +/* XM_HT_THR 16 bit r/w Host Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ + /* Bit 15..11 reserved */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ + + +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ +#define XM_ST_VALID (1UL<<31) /* Bit 31: Status Valid */ +#define XM_ST_BYTE_CNT (0x3fffL<<17) /* Bit 30..17: Tx frame Length */ +#define XM_ST_RETRY_CNT (0x1fL<<12) /* Bit 16..12: Retry Count */ +#define XM_ST_EX_COL (1L<<11) /* Bit 11: Excessive Collisions */ +#define XM_ST_EX_DEF (1L<<10) /* Bit 10: Excessive Deferral */ +#define XM_ST_BURST (1L<<9) /* Bit 9: p. 
xmitted in burst md*/ +#define XM_ST_DEFER (1L<<8) /* Bit 8: packet was defered */ +#define XM_ST_BC (1L<<7) /* Bit 7: Broadcast packet */ +#define XM_ST_MC (1L<<6) /* Bit 6: Multicast packet */ +#define XM_ST_UC (1L<<5) /* Bit 5: Unicast packet */ +#define XM_ST_TX_UR (1L<<4) /* Bit 4: FIFO Underrun occured */ +#define XM_ST_CS_ERR (1L<<3) /* Bit 3: Carrier Sense Error */ +#define XM_ST_LAT_COL (1L<<2) /* Bit 2: Late Collision Error */ +#define XM_ST_MUL_COL (1L<<1) /* Bit 1: Multiple Collisions */ +#define XM_ST_SGN_COL (1L<<0) /* Bit 0: Single Collision */ + +/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ +/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ + /* Bit 15..11: reserved */ +#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ + + +/* XM_DEV_ID 32 bit r/o Device ID Register */ +#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ +#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ + + +/* XM_MODE 32 bit r/w Mode Register */ + /* Bit 31..27: reserved */ +#define XM_MD_ENA_REJ (1L<<26) /* Bit 26: Enable Frame Reject */ +#define XM_MD_SPOE_E (1L<<25) /* Bit 25: Send Pause on Edge */ + /* extern generated */ +#define XM_MD_TX_REP (1L<<24) /* Bit 24: Transmit Repeater Mode */ +#define XM_MD_SPOFF_I (1L<<23) /* Bit 23: Send Pause on FIFO full */ + /* intern generated */ +#define XM_MD_LE_STW (1L<<22) /* Bit 22: Rx Stat Word in Little Endian */ +#define XM_MD_TX_CONT (1L<<21) /* Bit 21: Send Continuous */ +#define XM_MD_TX_PAUSE (1L<<20) /* Bit 20: (sc) Send Pause Frame */ +#define XM_MD_ATS (1L<<19) /* Bit 19: Append Time Stamp */ +#define XM_MD_SPOL_I (1L<<18) /* Bit 18: Send Pause on Low */ + /* intern generated */ +#define XM_MD_SPOH_I (1L<<17) /* Bit 17: Send Pause on High */ + /* intern generated */ +#define XM_MD_CAP (1L<<16) /* Bit 16: Check Address Pair */ +#define XM_MD_ENA_HASH (1L<<15) /* Bit 15: Enable Hashing */ +#define XM_MD_CSA (1L<<14) /* Bit 14: Check Station Address */ +#define XM_MD_CAA (1L<<13) /* Bit 13: Check Address Array */ +#define XM_MD_RX_MCTRL (1L<<12) /* Bit 12: Rx MAC Control Frame */ +#define XM_MD_RX_RUNT (1L<<11) /* Bit 11: Rx Runt Frames */ +#define XM_MD_RX_IRLE (1L<<10) /* Bit 10: Rx in Range Len Err Frame */ +#define XM_MD_RX_LONG (1L<<9) /* Bit 9: Rx Long Frame */ +#define XM_MD_RX_CRCE (1L<<8) /* Bit 8: Rx CRC Error Frame */ +#define XM_MD_RX_ERR (1L<<7) /* Bit 7: Rx Error Frame */ +#define XM_MD_DIS_UC (1L<<6) /* Bit 6: Disable Rx Unicast */ +#define XM_MD_DIS_MC (1L<<5) /* Bit 5: Disable Rx Multicast */ +#define XM_MD_DIS_BC (1L<<4) /* Bit 4: Disable Rx Broadcast */ +#define XM_MD_ENA_PROM (1L<<3) /* Bit 3: Enable Promiscuous */ +#define XM_MD_ENA_BE (1L<<2) /* Bit 2: Enable Big Endian */ +#define XM_MD_FTF (1L<<1) /* Bit 1: (sc) Flush Tx FIFO */ +#define XM_MD_FRF (1L<<0) /* Bit 0: (sc) Flush Rx FIFO */ + +#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) + +/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ + /* Bit 16..6: reserved */ +#define XM_SC_SNP_RXC (1<<5) /* Bit 5: (sc) Snap Rx Counters */ +#define XM_SC_SNP_TXC (1<<4) /* Bit 4: (sc) Snap Tx Counters */ +#define XM_SC_CP_RXC (1<<3) /* Bit 3: Copy Rx Counters Continuously */ +#define XM_SC_CP_TXC (1<<2) /* Bit 2: Copy Tx Counters Continuously */ +#define XM_SC_CLR_RXC (1<<1) /* Bit 1: (sc) Clear Rx Counters */ +#define XM_SC_CLR_TXC (1<<0) /* Bit 0: (sc) Clear Tx Counters */ + + +/* 
XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ +/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ +#define XMR_MAX_SZ_OV (1UL<<31) /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ +#define XMR_1023B_OV (1L<<30) /* Bit 30: 512-1023Byte Rx Cnt Ov*/ +#define XMR_511B_OV (1L<<29) /* Bit 29: 256-511 Byte Rx Cnt Ov*/ +#define XMR_255B_OV (1L<<28) /* Bit 28: 128-255 Byte Rx Cnt Ov*/ +#define XMR_127B_OV (1L<<27) /* Bit 27: 65-127 Byte Rx Cnt Ov */ +#define XMR_64B_OV (1L<<26) /* Bit 26: 64 Byte Rx Cnt Ov */ +#define XMR_UTIL_OV (1L<<25) /* Bit 25: Rx Util Cnt Overflow */ +#define XMR_UTIL_UR (1L<<24) /* Bit 24: Rx Util Cnt Underrun */ +#define XMR_CEX_ERR_OV (1L<<23) /* Bit 23: CEXT Err Cnt Ov */ + /* Bit 22: reserved */ +#define XMR_FCS_ERR_OV (1L<<21) /* Bit 21: Rx FCS Error Cnt Ov */ +#define XMR_LNG_ERR_OV (1L<<20) /* Bit 20: Rx too Long Err Cnt Ov*/ +#define XMR_RUNT_OV (1L<<19) /* Bit 19: Runt Event Cnt Ov */ +#define XMR_SHT_ERR_OV (1L<<18) /* Bit 18: Rx Short Ev Err Cnt Ov*/ +#define XMR_SYM_ERR_OV (1L<<17) /* Bit 17: Rx Sym Err Cnt Ov */ + /* Bit 16: reserved */ +#define XMR_CAR_ERR_OV (1L<<15) /* Bit 15: Rx Carr Ev Err Cnt Ov */ +#define XMR_JAB_PKT_OV (1L<<14) /* Bit 14: Rx Jabb Packet Cnt Ov */ +#define XMR_FIFO_OV (1L<<13) /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ +#define XMR_FRA_ERR_OV (1L<<12) /* Bit 12: Rx Framing Err Cnt Ov */ +#define XMR_FMISS_OV (1L<<11) /* Bit 11: Rx Missed Ev Cnt Ov */ +#define XMR_BURST (1L<<10) /* Bit 10: Rx Burst Event Cnt Ov */ +#define XMR_INV_MOC (1L<<9) /* Bit 9: Rx with inv. MAC OC Ov*/ +#define XMR_INV_MP (1L<<8) /* Bit 8: Rx inv Pause Frame Ov */ +#define XMR_MCTRL_OV (1L<<7) /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ +#define XMR_MPAUSE_OV (1L<<6) /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ +#define XMR_UC_OK_OV (1L<<5) /* Bit 5: Rx Unicast Frame CntOv*/ +#define XMR_MC_OK_OV (1L<<4) /* Bit 4: Rx Multicast Cnt Ov */ +#define XMR_BC_OK_OV (1L<<3) /* Bit 3: Rx Broadcast Cnt Ov */ +#define XMR_OK_LO_OV (1L<<2) /* Bit 2: Octets Rx OK Low CntOv*/ +#define XMR_OK_HI_OV (1L<<1) /* Bit 1: Octets Rx OK Hi Cnt Ov*/ +#define XMR_OK_OV (1L<<0) /* Bit 0: Frames Received Ok Ov */ + +#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) + +/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ +/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ + /* Bit 31..26: reserved */ +#define XMT_MAX_SZ_OV (1L<<25) /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ +#define XMT_1023B_OV (1L<<24) /* Bit 24: 512-1023Byte Tx Cnt Ov*/ +#define XMT_511B_OV (1L<<23) /* Bit 23: 256-511 Byte Tx Cnt Ov*/ +#define XMT_255B_OV (1L<<22) /* Bit 22: 128-255 Byte Tx Cnt Ov*/ +#define XMT_127B_OV (1L<<21) /* Bit 21: 65-127 Byte Tx Cnt Ov */ +#define XMT_64B_OV (1L<<20) /* Bit 20: 64 Byte Tx Cnt Ov */ +#define XMT_UTIL_OV (1L<<19) /* Bit 19: Tx Util Cnt Overflow */ +#define XMT_UTIL_UR (1L<<18) /* Bit 18: Tx Util Cnt Underrun */ +#define XMT_CS_ERR_OV (1L<<17) /* Bit 17: Tx Carr Sen Err Cnt Ov*/ +#define XMT_FIFO_UR_OV (1L<<16) /* Bit 16: Tx FIFO Ur Ev Cnt Ov */ +#define XMT_EX_DEF_OV (1L<<15) /* Bit 15: Tx Ex Deferall Cnt Ov */ +#define XMT_DEF (1L<<14) /* Bit 14: Tx Deferred Cnt Ov */ +#define XMT_LAT_COL_OV (1L<<13) /* Bit 13: Tx Late Col Cnt Ov */ +#define XMT_ABO_COL_OV (1L<<12) /* Bit 12: Tx abo dueto Ex Col Ov*/ +#define XMT_MUL_COL_OV (1L<<11) /* Bit 11: Tx Mult Col Cnt Ov */ +#define XMT_SNG_COL (1L<<10) /* Bit 10: Tx Single Col Cnt Ov */ +#define XMT_MCTRL_OV (1L<<9) /* Bit 9: Tx MAC Ctrl Counter Ov*/ +#define XMT_MPAUSE (1L<<8) /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ +#define XMT_BURST (1L<<7) /* Bit 7: Tx Burst Event Cnt Ov */ 
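
The statistics command register and the counter registers defined above can be combined into a simple readout, sketched below. The XM_OUT16()/XM_IN32() signatures are assumed from the XM_INADDR(IoC, MAC_1, ...) usage shown in the register-map comment earlier; whether a "snap" via XM_STAT_CMD is required before reading, or the counters may be read directly, is a hardware detail this header does not state.

/* Sketch: latch and read two of the XMAC hardware counters. */
static void xm_read_basic_counters(SK_IOC IoC, SK_U32 *pRxOk, SK_U32 *pTxOk)
{
        /* self-clearing snapshot bits: freeze Rx and Tx counters */
        XM_OUT16(IoC, MAC_1, XM_STAT_CMD, XM_SC_SNP_RXC | XM_SC_SNP_TXC);

        /* frames received / transmitted OK */
        XM_IN32(IoC, MAC_1, XM_RXF_OK, pRxOk);
        XM_IN32(IoC, MAC_1, XM_TXF_OK, pTxOk);
}
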
+#define XMT_LONG (1L<<6) /* Bit 6: Tx Long Frame Cnt Ov */ +#define XMT_UC_OK_OV (1L<<5) /* Bit 5: Tx Unicast Cnt Ov */ +#define XMT_MC_OK_OV (1L<<4) /* Bit 4: Tx Multicast Cnt Ov */ +#define XMT_BC_OK_OV (1L<<3) /* Bit 3: Tx Broadcast Cnt Ov */ +#define XMT_OK_LO_OV (1L<<2) /* Bit 2: Octets Tx OK Low CntOv*/ +#define XMT_OK_HI_OV (1L<<1) /* Bit 1: Octets Tx OK Hi Cnt Ov*/ +#define XMT_OK_OV (1L<<0) /* Bit 0: Frames Tx Ok Ov */ + +#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) + +/* + * Receive Frame Status Encoding + */ +#define XMR_FS_LEN (0x3fffUL<<18) /* Bit 31..18: Rx Frame Length */ +#define XMR_FS_2L_VLAN (1L<<17) /* Bit 17: tagged wh 2Lev VLAN ID*/ +#define XMR_FS_1L_VLAN (1L<<16) /* Bit 16: tagged wh 1Lev VLAN ID*/ +#define XMR_FS_BC (1L<<15) /* Bit 15: Broadcast Frame */ +#define XMR_FS_MC (1L<<14) /* Bit 14: Multicast Frame */ +#define XMR_FS_UC (1L<<13) /* Bit 13: Unicast Frame */ + /* Bit 12: reserved */ +#define XMR_FS_BURST (1L<<11) /* Bit 11: Burst Mode */ +#define XMR_FS_CEX_ERR (1L<<10) /* Bit 10: Carrier Ext. Error */ +#define XMR_FS_802_3 (1L<<9) /* Bit 9: 802.3 Frame */ +#define XMR_FS_COL_ERR (1L<<8) /* Bit 8: Collision Error */ +#define XMR_FS_CAR_ERR (1L<<7) /* Bit 7: Carrier Event Error */ +#define XMR_FS_LEN_ERR (1L<<6) /* Bit 6: In-Range Length Error */ +#define XMR_FS_FRA_ERR (1L<<5) /* Bit 5: Framing Error */ +#define XMR_FS_RUNT (1L<<4) /* Bit 4: Runt Frame */ +#define XMR_FS_LNG_ERR (1L<<3) /* Bit 3: Giant (Jumbo) Frame */ +#define XMR_FS_FCS_ERR (1L<<2) /* Bit 2: Frame Check Sequ Err */ +#define XMR_FS_ERR (1L<<1) /* Bit 1: Frame Error */ +#define XMR_FS_MCTRL (1L<<0) /* Bit 0: MAC Control Packet */ + +/* + * XMR_FS_ERR will be set if + * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT, + * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR + * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue + * XMR_FS_ERR unless the corresponding bit in the Receive Command + * Register is set. + */ +#define XMR_FS_ANY_ERR XMR_FS_ERR + +/*----------------------------------------------------------------------------*/ +/* + * XMAC-PHY Registers, indirect addressed over the XMAC + */ +#define PHY_XMAC_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_XMAC_STAT 0x01 /* 16 bit r/w PHY Status Register */ +#define PHY_XMAC_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_XMAC_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_XMAC_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_XMAC_AUNE_LP 0x05 /* 16 bit r/o Link Partner Abi Reg */ +#define PHY_XMAC_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ +#define PHY_XMAC_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_XMAC_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */ + /* 0x09 - 0x0e: reserved */ +#define PHY_XMAC_EXT_STAT 0x0f /* 16 bit r/o Ext Status Register */ +#define PHY_XMAC_RES_ABI 0x10 /* 16 bit r/o PHY Resolved Ability */ + +/*----------------------------------------------------------------------------*/ +/* + * Broadcom-PHY Registers, indirect addressed over XMAC + */ +#define PHY_BCOM_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_BCOM_STAT 0x01 /* 16 bit r/o PHY Status Register */ +#define PHY_BCOM_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_BCOM_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_BCOM_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_BCOM_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ +#define PHY_BCOM_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. 
Expansion Reg */ +#define PHY_BCOM_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_BCOM_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */ + /* Broadcom-specific registers */ +#define PHY_BCOM_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */ +#define PHY_BCOM_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ + /* 0x0b - 0x0e: reserved */ +#define PHY_BCOM_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ +#define PHY_BCOM_P_EXT_CTRL 0x10 /* 16 bit r/w PHY Extended Ctrl Reg */ +#define PHY_BCOM_P_EXT_STAT 0x11 /* 16 bit r/o PHY Extended Stat Reg */ +#define PHY_BCOM_RE_CTR 0x12 /* 16 bit r/w Receive Error Counter */ +#define PHY_BCOM_FC_CTR 0x13 /* 16 bit r/w False Carrier Sense Cnt */ +#define PHY_BCOM_RNO_CTR 0x14 /* 16 bit r/w Receiver NOT_OK Cnt */ + /* 0x15 - 0x17: reserved */ +#define PHY_BCOM_AUX_CTRL 0x18 /* 16 bit r/w Auxiliary Control Reg */ +#define PHY_BCOM_AUX_STAT 0x19 /* 16 bit r/o Auxiliary Stat Summary */ +#define PHY_BCOM_INT_STAT 0x1a /* 16 bit r/o Interrupt Status Reg */ +#define PHY_BCOM_INT_MASK 0x1b /* 16 bit r/w Interrupt Mask Reg */ + /* 0x1c: reserved */ + /* 0x1d - 0x1f: test registers */ + +/*----------------------------------------------------------------------------*/ +/* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +#define PHY_MARV_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_MARV_STAT 0x01 /* 16 bit r/o PHY Status Register */ +#define PHY_MARV_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_MARV_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_MARV_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_MARV_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ +#define PHY_MARV_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ +#define PHY_MARV_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_MARV_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */ + /* Marvel-specific registers */ +#define PHY_MARV_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */ +#define PHY_MARV_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ + /* 0x0b - 0x0e: reserved */ +#define PHY_MARV_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ +#define PHY_MARV_PHY_CTRL 0x10 /* 16 bit r/w PHY Specific Ctrl Reg */ +#define PHY_MARV_PHY_STAT 0x11 /* 16 bit r/o PHY Specific Stat Reg */ +#define PHY_MARV_INT_MASK 0x12 /* 16 bit r/w Interrupt Mask Reg */ +#define PHY_MARV_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */ +#define PHY_MARV_EXT_CTRL 0x14 /* 16 bit r/w Ext. PHY Specific Ctrl */ +#define PHY_MARV_RXE_CNT 0x15 /* 16 bit r/w Receive Error Counter */ +#define PHY_MARV_EXT_ADR 0x16 /* 16 bit r/w Ext. Ad. for Cable Diag. */ + /* 0x17: reserved */ +#define PHY_MARV_LED_CTRL 0x18 /* 16 bit r/w LED Control Reg */ +#define PHY_MARV_LED_OVER 0x19 /* 16 bit r/w Manual LED Override Reg */ +#define PHY_MARV_EXT_CTRL_2 0x1a /* 16 bit r/w Ext. PHY Specific Ctrl 2 */ +#define PHY_MARV_EXT_P_STAT 0x1b /* 16 bit r/w Ext. PHY Spec. Stat Reg */ +#define PHY_MARV_CABLE_DIAG 0x1c /* 16 bit r/o Cable Diagnostic Reg */ + /* 0x1d - 0x1f: reserved */ + +/*----------------------------------------------------------------------------*/ +/* + * Level One-PHY Registers, indirect addressed over XMAC + */ +#define PHY_LONE_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_LONE_STAT 0x01 /* 16 bit r/o PHY Status Register */ +#define PHY_LONE_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_LONE_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_LONE_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. 
Advertisement */ +#define PHY_LONE_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ +#define PHY_LONE_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ +#define PHY_LONE_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_LONE_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */ + /* Level One-specific registers */ +#define PHY_LONE_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg*/ +#define PHY_LONE_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ + /* 0x0b -0x0e: reserved */ +#define PHY_LONE_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ +#define PHY_LONE_PORT_CFG 0x10 /* 16 bit r/w Port Configuration Reg*/ +#define PHY_LONE_Q_STAT 0x11 /* 16 bit r/o Quick Status Reg */ +#define PHY_LONE_INT_ENAB 0x12 /* 16 bit r/w Interrupt Enable Reg */ +#define PHY_LONE_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */ +#define PHY_LONE_LED_CFG 0x14 /* 16 bit r/w LED Configuration Reg */ +#define PHY_LONE_PORT_CTRL 0x15 /* 16 bit r/w Port Control Reg */ +#define PHY_LONE_CIM 0x16 /* 16 bit r/o CIM Reg */ + /* 0x17 -0x1c: reserved */ + +/*----------------------------------------------------------------------------*/ +/* + * National-PHY Registers, indirect addressed over XMAC + */ +#define PHY_NAT_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_NAT_STAT 0x01 /* 16 bit r/w PHY Status Register */ +#define PHY_NAT_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_NAT_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_NAT_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_NAT_AUNE_LP 0x05 /* 16 bit r/o Link Partner Ability Reg */ +#define PHY_NAT_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ +#define PHY_NAT_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_NAT_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner Reg */ + /* National-specific registers */ +#define PHY_NAT_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg */ +#define PHY_NAT_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ + /* 0x0b -0x0e: reserved */ +#define PHY_NAT_EXT_STAT 0x0f /* 16 bit r/o Extended Status Register */ +#define PHY_NAT_EXT_CTRL1 0x10 /* 16 bit r/o Extended Control Reg1 */ +#define PHY_NAT_Q_STAT1 0x11 /* 16 bit r/o Quick Status Reg1 */ +#define PHY_NAT_10B_OP 0x12 /* 16 bit r/o 10Base-T Operations Reg */ +#define PHY_NAT_EXT_CTRL2 0x13 /* 16 bit r/o Extended Control Reg1 */ +#define PHY_NAT_Q_STAT2 0x14 /* 16 bit r/o Quick Status Reg2 */ + /* 0x15 -0x18: reserved */ +#define PHY_NAT_PHY_ADDR 0x19 /* 16 bit r/o PHY Address Register */ + + +/*----------------------------------------------------------------------------*/ + +/* + * PHY bit definitions + * Bits defined as PHY_X_..., PHY_B_..., PHY_L_... or PHY_N_... are + * XMAC/Broadcom/LevelOne/National/Marvell-specific. + * All other are general. 
+ */ + +/***** PHY_XMAC_CTRL 16 bit r/w PHY Control Register *****/ +/***** PHY_BCOM_CTRL 16 bit r/w PHY Control Register *****/ +/***** PHY_MARV_CTRL 16 bit r/w PHY Status Register *****/ +/***** PHY_LONE_CTRL 16 bit r/w PHY Control Register *****/ +#define PHY_CT_RESET (1<<15) /* Bit 15: (sc) clear all PHY related regs */ +#define PHY_CT_LOOP (1<<14) /* Bit 14: enable Loopback over PHY */ +#define PHY_CT_SPS_LSB (1<<13) /* Bit 13: (BC,L1) Speed select, lower bit */ +#define PHY_CT_ANE (1<<12) /* Bit 12: Auto-Negotiation Enabled */ +#define PHY_CT_PDOWN (1<<11) /* Bit 11: (BC,L1) Power Down Mode */ +#define PHY_CT_ISOL (1<<10) /* Bit 10: (BC,L1) Isolate Mode */ +#define PHY_CT_RE_CFG (1<<9) /* Bit 9: (sc) Restart Auto-Negotiation */ +#define PHY_CT_DUP_MD (1<<8) /* Bit 8: Duplex Mode */ +#define PHY_CT_COL_TST (1<<7) /* Bit 7: (BC,L1) Collision Test enabled */ +#define PHY_CT_SPS_MSB (1<<6) /* Bit 6: (BC,L1) Speed select, upper bit */ + /* Bit 5..0: reserved */ + +#define PHY_CT_SP1000 PHY_CT_SPS_MSB /* enable speed of 1000 Mbps */ +#define PHY_CT_SP100 PHY_CT_SPS_LSB /* enable speed of 100 Mbps */ +#define PHY_CT_SP10 (0) /* enable speed of 10 Mbps */ + + +/***** PHY_XMAC_STAT 16 bit r/w PHY Status Register *****/ +/***** PHY_BCOM_STAT 16 bit r/w PHY Status Register *****/ +/***** PHY_MARV_STAT 16 bit r/w PHY Status Register *****/ +/***** PHY_LONE_STAT 16 bit r/w PHY Status Register *****/ + /* Bit 15..9: reserved */ + /* (BC/L1) 100/10 Mbps cap bits ignored*/ +#define PHY_ST_EXT_ST (1<<8) /* Bit 8: Extended Status Present */ + /* Bit 7: reserved */ +#define PHY_ST_PRE_SUP (1<<6) /* Bit 6: (BC/L1) preamble suppression */ +#define PHY_ST_AN_OVER (1<<5) /* Bit 5: Auto-Negotiation Over */ +#define PHY_ST_REM_FLT (1<<4) /* Bit 4: Remote Fault Condition Occured */ +#define PHY_ST_AN_CAP (1<<3) /* Bit 3: Auto-Negotiation Capability */ +#define PHY_ST_LSYNC (1<<2) /* Bit 2: Link Synchronized */ +#define PHY_ST_JAB_DET (1<<1) /* Bit 1: (BC/L1) Jabber Detected */ +#define PHY_ST_EXT_REG (1<<0) /* Bit 0: Extended Register available */ + + +/***** PHY_XMAC_ID1 16 bit r/o PHY ID1 Register */ +/***** PHY_BCOM_ID1 16 bit r/o PHY ID1 Register */ +/***** PHY_MARV_ID1 16 bit r/o PHY ID1 Register */ +/***** PHY_LONE_ID1 16 bit r/o PHY ID1 Register */ +#define PHY_I1_OUI_MSK (0x3f<<10) /* Bit 15..10: Organization Unique ID */ +#define PHY_I1_MOD_NUM (0x3f<<4) /* Bit 9.. 4: Model Number */ +#define PHY_I1_REV_MSK 0x0f /* Bit 3.. 0: Revision Number */ + +/* different Broadcom PHY Ids */ +#define PHY_BCOM_ID1_A1 0x6041 +#define PHY_BCOM_ID1_B2 0x6043 +#define PHY_BCOM_ID1_C0 0x6044 +#define PHY_BCOM_ID1_C5 0x6047 + + +/***** PHY_XMAC_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_XMAC_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +#define PHY_AN_NXT_PG (1<<15) /* Bit 15: Request Next Page */ +#define PHY_X_AN_ACK (1<<14) /* Bit 14: (ro) Acknowledge Received */ +#define PHY_X_AN_RFB (3<<12) /* Bit 13..12: Remote Fault Bits */ + /* Bit 11.. 9: reserved */ +#define PHY_X_AN_PAUSE (3<<7) /* Bit 8.. 7: Pause Bits */ +#define PHY_X_AN_HD (1<<6) /* Bit 6: Half Duplex */ +#define PHY_X_AN_FD (1<<5) /* Bit 5: Full Duplex */ + /* Bit 4.. 
0: reserved */ + +/***** PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */ + /* Bit 14: reserved */ +#define PHY_B_AN_RF (1<<13) /* Bit 13: Remote Fault */ + /* Bit 12: reserved */ +#define PHY_B_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */ +#define PHY_B_AN_PC (1<<10) /* Bit 10: Pause Capable */ + /* Bit 9..5: 100/10 BT cap bits ingnored */ +#define PHY_B_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/ + +/***** PHY_LONE_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_LONE_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */ + /* Bit 14: reserved */ +#define PHY_L_AN_RF (1<<13) /* Bit 13: Remote Fault */ + /* Bit 12: reserved */ +#define PHY_L_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */ +#define PHY_L_AN_PC (1<<10) /* Bit 10: Pause Capable */ + /* Bit 9..5: 100/10 BT cap bits ingnored */ +#define PHY_L_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/ + +/***** PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */ + /* Bit 14: reserved */ +#define PHY_N_AN_RF (1<<13) /* Bit 13: Remote Fault */ + /* Bit 12: reserved */ +#define PHY_N_AN_100F (1<<11) /* Bit 11: 100Base-T2 FD Support */ +#define PHY_N_AN_100H (1<<10) /* Bit 10: 100Base-T2 HD Support */ + /* Bit 9..5: 100/10 BT cap bits ingnored */ +#define PHY_N_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/ + +/* field type definition for PHY_x_AN_SEL */ +#define PHY_SEL_TYPE 0x01 /* 00001 = Ethernet */ + +/***** PHY_XMAC_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ + /* Bit 15..4: reserved */ +#define PHY_ANE_LP_NP (1<<3) /* Bit 3: Link Partner can Next Page */ +#define PHY_ANE_LOC_NP (1<<2) /* Bit 2: Local PHY can Next Page */ +#define PHY_ANE_RX_PG (1<<1) /* Bit 1: Page Received */ + /* Bit 0: reserved */ + +/***** PHY_BCOM_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ +/***** PHY_LONE_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ +/***** PHY_MARV_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ + /* Bit 15..5: reserved */ +#define PHY_ANE_PAR_DF (1<<4) /* Bit 4: Parallel Detection Fault */ +/* PHY_ANE_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */ +/* PHY_ANE_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */ +/* PHY_ANE_RX_PG (see XMAC) Bit 1: Page Received */ +#define PHY_ANE_LP_CAP (1<<0) /* Bit 0: Link Partner Auto-Neg. Cap. 
*/ + +/***** PHY_XMAC_NEPG 16 bit r/w Next Page Register *****/ +/***** PHY_BCOM_NEPG 16 bit r/w Next Page Register *****/ +/***** PHY_LONE_NEPG 16 bit r/w Next Page Register *****/ +/***** PHY_XMAC_NEPG_LP 16 bit r/o Next Page Link Partner *****/ +/***** PHY_BCOM_NEPG_LP 16 bit r/o Next Page Link Partner *****/ +/***** PHY_LONE_NEPG_LP 16 bit r/o Next Page Link Partner *****/ +#define PHY_NP_MORE (1<<15) /* Bit 15: More, Next Pages to follow */ +#define PHY_NP_ACK1 (1<<14) /* Bit 14: (ro) Ack1, for receiving a message */ +#define PHY_NP_MSG_VAL (1<<13) /* Bit 13: Message Page valid */ +#define PHY_NP_ACK2 (1<<12) /* Bit 12: Ack2, comply with msg content */ +#define PHY_NP_TOG (1<<11) /* Bit 11: Toggle Bit, ensure sync */ +#define PHY_NP_MSG 0x07ff /* Bit 10..0: Message from/to Link Partner */ + +/* + * XMAC-Specific + */ +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +#define PHY_X_EX_FD (1<<15) /* Bit 15: Device Supports Full Duplex */ +#define PHY_X_EX_HD (1<<14) /* Bit 14: Device Supports Half Duplex */ + /* Bit 13..0: reserved */ + +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ + /* Bit 15..9: reserved */ +#define PHY_X_RS_PAUSE (3<<7) /* Bit 8..7: selected Pause Mode */ +#define PHY_X_RS_HD (1<<6) /* Bit 6: Half Duplex Mode selected */ +#define PHY_X_RS_FD (1<<5) /* Bit 5: Full Duplex Mode selected */ +#define PHY_X_RS_ABLMIS (1<<4) /* Bit 4: duplex or pause cap mismatch */ +#define PHY_X_RS_PAUMIS (1<<3) /* Bit 3: pause capability mismatch */ + /* Bit 2..0: reserved */ +/* + * Remote Fault Bits (PHY_X_AN_RFB) encoding + */ +#define X_RFB_OK (0<<12) /* Bit 13..12 No errors, Link OK */ +#define X_RFB_LF (1<<12) /* Bit 13..12 Link Failure */ +#define X_RFB_OFF (2<<12) /* Bit 13..12 Offline */ +#define X_RFB_AN_ERR (3<<12) /* Bit 13..12 Auto-Negotiation Error */ + +/* + * Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding + */ +#define PHY_X_P_NO_PAUSE (0<<7) /* Bit 8..7: no Pause Mode */ +#define PHY_X_P_SYM_MD (1<<7) /* Bit 8..7: symmetric Pause Mode */ +#define PHY_X_P_ASYM_MD (2<<7) /* Bit 8..7: asymmetric Pause Mode */ +#define PHY_X_P_BOTH_MD (3<<7) /* Bit 8..7: both Pause Mode */ + + +/* + * Broadcom-Specific + */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_B_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_B_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ +#define PHY_B_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ +#define PHY_B_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ +#define PHY_B_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_B_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ + /* Bit 7..0: reserved */ + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +#define PHY_B_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ +#define PHY_B_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ +#define PHY_B_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ +#define PHY_B_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */ +#define PHY_B_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ +#define PHY_B_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ +#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ + +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ +#define PHY_B_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */ +#define PHY_B_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */ +#define 
PHY_B_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */ +#define PHY_B_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */ + /* Bit 11..0: reserved */ + +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +#define PHY_B_PEC_MAC_PHY (1<<15) /* Bit 15: 10BIT/GMI-Interface */ +#define PHY_B_PEC_DIS_CROSS (1<<14) /* Bit 14: Disable MDI Crossover */ +#define PHY_B_PEC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */ +#define PHY_B_PEC_INT_DIS (1<<12) /* Bit 12: Interrupts Disabled */ +#define PHY_B_PEC_F_INT (1<<11) /* Bit 11: Force Interrupt */ +#define PHY_B_PEC_BY_45 (1<<10) /* Bit 10: Bypass 4B5B-Decoder */ +#define PHY_B_PEC_BY_SCR (1<<9) /* Bit 9: Bypass Scrambler */ +#define PHY_B_PEC_BY_MLT3 (1<<8) /* Bit 8: Bypass MLT3 Encoder */ +#define PHY_B_PEC_BY_RXA (1<<7) /* Bit 7: Bypass Rx Alignm. */ +#define PHY_B_PEC_RES_SCR (1<<6) /* Bit 6: Reset Scrambler */ +#define PHY_B_PEC_EN_LTR (1<<5) /* Bit 5: Ena LED Traffic Mode */ +#define PHY_B_PEC_LED_ON (1<<4) /* Bit 4: Force LED's on */ +#define PHY_B_PEC_LED_OFF (1<<3) /* Bit 3: Force LED's off */ +#define PHY_B_PEC_EX_IPG (1<<2) /* Bit 2: Extend Tx IPG Mode */ +#define PHY_B_PEC_3_LED (1<<1) /* Bit 1: Three Link LED mode */ +#define PHY_B_PEC_HIGH_LA (1<<0) /* Bit 0: GMII FIFO Elasticy */ + +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ + /* Bit 15..14: reserved */ +#define PHY_B_PES_CROSS_STAT (1<<13) /* Bit 13: MDI Crossover Status */ +#define PHY_B_PES_INT_STAT (1<<12) /* Bit 12: Interrupt Status */ +#define PHY_B_PES_RRS (1<<11) /* Bit 11: Remote Receiver Stat. */ +#define PHY_B_PES_LRS (1<<10) /* Bit 10: Local Receiver Stat. */ +#define PHY_B_PES_LOCKED (1<<9) /* Bit 9: Locked */ +#define PHY_B_PES_LS (1<<8) /* Bit 8: Link Status */ +#define PHY_B_PES_RF (1<<7) /* Bit 7: Remote Fault */ +#define PHY_B_PES_CE_ER (1<<6) /* Bit 6: Carrier Ext Error */ +#define PHY_B_PES_BAD_SSD (1<<5) /* Bit 5: Bad SSD */ +#define PHY_B_PES_BAD_ESD (1<<4) /* Bit 4: Bad ESD */ +#define PHY_B_PES_RX_ER (1<<3) /* Bit 3: Receive Error */ +#define PHY_B_PES_TX_ER (1<<2) /* Bit 2: Transmit Error */ +#define PHY_B_PES_LOCK_ER (1<<1) /* Bit 1: Lock Error */ +#define PHY_B_PES_MLT3_ER (1<<0) /* Bit 0: MLT3 code Error */ + +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ + /* Bit 15..8: reserved */ +#define PHY_B_FC_CTR 0xff /* Bit 7..0: False Carrier Counter */ + +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ +#define PHY_B_RC_LOC_MSK 0xff00 /* Bit 15..8: Local Rx NOT_OK cnt */ +#define PHY_B_RC_REM_MSK 0x00ff /* Bit 7..0: Remote Rx NOT_OK cnt */ + +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ +#define PHY_B_AC_L_SQE (1<<15) /* Bit 15: Low Squelch */ +#define PHY_B_AC_LONG_PACK (1<<14) /* Bit 14: Rx Long Packets */ +#define PHY_B_AC_ER_CTRL (3<<12) /* Bit 13..12: Edgerate Control */ + /* Bit 11: reserved */ +#define PHY_B_AC_TX_TST (1<<10) /* Bit 10: Tx test bit, always 1 */ + /* Bit 9.. 8: reserved */ +#define PHY_B_AC_DIS_PRF (1<<7) /* Bit 7: dis part resp filter */ + /* Bit 6: reserved */ +#define PHY_B_AC_DIS_PM (1<<5) /* Bit 5: dis power management */ + /* Bit 4: reserved */ +#define PHY_B_AC_DIAG (1<<3) /* Bit 3: Diagnostic Mode */ + /* Bit 2.. 
0: reserved */ + +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ +#define PHY_B_AS_AN_C (1<<15) /* Bit 15: AutoNeg complete */ +#define PHY_B_AS_AN_CA (1<<14) /* Bit 14: AN Complete Ack */ +#define PHY_B_AS_ANACK_D (1<<13) /* Bit 13: AN Ack Detect */ +#define PHY_B_AS_ANAB_D (1<<12) /* Bit 12: AN Ability Detect */ +#define PHY_B_AS_NPW (1<<11) /* Bit 11: AN Next Page Wait */ +#define PHY_B_AS_AN_RES_MSK (7<<8) /* Bit 10..8: AN HDC */ +#define PHY_B_AS_PDF (1<<7) /* Bit 7: Parallel Detect. Fault */ +#define PHY_B_AS_RF (1<<6) /* Bit 6: Remote Fault */ +#define PHY_B_AS_ANP_R (1<<5) /* Bit 5: AN Page Received */ +#define PHY_B_AS_LP_ANAB (1<<4) /* Bit 4: LP AN Ability */ +#define PHY_B_AS_LP_NPAB (1<<3) /* Bit 3: LP Next Page Ability */ +#define PHY_B_AS_LS (1<<2) /* Bit 2: Link Status */ +#define PHY_B_AS_PRR (1<<1) /* Bit 1: Pause Resolution-Rx */ +#define PHY_B_AS_PRT (1<<0) /* Bit 0: Pause Resolution-Tx */ + +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ + /* Bit 15: reserved */ +#define PHY_B_IS_PSE (1<<14) /* Bit 14: Pair Swap Error */ +#define PHY_B_IS_MDXI_SC (1<<13) /* Bit 13: MDIX Status Change */ +#define PHY_B_IS_HCT (1<<12) /* Bit 12: counter above 32k */ +#define PHY_B_IS_LCT (1<<11) /* Bit 11: counter above 128 */ +#define PHY_B_IS_AN_PR (1<<10) /* Bit 10: Page Received */ +#define PHY_B_IS_NO_HDCL (1<<9) /* Bit 9: No HCD Link */ +#define PHY_B_IS_NO_HDC (1<<8) /* Bit 8: No HCD */ +#define PHY_B_IS_NEG_USHDC (1<<7) /* Bit 7: Negotiated Unsup. HCD */ +#define PHY_B_IS_SCR_S_ER (1<<6) /* Bit 6: Scrambler Sync Error */ +#define PHY_B_IS_RRS_CHANGE (1<<5) /* Bit 5: Remote Rx Stat Change */ +#define PHY_B_IS_LRS_CHANGE (1<<4) /* Bit 4: Local Rx Stat Change */ +#define PHY_B_IS_DUP_CHANGE (1<<3) /* Bit 3: Duplex Mode Change */ +#define PHY_B_IS_LSP_CHANGE (1<<2) /* Bit 2: Link Speed Change */ +#define PHY_B_IS_LST_CHANGE (1<<1) /* Bit 1: Link Status Changed */ +#define PHY_B_IS_CRC_ER (1<<0) /* Bit 0: CRC Error */ + +#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) + +/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ +#define PHY_B_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */ +#define PHY_B_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */ +#define PHY_B_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */ +#define PHY_B_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */ + +/* + * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) + */ +#define PHY_B_RES_1000FD (7<<8) /* Bit 10..8: 1000Base-T Full Dup. */ +#define PHY_B_RES_1000HD (6<<8) /* Bit 10..8: 1000Base-T Half Dup. 
*/ +/* others: 100/10: invalid for us */ + +/* + * Level One-Specific + */ +/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_L_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_L_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ +#define PHY_L_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ +#define PHY_L_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ +#define PHY_L_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_L_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ + /* Bit 7..0: reserved */ + +/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +#define PHY_L_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ +#define PHY_L_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ +#define PHY_L_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ +#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */ +#define PHY_L_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ +#define PHY_L_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ +#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ + +/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/ +#define PHY_L_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */ +#define PHY_L_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */ +#define PHY_L_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */ +#define PHY_L_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */ + /* Bit 11..0: reserved */ + +/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/ +#define PHY_L_PC_REP_MODE (1<<15) /* Bit 15: Repeater Mode */ + /* Bit 14: reserved */ +#define PHY_L_PC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */ +#define PHY_L_PC_BY_SCR (1<<12) /* Bit 12: Bypass Scrambler */ +#define PHY_L_PC_BY_45 (1<<11) /* Bit 11: Bypass 4B5B-Decoder */ +#define PHY_L_PC_JAB_DIS (1<<10) /* Bit 10: Jabber Disabled */ +#define PHY_L_PC_SQE (1<<9) /* Bit 9: Enable Heartbeat */ +#define PHY_L_PC_TP_LOOP (1<<8) /* Bit 8: TP Loopback */ +#define PHY_L_PC_SSS (1<<7) /* Bit 7: Smart Speed Selection */ +#define PHY_L_PC_FIFO_SIZE (1<<6) /* Bit 6: FIFO Size */ +#define PHY_L_PC_PRE_EN (1<<5) /* Bit 5: Preamble Enable */ +#define PHY_L_PC_CIM (1<<4) /* Bit 4: Carrier Integrity Mon */ +#define PHY_L_PC_10_SER (1<<3) /* Bit 3: Use Serial Output */ +#define PHY_L_PC_ANISOL (1<<2) /* Bit 2: Unisolate Port */ +#define PHY_L_PC_TEN_BIT (1<<1) /* Bit 1: 10bit iface mode on */ +#define PHY_L_PC_ALTCLOCK (1<<0) /* Bit 0: (ro) ALTCLOCK Mode on */ + +/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/ +#define PHY_L_QS_D_RATE (3<<14) /* Bit 15..14: Data Rate */ +#define PHY_L_QS_TX_STAT (1<<13) /* Bit 13: Transmitting */ +#define PHY_L_QS_RX_STAT (1<<12) /* Bit 12: Receiving */ +#define PHY_L_QS_COL_STAT (1<<11) /* Bit 11: Collision */ +#define PHY_L_QS_L_STAT (1<<10) /* Bit 10: Link is up */ +#define PHY_L_QS_DUP_MOD (1<<9) /* Bit 9: Full/Half Duplex */ +#define PHY_L_QS_AN (1<<8) /* Bit 8: AutoNeg is On */ +#define PHY_L_QS_AN_C (1<<7) /* Bit 7: AN is Complete */ +#define PHY_L_QS_LLE (7<<4) /* Bit 6: Line Length Estim. */ +#define PHY_L_QS_PAUSE (1<<3) /* Bit 3: LP advertised Pause */ +#define PHY_L_QS_AS_PAUSE (1<<2) /* Bit 2: LP adv. asym. 
Pause */ +#define PHY_L_QS_ISOLATE (1<<1) /* Bit 1: CIM Isolated */ +#define PHY_L_QS_EVENT (1<<0) /* Bit 0: Event has occurred */ + +/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/ +/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/ + /* Bit 15..14: reserved */ +#define PHY_L_IS_AN_F (1<<13) /* Bit 13: Auto-Negotiation fault */ + /* Bit 12: not described */ +#define PHY_L_IS_CROSS (1<<11) /* Bit 11: Crossover used */ +#define PHY_L_IS_POL (1<<10) /* Bit 10: Polarity correct. used */ +#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade */ +#define PHY_L_IS_CFULL (1<<8) /* Bit 8: Counter Full */ +#define PHY_L_IS_AN_C (1<<7) /* Bit 7: AutoNeg Complete */ +#define PHY_L_IS_SPEED (1<<6) /* Bit 6: Speed Changed */ +#define PHY_L_IS_DUP (1<<5) /* Bit 5: Duplex Changed */ +#define PHY_L_IS_LS (1<<4) /* Bit 4: Link Status Changed */ +#define PHY_L_IS_ISOL (1<<3) /* Bit 3: Isolate Occured */ +#define PHY_L_IS_MDINT (1<<2) /* Bit 2: (ro) STAT: MII Int Pending */ +#define PHY_L_IS_INTEN (1<<1) /* Bit 1: ENAB: Enable IRQs */ +#define PHY_L_IS_FORCE (1<<0) /* Bit 0: ENAB: Force Interrupt */ + +/* int. mask */ +#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN) + +/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/ +#define PHY_L_LC_LEDC (3<<14) /* Bit 15..14: Col/Blink/On/Off */ +#define PHY_L_LC_LEDR (3<<12) /* Bit 13..12: Rx/Blink/On/Off */ +#define PHY_L_LC_LEDT (3<<10) /* Bit 11..10: Tx/Blink/On/Off */ +#define PHY_L_LC_LEDG (3<<8) /* Bit 9..8: Giga/Blink/On/Off */ +#define PHY_L_LC_LEDS (3<<6) /* Bit 7..6: 10-100/Blink/On/Off */ +#define PHY_L_LC_LEDL (3<<4) /* Bit 5..4: Link/Blink/On/Off */ +#define PHY_L_LC_LEDF (3<<2) /* Bit 3..2: Duplex/Blink/On/Off */ +#define PHY_L_LC_PSTRECH (1<<1) /* Bit 1: Strech LED Pulses */ +#define PHY_L_LC_FREQ (1<<0) /* Bit 0: 30/100 ms */ + +/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/ +#define PHY_L_PC_TX_TCLK (1<<15) /* Bit 15: Enable TX_TCLK */ + /* Bit 14: reserved */ +#define PHY_L_PC_ALT_NP (1<<13) /* Bit 14: Alternate Next Page */ +#define PHY_L_PC_GMII_ALT (1<<12) /* Bit 13: Alternate GMII driver */ + /* Bit 11: reserved */ +#define PHY_L_PC_TEN_CRS (1<<10) /* Bit 10: Extend CRS*/ + /* Bit 9..0: not described */ + +/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/ +#define PHY_L_CIM_ISOL (255<<8)/* Bit 15..8: Isolate Count */ +#define PHY_L_CIM_FALSE_CAR (255<<0)/* Bit 7..0: False Carrier Count */ + + +/* + * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding + */ +#define PHY_L_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */ +#define PHY_L_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */ +#define PHY_L_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */ +#define PHY_L_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */ + + +/* + * National-Specific + */ +/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_N_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_N_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ +#define PHY_N_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ +#define PHY_N_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ +#define PHY_N_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_N_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ +#define PHY_N_1000C_APC (1<<7) /* Bit 7: Asymmetric Pause Cap. 
*/ + /* Bit 6..0: reserved */ + +/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +#define PHY_N_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ +#define PHY_N_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ +#define PHY_N_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ +#define PHY_N_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/ +#define PHY_N_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ +#define PHY_N_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ +#define PHY_N_1000C_LP_APC (1<<9) /* Bit 9: LP Asym. Pause Cap. */ + /* Bit 8: reserved */ +#define PHY_N_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ + +/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/ +#define PHY_N_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */ +#define PHY_N_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */ +#define PHY_N_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */ +#define PHY_N_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */ + /* Bit 11..0: reserved */ + +/* todo: those are still missing */ +/***** PHY_NAT_EXT_CTRL1 16 bit r/o Extended Control Reg1 *****/ +/***** PHY_NAT_Q_STAT1 16 bit r/o Quick Status Reg1 *****/ +/***** PHY_NAT_10B_OP 16 bit r/o 10Base-T Operations Reg *****/ +/***** PHY_NAT_EXT_CTRL2 16 bit r/o Extended Control Reg1 *****/ +/***** PHY_NAT_Q_STAT2 16 bit r/o Quick Status Reg2 *****/ +/***** PHY_NAT_PHY_ADDR 16 bit r/o PHY Address Register *****/ + +/* + * Marvell-Specific + */ +/***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_MARV_AUNE_LP 16 bit r/w Link Part Ability Reg *****/ +#define PHY_M_AN_NXT_PG BIT_15 /* Request Next Page */ +#define PHY_M_AN_ACK BIT_14 /* (ro) Acknowledge Received */ +#define PHY_M_AN_RF BIT_13 /* Remote Fault */ + /* Bit 12: reserved */ +#define PHY_M_AN_ASP BIT_11 /* Asymmetric Pause */ +#define PHY_M_AN_PC BIT_10 /* MAC Pause implemented */ +#define PHY_M_AN_100_FD BIT_8 /* Advertise 100Base-TX Full Duplex */ +#define PHY_M_AN_100_HD BIT_7 /* Advertise 100Base-TX Half Duplex */ +#define PHY_M_AN_10_FD BIT_6 /* Advertise 10Base-TX Full Duplex */ +#define PHY_M_AN_10_HD BIT_5 /* Advertise 10Base-TX Half Duplex */ + +/* special defines for FIBER (88E1011S only) */ +#define PHY_M_AN_ASP_X BIT_8 /* Asymmetric Pause */ +#define PHY_M_AN_PC_X BIT_7 /* MAC Pause implemented */ +#define PHY_M_AN_1000X_AHD BIT_6 /* Advertise 10000Base-X Half Duplex */ +#define PHY_M_AN_1000X_AFD BIT_5 /* Advertise 10000Base-X Full Duplex */ + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +#define PHY_M_P_NO_PAUSE_X (0<<7) /* Bit 8.. 7: no Pause Mode */ +#define PHY_M_P_SYM_MD_X (1<<7) /* Bit 8.. 7: symmetric Pause Mode */ +#define PHY_M_P_ASYM_MD_X (2<<7) /* Bit 8.. 7: asymmetric Pause Mode */ +#define PHY_M_P_BOTH_MD_X (3<<7) /* Bit 8.. 
7: both Pause Mode */ + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_M_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_M_1000C_MSE (1<<12) /* Bit 12: Manual Master/Slave Enable */ +#define PHY_M_1000C_MSC (1<<11) /* Bit 11: M/S Configuration (1=Master) */ +#define PHY_M_1000C_MPD (1<<10) /* Bit 10: Multi-Port Device */ +#define PHY_M_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_M_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ + /* Bit 7..0: reserved */ + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +#define PHY_M_PC_TX_FFD_MSK (3<<14) /* Bit 15..14: Tx FIFO Depth Mask */ +#define PHY_M_PC_RX_FFD_MSK (3<<12) /* Bit 13..12: Rx FIFO Depth Mask */ +#define PHY_M_PC_ASS_CRS_TX (1<<11) /* Bit 11: Assert CRS on Transmit */ +#define PHY_M_PC_FL_GOOD (1<<10) /* Bit 10: Force Link Good */ +#define PHY_M_PC_EN_DET_MSK (3<<8) /* Bit 9.. 8: Energy Detect Mask */ +#define PHY_M_PC_ENA_EXT_D (1<<7) /* Bit 7: Enable Ext. Distance (10BT) */ +#define PHY_M_PC_MDIX_MSK (3<<5) /* Bit 6.. 5: MDI/MDIX Config. Mask */ +#define PHY_M_PC_DIS_125CLK (1<<4) /* Bit 4: Disable 125 CLK */ +#define PHY_M_PC_MAC_POW_UP (1<<3) /* Bit 3: MAC Power up */ +#define PHY_M_PC_SQE_T_ENA (1<<2) /* Bit 2: SQE Test Enabled */ +#define PHY_M_PC_POL_R_DIS (1<<1) /* Bit 1: Polarity Reversal Disabled */ +#define PHY_M_PC_DIS_JABBER (1<<0) /* Bit 0: Disable Jabber */ + +#define PHY_M_PC_EN_DET SHIFT8(2) /* Energy Detect (Mode 1) */ +#define PHY_M_PC_EN_DET_PLUS SHIFT8(3) /* Energy Detect Plus (Mode 2) */ + +#define PHY_M_PC_MDI_XMODE(x) SHIFT5(x) +#define PHY_M_PC_MAN_MDI 0 /* 00 = Manual MDI configuration */ +#define PHY_M_PC_MAN_MDIX 1 /* 01 = Manual MDIX configuration */ +#define PHY_M_PC_ENA_AUTO 3 /* 11 = Enable Automatic Crossover */ + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +#define PHY_M_PS_SPEED_MSK (3<<14) /* Bit 15..14: Speed Mask */ +#define PHY_M_PS_SPEED_1000 (1<<15) /* 10 = 1000 Mbps */ +#define PHY_M_PS_SPEED_100 (1<<14) /* 01 = 100 Mbps */ +#define PHY_M_PS_SPEED_10 0 /* 00 = 10 Mbps */ +#define PHY_M_PS_FULL_DUP (1<<13) /* Bit 13: Full Duplex */ +#define PHY_M_PS_PAGE_REC (1<<12) /* Bit 12: Page Received */ +#define PHY_M_PS_SPDUP_RES (1<<11) /* Bit 11: Speed & Duplex Resolved */ +#define PHY_M_PS_LINK_UP (1<<10) /* Bit 10: Link Up */ +#define PHY_M_PS_CABLE_MSK (3<<7) /* Bit 9.. 7: Cable Length Mask */ +#define PHY_M_PS_MDI_X_STAT (1<<6) /* Bit 6: MDI Crossover Stat (1=MDIX) */ +#define PHY_M_PS_DOWNS_STAT (1<<5) /* Bit 5: Downshift Status (1=downsh.) 
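/*
 * Editor's sketch (illustrative, not part of this patch): decoding the
 * two-bit speed field and the link bit of PHY_MARV_PHY_STAT defined above.
 * SkGmPhyRead() and its parameter order are assumptions for this example.
 */
static int ExampleMarvLinkSpeed(SK_AC *pAC, SK_IOC IoC, int Port)
{
	SK_U16 PhyStat;

	SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhyStat);

	if (!(PhyStat & PHY_M_PS_LINK_UP)) {
		return 0;	/* link down */
	}

	switch (PhyStat & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return 1000;
	case PHY_M_PS_SPEED_100:
		return 100;
	default:
		return 10;	/* PHY_M_PS_SPEED_10 */
	}
}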
*/ +#define PHY_M_PS_ENDET_STAT (1<<4) /* Bit 4: Energy Detect Status (1=act) */ +#define PHY_M_PS_TX_P_EN (1<<3) /* Bit 3: Tx Pause Enabled */ +#define PHY_M_PS_RX_P_EN (1<<2) /* Bit 2: Rx Pause Enabled */ +#define PHY_M_PS_POL_REV (1<<1) /* Bit 1: Polarity Reversed */ +#define PHY_M_PC_JABBER (1<<0) /* Bit 0: Jabber */ + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/***** PHY_MARV_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +/***** PHY_MARV_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +#define PHY_M_IS_AN_ERROR (1<<15) /* Bit 15: Auto-Negotiation Error */ +#define PHY_M_IS_LSP_CHANGE (1<<14) /* Bit 14: Link Speed Changed */ +#define PHY_M_IS_DUP_CHANGE (1<<13) /* Bit 13: Duplex Mode Changed */ +#define PHY_M_IS_AN_PR (1<<12) /* Bit 12: Page Received */ +#define PHY_M_IS_AN_COMPL (1<<11) /* Bit 11: Auto-Negotiation Completed */ +#define PHY_M_IS_LST_CHANGE (1<<10) /* Bit 10: Link Status Changed */ +#define PHY_M_IS_SYMB_ERROR (1<<9) /* Bit 9: Symbol Error */ +#define PHY_M_IS_FALSE_CARR (1<<8) /* Bit 8: False Carrier */ +#define PHY_M_IS_FIFO_ERROR (1<<7) /* Bit 7: FIFO Overflow/Underrun Error */ +#define PHY_M_IS_MDI_CHANGE (1<<6) /* Bit 6: MDI Crossover Changed */ +#define PHY_M_IS_DOWNSH_DET (1<<5) /* Bit 5: Downshift Detected */ +#define PHY_M_IS_END_CHANGE (1<<4) /* Bit 4: Energy Detect Changed */ + /* Bit 3..2: reserved */ +#define PHY_M_IS_POL_CHANGE (1<<1) /* Bit 1: Polarity Changed */ +#define PHY_M_IS_JABBER (1<<0) /* Bit 0: Jabber */ + +#define PHY_M_DEF_MSK (PHY_M_IS_AN_ERROR | PHY_M_IS_AN_PR | \ + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR) + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +#define PHY_M_EC_M_DSC_MSK (3<<10) /* Bit 11..10: Master downshift counter */ +#define PHY_M_EC_S_DSC_MSK (3<<8) /* Bit 9.. 8: Slave downshift counter */ +#define PHY_M_EC_MAC_S_MSK (7<<4) /* Bit 6.. 4: Def. MAC interface speed */ +#define PHY_M_EC_FIB_AN_ENA (1<<3) /* Bit 3: Fiber Auto-Neg. Enable */ + +#define PHY_M_EC_M_DSC(x) SHIFT10(x) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) SHIFT8(x) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) SHIFT4(x) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define MAC_TX_CLK_0_MHZ 2 +#define MAC_TX_CLK_2_5_MHZ 6 +#define MAC_TX_CLK_25_MHZ 7 + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +#define PHY_M_LEDC_DIS_LED (1<<15) /* Bit 15: Disable LED */ +#define PHY_M_LEDC_PULS_MSK (7<<12) /* Bit 14..12: Pulse Stretch Mask */ +#define PHY_M_LEDC_F_INT (1<<11) /* Bit 11: Force Interrupt */ +#define PHY_M_LEDC_BL_R_MSK (7<<8) /* Bit 10.. 8: Blink Rate Mask */ + /* Bit 7.. 5: reserved */ +#define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4.. 
3: Link Control Mask */ +#define PHY_M_LEDC_DP_CTRL (1<<2) /* Bit 2: Duplex Control */ +#define PHY_M_LEDC_RX_CTRL (1<<1) /* Bit 1: Rx activity / Link */ +#define PHY_M_LEDC_TX_CTRL (1<<0) /* Bit 0: Tx activity / Link */ + +#define PHY_M_LED_PULS_DUR(x) SHIFT12(x) /* Pulse Stretch Duration */ + +#define PULS_NO_STR 0 /* no pulse stretching */ +#define PULS_21MS 1 /* 21 ms to 42 ms */ +#define PULS_42MS 2 /* 42 ms to 84 ms */ +#define PULS_84MS 3 /* 84 ms to 170 ms */ +#define PULS_170MS 4 /* 170 ms to 340 ms */ +#define PULS_340MS 5 /* 340 ms to 670 ms */ +#define PULS_670MS 6 /* 670 ms to 1.3 s */ +#define PULS_1300MS 7 /* 1.3 s to 2.7 s */ + +#define PHY_M_LED_BLINK_RT(x) SHIFT8(x) /* Blink Rate */ + +#define BLINK_42MS 0 /* 42 ms */ +#define BLINK_84MS 1 /* 84 ms */ +#define BLINK_170MS 2 /* 170 ms */ +#define BLINK_340MS 3 /* 340 ms */ +#define BLINK_670MS 4 /* 670 ms */ + /* values 5 - 7: reserved */ + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) SHIFT6(x) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) SHIFT4(x) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) SHIFT2(x) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) SHIFT0(x) /* Bit 1.. 0: Tx */ + +#define MO_LED_NORM 0 +#define MO_LED_BLINK 1 +#define MO_LED_OFF 2 +#define MO_LED_ON 3 + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ + /* Bit 15.. 7: reserved */ +#define PHY_M_EC2_FI_IMPED (1<<6) /* Bit 6: Fiber Input Impedance */ +#define PHY_M_EC2_FO_IMPED (1<<5) /* Bit 5: Fiber Output Impedance */ +#define PHY_M_EC2_FO_M_CLK (1<<4) /* Bit 4: Fiber Mode Clock Enable */ +#define PHY_M_EC2_FO_BOOST (1<<3) /* Bit 3: Fiber Output Boost */ +#define PHY_M_EC2_FO_AM_MSK 7 /* Bit 2.. 0: Fiber Output Amplitude */ + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +#define PHY_M_FC_AUTO_SEL (1<<15) /* Bit 15: Fiber/Copper Auto Sel. dis. */ +#define PHY_M_FC_AN_REG_ACC (1<<14) /* Bit 14: Fiber/Copper Autoneg. reg acc */ +#define PHY_M_FC_RESULUTION (1<<13) /* Bit 13: Fiber/Copper Resulution */ +#define PHY_M_SER_IF_AN_BP (1<<12) /* Bit 12: Ser IF autoneg. bypass enable */ +#define PHY_M_SER_IF_BP_ST (1<<11) /* Bit 11: Ser IF autoneg. bypass status */ +#define PHY_M_IRQ_POLARITY (1<<10) /* Bit 10: IRQ polarity */ + /* Bit 9..4: reserved */ +#define PHY_M_UNDOC1 (1<< 7) /* undocumented bit !! */ +#define PHY_M_MODE_MASK (0xf<<0)/* Bit 3..0: copy of HWCFG MODE[3:0] */ + + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +#define PHY_M_CABD_ENA_TEST (1<<15) /* Bit 15: Enable Test */ +#define PHY_M_CABD_STAT_MSK (3<<13) /* Bit 14..13: Status */ + /* Bit 12.. 8: reserved */ +#define PHY_M_CABD_DIST_MSK 0xff /* Bit 7.. 0: Distance */ + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +#define CABD_STAT_NORMAL 0 +#define CABD_STAT_SHORT 1 +#define CABD_STAT_OPEN 2 +#define CABD_STAT_FAIL 3 + + +/* + * GMAC registers + * + * The GMAC registers are 16 or 32 bits wide. + * The GMACs host processor interface is 16 bits wide, + * therefore ALL registers will be addressed with 16 bit accesses. + * + * The following macros are provided to access the GMAC registers + * GM_IN16(), GM_OUT16, GM_IN32(), GM_OUT32(), GM_INADR(), GM_OUTADR(), + * GM_INHASH(), and GM_OUTHASH(). + * The macros are defined in SkGeHw.h. + * + * Note: NA reg = Network Address e.g DA, SA etc. 
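/*
 * Editor's sketch (illustrative, not part of this patch): typical use of the
 * GM_IN16()/GM_OUT16() access macros named above for a read-modify-write of
 * a GMAC register. GM_GP_CTRL and the GM_GPCR_* enable bits used here are
 * defined further below in this header; the macro parameter order
 * (IoC, Port, Reg, value) is an assumption taken from SkGeHw.h.
 */
static void ExampleGmacEnableRxTx(SK_IOC IoC, int Port)
{
	SK_U16 Ctrl;

	GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);		/* read current control */
	Ctrl |= GM_GPCR_TX_ENA | GM_GPCR_RX_ENA;	/* enable Tx and Rx */
	GM_OUT16(IoC, Port, GM_GP_CTRL, Ctrl);		/* write it back */
}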
+ * + */ + +/* Port Registers */ +#define GM_GP_STAT 0x0000 /* 16 bit r/o General Purpose Status */ +#define GM_GP_CTRL 0x0004 /* 16 bit r/w General Purpose Control */ +#define GM_TX_CTRL 0x0008 /* 16 bit r/w Transmit Control Reg. */ +#define GM_RX_CTRL 0x000c /* 16 bit r/w Receive Control Reg. */ +#define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow-Control */ +#define GM_TX_PARAM 0x0014 /* 16 bit r/w Transmit Parameter Reg. */ +#define GM_SERIAL_MODE 0x0018 /* 16 bit r/w Serial Mode Register */ + +/* Source Address Registers */ +#define GM_SRC_ADDR_1L 0x001c /* 16 bit r/w Source Address 1 (low) */ +#define GM_SRC_ADDR_1M 0x0020 /* 16 bit r/w Source Address 1 (middle) */ +#define GM_SRC_ADDR_1H 0x0024 /* 16 bit r/w Source Address 1 (high) */ +#define GM_SRC_ADDR_2L 0x0028 /* 16 bit r/w Source Address 2 (low) */ +#define GM_SRC_ADDR_2M 0x002c /* 16 bit r/w Source Address 2 (middle) */ +#define GM_SRC_ADDR_2H 0x0030 /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ +#define GM_MC_ADDR_H1 0x0034 /* 16 bit r/w Multicast Address Hash 1 */ +#define GM_MC_ADDR_H2 0x0038 /* 16 bit r/w Multicast Address Hash 2 */ +#define GM_MC_ADDR_H3 0x003c /* 16 bit r/w Multicast Address Hash 3 */ +#define GM_MC_ADDR_H4 0x0040 /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ +#define GM_TX_IRQ_SRC 0x0044 /* 16 bit r/o Tx Overflow IRQ Source */ +#define GM_RX_IRQ_SRC 0x0048 /* 16 bit r/o Rx Overflow IRQ Source */ +#define GM_TR_IRQ_SRC 0x004c /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ +#define GM_TX_IRQ_MSK 0x0050 /* 16 bit r/w Tx Overflow IRQ Mask */ +#define GM_RX_IRQ_MSK 0x0054 /* 16 bit r/w Rx Overflow IRQ Mask */ +#define GM_TR_IRQ_MSK 0x0058 /* 16 bit r/w Tx/Rx Over. IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ +#define GM_SMI_CTRL 0x0080 /* 16 bit r/w SMI Control Register */ +#define GM_SMI_DATA 0x0084 /* 16 bit r/w SMI Data Register */ +#define GM_PHY_ADDR 0x0088 /* 16 bit r/w GPHY Address Register */ + +/* MIB Counters */ +#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ +#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +#define GM_RXF_UC_OK \ + (GM_MIB_CNT_BASE + 0) /* Unicast Frames Received OK */ +#define GM_RXF_BC_OK \ + (GM_MIB_CNT_BASE + 8) /* Broadcast Frames Received OK */ +#define GM_RXF_MPAUSE \ + (GM_MIB_CNT_BASE + 16) /* Pause MAC Ctrl Frames Received */ +#define GM_RXF_MC_OK \ + (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */ +#define GM_RXF_FCS_ERR \ + (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. 
Error */ + /* GM_MIB_CNT_BASE + 40: reserved */ +#define GM_RXO_OK_LO \ + (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */ +#define GM_RXO_OK_HI \ + (GM_MIB_CNT_BASE + 56) /* Octets Received OK High */ +#define GM_RXO_ERR_LO \ + (GM_MIB_CNT_BASE + 64) /* Octets Received Invalid Low */ +#define GM_RXO_ERR_HI \ + (GM_MIB_CNT_BASE + 72) /* Octets Received Invalid High */ +#define GM_RXF_SHT \ + (GM_MIB_CNT_BASE + 80) /* Frames <64 Byte Received OK */ +#define GM_RXE_FRAG \ + (GM_MIB_CNT_BASE + 88) /* Frames <64 Byte Received with FCS Err */ +#define GM_RXF_64B \ + (GM_MIB_CNT_BASE + 96) /* 64 Byte Rx Frame */ +#define GM_RXF_127B \ + (GM_MIB_CNT_BASE + 104) /* 65-127 Byte Rx Frame */ +#define GM_RXF_255B \ + (GM_MIB_CNT_BASE + 112) /* 128-255 Byte Rx Frame */ +#define GM_RXF_511B \ + (GM_MIB_CNT_BASE + 120) /* 256-511 Byte Rx Frame */ +#define GM_RXF_1023B \ + (GM_MIB_CNT_BASE + 128) /* 512-1023 Byte Rx Frame */ +#define GM_RXF_1518B \ + (GM_MIB_CNT_BASE + 136) /* 1024-1518 Byte Rx Frame */ +#define GM_RXF_MAX_SZ \ + (GM_MIB_CNT_BASE + 144) /* 1519-MaxSize Byte Rx Frame */ +#define GM_RXF_LNG_ERR \ + (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */ +#define GM_RXF_JAB_PKT \ + (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */ + /* GM_MIB_CNT_BASE + 168: reserved */ +#define GM_RXE_FIFO_OV \ + (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */ + /* GM_MIB_CNT_BASE + 184: reserved */ +#define GM_TXF_UC_OK \ + (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */ +#define GM_TXF_BC_OK \ + (GM_MIB_CNT_BASE + 200) /* Broadcast Frames Xmitted OK */ +#define GM_TXF_MPAUSE \ + (GM_MIB_CNT_BASE + 208) /* Pause MAC Ctrl Frames Xmitted */ +#define GM_TXF_MC_OK \ + (GM_MIB_CNT_BASE + 216) /* Multicast Frames Xmitted OK */ +#define GM_TXO_OK_LO \ + (GM_MIB_CNT_BASE + 224) /* Octets Transmitted OK Low */ +#define GM_TXO_OK_HI \ + (GM_MIB_CNT_BASE + 232) /* Octets Transmitted OK High */ +#define GM_TXF_64B \ + (GM_MIB_CNT_BASE + 240) /* 64 Byte Tx Frame */ +#define GM_TXF_127B \ + (GM_MIB_CNT_BASE + 248) /* 65-127 Byte Tx Frame */ +#define GM_TXF_255B \ + (GM_MIB_CNT_BASE + 256) /* 128-255 Byte Tx Frame */ +#define GM_TXF_511B \ + (GM_MIB_CNT_BASE + 264) /* 256-511 Byte Tx Frame */ +#define GM_TXF_1023B \ + (GM_MIB_CNT_BASE + 272) /* 512-1023 Byte Tx Frame */ +#define GM_TXF_1518B \ + (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */ +#define GM_TXF_MAX_SZ \ + (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */ + /* GM_MIB_CNT_BASE + 296: reserved */ +#define GM_TXF_COL \ + (GM_MIB_CNT_BASE + 304) /* Tx Collision */ +#define GM_TXF_LAT_COL \ + (GM_MIB_CNT_BASE + 312) /* Tx Late Collision */ +#define GM_TXF_ABO_COL \ + (GM_MIB_CNT_BASE + 320) /* Tx aborted due to Exces. Col. */ +#define GM_TXF_MUL_COL \ + (GM_MIB_CNT_BASE + 328) /* Tx Multiple Collision */ +#define GM_TXF_SNG_COL \ + (GM_MIB_CNT_BASE + 336) /* Tx Single Collision */ +#define GM_TXE_FIFO_UR \ + (GM_MIB_CNT_BASE + 344) /* Tx FIFO Underrun Event */ + +/*----------------------------------------------------------------------------*/ +/* + * GMAC Bit Definitions + * + * If the bit access behaviour differs from the register access behaviour + * (r/w, r/o) this is documented after the bit number. 
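/*
 * Editor's sketch (illustrative, not part of this patch): reading one MIB
 * counter as a 64-bit value. As noted above, the low 32 bits sit at the
 * listed offset and the high 32 bits four bytes above it. GM_IN32() is named
 * in this header; its parameter order and the SK_U64 type are assumptions.
 */
static SK_U64 ExampleReadRxUnicastOk(SK_IOC IoC, int Port)
{
	SK_U32 Lo, Hi;

	GM_IN32(IoC, Port, GM_RXF_UC_OK, &Lo);		/* low word */
	GM_IN32(IoC, Port, GM_RXF_UC_OK + 4, &Hi);	/* high word */

	return ((SK_U64) Hi << 32) | Lo;
}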
+ * The following bit access behaviours are used: + * (sc) self clearing + * (r/o) read only + */ + +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +#define GM_GPSR_SPEED (1<<15) /* Bit 15: Port Speed (1 = 100 Mbps) */ +#define GM_GPSR_DUPLEX (1<<14) /* Bit 14: Duplex Mode (1 = Full) */ +#define GM_GPSR_FC_TX_DIS (1<<13) /* Bit 13: Tx Flow-Control Mode Disabled */ +#define GM_GPSR_LINK_UP (1<<12) /* Bit 12: Link Up Status */ +#define GM_GPSR_PAUSE (1<<11) /* Bit 11: Pause State */ +#define GM_GPSR_TX_ACTIVE (1<<10) /* Bit 10: Tx in Progress */ +#define GM_GPSR_EXC_COL (1<<9) /* Bit 9: Excessive Collisions Occured */ +#define GM_GPSR_LAT_COL (1<<8) /* Bit 8: Late Collisions Occured */ + /* Bit 7..6: reserved */ +#define GM_GPSR_PHY_ST_CH (1<<5) /* Bit 5: PHY Status Change */ +#define GM_GPSR_GIG_SPEED (1<<4) /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ +#define GM_GPSR_PART_MODE (1<<3) /* Bit 3: Partition mode */ +#define GM_GPSR_FC_RX_DIS (1<<2) /* Bit 2: Rx Flow-Control Mode Disabled */ +#define GM_GPSR_PROM_EN (1<<1) /* Bit 1: Promiscuous Mode Enabled */ + /* Bit 0: reserved */ + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ + /* Bit 15: reserved */ +#define GM_GPCR_PROM_ENA (1<<14) /* Bit 14: Enable Promiscuous Mode */ +#define GM_GPCR_FC_TX_DIS (1<<13) /* Bit 13: Disable Tx Flow-Control Mode */ +#define GM_GPCR_TX_ENA (1<<12) /* Bit 12: Enable Transmit */ +#define GM_GPCR_RX_ENA (1<<11) /* Bit 11: Enable Receive */ +#define GM_GPCR_BURST_ENA (1<<10) /* Bit 10: Enable Burst Mode */ +#define GM_GPCR_LOOP_ENA (1<<9) /* Bit 9: Enable MAC Loopback Mode */ +#define GM_GPCR_PART_ENA (1<<8) /* Bit 8: Enable Partition Mode */ +#define GM_GPCR_GIGS_ENA (1<<7) /* Bit 7: Gigabit Speed (1000 Mbps) */ +#define GM_GPCR_FL_PASS (1<<6) /* Bit 6: Force Link Pass */ +#define GM_GPCR_DUP_FULL (1<<5) /* Bit 5: Full Duplex Mode */ +#define GM_GPCR_FC_RX_DIS (1<<4) /* Bit 4: Disable Rx Flow-Control Mode */ +#define GM_GPCR_SPEED_100 (1<<3) /* Bit 3: Port Speed 100 Mbps */ +#define GM_GPCR_AU_DUP_DIS (1<<2) /* Bit 2: Disable Auto-Update Duplex */ +#define GM_GPCR_AU_FCT_DIS (1<<1) /* Bit 1: Disable Auto-Update Flow-C. 
*/ +#define GM_GPCR_AU_SPD_DIS (1<<0) /* Bit 0: Disable Auto-Update Speed */ + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\ + GM_GPCR_AU_SPD_DIS) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +#define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */ +#define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */ +#define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */ +#define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */ + +#define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK) + +#define TX_COL_DEF 0x04 + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +#define GM_RXCR_UCF_ENA (1<<15) /* Bit 15: Enable Unicast filtering */ +#define GM_RXCR_MCF_ENA (1<<14) /* Bit 14: Enable Multicast filtering */ +#define GM_RXCR_CRC_DIS (1<<13) /* Bit 13: Remove 4-byte CRC */ +#define GM_RXCR_PASS_FC (1<<12) /* Bit 12: Pass FC packets to FIFO */ + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +#define GM_TXPA_JAMLEN_MSK (0x03<<14) /* Bit 15..14: Jam Length */ +#define GM_TXPA_JAMIPG_MSK (0x1f<<9) /* Bit 13..9: Jam IPG */ +#define GM_TXPA_JAMDAT_MSK (0x1f<<4) /* Bit 8..4: IPG Jam to Data */ + /* Bit 3..0: reserved */ + +#define TX_JAM_LEN_VAL(x) (SHIFT14(x) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (SHIFT9(x) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (SHIFT4(x) & GM_TXPA_JAMDAT_MSK) + +#define TX_JAM_LEN_DEF 0x03 +#define TX_JAM_IPG_DEF 0x0b +#define TX_IPG_JAM_DEF 0x1c + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +#define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder (r/o) */ +#define GM_SMOD_LIMIT_4 (1<<10) /* Bit 10: 4 consecutive Tx trials */ +#define GM_SMOD_VLAN_ENA (1<<9) /* Bit 9: Enable VLAN (Max. Frame Len) */ +#define GM_SMOD_JUMBO_ENA (1<<8) /* Bit 8: Enable Jumbo (Max. Frame Len) */ + /* Bit 7..5: reserved */ +#define GM_SMOD_IPG_MSK 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ + +#define DATA_BLIND_VAL(x) (SHIFT11(x) & GM_SMOD_DATABL_MSK) +#define DATA_BLIND_DEF 0x04 + +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) +#define IPG_DATA_DEF 0x1e + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +#define GM_SMI_CT_PHY_A_MSK (0x1f<<11) /* Bit 15..11: PHY Device Address */ +#define GM_SMI_CT_REG_A_MSK (0x1f<<6) /* Bit 10.. 
6: PHY Register Address */ +#define GM_SMI_CT_OP_RD (1<<5) /* Bit 5: OpCode Read (0=Write)*/ +#define GM_SMI_CT_RD_VAL (1<<4) /* Bit 4: Read Valid (Read completed) */ +#define GM_SMI_CT_BUSY (1<<3) /* Bit 3: Busy (Operation in progress) */ + /* Bit 2..0: reserved */ + +#define GM_SMI_CT_PHY_AD(x) (SHIFT11(x) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (SHIFT6(x) & GM_SMI_CT_REG_A_MSK) + + /* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ + /* Bit 15..6: reserved */ +#define GM_PAR_MIB_CLR (1<<5) /* Bit 5: Set MIB Clear Counter Mode */ +#define GM_PAR_MIB_TST (1<<4) /* Bit 4: MIB Load Counter (Test Mode) */ + /* Bit 3..0: reserved */ + +/* Receive Frame Status Encoding */ +#define GMR_FS_LEN (0xffffUL<<16) /* Bit 31..16: Rx Frame Length */ + /* Bit 15..14: reserved */ +#define GMR_FS_VLAN (1L<<13) /* Bit 13: VLAN Packet */ +#define GMR_FS_JABBER (1L<<12) /* Bit 12: Jabber Packet */ +#define GMR_FS_UN_SIZE (1L<<11) /* Bit 11: Undersize Packet */ +#define GMR_FS_MC (1L<<10) /* Bit 10: Multicast Packet */ +#define GMR_FS_BC (1L<<9) /* Bit 9: Broadcast Packet */ +#define GMR_FS_RX_OK (1L<<8) /* Bit 8: Receive OK (Good Packet) */ +#define GMR_FS_GOOD_FC (1L<<7) /* Bit 7: Good Flow-Control Packet */ +#define GMR_FS_BAD_FC (1L<<6) /* Bit 6: Bad Flow-Control Packet */ +#define GMR_FS_MII_ERR (1L<<5) /* Bit 5: MII Error */ +#define GMR_FS_LONG_ERR (1L<<4) /* Bit 4: Too Long Packet */ +#define GMR_FS_FRAGMENT (1L<<3) /* Bit 3: Fragment */ + /* Bit 2: reserved */ +#define GMR_FS_CRC_ERR (1L<<1) /* Bit 1: CRC Error */ +#define GMR_FS_RX_FF_OV (1L<<0) /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) + */ +#define GMR_FS_ANY_ERR (GMR_FS_CRC_ERR | \ + GMR_FS_LONG_ERR | \ + GMR_FS_MII_ERR | \ + GMR_FS_BAD_FC | \ + GMR_FS_GOOD_FC | \ + GMR_FS_JABBER) + +/* Rx GMAC FIFO Flush Mask (default) */ +#define RX_FF_FL_DEF_MSK (GMR_FS_CRC_ERR | \ + GMR_FS_RX_FF_OV | \ + GMR_FS_MII_ERR | \ + GMR_FS_BAD_FC | \ + GMR_FS_GOOD_FC | \ + GMR_FS_UN_SIZE | \ + GMR_FS_JABBER) + +/* typedefs *******************************************************************/ + + +/* function prototypes ********************************************************/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INC_XMAC_H */ diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c new file mode 100644 index 000000000000..6e6c56aa6d6f --- /dev/null +++ b/drivers/net/sk98lin/skaddr.c @@ -0,0 +1,1788 @@ +/****************************************************************************** + * + * Name: skaddr.c + * Project: Gigabit Ethernet Adapters, ADDR-Module + * Version: $Revision: 1.52 $ + * Date: $Date: 2003/06/02 13:46:15 $ + * Purpose: Manage Addresses (Multicast and Unicast) and Promiscuous Mode. + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This module is intended to manage multicast addresses, address override, + * and promiscuous mode on GEnesis and Yukon adapters. + * + * Address Layout: + * port address: physical MAC address + * 1st exact match: logical MAC address (GEnesis only) + * 2nd exact match: RLMT multicast (GEnesis only) + * exact match 3-13: OS-specific multicasts (GEnesis only) + * + * Include File Hierarchy: + * + * "skdrv1st.h" + * "skdrv2nd.h" + * + ******************************************************************************/ + +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: skaddr.c,v 1.52 2003/06/02 13:46:15 tschilli Exp $ (C) Marvell."; +#endif /* DEBUG ||!LINT || !SK_SLIM */ + +#define __SKADDR_C + +#ifdef __cplusplus +extern "C" { +#endif /* cplusplus */ + +#include "h/skdrv1st.h" +#include "h/skdrv2nd.h" + +/* defines ********************************************************************/ + + +#define XMAC_POLY 0xEDB88320UL /* CRC32-Poly - XMAC: Little Endian */ +#define GMAC_POLY 0x04C11DB7L /* CRC16-Poly - GMAC: Little Endian */ +#define HASH_BITS 6 /* #bits in hash */ +#define SK_MC_BIT 0x01 + +/* Error numbers and messages. */ + +#define SKERR_ADDR_E001 (SK_ERRBASE_ADDR + 0) +#define SKERR_ADDR_E001MSG "Bad Flags." +#define SKERR_ADDR_E002 (SKERR_ADDR_E001 + 1) +#define SKERR_ADDR_E002MSG "New Error." + +/* typedefs *******************************************************************/ + +/* None. */ + +/* global variables ***********************************************************/ + +/* 64-bit hash values with all bits set. */ + +static const SK_U16 OnesHash[4] = {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF}; + +/* local variables ************************************************************/ + +#ifdef DEBUG +static int Next0[SK_MAX_MACS] = {0}; +#endif /* DEBUG */ + +static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, + SK_MAC_ADDR *pMc, int Flags); +static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, + int Flags); +static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); +static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC, + SK_U32 PortNumber, int NewPromMode); +static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, + SK_MAC_ADDR *pMc, int Flags); +static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, + int Flags); +static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); +static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC, + SK_U32 PortNumber, int NewPromMode); + +/* functions ******************************************************************/ + +/****************************************************************************** + * + * SkAddrInit - initialize data, set state to init + * + * Description: + * + * SK_INIT_DATA + * ============ + * + * This routine clears the multicast tables and resets promiscuous mode. + * Some entries are reserved for the "logical MAC address", the + * SK-RLMT multicast address, and the BPDU multicast address. + * + * + * SK_INIT_IO + * ========== + * + * All permanent MAC addresses are read from EPROM. + * If the current MAC addresses are not already set in software, + * they are set to the values of the permanent addresses. + * The current addresses are written to the corresponding MAC. 
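/*
 * Editor's sketch (illustrative, not part of this patch): the three-stage
 * bring-up that the level descriptions above and below outline. The call
 * sequence is an assumption for illustration; error handling is omitted.
 */
static int ExampleAddrBringUp(SK_AC *pAC, SK_IOC IoC)
{
	/* SK_INIT_DATA: clear the software tables, no hardware access yet */
	(void) SkAddrInit(pAC, IoC, SK_INIT_DATA);

	/* SK_INIT_IO: read the permanent addresses and program the MACs */
	(void) SkAddrInit(pAC, IoC, SK_INIT_IO);

	/* SK_INIT_RUN: nothing left to do for this module */
	return SkAddrInit(pAC, IoC, SK_INIT_RUN);
}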
+ * + * + * SK_INIT_RUN + * =========== + * + * Nothing. + * + * Context: + * init, pageable + * + * Returns: + * SK_ADDR_SUCCESS + */ +int SkAddrInit( +SK_AC *pAC, /* the adapter context */ +SK_IOC IoC, /* I/O context */ +int Level) /* initialization level */ +{ + int j; + SK_U32 i; + SK_U8 *InAddr; + SK_U16 *OutAddr; + SK_ADDR_PORT *pAPort; + + switch (Level) { + case SK_INIT_DATA: + SK_MEMSET((char *) &pAC->Addr, (SK_U8) 0, + (SK_U16) sizeof(SK_ADDR)); + + for (i = 0; i < SK_MAX_MACS; i++) { + pAPort = &pAC->Addr.Port[i]; + pAPort->PromMode = SK_PROM_MODE_NONE; + + pAPort->FirstExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; + pAPort->FirstExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; + pAPort->NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; + pAPort->NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; + } +#ifdef xDEBUG + for (i = 0; i < SK_MAX_MACS; i++) { + if (pAC->Addr.Port[i].NextExactMatchRlmt < + SK_ADDR_FIRST_MATCH_RLMT) { + Next0[i] |= 4; + } + } +#endif /* DEBUG */ + /* pAC->Addr.InitDone = SK_INIT_DATA; */ + break; + + case SK_INIT_IO: +#ifndef SK_NO_RLMT + for (i = 0; i < SK_MAX_NETS; i++) { + pAC->Addr.Net[i].ActivePort = pAC->Rlmt.Net[i].ActivePort; + } +#endif /* !SK_NO_RLMT */ +#ifdef xDEBUG + for (i = 0; i < SK_MAX_MACS; i++) { + if (pAC->Addr.Port[i].NextExactMatchRlmt < + SK_ADDR_FIRST_MATCH_RLMT) { + Next0[i] |= 8; + } + } +#endif /* DEBUG */ + + /* Read permanent logical MAC address from Control Register File. */ + for (j = 0; j < SK_MAC_ADDR_LEN; j++) { + InAddr = (SK_U8 *) &pAC->Addr.Net[0].PermanentMacAddress.a[j]; + SK_IN8(IoC, B2_MAC_1 + j, InAddr); + } + + if (!pAC->Addr.Net[0].CurrentMacAddressSet) { + /* Set the current logical MAC address to the permanent one. */ + pAC->Addr.Net[0].CurrentMacAddress = + pAC->Addr.Net[0].PermanentMacAddress; + pAC->Addr.Net[0].CurrentMacAddressSet = SK_TRUE; + } + + /* Set the current logical MAC address. */ + pAC->Addr.Port[pAC->Addr.Net[0].ActivePort].Exact[0] = + pAC->Addr.Net[0].CurrentMacAddress; +#if SK_MAX_NETS > 1 + /* Set logical MAC address for net 2 to (log | 3). */ + if (!pAC->Addr.Net[1].CurrentMacAddressSet) { + pAC->Addr.Net[1].PermanentMacAddress = + pAC->Addr.Net[0].PermanentMacAddress; + pAC->Addr.Net[1].PermanentMacAddress.a[5] |= 3; + /* Set the current logical MAC address to the permanent one. */ + pAC->Addr.Net[1].CurrentMacAddress = + pAC->Addr.Net[1].PermanentMacAddress; + pAC->Addr.Net[1].CurrentMacAddressSet = SK_TRUE; + } +#endif /* SK_MAX_NETS > 1 */ + +#ifdef DEBUG + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("Permanent MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n", + i, + pAC->Addr.Net[i].PermanentMacAddress.a[0], + pAC->Addr.Net[i].PermanentMacAddress.a[1], + pAC->Addr.Net[i].PermanentMacAddress.a[2], + pAC->Addr.Net[i].PermanentMacAddress.a[3], + pAC->Addr.Net[i].PermanentMacAddress.a[4], + pAC->Addr.Net[i].PermanentMacAddress.a[5])) + + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("Logical MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n", + i, + pAC->Addr.Net[i].CurrentMacAddress.a[0], + pAC->Addr.Net[i].CurrentMacAddress.a[1], + pAC->Addr.Net[i].CurrentMacAddress.a[2], + pAC->Addr.Net[i].CurrentMacAddress.a[3], + pAC->Addr.Net[i].CurrentMacAddress.a[4], + pAC->Addr.Net[i].CurrentMacAddress.a[5])) + } +#endif /* DEBUG */ + + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + pAPort = &pAC->Addr.Port[i]; + + /* Read permanent port addresses from Control Register File. 
*/ + for (j = 0; j < SK_MAC_ADDR_LEN; j++) { + InAddr = (SK_U8 *) &pAPort->PermanentMacAddress.a[j]; + SK_IN8(IoC, B2_MAC_2 + 8 * i + j, InAddr); + } + + if (!pAPort->CurrentMacAddressSet) { + /* + * Set the current and previous physical MAC address + * of this port to its permanent MAC address. + */ + pAPort->CurrentMacAddress = pAPort->PermanentMacAddress; + pAPort->PreviousMacAddress = pAPort->PermanentMacAddress; + pAPort->CurrentMacAddressSet = SK_TRUE; + } + + /* Set port's current physical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + XM_OUTADDR(IoC, i, XM_SA, OutAddr); + } +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { + GM_OUTADDR(IoC, i, GM_SRC_ADDR_1L, OutAddr); + } +#endif /* YUKON */ +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("SkAddrInit: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAPort->PermanentMacAddress.a[0], + pAPort->PermanentMacAddress.a[1], + pAPort->PermanentMacAddress.a[2], + pAPort->PermanentMacAddress.a[3], + pAPort->PermanentMacAddress.a[4], + pAPort->PermanentMacAddress.a[5])) + + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("SkAddrInit: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAPort->CurrentMacAddress.a[0], + pAPort->CurrentMacAddress.a[1], + pAPort->CurrentMacAddress.a[2], + pAPort->CurrentMacAddress.a[3], + pAPort->CurrentMacAddress.a[4], + pAPort->CurrentMacAddress.a[5])) +#endif /* DEBUG */ + } + /* pAC->Addr.InitDone = SK_INIT_IO; */ + break; + + case SK_INIT_RUN: +#ifdef xDEBUG + for (i = 0; i < SK_MAX_MACS; i++) { + if (pAC->Addr.Port[i].NextExactMatchRlmt < + SK_ADDR_FIRST_MATCH_RLMT) { + Next0[i] |= 16; + } + } +#endif /* DEBUG */ + + /* pAC->Addr.InitDone = SK_INIT_RUN; */ + break; + + default: /* error */ + break; + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrInit */ + +#ifndef SK_SLIM + +/****************************************************************************** + * + * SkAddrMcClear - clear the multicast table + * + * Description: + * This routine clears the multicast table. + * + * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated + * immediately. + * + * It calls either SkAddrXmacMcClear or SkAddrGmacMcClear, according + * to the adapter in use. The real work is done there. + * + * Context: + * runtime, pageable + * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY + * may be called after SK_INIT_IO without limitation + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrMcClear( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Index of affected port */ +int Flags) /* permanent/non-perm, sw-only */ +{ + int ReturnCode; + + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (pAC->GIni.GIGenesis) { + ReturnCode = SkAddrXmacMcClear(pAC, IoC, PortNumber, Flags); + } + else { + ReturnCode = SkAddrGmacMcClear(pAC, IoC, PortNumber, Flags); + } + + return (ReturnCode); + +} /* SkAddrMcClear */ + +#endif /* !SK_SLIM */ + +#ifndef SK_SLIM + +/****************************************************************************** + * + * SkAddrXmacMcClear - clear the multicast table + * + * Description: + * This routine clears the multicast table + * (either entry 2 or entries 3-16 and InexactFilter) of the given port. + * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated + * immediately. 
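/*
 * Editor's sketch (illustrative, not part of this patch): the rebuild
 * sequence a driver typically follows when the OS multicast list changes -
 * clear the software tables only (SK_MC_SW_ONLY), add the new addresses,
 * then push the result to the MAC in one update. The exact parameter lists
 * of SkAddrMcAdd() and SkAddrMcUpdate() are assumptions based on the
 * prototypes visible in this module.
 */
static void ExampleRebuildMcList(SK_AC *pAC, SK_IOC IoC, SK_U32 Port,
	SK_MAC_ADDR *McList, int McCount)
{
	int i;

	/* drop the old driver (non-permanent) entries, software only */
	(void) SkAddrMcClear(pAC, IoC, Port, SK_MC_SW_ONLY);

	for (i = 0; i < McCount; i++) {
		(void) SkAddrMcAdd(pAC, IoC, Port, &McList[i], 0);
	}

	/* write the exact-match entries and the hash filter to the hardware */
	(void) SkAddrMcUpdate(pAC, IoC, Port);
}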
+ * + * Context: + * runtime, pageable + * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY + * may be called after SK_INIT_IO without limitation + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +static int SkAddrXmacMcClear( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Index of affected port */ +int Flags) /* permanent/non-perm, sw-only */ +{ + int i; + + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ + + /* Clear RLMT multicast addresses. */ + pAC->Addr.Port[PortNumber].NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; + } + else { /* not permanent => DRV */ + + /* Clear InexactFilter */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0; + } + + /* Clear DRV multicast addresses. */ + + pAC->Addr.Port[PortNumber].NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; + } + + if (!(Flags & SK_MC_SW_ONLY)) { + (void) SkAddrXmacMcUpdate(pAC, IoC, PortNumber); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrXmacMcClear */ + +#endif /* !SK_SLIM */ + +#ifndef SK_SLIM + +/****************************************************************************** + * + * SkAddrGmacMcClear - clear the multicast table + * + * Description: + * This routine clears the multicast hashing table (InexactFilter) + * (either the RLMT or the driver bits) of the given port. + * + * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated + * immediately. + * + * Context: + * runtime, pageable + * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY + * may be called after SK_INIT_IO without limitation + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +static int SkAddrGmacMcClear( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Index of affected port */ +int Flags) /* permanent/non-perm, sw-only */ +{ + int i; + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactFilter (not cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7])) +#endif /* DEBUG */ + + /* Clear InexactFilter */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0; + } + + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ + + /* Copy DRV bits to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i]; + + /* Clear InexactRlmtFilter. */ + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i] = 0; + + } + } + else { /* not permanent => DRV */ + + /* Copy RLMT bits to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i]; + + /* Clear InexactDrvFilter. 
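(after this loop, InexactFilter holds only the RLMT bits that were OR-ed back in above, so the driver's hash bits disappear from the hardware at the next update)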
*/ + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i] = 0; + } + } + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactFilter (cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7])) +#endif /* DEBUG */ + + if (!(Flags & SK_MC_SW_ONLY)) { + (void) SkAddrGmacMcUpdate(pAC, IoC, PortNumber); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrGmacMcClear */ + +#ifndef SK_ADDR_CHEAT + +/****************************************************************************** + * + * SkXmacMcHash - hash multicast address + * + * Description: + * This routine computes the hash value for a multicast address. + * A CRC32 algorithm is used. + * + * Notes: + * The code was adapted from the XaQti data sheet. + * + * Context: + * runtime, pageable + * + * Returns: + * Hash value of multicast address. + */ +static SK_U32 SkXmacMcHash( +unsigned char *pMc) /* Multicast address */ +{ + SK_U32 Idx; + SK_U32 Bit; + SK_U32 Data; + SK_U32 Crc; + + Crc = 0xFFFFFFFFUL; + for (Idx = 0; Idx < SK_MAC_ADDR_LEN; Idx++) { + Data = *pMc++; + for (Bit = 0; Bit < 8; Bit++, Data >>= 1) { + Crc = (Crc >> 1) ^ (((Crc ^ Data) & 1) ? XMAC_POLY : 0); + } + } + + return (Crc & ((1 << HASH_BITS) - 1)); + +} /* SkXmacMcHash */ + + +/****************************************************************************** + * + * SkGmacMcHash - hash multicast address + * + * Description: + * This routine computes the hash value for a multicast address. + * A CRC16 algorithm is used. + * + * Notes: + * + * + * Context: + * runtime, pageable + * + * Returns: + * Hash value of multicast address. + */ +static SK_U32 SkGmacMcHash( +unsigned char *pMc) /* Multicast address */ +{ + SK_U32 Data; + SK_U32 TmpData; + SK_U32 Crc; + int Byte; + int Bit; + + Crc = 0xFFFFFFFFUL; + for (Byte = 0; Byte < 6; Byte++) { + /* Get next byte. */ + Data = (SK_U32) pMc[Byte]; + + /* Change bit order in byte. */ + TmpData = Data; + for (Bit = 0; Bit < 8; Bit++) { + if (TmpData & 1L) { + Data |= 1L << (7 - Bit); + } + else { + Data &= ~(1L << (7 - Bit)); + } + TmpData >>= 1; + } + + Crc ^= (Data << 24); + for (Bit = 0; Bit < 8; Bit++) { + if (Crc & 0x80000000) { + Crc = (Crc << 1) ^ GMAC_POLY; + } + else { + Crc <<= 1; + } + } + } + + return (Crc & ((1 << HASH_BITS) - 1)); + +} /* SkGmacMcHash */ + +#endif /* !SK_ADDR_CHEAT */ + +/****************************************************************************** + * + * SkAddrMcAdd - add a multicast address to a port + * + * Description: + * This routine enables reception for a given address on the given port. + * + * It calls either SkAddrXmacMcAdd or SkAddrGmacMcAdd, according to the + * adapter in use. The real work is done there. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. 
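+ *
+ *	For illustration only (not code from this file), a caller that
+ *	maintains its own list McList[0..Count-1] would typically rebuild
+ *	the table like this:
+ *
+ *		(void) SkAddrMcClear(pAC, IoC, Port, SK_MC_SW_ONLY);
+ *		for (k = 0; k < Count; k++) {
+ *			(void) SkAddrMcAdd(pAC, IoC, Port, &McList[k], 0);
+ *		}
+ *		(void) SkAddrMcUpdate(pAC, IoC, Port);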
+ * + * Context: + * runtime, pageable + * may be called after SK_INIT_DATA + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_MC_ILLEGAL_ADDRESS + * SK_MC_ILLEGAL_PORT + * SK_MC_RLMT_OVERFLOW + */ +int SkAddrMcAdd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR *pMc, /* multicast address to be added */ +int Flags) /* permanent/non-permanent */ +{ + int ReturnCode; + + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (pAC->GIni.GIGenesis) { + ReturnCode = SkAddrXmacMcAdd(pAC, IoC, PortNumber, pMc, Flags); + } + else { + ReturnCode = SkAddrGmacMcAdd(pAC, IoC, PortNumber, pMc, Flags); + } + + return (ReturnCode); + +} /* SkAddrMcAdd */ + + +/****************************************************************************** + * + * SkAddrXmacMcAdd - add a multicast address to a port + * + * Description: + * This routine enables reception for a given address on the given port. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * The multicast bit is only checked if there are no free exact match + * entries. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_DATA + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_MC_ILLEGAL_ADDRESS + * SK_MC_RLMT_OVERFLOW + */ +static int SkAddrXmacMcAdd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR *pMc, /* multicast address to be added */ +int Flags) /* permanent/non-permanent */ +{ + int i; + SK_U8 Inexact; +#ifndef SK_ADDR_CHEAT + SK_U32 HashBit; +#endif /* !defined(SK_ADDR_CHEAT) */ + + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ +#ifdef xDEBUG + if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt < + SK_ADDR_FIRST_MATCH_RLMT) { + Next0[PortNumber] |= 1; + return (SK_MC_RLMT_OVERFLOW); + } +#endif /* DEBUG */ + + if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt > + SK_ADDR_LAST_MATCH_RLMT) { + return (SK_MC_RLMT_OVERFLOW); + } + + /* Set a RLMT multicast address. */ + + pAC->Addr.Port[PortNumber].Exact[ + pAC->Addr.Port[PortNumber].NextExactMatchRlmt++] = *pMc; + + return (SK_MC_FILTERING_EXACT); + } + +#ifdef xDEBUG + if (pAC->Addr.Port[PortNumber].NextExactMatchDrv < + SK_ADDR_FIRST_MATCH_DRV) { + Next0[PortNumber] |= 2; + return (SK_MC_RLMT_OVERFLOW); + } +#endif /* DEBUG */ + + if (pAC->Addr.Port[PortNumber].NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) { + + /* Set exact match entry. */ + pAC->Addr.Port[PortNumber].Exact[ + pAC->Addr.Port[PortNumber].NextExactMatchDrv++] = *pMc; + + /* Clear InexactFilter */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0; + } + } + else { + if (!(pMc->a[0] & SK_MC_BIT)) { + /* Hashing only possible with multicast addresses */ + return (SK_MC_ILLEGAL_ADDRESS); + } +#ifndef SK_ADDR_CHEAT + /* Compute hash value of address. */ + HashBit = 63 - SkXmacMcHash(&pMc->a[0]); + + /* Add bit to InexactFilter. */ + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[HashBit / 8] |= + 1 << (HashBit % 8); +#else /* SK_ADDR_CHEAT */ + /* Set all bits in InexactFilter. 
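(Without SK_ADDR_CHEAT, only one bit would be set instead: a hash value of e.g. 10 gives HashBit = 63 - 10 = 53, i.e. bit 53 % 8 = 5 in InexactFilter byte 53 / 8 = 6.)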
*/ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF; + } +#endif /* SK_ADDR_CHEAT */ + } + + for (Inexact = 0, i = 0; i < 8; i++) { + Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i]; + } + + if (Inexact == 0 && pAC->Addr.Port[PortNumber].PromMode == 0) { + return (SK_MC_FILTERING_EXACT); + } + else { + return (SK_MC_FILTERING_INEXACT); + } + +} /* SkAddrXmacMcAdd */ + + +/****************************************************************************** + * + * SkAddrGmacMcAdd - add a multicast address to a port + * + * Description: + * This routine enables reception for a given address on the given port. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_DATA + * + * Returns: + * SK_MC_FILTERING_INEXACT + * SK_MC_ILLEGAL_ADDRESS + */ +static int SkAddrGmacMcAdd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR *pMc, /* multicast address to be added */ +int Flags) /* permanent/non-permanent */ +{ + int i; +#ifndef SK_ADDR_CHEAT + SK_U32 HashBit; +#endif /* !defined(SK_ADDR_CHEAT) */ + + if (!(pMc->a[0] & SK_MC_BIT)) { + /* Hashing only possible with multicast addresses */ + return (SK_MC_ILLEGAL_ADDRESS); + } + +#ifndef SK_ADDR_CHEAT + + /* Compute hash value of address. */ + HashBit = SkGmacMcHash(&pMc->a[0]); + + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ + + /* Add bit to InexactRlmtFilter. */ + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[HashBit / 8] |= + 1 << (HashBit % 8); + + /* Copy bit to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i]; + } +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactRlmtFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[7])) +#endif /* DEBUG */ + } + else { /* not permanent => DRV */ + + /* Add bit to InexactDrvFilter. */ + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[HashBit / 8] |= + 1 << (HashBit % 8); + + /* Copy bit to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i]; + } +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactDrvFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[7])) +#endif /* DEBUG */ + } + +#else /* SK_ADDR_CHEAT */ + + /* Set all bits in InexactFilter. 
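(In the normal build above, a hash value of e.g. 42 sets bit 42 % 8 = 2 in byte 42 / 8 = 5 of the RLMT or DRV filter, which is then OR-ed into InexactFilter.)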
*/ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF; + } +#endif /* SK_ADDR_CHEAT */ + + return (SK_MC_FILTERING_INEXACT); + +} /* SkAddrGmacMcAdd */ + +#endif /* !SK_SLIM */ + +/****************************************************************************** + * + * SkAddrMcUpdate - update the HW MC address table and set the MAC address + * + * Description: + * This routine enables reception of the addresses contained in a local + * table for a given port. + * It also programs the port's current physical MAC address. + * + * It calls either SkAddrXmacMcUpdate or SkAddrGmacMcUpdate, according + * to the adapter in use. The real work is done there. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrMcUpdate( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber) /* Port Number */ +{ + int ReturnCode = 0; +#if (!defined(SK_SLIM) || defined(DEBUG)) + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } +#endif /* !SK_SLIM || DEBUG */ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + ReturnCode = SkAddrXmacMcUpdate(pAC, IoC, PortNumber); + } +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { + ReturnCode = SkAddrGmacMcUpdate(pAC, IoC, PortNumber); + } +#endif /* YUKON */ + return (ReturnCode); + +} /* SkAddrMcUpdate */ + + +#ifdef GENESIS + +/****************************************************************************** + * + * SkAddrXmacMcUpdate - update the HW MC address table and set the MAC address + * + * Description: + * This routine enables reception of the addresses contained in a local + * table for a given port. + * It also programs the port's current physical MAC address. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_ADDR_ILLEGAL_PORT + */ +static int SkAddrXmacMcUpdate( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber) /* Port Number */ +{ + SK_U32 i; + SK_U8 Inexact; + SK_U16 *OutAddr; + SK_ADDR_PORT *pAPort; + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrXmacMcUpdate on Port %u.\n", PortNumber)) + + pAPort = &pAC->Addr.Port[PortNumber]; + +#ifdef DEBUG + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) +#endif /* DEBUG */ + + /* Start with 0 to also program the logical MAC address. 
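(Exact[0] was loaded with the net's current logical MAC address in SkAddrInit, so starting the RLMT loop at index 0 rewrites that address together with the RLMT multicast entries.)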
*/ + for (i = 0; i < pAPort->NextExactMatchRlmt; i++) { + /* Set exact match address i on XMAC */ + OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0]; + XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr); + } + + /* Clear other permanent exact match addresses on XMAC */ + if (pAPort->NextExactMatchRlmt <= SK_ADDR_LAST_MATCH_RLMT) { + + SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchRlmt, + SK_ADDR_LAST_MATCH_RLMT); + } + + for (i = pAPort->FirstExactMatchDrv; i < pAPort->NextExactMatchDrv; i++) { + OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0]; + XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr); + } + + /* Clear other non-permanent exact match addresses on XMAC */ + if (pAPort->NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) { + + SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchDrv, + SK_ADDR_LAST_MATCH_DRV); + } + + for (Inexact = 0, i = 0; i < 8; i++) { + Inexact |= pAPort->InexactFilter.Bytes[i]; + } + + if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) { + + /* Set all bits in 64-bit hash register. */ + XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + else if (Inexact != 0) { + + /* Set 64-bit hash register to InexactFilter. */ + XM_OUTHASH(IoC, PortNumber, XM_HSM, &pAPort->InexactFilter.Bytes[0]); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + else { + /* Disable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE); + } + + if (pAPort->PromMode != SK_PROM_MODE_NONE) { + (void) SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); + } + + /* Set port's current physical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; + + XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr); + +#ifdef xDEBUG + for (i = 0; i < pAPort->NextExactMatchRlmt; i++) { + SK_U8 InAddr8[6]; + SK_U16 *InAddr; + + /* Get exact match address i from port PortNumber. */ + InAddr = (SK_U16 *) &InAddr8[0]; + + XM_INADDR(IoC, PortNumber, XM_EXM(i), InAddr); + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrXmacMcUpdate: MC address %d on Port %u: ", + "%02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x\n", + i, + PortNumber, + InAddr8[0], + InAddr8[1], + InAddr8[2], + InAddr8[3], + InAddr8[4], + InAddr8[5], + pAPort->Exact[i].a[0], + pAPort->Exact[i].a[1], + pAPort->Exact[i].a[2], + pAPort->Exact[i].a[3], + pAPort->Exact[i].a[4], + pAPort->Exact[i].a[5])) + } +#endif /* DEBUG */ + + /* Determine return value. */ + if (Inexact == 0 && pAPort->PromMode == 0) { + return (SK_MC_FILTERING_EXACT); + } + else { + return (SK_MC_FILTERING_INEXACT); + } + +} /* SkAddrXmacMcUpdate */ + +#endif /* GENESIS */ + +#ifdef YUKON + +/****************************************************************************** + * + * SkAddrGmacMcUpdate - update the HW MC address table and set the MAC address + * + * Description: + * This routine enables reception of the addresses contained in a local + * table for a given port. + * It also programs the port's current physical MAC address. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. 
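+ * In this routine the GMAC is programmed only through the 64-bit hash
+ * plus the physical (GM_SRC_ADDR_1L) and logical (GM_SRC_ADDR_2L)
+ * station addresses, which is why SkAddrGmacMcAdd reports
+ * SK_MC_FILTERING_INEXACT for every multicast address.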
+ * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_ADDR_ILLEGAL_PORT + */ +static int SkAddrGmacMcUpdate( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber) /* Port Number */ +{ +#ifndef SK_SLIM + SK_U32 i; + SK_U8 Inexact; +#endif /* not SK_SLIM */ + SK_U16 *OutAddr; + SK_ADDR_PORT *pAPort; + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrGmacMcUpdate on Port %u.\n", PortNumber)) + + pAPort = &pAC->Addr.Port[PortNumber]; + +#ifdef DEBUG + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) +#endif /* DEBUG */ + +#ifndef SK_SLIM + for (Inexact = 0, i = 0; i < 8; i++) { + Inexact |= pAPort->InexactFilter.Bytes[i]; + } + + /* Set 64-bit hash register to InexactFilter. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, + &pAPort->InexactFilter.Bytes[0]); + + if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) { + + /* Set all bits in 64-bit hash register. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + else { + /* Enable Hashing. */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + + if (pAPort->PromMode != SK_PROM_MODE_NONE) { + (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); + } +#else /* SK_SLIM */ + + /* Set all bits in 64-bit hash register. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + + (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); + +#endif /* SK_SLIM */ + + /* Set port's current physical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; + GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr); + + /* Set port's current logical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->Exact[0].a[0]; + GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_2L, OutAddr); + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrGmacMcUpdate: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAPort->Exact[0].a[0], + pAPort->Exact[0].a[1], + pAPort->Exact[0].a[2], + pAPort->Exact[0].a[3], + pAPort->Exact[0].a[4], + pAPort->Exact[0].a[5])) + + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrGmacMcUpdate: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAPort->CurrentMacAddress.a[0], + pAPort->CurrentMacAddress.a[1], + pAPort->CurrentMacAddress.a[2], + pAPort->CurrentMacAddress.a[3], + pAPort->CurrentMacAddress.a[4], + pAPort->CurrentMacAddress.a[5])) +#endif /* DEBUG */ + +#ifndef SK_SLIM + /* Determine return value. */ + if (Inexact == 0 && pAPort->PromMode == 0) { + return (SK_MC_FILTERING_EXACT); + } + else { + return (SK_MC_FILTERING_INEXACT); + } +#else /* SK_SLIM */ + return (SK_MC_FILTERING_INEXACT); +#endif /* SK_SLIM */ + +} /* SkAddrGmacMcUpdate */ + +#endif /* YUKON */ + +#ifndef SK_NO_MAO + +/****************************************************************************** + * + * SkAddrOverride - override a port's MAC address + * + * Description: + * This routine overrides the MAC address of one port. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS if successful. + * SK_ADDR_DUPLICATE_ADDRESS if duplicate MAC address. + * SK_ADDR_MULTICAST_ADDRESS if multicast or broadcast address. 
+ * SK_ADDR_TOO_EARLY if SK_INIT_IO was not executed before. + */ +int SkAddrOverride( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR SK_FAR *pNewAddr, /* new MAC address */ +int Flags) /* logical/physical MAC address */ +{ +#ifndef SK_NO_RLMT + SK_EVPARA Para; +#endif /* !SK_NO_RLMT */ + SK_U32 NetNumber; + SK_U32 i; + SK_U16 SK_FAR *OutAddr; + +#ifndef SK_NO_RLMT + NetNumber = pAC->Rlmt.Port[PortNumber].Net->NetNumber; +#else + NetNumber = 0; +#endif /* SK_NO_RLMT */ +#if (!defined(SK_SLIM) || defined(DEBUG)) + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } +#endif /* !SK_SLIM || DEBUG */ + if (pNewAddr != NULL && (pNewAddr->a[0] & SK_MC_BIT) != 0) { + return (SK_ADDR_MULTICAST_ADDRESS); + } + + if (!pAC->Addr.Net[NetNumber].CurrentMacAddressSet) { + return (SK_ADDR_TOO_EARLY); + } + + if (Flags & SK_ADDR_SET_LOGICAL) { /* Activate logical MAC address. */ + /* Parameter *pNewAddr is ignored. */ + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + if (!pAC->Addr.Port[i].CurrentMacAddressSet) { + return (SK_ADDR_TOO_EARLY); + } + } +#ifndef SK_NO_RLMT + /* Set PortNumber to number of net's active port. */ + PortNumber = pAC->Rlmt.Net[NetNumber]. + Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber; +#endif /* !SK_NO_RLMT */ + pAC->Addr.Port[PortNumber].Exact[0] = + pAC->Addr.Net[NetNumber].CurrentMacAddress; + + /* Write address to first exact match entry of active port. */ + (void) SkAddrMcUpdate(pAC, IoC, PortNumber); + } + else if (Flags & SK_ADDR_CLEAR_LOGICAL) { + /* Deactivate logical MAC address. */ + /* Parameter *pNewAddr is ignored. */ + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + if (!pAC->Addr.Port[i].CurrentMacAddressSet) { + return (SK_ADDR_TOO_EARLY); + } + } +#ifndef SK_NO_RLMT + /* Set PortNumber to number of net's active port. */ + PortNumber = pAC->Rlmt.Net[NetNumber]. + Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber; +#endif /* !SK_NO_RLMT */ + for (i = 0; i < SK_MAC_ADDR_LEN; i++ ) { + pAC->Addr.Port[PortNumber].Exact[0].a[i] = 0; + } + + /* Write address to first exact match entry of active port. */ + (void) SkAddrMcUpdate(pAC, IoC, PortNumber); + } + else if (Flags & SK_ADDR_PHYSICAL_ADDRESS) { /* Physical MAC address. */ + if (SK_ADDR_EQUAL(pNewAddr->a, + pAC->Addr.Net[NetNumber].CurrentMacAddress.a)) { + return (SK_ADDR_DUPLICATE_ADDRESS); + } + + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + if (!pAC->Addr.Port[i].CurrentMacAddressSet) { + return (SK_ADDR_TOO_EARLY); + } + + if (SK_ADDR_EQUAL(pNewAddr->a, + pAC->Addr.Port[i].CurrentMacAddress.a)) { + if (i == PortNumber) { + return (SK_ADDR_SUCCESS); + } + else { + return (SK_ADDR_DUPLICATE_ADDRESS); + } + } + } + + pAC->Addr.Port[PortNumber].PreviousMacAddress = + pAC->Addr.Port[PortNumber].CurrentMacAddress; + pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr; + + /* Change port's physical MAC address. */ + OutAddr = (SK_U16 SK_FAR *) pNewAddr; +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr); + } +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { + GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr); + } +#endif /* YUKON */ + +#ifndef SK_NO_RLMT + /* Report address change to RLMT. */ + Para.Para32[0] = PortNumber; + Para.Para32[0] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para); +#endif /* !SK_NO_RLMT */ + } + else { /* Logical MAC address. 
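(reached when none of SK_ADDR_SET_LOGICAL, SK_ADDR_CLEAR_LOGICAL or SK_ADDR_PHYSICAL_ADDRESS is set in Flags, e.g. a call like SkAddrOverride(pAC, IoC, Port, &NewAddr, 0) that changes the net's logical address)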
*/ + if (SK_ADDR_EQUAL(pNewAddr->a, + pAC->Addr.Net[NetNumber].CurrentMacAddress.a)) { + return (SK_ADDR_SUCCESS); + } + + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + if (!pAC->Addr.Port[i].CurrentMacAddressSet) { + return (SK_ADDR_TOO_EARLY); + } + + if (SK_ADDR_EQUAL(pNewAddr->a, + pAC->Addr.Port[i].CurrentMacAddress.a)) { + return (SK_ADDR_DUPLICATE_ADDRESS); + } + } + + /* + * In case that the physical and the logical MAC addresses are equal + * we must also change the physical MAC address here. + * In this case we have an adapter which initially was programmed with + * two identical MAC addresses. + */ + if (SK_ADDR_EQUAL(pAC->Addr.Port[PortNumber].CurrentMacAddress.a, + pAC->Addr.Port[PortNumber].Exact[0].a)) { + + pAC->Addr.Port[PortNumber].PreviousMacAddress = + pAC->Addr.Port[PortNumber].CurrentMacAddress; + pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr; + +#ifndef SK_NO_RLMT + /* Report address change to RLMT. */ + Para.Para32[0] = PortNumber; + Para.Para32[0] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para); +#endif /* !SK_NO_RLMT */ + } + +#ifndef SK_NO_RLMT + /* Set PortNumber to number of net's active port. */ + PortNumber = pAC->Rlmt.Net[NetNumber]. + Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber; +#endif /* !SK_NO_RLMT */ + pAC->Addr.Net[NetNumber].CurrentMacAddress = *pNewAddr; + pAC->Addr.Port[PortNumber].Exact[0] = *pNewAddr; +#ifdef DEBUG + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrOverride: Permanent MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Net[NetNumber].PermanentMacAddress.a[0], + pAC->Addr.Net[NetNumber].PermanentMacAddress.a[1], + pAC->Addr.Net[NetNumber].PermanentMacAddress.a[2], + pAC->Addr.Net[NetNumber].PermanentMacAddress.a[3], + pAC->Addr.Net[NetNumber].PermanentMacAddress.a[4], + pAC->Addr.Net[NetNumber].PermanentMacAddress.a[5])) + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrOverride: New logical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Net[NetNumber].CurrentMacAddress.a[0], + pAC->Addr.Net[NetNumber].CurrentMacAddress.a[1], + pAC->Addr.Net[NetNumber].CurrentMacAddress.a[2], + pAC->Addr.Net[NetNumber].CurrentMacAddress.a[3], + pAC->Addr.Net[NetNumber].CurrentMacAddress.a[4], + pAC->Addr.Net[NetNumber].CurrentMacAddress.a[5])) +#endif /* DEBUG */ + + /* Write address to first exact match entry of active port. */ + (void) SkAddrMcUpdate(pAC, IoC, PortNumber); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrOverride */ + + +#endif /* SK_NO_MAO */ + +/****************************************************************************** + * + * SkAddrPromiscuousChange - set promiscuous mode for given port + * + * Description: + * This routine manages promiscuous mode: + * - none + * - all LLC frames + * - all MC frames + * + * It calls either SkAddrXmacPromiscuousChange or + * SkAddrGmacPromiscuousChange, according to the adapter in use. + * The real work is done there. 
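+ * For illustration, a driver's set_multicast_list handler might map
+ * IFF_PROMISC to SK_PROM_MODE_LLC, IFF_ALLMULTI to SK_PROM_MODE_ALL_MC,
+ * and everything else to SK_PROM_MODE_NONE before calling this routine;
+ * the exact mapping is left to the caller.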
+ * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrPromiscuousChange( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* port whose promiscuous mode changes */ +int NewPromMode) /* new promiscuous mode */ +{ + int ReturnCode = 0; +#if (!defined(SK_SLIM) || defined(DEBUG)) + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } +#endif /* !SK_SLIM || DEBUG */ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + ReturnCode = + SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); + } +#endif /* GENESIS */ +#ifdef YUKON + if (!pAC->GIni.GIGenesis) { + ReturnCode = + SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); + } +#endif /* YUKON */ + + return (ReturnCode); + +} /* SkAddrPromiscuousChange */ + +#ifdef GENESIS + +/****************************************************************************** + * + * SkAddrXmacPromiscuousChange - set promiscuous mode for given port + * + * Description: + * This routine manages promiscuous mode: + * - none + * - all LLC frames + * - all MC frames + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +static int SkAddrXmacPromiscuousChange( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* port whose promiscuous mode changes */ +int NewPromMode) /* new promiscuous mode */ +{ + int i; + SK_BOOL InexactModeBit; + SK_U8 Inexact; + SK_U8 HwInexact; + SK_FILTER64 HwInexactFilter; + SK_U16 LoMode; /* Lower 16 bits of XMAC Mode Register. */ + int CurPromMode = SK_PROM_MODE_NONE; + + /* Read CurPromMode from Hardware. */ + XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); + + if ((LoMode & XM_MD_ENA_PROM) != 0) { + /* Promiscuous mode! */ + CurPromMode |= SK_PROM_MODE_LLC; + } + + for (Inexact = 0xFF, i = 0; i < 8; i++) { + Inexact &= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i]; + } + if (Inexact == 0xFF) { + CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC); + } + else { + /* Get InexactModeBit (bit XM_MD_ENA_HASH in mode register) */ + XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); + + InexactModeBit = (LoMode & XM_MD_ENA_HASH) != 0; + + /* Read 64-bit hash register from XMAC */ + XM_INHASH(IoC, PortNumber, XM_HSM, &HwInexactFilter.Bytes[0]); + + for (HwInexact = 0xFF, i = 0; i < 8; i++) { + HwInexact &= HwInexactFilter.Bytes[i]; + } + + if (InexactModeBit && (HwInexact == 0xFF)) { + CurPromMode |= SK_PROM_MODE_ALL_MC; + } + } + + pAC->Addr.Port[PortNumber].PromMode = NewPromMode; + + if (NewPromMode == CurPromMode) { + return (SK_ADDR_SUCCESS); + } + + if ((NewPromMode & SK_PROM_MODE_ALL_MC) && + !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC. */ + + /* Set all bits in 64-bit hash register. */ + XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + else if ((CurPromMode & SK_PROM_MODE_ALL_MC) && + !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm MC. */ + for (Inexact = 0, i = 0; i < 8; i++) { + Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i]; + } + if (Inexact == 0) { + /* Disable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE); + } + else { + /* Set 64-bit hash register to InexactFilter. 
*/ + XM_OUTHASH(IoC, PortNumber, XM_HSM, + &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + } + + if ((NewPromMode & SK_PROM_MODE_LLC) && + !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */ + /* Set the MAC in Promiscuous Mode */ + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE); + } + else if ((CurPromMode & SK_PROM_MODE_LLC) && + !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC. */ + /* Clear Promiscuous Mode */ + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrXmacPromiscuousChange */ + +#endif /* GENESIS */ + +#ifdef YUKON + +/****************************************************************************** + * + * SkAddrGmacPromiscuousChange - set promiscuous mode for given port + * + * Description: + * This routine manages promiscuous mode: + * - none + * - all LLC frames + * - all MC frames + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +static int SkAddrGmacPromiscuousChange( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* port whose promiscuous mode changes */ +int NewPromMode) /* new promiscuous mode */ +{ + SK_U16 ReceiveControl; /* GMAC Receive Control Register */ + int CurPromMode = SK_PROM_MODE_NONE; + + /* Read CurPromMode from Hardware. */ + GM_IN16(IoC, PortNumber, GM_RX_CTRL, &ReceiveControl); + + if ((ReceiveControl & (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)) == 0) { + /* Promiscuous mode! */ + CurPromMode |= SK_PROM_MODE_LLC; + } + + if ((ReceiveControl & GM_RXCR_MCF_ENA) == 0) { + /* All Multicast mode! */ + CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC); + } + + pAC->Addr.Port[PortNumber].PromMode = NewPromMode; + + if (NewPromMode == CurPromMode) { + return (SK_ADDR_SUCCESS); + } + + if ((NewPromMode & SK_PROM_MODE_ALL_MC) && + !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC */ + + /* Set all bits in 64-bit hash register. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + + if ((CurPromMode & SK_PROM_MODE_ALL_MC) && + !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm. MC */ + + /* Set 64-bit hash register to InexactFilter. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, + &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]); + + /* Enable Hashing. */ + SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE); + } + + if ((NewPromMode & SK_PROM_MODE_LLC) && + !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */ + + /* Set the MAC to Promiscuous Mode. */ + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE); + } + else if ((CurPromMode & SK_PROM_MODE_LLC) && + !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC */ + + /* Clear Promiscuous Mode. */ + SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrGmacPromiscuousChange */ + +#endif /* YUKON */ + +#ifndef SK_SLIM + +/****************************************************************************** + * + * SkAddrSwap - swap address info + * + * Description: + * This routine swaps address info of two ports. 
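+ * Both ports must belong to the same net (otherwise SK_ADDR_ILLEGAL_PORT
+ * is returned), and the net's ActivePort index is adjusted afterwards so
+ * that it refers to ToPortNumber.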
+ * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrSwap( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 FromPortNumber, /* Port1 Index */ +SK_U32 ToPortNumber) /* Port2 Index */ +{ + int i; + SK_U8 Byte; + SK_MAC_ADDR MacAddr; + SK_U32 DWord; + + if (FromPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (ToPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (pAC->Rlmt.Port[FromPortNumber].Net != pAC->Rlmt.Port[ToPortNumber].Net) { + return (SK_ADDR_ILLEGAL_PORT); + } + + /* + * Swap: + * - Exact Match Entries (GEnesis and Yukon) + * Yukon uses first entry for the logical MAC + * address (stored in the second GMAC register). + * - FirstExactMatchRlmt (GEnesis only) + * - NextExactMatchRlmt (GEnesis only) + * - FirstExactMatchDrv (GEnesis only) + * - NextExactMatchDrv (GEnesis only) + * - 64-bit filter (InexactFilter) + * - Promiscuous Mode + * of ports. + */ + + for (i = 0; i < SK_ADDR_EXACT_MATCHES; i++) { + MacAddr = pAC->Addr.Port[FromPortNumber].Exact[i]; + pAC->Addr.Port[FromPortNumber].Exact[i] = + pAC->Addr.Port[ToPortNumber].Exact[i]; + pAC->Addr.Port[ToPortNumber].Exact[i] = MacAddr; + } + + for (i = 0; i < 8; i++) { + Byte = pAC->Addr.Port[FromPortNumber].InexactFilter.Bytes[i]; + pAC->Addr.Port[FromPortNumber].InexactFilter.Bytes[i] = + pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i]; + pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i] = Byte; + } + + i = pAC->Addr.Port[FromPortNumber].PromMode; + pAC->Addr.Port[FromPortNumber].PromMode = pAC->Addr.Port[ToPortNumber].PromMode; + pAC->Addr.Port[ToPortNumber].PromMode = i; + + if (pAC->GIni.GIGenesis) { + DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt; + pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt = + pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt; + pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt = DWord; + + DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt; + pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt = + pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt; + pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt = DWord; + + DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv; + pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv = + pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv; + pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv = DWord; + + DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchDrv; + pAC->Addr.Port[FromPortNumber].NextExactMatchDrv = + pAC->Addr.Port[ToPortNumber].NextExactMatchDrv; + pAC->Addr.Port[ToPortNumber].NextExactMatchDrv = DWord; + } + + /* CAUTION: Solution works if only ports of one adapter are in use. */ + for (i = 0; (SK_U32) i < pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber]. + Net->NetNumber].NumPorts; i++) { + if (pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber]. + Port[i]->PortNumber == ToPortNumber) { + pAC->Addr.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber]. + ActivePort = i; + /* 20001207 RA: Was "ToPortNumber;". 
*/ + } + } + + (void) SkAddrMcUpdate(pAC, IoC, FromPortNumber); + (void) SkAddrMcUpdate(pAC, IoC, ToPortNumber); + + return (SK_ADDR_SUCCESS); + +} /* SkAddrSwap */ + +#endif /* !SK_SLIM */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + diff --git a/drivers/net/sk98lin/skdim.c b/drivers/net/sk98lin/skdim.c new file mode 100644 index 000000000000..37ce03fb8de3 --- /dev/null +++ b/drivers/net/sk98lin/skdim.c @@ -0,0 +1,742 @@ +/****************************************************************************** + * + * Name: skdim.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.5 $ + * Date: $Date: 2003/11/28 12:55:40 $ + * Purpose: All functions to maintain interrupt moderation + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This module is intended to manage the dynamic interrupt moderation on both + * GEnesis and Yukon adapters. + * + * Include File Hierarchy: + * + * "skdrv1st.h" + * "skdrv2nd.h" + * + ******************************************************************************/ + +#ifndef lint +static const char SysKonnectFileId[] = + "@(#) $Id: skdim.c,v 1.5 2003/11/28 12:55:40 rroesler Exp $ (C) SysKonnect."; +#endif + +#define __SKADDR_C + +#ifdef __cplusplus +#error C++ is not yet supported. 
+extern "C" { +#endif + +/******************************************************************************* +** +** Includes +** +*******************************************************************************/ + +#ifndef __INC_SKDRV1ST_H +#include "h/skdrv1st.h" +#endif + +#ifndef __INC_SKDRV2ND_H +#include "h/skdrv2nd.h" +#endif + +#include <linux/kernel_stat.h> + +/******************************************************************************* +** +** Defines +** +*******************************************************************************/ + +/******************************************************************************* +** +** Typedefs +** +*******************************************************************************/ + +/******************************************************************************* +** +** Local function prototypes +** +*******************************************************************************/ + +static unsigned int GetCurrentSystemLoad(SK_AC *pAC); +static SK_U64 GetIsrCalls(SK_AC *pAC); +static SK_BOOL IsIntModEnabled(SK_AC *pAC); +static void SetCurrIntCtr(SK_AC *pAC); +static void EnableIntMod(SK_AC *pAC); +static void DisableIntMod(SK_AC *pAC); +static void ResizeDimTimerDuration(SK_AC *pAC); +static void DisplaySelectedModerationType(SK_AC *pAC); +static void DisplaySelectedModerationMask(SK_AC *pAC); +static void DisplayDescrRatio(SK_AC *pAC); + +/******************************************************************************* +** +** Global variables +** +*******************************************************************************/ + +/******************************************************************************* +** +** Local variables +** +*******************************************************************************/ + +/******************************************************************************* +** +** Global functions +** +*******************************************************************************/ + +/******************************************************************************* +** Function : SkDimModerate +** Description : Called in every ISR to check if moderation is to be applied +** or not for the current number of interrupts +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) +** Notes : - +*******************************************************************************/ + +void +SkDimModerate(SK_AC *pAC) { + unsigned int CurrSysLoad = 0; /* expressed in percent */ + unsigned int LoadIncrease = 0; /* expressed in percent */ + SK_U64 ThresholdInts = 0; + SK_U64 IsrCallsPerSec = 0; + +#define M_DIMINFO pAC->DynIrqModInfo + + if (!IsIntModEnabled(pAC)) { + if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + CurrSysLoad = GetCurrentSystemLoad(pAC); + if (CurrSysLoad > 75) { + /* + ** More than 75% total system load! Enable the moderation + ** to shield the system against too many interrupts. + */ + EnableIntMod(pAC); + } else if (CurrSysLoad > M_DIMINFO.PrevSysLoad) { + LoadIncrease = (CurrSysLoad - M_DIMINFO.PrevSysLoad); + if (LoadIncrease > ((M_DIMINFO.PrevSysLoad * + C_INT_MOD_ENABLE_PERCENTAGE) / 100)) { + if (CurrSysLoad > 10) { + /* + ** More than 50% increase with respect to the + ** previous load of the system. Most likely this + ** is due to our ISR-proc... + */ + EnableIntMod(pAC); + } + } + } else { + /* + ** Neither too much system load at all nor too much increase + ** with respect to the previous system load. 
Hence, we can leave + ** the ISR-handling like it is without enabling moderation. + */ + } + M_DIMINFO.PrevSysLoad = CurrSysLoad; + } + } else { + if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec * + C_INT_MOD_DISABLE_PERCENTAGE) / 100); + IsrCallsPerSec = GetIsrCalls(pAC); + if (IsrCallsPerSec <= ThresholdInts) { + /* + ** The number of interrupts within the last second is + ** lower than the disable_percentage of the desried + ** maxrate. Therefore we can disable the moderation. + */ + DisableIntMod(pAC); + M_DIMINFO.MaxModIntsPerSec = + (M_DIMINFO.MaxModIntsPerSecUpperLimit + + M_DIMINFO.MaxModIntsPerSecLowerLimit) / 2; + } else { + /* + ** The number of interrupts per sec is the same as expected. + ** Evalulate the descriptor-ratio. If it has changed, a resize + ** in the moderation timer might be useful + */ + if (M_DIMINFO.AutoSizing) { + ResizeDimTimerDuration(pAC); + } + } + } + } + + /* + ** Some information to the log... + */ + if (M_DIMINFO.DisplayStats) { + DisplaySelectedModerationType(pAC); + DisplaySelectedModerationMask(pAC); + DisplayDescrRatio(pAC); + } + + M_DIMINFO.NbrProcessedDescr = 0; + SetCurrIntCtr(pAC); +} + +/******************************************************************************* +** Function : SkDimStartModerationTimer +** Description : Starts the audit-timer for the dynamic interrupt moderation +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) +** Notes : - +*******************************************************************************/ + +void +SkDimStartModerationTimer(SK_AC *pAC) { + SK_EVPARA EventParam; /* Event struct for timer event */ + + SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); + EventParam.Para32[0] = SK_DRV_MODERATION_TIMER; + SkTimerStart(pAC, pAC->IoBase, &pAC->DynIrqModInfo.ModTimer, + SK_DRV_MODERATION_TIMER_LENGTH, + SKGE_DRV, SK_DRV_TIMER, EventParam); +} + +/******************************************************************************* +** Function : SkDimEnableModerationIfNeeded +** Description : Either enables or disables moderation +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) +** Notes : This function is called when a particular adapter is opened +** There is no Disable function, because when all interrupts +** might be disable, the moderation timer has no meaning at all +******************************************************************************/ + +void +SkDimEnableModerationIfNeeded(SK_AC *pAC) { + + if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_STATIC) { + EnableIntMod(pAC); /* notification print in this function */ + } else if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + SkDimStartModerationTimer(pAC); + if (M_DIMINFO.DisplayStats) { + printk("Dynamic moderation has been enabled\n"); + } + } else { + if (M_DIMINFO.DisplayStats) { + printk("No moderation has been enabled\n"); + } + } +} + +/******************************************************************************* +** Function : SkDimDisplayModerationSettings +** Description : Displays the current settings regarding interrupt moderation +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : void (!) 
+** Notes : - +*******************************************************************************/ + +void +SkDimDisplayModerationSettings(SK_AC *pAC) { + DisplaySelectedModerationType(pAC); + DisplaySelectedModerationMask(pAC); +} + +/******************************************************************************* +** +** Local functions +** +*******************************************************************************/ + +/******************************************************************************* +** Function : GetCurrentSystemLoad +** Description : Retrieves the current system load of the system. This load +** is evaluated for all processors within the system. +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : unsigned int: load expressed in percentage +** Notes : The possible range being returned is from 0 up to 100. +** Whereas 0 means 'no load at all' and 100 'system fully loaded' +** It is impossible to determine what actually causes the system +** to be in 100%, but maybe that is due to too much interrupts. +*******************************************************************************/ + +static unsigned int +GetCurrentSystemLoad(SK_AC *pAC) { + unsigned long jif = jiffies; + unsigned int UserTime = 0; + unsigned int SystemTime = 0; + unsigned int NiceTime = 0; + unsigned int IdleTime = 0; + unsigned int TotalTime = 0; + unsigned int UsedTime = 0; + unsigned int SystemLoad = 0; + + /* unsigned int NbrCpu = 0; */ + + /* + ** The following lines have been commented out, because + ** from kernel 2.5.44 onwards, the kernel-owned structure + ** + ** struct kernel_stat kstat + ** + ** is not marked as an exported symbol in the file + ** + ** kernel/ksyms.c + ** + ** As a consequence, using this driver as KLM is not possible + ** and any access of the structure kernel_stat via the + ** dedicated macros kstat_cpu(i).cpustat.xxx is to be avoided. + ** + ** The kstat-information might be added again in future + ** versions of the 2.5.xx kernel, but for the time being, + ** number of interrupts will serve as indication how much + ** load we currently have... + ** + ** for (NbrCpu = 0; NbrCpu < num_online_cpus(); NbrCpu++) { + ** UserTime = UserTime + kstat_cpu(NbrCpu).cpustat.user; + ** NiceTime = NiceTime + kstat_cpu(NbrCpu).cpustat.nice; + ** SystemTime = SystemTime + kstat_cpu(NbrCpu).cpustat.system; + ** } + */ + SK_U64 ThresholdInts = 0; + SK_U64 IsrCallsPerSec = 0; + + ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec * + C_INT_MOD_ENABLE_PERCENTAGE) + 100); + IsrCallsPerSec = GetIsrCalls(pAC); + if (IsrCallsPerSec >= ThresholdInts) { + /* + ** We do not know how much the real CPU-load is! + ** Return 80% as a default in order to activate DIM + */ + SystemLoad = 80; + return (SystemLoad); + } + + UsedTime = UserTime + NiceTime + SystemTime; + + IdleTime = jif * num_online_cpus() - UsedTime; + TotalTime = UsedTime + IdleTime; + + SystemLoad = ( 100 * (UsedTime - M_DIMINFO.PrevUsedTime) ) / + (TotalTime - M_DIMINFO.PrevTotalTime); + + if (M_DIMINFO.DisplayStats) { + printk("Current system load is: %u\n", SystemLoad); + } + + M_DIMINFO.PrevTotalTime = TotalTime; + M_DIMINFO.PrevUsedTime = UsedTime; + + return (SystemLoad); +} + +/******************************************************************************* +** Function : GetIsrCalls +** Description : Depending on the selected moderation mask, this function will +** return the number of interrupts handled in the previous time- +** frame. 
This evaluated number is based on the current number +** of interrupts stored in PNMI-context and the previous stored +** interrupts. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : int: the number of interrupts being executed in the last +** timeframe +** Notes : It makes only sense to call this function, when dynamic +** interrupt moderation is applied +*******************************************************************************/ + +static SK_U64 +GetIsrCalls(SK_AC *pAC) { + SK_U64 RxPort0IntDiff = 0; + SK_U64 RxPort1IntDiff = 0; + SK_U64 TxPort0IntDiff = 0; + SK_U64 TxPort1IntDiff = 0; + + if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_TX_ONLY) { + if (pAC->GIni.GIMacsFound == 2) { + TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts - + pAC->DynIrqModInfo.PrevPort1TxIntrCts; + } + TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts - + pAC->DynIrqModInfo.PrevPort0TxIntrCts; + } else if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_RX_ONLY) { + if (pAC->GIni.GIMacsFound == 2) { + RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts - + pAC->DynIrqModInfo.PrevPort1RxIntrCts; + } + RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts - + pAC->DynIrqModInfo.PrevPort0RxIntrCts; + } else { + if (pAC->GIni.GIMacsFound == 2) { + RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts - + pAC->DynIrqModInfo.PrevPort1RxIntrCts; + TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts - + pAC->DynIrqModInfo.PrevPort1TxIntrCts; + } + RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts - + pAC->DynIrqModInfo.PrevPort0RxIntrCts; + TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts - + pAC->DynIrqModInfo.PrevPort0TxIntrCts; + } + + return (RxPort0IntDiff + RxPort1IntDiff + TxPort0IntDiff + TxPort1IntDiff); +} + +/******************************************************************************* +** Function : GetRxCalls +** Description : This function will return the number of times a receive inter- +** rupt was processed. This is needed to evaluate any resizing +** factor. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : SK_U64: the number of RX-ints being processed +** Notes : It makes only sense to call this function, when dynamic +** interrupt moderation is applied +*******************************************************************************/ + +static SK_U64 +GetRxCalls(SK_AC *pAC) { + SK_U64 RxPort0IntDiff = 0; + SK_U64 RxPort1IntDiff = 0; + + if (pAC->GIni.GIMacsFound == 2) { + RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts - + pAC->DynIrqModInfo.PrevPort1RxIntrCts; + } + RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts - + pAC->DynIrqModInfo.PrevPort0RxIntrCts; + + return (RxPort0IntDiff + RxPort1IntDiff); +} + +/******************************************************************************* +** Function : SetCurrIntCtr +** Description : Will store the current number orf occured interrupts in the +** adapter context. This is needed to evaluated the number of +** interrupts within a current timeframe. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void (!) 
+** Notes : - +*******************************************************************************/ + +static void +SetCurrIntCtr(SK_AC *pAC) { + if (pAC->GIni.GIMacsFound == 2) { + pAC->DynIrqModInfo.PrevPort1RxIntrCts = pAC->Pnmi.Port[1].RxIntrCts; + pAC->DynIrqModInfo.PrevPort1TxIntrCts = pAC->Pnmi.Port[1].TxIntrCts; + } + pAC->DynIrqModInfo.PrevPort0RxIntrCts = pAC->Pnmi.Port[0].RxIntrCts; + pAC->DynIrqModInfo.PrevPort0TxIntrCts = pAC->Pnmi.Port[0].TxIntrCts; +} + +/******************************************************************************* +** Function : IsIntModEnabled() +** Description : Retrieves the current value of the interrupts moderation +** command register. Its content determines whether any +** moderation is running or not. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : SK_TRUE : if mod timer running +** SK_FALSE : if no moderation is being performed +** Notes : - +*******************************************************************************/ + +static SK_BOOL +IsIntModEnabled(SK_AC *pAC) { + unsigned long CtrCmd; + + SK_IN32(pAC->IoBase, B2_IRQM_CTRL, &CtrCmd); + if ((CtrCmd & TIM_START) == TIM_START) { + return SK_TRUE; + } else { + return SK_FALSE; + } +} + +/******************************************************************************* +** Function : EnableIntMod() +** Description : Enables the interrupt moderation using the values stored in +** in the pAC->DynIntMod data structure +** Programmer : Ralph Roesler +** Last Modified: 22-mar-03 +** Returns : - +** Notes : - +*******************************************************************************/ + +static void +EnableIntMod(SK_AC *pAC) { + unsigned long ModBase; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec; + } else { + ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec; + } + + SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase); + SK_OUT32(pAC->IoBase, B2_IRQM_MSK, pAC->DynIrqModInfo.MaskIrqModeration); + SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_START); + if (M_DIMINFO.DisplayStats) { + printk("Enabled interrupt moderation (%i ints/sec)\n", + M_DIMINFO.MaxModIntsPerSec); + } +} + +/******************************************************************************* +** Function : DisableIntMod() +** Description : Disables the interrupt moderation independent of what inter- +** rupts are running or not +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : - +** Notes : - +*******************************************************************************/ + +static void +DisableIntMod(SK_AC *pAC) { + + SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_STOP); + if (M_DIMINFO.DisplayStats) { + printk("Disabled interrupt moderation\n"); + } +} + +/******************************************************************************* +** Function : ResizeDimTimerDuration(); +** Description : Checks the current used descriptor ratio and resizes the +** duration timer (longer/smaller) if possible. +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : - +** Notes : There are both maximum and minimum timer duration value. +** This function assumes that interrupt moderation is already +** enabled! 
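+**                 Example: with MaxModIntsPerSec == 2000 and the current
+**                 1% step, the new target is 2020 or 1980 ints/sec and
+**                 B2_IRQM_INI is rewritten to C_CLK_FREQ_YUKON (or
+**                 C_CLK_FREQ_GENESIS) divided by that new value.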
+*******************************************************************************/ + +static void +ResizeDimTimerDuration(SK_AC *pAC) { + SK_BOOL IncreaseTimerDuration; + int TotalMaxNbrDescr; + int UsedDescrRatio; + int RatioDiffAbs; + int RatioDiffRel; + int NewMaxModIntsPerSec; + int ModAdjValue; + long ModBase; + + /* + ** Check first if we are allowed to perform any modification + */ + if (IsIntModEnabled(pAC)) { + if (M_DIMINFO.IntModTypeSelect != C_INT_MOD_DYNAMIC) { + return; + } else { + if (M_DIMINFO.ModJustEnabled) { + M_DIMINFO.ModJustEnabled = SK_FALSE; + return; + } + } + } + + /* + ** If we got until here, we have to evaluate the amount of the + ** descriptor ratio change... + */ + TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC); + UsedDescrRatio = (M_DIMINFO.NbrProcessedDescr * 100) / TotalMaxNbrDescr; + + if (UsedDescrRatio > M_DIMINFO.PrevUsedDescrRatio) { + RatioDiffAbs = (UsedDescrRatio - M_DIMINFO.PrevUsedDescrRatio); + RatioDiffRel = (RatioDiffAbs * 100) / UsedDescrRatio; + M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio; + IncreaseTimerDuration = SK_FALSE; /* in other words: DECREASE */ + } else if (UsedDescrRatio < M_DIMINFO.PrevUsedDescrRatio) { + RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio); + RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio; + M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio; + IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */ + } else { + RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio); + RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio; + M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio; + IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */ + } + + /* + ** Now we can determine the change in percent + */ + if ((RatioDiffRel >= 0) && (RatioDiffRel <= 5) ) { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } else if ((RatioDiffRel > 5) && (RatioDiffRel <= 10) ) { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } else if ((RatioDiffRel > 10) && (RatioDiffRel <= 15) ) { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } else { + ModAdjValue = 1; /* 1% change - maybe some other value in future */ + } + + if (IncreaseTimerDuration) { + NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec + + (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100; + } else { + NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec - + (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100; + } + + /* + ** Check if we exceed boundaries... 
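	** Example of the adjustment above (all numbers assumed, purely for
	** illustration): with RxDescrPerRing = 256 and 100 receive interrupts
	** in the last timeframe, TotalMaxNbrDescr = 25600; if 5120 descriptors
	** were processed, UsedDescrRatio = 512000 / 25600 = 20 percent. A drop
	** against the previous ratio raises the budget by ModAdjValue percent,
	** a rise lowers it, so 2000 ints/sec becomes 2020 or 1980 respectively.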
+ */ + if ( (NewMaxModIntsPerSec > M_DIMINFO.MaxModIntsPerSecUpperLimit) || + (NewMaxModIntsPerSec < M_DIMINFO.MaxModIntsPerSecLowerLimit)) { + if (M_DIMINFO.DisplayStats) { + printk("Cannot change ModTim from %i to %i ints/sec\n", + M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec); + } + return; + } else { + if (M_DIMINFO.DisplayStats) { + printk("Resized ModTim from %i to %i ints/sec\n", + M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec); + } + } + + M_DIMINFO.MaxModIntsPerSec = NewMaxModIntsPerSec; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec; + } else { + ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec; + } + + /* + ** We do not need to touch any other registers + */ + SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase); +} + +/******************************************************************************* +** Function : DisplaySelectedModerationType() +** Description : Displays what type of moderation we have +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void! +** Notes : - +*******************************************************************************/ + +static void +DisplaySelectedModerationType(SK_AC *pAC) { + + if (pAC->DynIrqModInfo.DisplayStats) { + if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC) { + printk("Static int moderation runs with %i INTS/sec\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + } else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC) { + if (IsIntModEnabled(pAC)) { + printk("Dynamic int moderation runs with %i INTS/sec\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + } else { + printk("Dynamic int moderation currently not applied\n"); + } + } else { + printk("No interrupt moderation selected!\n"); + } + } +} + +/******************************************************************************* +** Function : DisplaySelectedModerationMask() +** Description : Displays what interrupts are moderated +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void! +** Notes : - +*******************************************************************************/ + +static void +DisplaySelectedModerationMask(SK_AC *pAC) { + + if (pAC->DynIrqModInfo.DisplayStats) { + if (pAC->DynIrqModInfo.IntModTypeSelect != C_INT_MOD_NONE) { + switch (pAC->DynIrqModInfo.MaskIrqModeration) { + case IRQ_MASK_TX_ONLY: + printk("Only Tx-interrupts are moderated\n"); + break; + case IRQ_MASK_RX_ONLY: + printk("Only Rx-interrupts are moderated\n"); + break; + case IRQ_MASK_SP_ONLY: + printk("Only special-interrupts are moderated\n"); + break; + case IRQ_MASK_TX_RX: + printk("Tx- and Rx-interrupts are moderated\n"); + break; + case IRQ_MASK_SP_RX: + printk("Special- and Rx-interrupts are moderated\n"); + break; + case IRQ_MASK_SP_TX: + printk("Special- and Tx-interrupts are moderated\n"); + break; + case IRQ_MASK_RX_TX_SP: + printk("All Rx-, Tx and special-interrupts are moderated\n"); + break; + default: + printk("Don't know what is moderated\n"); + break; + } + } else { + printk("No specific interrupts masked for moderation\n"); + } + } +} + +/******************************************************************************* +** Function : DisplayDescrRatio +** Description : Like the name states... +** Programmer : Ralph Roesler +** Last Modified: 23-mar-03 +** Returns : void! 
+** Notes : - +*******************************************************************************/ + +static void +DisplayDescrRatio(SK_AC *pAC) { + int TotalMaxNbrDescr = 0; + + if (pAC->DynIrqModInfo.DisplayStats) { + TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC); + printk("Ratio descriptors: %i/%i\n", + M_DIMINFO.NbrProcessedDescr, TotalMaxNbrDescr); + } +} + +/******************************************************************************* +** +** End of file +** +*******************************************************************************/ diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c new file mode 100644 index 000000000000..5a6da8950faa --- /dev/null +++ b/drivers/net/sk98lin/skethtool.c @@ -0,0 +1,627 @@ +/****************************************************************************** + * + * Name: skethtool.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.7 $ + * Date: $Date: 2004/09/29 13:32:07 $ + * Purpose: All functions regarding ethtool handling + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2004 Marvell. + * + * Driver for Marvell Yukon/2 chipset and SysKonnect Gigabit Ethernet + * Server Adapters. + * + * Author: Ralph Roesler (rroesler@syskonnect.de) + * Mirko Lindner (mlindner@syskonnect.de) + * + * Address all question to: linux@syskonnect.de + * + * The technical manual for the adapters is available from SysKonnect's + * web pages: www.syskonnect.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + *****************************************************************************/ + +#include "h/skdrv1st.h" +#include "h/skdrv2nd.h" +#include "h/skversion.h" + +#include <linux/ethtool.h> +#include <linux/timer.h> +#include <linux/delay.h> + +/****************************************************************************** + * + * Defines + * + *****************************************************************************/ + +#define SUPP_COPPER_ALL (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ + SUPPORTED_1000baseT_Half| SUPPORTED_1000baseT_Full| \ + SUPPORTED_TP) + +#define ADV_COPPER_ALL (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Half| ADVERTISED_1000baseT_Full| \ + ADVERTISED_TP) + +#define SUPP_FIBRE_ALL (SUPPORTED_1000baseT_Full | \ + SUPPORTED_FIBRE | \ + SUPPORTED_Autoneg) + +#define ADV_FIBRE_ALL (ADVERTISED_1000baseT_Full | \ + ADVERTISED_FIBRE | \ + ADVERTISED_Autoneg) + + +/****************************************************************************** + * + * Local Functions + * + *****************************************************************************/ + +/***************************************************************************** + * + * getSettings - retrieves the current settings of the selected adapter + * + * Description: + * The current configuration of the selected adapter is returned. 
+ * This configuration involves a)speed, b)duplex and c)autoneg plus + * a number of other variables. + * + * Returns: always 0 + * + */ +static int getSettings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + const DEV_NET *pNet = netdev_priv(dev); + int port = pNet->PortNr; + const SK_AC *pAC = pNet->pAC; + const SK_GEPORT *pPort = &pAC->GIni.GP[port]; + + static int DuplexAutoNegConfMap[9][3]= { + { -1 , -1 , -1 }, + { 0 , -1 , -1 }, + { SK_LMODE_HALF , DUPLEX_HALF, AUTONEG_DISABLE }, + { SK_LMODE_FULL , DUPLEX_FULL, AUTONEG_DISABLE }, + { SK_LMODE_AUTOHALF , DUPLEX_HALF, AUTONEG_ENABLE }, + { SK_LMODE_AUTOFULL , DUPLEX_FULL, AUTONEG_ENABLE }, + { SK_LMODE_AUTOBOTH , DUPLEX_FULL, AUTONEG_ENABLE }, + { SK_LMODE_AUTOSENSE , -1 , -1 }, + { SK_LMODE_INDETERMINATED, -1 , -1 } + }; + static int SpeedConfMap[6][2] = { + { 0 , -1 }, + { SK_LSPEED_AUTO , -1 }, + { SK_LSPEED_10MBPS , SPEED_10 }, + { SK_LSPEED_100MBPS , SPEED_100 }, + { SK_LSPEED_1000MBPS , SPEED_1000 }, + { SK_LSPEED_INDETERMINATED, -1 } + }; + static int AdvSpeedMap[6][2] = { + { 0 , -1 }, + { SK_LSPEED_AUTO , -1 }, + { SK_LSPEED_10MBPS , ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full }, + { SK_LSPEED_100MBPS , ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full }, + { SK_LSPEED_1000MBPS , ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full}, + { SK_LSPEED_INDETERMINATED, -1 } + }; + + ecmd->phy_address = port; + ecmd->speed = SpeedConfMap[pPort->PLinkSpeedUsed][1]; + ecmd->duplex = DuplexAutoNegConfMap[pPort->PLinkModeStatus][1]; + ecmd->autoneg = DuplexAutoNegConfMap[pPort->PLinkModeStatus][2]; + ecmd->transceiver = XCVR_INTERNAL; + + if (pAC->GIni.GICopperType) { + ecmd->port = PORT_TP; + ecmd->supported = (SUPP_COPPER_ALL|SUPPORTED_Autoneg); + if (pAC->GIni.GIGenesis) { + ecmd->supported &= ~(SUPPORTED_10baseT_Half); + ecmd->supported &= ~(SUPPORTED_10baseT_Full); + ecmd->supported &= ~(SUPPORTED_100baseT_Half); + ecmd->supported &= ~(SUPPORTED_100baseT_Full); + } else { + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + ecmd->supported &= ~(SUPPORTED_1000baseT_Half); + } +#ifdef CHIP_ID_YUKON_FE + if (pAC->GIni.GIChipId == CHIP_ID_YUKON_FE) { + ecmd->supported &= ~(SUPPORTED_1000baseT_Half); + ecmd->supported &= ~(SUPPORTED_1000baseT_Full); + } +#endif + } + if (pAC->GIni.GP[0].PLinkSpeed != SK_LSPEED_AUTO) { + ecmd->advertising = AdvSpeedMap[pPort->PLinkSpeed][1]; + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + ecmd->advertising &= ~(SUPPORTED_1000baseT_Half); + } + } else { + ecmd->advertising = ecmd->supported; + } + + if (ecmd->autoneg == AUTONEG_ENABLE) + ecmd->advertising |= ADVERTISED_Autoneg; + } else { + ecmd->port = PORT_FIBRE; + ecmd->supported = SUPP_FIBRE_ALL; + ecmd->advertising = ADV_FIBRE_ALL; + } + return 0; +} + +/* + * MIB infrastructure uses instance value starting at 1 + * based on board and port. + */ +static inline u32 pnmiInstance(const DEV_NET *pNet) +{ + return 1 + (pNet->pAC->RlmtNets == 2) + pNet->PortNr; +} + +/***************************************************************************** + * + * setSettings - configures the settings of a selected adapter + * + * Description: + * Possible settings that may be altered are a)speed, b)duplex or + * c)autonegotiation. 
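 *	(Illustration: the PNMI instance passed to SkPnmiSetVar() below is
 *	taken from pnmiInstance() above; it is 1-based and shifted by one on
 *	dual-net boards, so RlmtNets == 1 yields instances 1/2 for ports 0/1
 *	and RlmtNets == 2 yields instances 2/3.)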
+ * + * Returns: + * 0: everything fine, no error + * <0: the return value is the error code of the failure + */ +static int setSettings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + u32 instance; + char buf[4]; + int len = 1; + + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100 + && ecmd->speed != SPEED_1000) + return -EINVAL; + + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + if (ecmd->autoneg == AUTONEG_DISABLE) + *buf = (ecmd->duplex == DUPLEX_FULL) + ? SK_LMODE_FULL : SK_LMODE_HALF; + else + *buf = (ecmd->duplex == DUPLEX_FULL) + ? SK_LMODE_AUTOFULL : SK_LMODE_AUTOHALF; + + instance = pnmiInstance(pNet); + if (SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_LINK_MODE, + &buf, &len, instance, pNet->NetNr) != SK_PNMI_ERR_OK) + return -EINVAL; + + switch(ecmd->speed) { + case SPEED_1000: + *buf = SK_LSPEED_1000MBPS; + break; + case SPEED_100: + *buf = SK_LSPEED_100MBPS; + break; + case SPEED_10: + *buf = SK_LSPEED_10MBPS; + } + + if (SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_SPEED_MODE, + &buf, &len, instance, pNet->NetNr) != SK_PNMI_ERR_OK) + return -EINVAL; + + return 0; +} + +/***************************************************************************** + * + * getDriverInfo - returns generic driver and adapter information + * + * Description: + * Generic driver information is returned via this function, such as + * the name of the driver, its version and and firmware version. + * In addition to this, the location of the selected adapter is + * returned as a bus info string (e.g. '01:05.0'). + * + * Returns: N/A + * + */ +static void getDriverInfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + const DEV_NET *pNet = netdev_priv(dev); + const SK_AC *pAC = pNet->pAC; + char vers[32]; + + snprintf(vers, sizeof(vers)-1, VER_STRING "(v%d.%d)", + (pAC->GIni.GIPciHwRev >> 4) & 0xf, pAC->GIni.GIPciHwRev & 0xf); + + strlcpy(info->driver, DRIVER_FILE_NAME, sizeof(info->driver)); + strcpy(info->version, vers); + strcpy(info->fw_version, "N/A"); + strlcpy(info->bus_info, pci_name(pAC->PciDev), ETHTOOL_BUSINFO_LEN); +} + +/* + * Ethtool statistics support. 
+ */ +static const char StringsStats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", + "rx_bytes", "tx_bytes", + "rx_errors", "tx_errors", + "rx_dropped", "tx_dropped", + "multicasts", "collisions", + "rx_length_errors", "rx_buffer_overflow_errors", + "rx_crc_errors", "rx_frame_errors", + "rx_too_short_errors", "rx_too_long_errors", + "rx_carrier_extension_errors", "rx_symbol_errors", + "rx_llc_mac_size_errors", "rx_carrier_errors", + "rx_jabber_errors", "rx_missed_errors", + "tx_abort_collision_errors", "tx_carrier_errors", + "tx_buffer_underrun_errors", "tx_heartbeat_errors", + "tx_window_errors", +}; + +static int getStatsCount(struct net_device *dev) +{ + return ARRAY_SIZE(StringsStats); +} + +static void getStrings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *StringsStats, sizeof(StringsStats)); + break; + } +} + +static void getEthtoolStats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + const DEV_NET *pNet = netdev_priv(dev); + const SK_AC *pAC = pNet->pAC; + const SK_PNMI_STRUCT_DATA *pPnmiStruct = &pAC->PnmiStruct; + + *data++ = pPnmiStruct->Stat[0].StatRxOkCts; + *data++ = pPnmiStruct->Stat[0].StatTxOkCts; + *data++ = pPnmiStruct->Stat[0].StatRxOctetsOkCts; + *data++ = pPnmiStruct->Stat[0].StatTxOctetsOkCts; + *data++ = pPnmiStruct->InErrorsCts; + *data++ = pPnmiStruct->Stat[0].StatTxSingleCollisionCts; + *data++ = pPnmiStruct->RxNoBufCts; + *data++ = pPnmiStruct->TxNoBufCts; + *data++ = pPnmiStruct->Stat[0].StatRxMulticastOkCts; + *data++ = pPnmiStruct->Stat[0].StatTxSingleCollisionCts; + *data++ = pPnmiStruct->Stat[0].StatRxRuntCts; + *data++ = pPnmiStruct->Stat[0].StatRxFifoOverflowCts; + *data++ = pPnmiStruct->Stat[0].StatRxFcsCts; + *data++ = pPnmiStruct->Stat[0].StatRxFramingCts; + *data++ = pPnmiStruct->Stat[0].StatRxShortsCts; + *data++ = pPnmiStruct->Stat[0].StatRxTooLongCts; + *data++ = pPnmiStruct->Stat[0].StatRxCextCts; + *data++ = pPnmiStruct->Stat[0].StatRxSymbolCts; + *data++ = pPnmiStruct->Stat[0].StatRxIRLengthCts; + *data++ = pPnmiStruct->Stat[0].StatRxCarrierCts; + *data++ = pPnmiStruct->Stat[0].StatRxJabberCts; + *data++ = pPnmiStruct->Stat[0].StatRxMissedCts; + *data++ = pAC->stats.tx_aborted_errors; + *data++ = pPnmiStruct->Stat[0].StatTxCarrierCts; + *data++ = pPnmiStruct->Stat[0].StatTxFifoUnderrunCts; + *data++ = pPnmiStruct->Stat[0].StatTxCarrierCts; + *data++ = pAC->stats.tx_window_errors; +} + + +/***************************************************************************** + * + * toggleLeds - Changes the LED state of an adapter + * + * Description: + * This function changes the current state of all LEDs of an adapter so + * that it can be located by a user. + * + * Returns: N/A + * + */ +static void toggleLeds(DEV_NET *pNet, int on) +{ + SK_AC *pAC = pNet->pAC; + int port = pNet->PortNr; + void __iomem *io = pAC->IoBase; + + if (pAC->GIni.GIGenesis) { + SK_OUT8(io, MR_ADDR(port,LNK_LED_REG), + on ? SK_LNK_ON : SK_LNK_OFF); + SkGeYellowLED(pAC, io, + on ? (LED_ON >> 1) : (LED_OFF >> 1)); + SkGeXmitLED(pAC, io, MR_ADDR(port,RX_LED_INI), + on ? SK_LED_TST : SK_LED_DIS); + + if (pAC->GIni.GP[port].PhyType == SK_PHY_BCOM) + SkXmPhyWrite(pAC, io, port, PHY_BCOM_P_EXT_CTRL, + on ? PHY_B_PEC_LED_ON : PHY_B_PEC_LED_OFF); + else if (pAC->GIni.GP[port].PhyType == SK_PHY_LONE) + SkXmPhyWrite(pAC, io, port, PHY_LONE_LED_CFG, + on ? 0x0800 : PHY_L_LC_LEDT); + else + SkGeXmitLED(pAC, io, MR_ADDR(port,TX_LED_INI), + on ? 
SK_LED_TST : SK_LED_DIS); + } else { + const u16 YukLedOn = (PHY_M_LED_MO_DUP(MO_LED_ON) | + PHY_M_LED_MO_10(MO_LED_ON) | + PHY_M_LED_MO_100(MO_LED_ON) | + PHY_M_LED_MO_1000(MO_LED_ON) | + PHY_M_LED_MO_RX(MO_LED_ON)); + const u16 YukLedOff = (PHY_M_LED_MO_DUP(MO_LED_OFF) | + PHY_M_LED_MO_10(MO_LED_OFF) | + PHY_M_LED_MO_100(MO_LED_OFF) | + PHY_M_LED_MO_1000(MO_LED_OFF) | + PHY_M_LED_MO_RX(MO_LED_OFF)); + + + SkGmPhyWrite(pAC,io,port,PHY_MARV_LED_CTRL,0); + SkGmPhyWrite(pAC,io,port,PHY_MARV_LED_OVER, + on ? YukLedOn : YukLedOff); + } +} + +/***************************************************************************** + * + * skGeBlinkTimer - Changes the LED state of an adapter + * + * Description: + * This function changes the current state of all LEDs of an adapter so + * that it can be located by a user. If the requested time interval for + * this test has elapsed, this function cleans up everything that was + * temporarily setup during the locate NIC test. This involves of course + * also closing or opening any adapter so that the initial board state + * is recovered. + * + * Returns: N/A + * + */ +void SkGeBlinkTimer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + + toggleLeds(pNet, pAC->LedsOn); + + pAC->LedsOn = !pAC->LedsOn; + mod_timer(&pAC->BlinkTimer, jiffies + HZ/4); +} + +/***************************************************************************** + * + * locateDevice - start the locate NIC feature of the elected adapter + * + * Description: + * This function is used if the user want to locate a particular NIC. + * All LEDs are regularly switched on and off, so the NIC can easily + * be identified. + * + * Returns: + * ==0: everything fine, no error, locateNIC test was started + * !=0: one locateNIC test runs already + * + */ +static int locateDevice(struct net_device *dev, u32 data) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + + if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) + data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); + + /* start blinking */ + pAC->LedsOn = 0; + mod_timer(&pAC->BlinkTimer, jiffies); + msleep_interruptible(data * 1000); + del_timer_sync(&pAC->BlinkTimer); + toggleLeds(pNet, 0); + + return 0; +} + +/***************************************************************************** + * + * getPauseParams - retrieves the pause parameters + * + * Description: + * All current pause parameters of a selected adapter are placed + * in the passed ethtool_pauseparam structure and are returned. 
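 *	The flags are derived from the port's flow-control mode: rx_pause is
 *	set for SK_FLOW_MODE_SYMMETRIC and SK_FLOW_MODE_SYM_OR_REM, tx_pause
 *	additionally for SK_FLOW_MODE_LOC_SEND, and autoneg is reported
 *	whenever rx_pause or tx_pause is set.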
+ * + * Returns: N/A + * + */ +static void getPauseParams(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + SK_GEPORT *pPort = &pAC->GIni.GP[pNet->PortNr]; + + epause->rx_pause = (pPort->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC) || + (pPort->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM); + + epause->tx_pause = epause->rx_pause || (pPort->PFlowCtrlMode == SK_FLOW_MODE_LOC_SEND); + epause->autoneg = epause->rx_pause || epause->tx_pause; +} + +/***************************************************************************** + * + * setPauseParams - configures the pause parameters of an adapter + * + * Description: + * This function sets the Rx or Tx pause parameters + * + * Returns: + * ==0: everything fine, no error + * !=0: the return value is the error code of the failure + */ +static int setPauseParams(struct net_device *dev , struct ethtool_pauseparam *epause) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + SK_GEPORT *pPort = &pAC->GIni.GP[pNet->PortNr]; + u32 instance = pnmiInstance(pNet); + struct ethtool_pauseparam old; + u8 oldspeed = pPort->PLinkSpeedUsed; + char buf[4]; + int len = 1; + int ret; + + /* + ** we have to determine the current settings to see if + ** the operator requested any modification of the flow + ** control parameters... + */ + getPauseParams(dev, &old); + + /* + ** perform modifications regarding the changes + ** requested by the operator + */ + if (epause->autoneg != old.autoneg) + *buf = epause->autoneg ? SK_FLOW_MODE_NONE : SK_FLOW_MODE_SYMMETRIC; + else { + if (epause->rx_pause && epause->tx_pause) + *buf = SK_FLOW_MODE_SYMMETRIC; + else if (epause->rx_pause && !epause->tx_pause) + *buf = SK_FLOW_MODE_SYM_OR_REM; + else if (!epause->rx_pause && epause->tx_pause) + *buf = SK_FLOW_MODE_LOC_SEND; + else + *buf = SK_FLOW_MODE_NONE; + } + + ret = SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_FLOWCTRL_MODE, + &buf, &len, instance, pNet->NetNr); + + if (ret != SK_PNMI_ERR_OK) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_CTRL, + ("ethtool (sk98lin): error changing rx/tx pause (%i)\n", ret)); + goto err; + } + + /* + ** It may be that autoneg has been disabled! Therefore + ** set the speed to the previously used value... + */ + if (!epause->autoneg) { + len = 1; + ret = SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_SPEED_MODE, + &oldspeed, &len, instance, pNet->NetNr); + if (ret != SK_PNMI_ERR_OK) + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_CTRL, + ("ethtool (sk98lin): error setting speed (%i)\n", ret)); + } + err: + return ret ? -EIO : 0; +} + +/* Only Yukon supports checksum offload. 
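 * In practice setScatterGather(), setTxCsum() and setRxCsum() below simply
 * return -EOPNOTSUPP when GIChipId == CHIP_ID_GENESIS, while getRxCsum()
 * reports the per-port RxCsum flag kept in the adapter context.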
*/ +static int setScatterGather(struct net_device *dev, u32 data) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) + return -EOPNOTSUPP; + return ethtool_op_set_sg(dev, data); +} + +static int setTxCsum(struct net_device *dev, u32 data) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) + return -EOPNOTSUPP; + + return ethtool_op_set_tx_csum(dev, data); +} + +static u32 getRxCsum(struct net_device *dev) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + + return pAC->RxPort[pNet->PortNr].RxCsum; +} + +static int setRxCsum(struct net_device *dev, u32 data) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) + return -EOPNOTSUPP; + + pAC->RxPort[pNet->PortNr].RxCsum = data != 0; + return 0; +} + +static int getRegsLen(struct net_device *dev) +{ + return 0x4000; +} + +/* + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! + */ +static void getRegs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + DEV_NET *pNet = netdev_priv(dev); + const void __iomem *io = pNet->pAC->IoBase; + + regs->version = 1; + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); + + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); +} + +const struct ethtool_ops SkGeEthtoolOps = { + .get_settings = getSettings, + .set_settings = setSettings, + .get_drvinfo = getDriverInfo, + .get_strings = getStrings, + .get_stats_count = getStatsCount, + .get_ethtool_stats = getEthtoolStats, + .phys_id = locateDevice, + .get_pauseparam = getPauseParams, + .set_pauseparam = setPauseParams, + .get_link = ethtool_op_get_link, + .get_sg = ethtool_op_get_sg, + .set_sg = setScatterGather, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = setTxCsum, + .get_rx_csum = getRxCsum, + .set_rx_csum = setRxCsum, + .get_regs = getRegs, + .get_regs_len = getRegsLen, +}; diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c new file mode 100644 index 000000000000..20890e44f99a --- /dev/null +++ b/drivers/net/sk98lin/skge.c @@ -0,0 +1,5218 @@ +/****************************************************************************** + * + * Name: skge.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.45 $ + * Date: $Date: 2004/02/12 14:41:02 $ + * Purpose: The main driver source module + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * Driver for Marvell Yukon chipset and SysKonnect Gigabit Ethernet + * Server Adapters. + * + * Created 10-Feb-1999, based on Linux' acenic.c, 3c59x.c and + * SysKonnects GEnesis Solaris driver + * Author: Christoph Goos (cgoos@syskonnect.de) + * Mirko Lindner (mlindner@syskonnect.de) + * + * Address all question to: linux@syskonnect.de + * + * The technical manual for the adapters is available from SysKonnect's + * web pages: www.syskonnect.com + * Goto "Support" and search Knowledge Base for "manual". 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Possible compiler options (#define xxx / -Dxxx): + * + * debugging can be enable by changing SK_DEBUG_CHKMOD and + * SK_DEBUG_CHKCAT in makefile (described there). + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This is the main module of the Linux GE driver. + * + * All source files except skge.c, skdrv1st.h, skdrv2nd.h and sktypes.h + * are part of SysKonnect's COMMON MODULES for the SK-98xx adapters. + * Those are used for drivers on multiple OS', so some thing may seem + * unnecessary complicated on Linux. Please do not try to 'clean up' + * them without VERY good reasons, because this will make it more + * difficult to keep the Linux driver in synchronisation with the + * other versions. + * + * Include file hierarchy: + * + * <linux/module.h> + * + * "h/skdrv1st.h" + * <linux/types.h> + * <linux/kernel.h> + * <linux/string.h> + * <linux/errno.h> + * <linux/ioport.h> + * <linux/slab.h> + * <linux/interrupt.h> + * <linux/pci.h> + * <linux/bitops.h> + * <asm/byteorder.h> + * <asm/io.h> + * <linux/netdevice.h> + * <linux/etherdevice.h> + * <linux/skbuff.h> + * those three depending on kernel version used: + * <linux/bios32.h> + * <linux/init.h> + * <asm/uaccess.h> + * <net/checksum.h> + * + * "h/skerror.h" + * "h/skdebug.h" + * "h/sktypes.h" + * "h/lm80.h" + * "h/xmac_ii.h" + * + * "h/skdrv2nd.h" + * "h/skqueue.h" + * "h/skgehwt.h" + * "h/sktimer.h" + * "h/ski2c.h" + * "h/skgepnmi.h" + * "h/skvpd.h" + * "h/skgehw.h" + * "h/skgeinit.h" + * "h/skaddr.h" + * "h/skgesirq.h" + * "h/skrlmt.h" + * + ******************************************************************************/ + +#include "h/skversion.h" + +#include <linux/in.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/ip.h> +#include <linux/mii.h> +#include <linux/mm.h> + +#include "h/skdrv1st.h" +#include "h/skdrv2nd.h" + +/******************************************************************************* + * + * Defines + * + ******************************************************************************/ + +/* for debuging on x86 only */ +/* #define BREAKPOINT() asm(" int $3"); */ + +/* use the transmit hw checksum driver functionality */ +#define USE_SK_TX_CHECKSUM + +/* use the receive hw checksum driver functionality */ +#define USE_SK_RX_CHECKSUM + +/* use the scatter-gather functionality with sendfile() */ +#define SK_ZEROCOPY + +/* use of a transmit complete interrupt */ +#define USE_TX_COMPLETE + +/* + * threshold for copying small receive frames + * set to 0 to avoid copying, set to 9001 to copy all frames + */ +#define SK_COPY_THRESHOLD 50 + +/* number of adapters that can be configured via command line params */ +#define SK_MAX_CARD_PARAM 16 + + + +/* + * use those defines for a compile-in version of the driver instead + * of command line parameters + */ +// #define LINK_SPEED_A {"Auto", } +// 
#define LINK_SPEED_B {"Auto", } +// #define AUTO_NEG_A {"Sense", } +// #define AUTO_NEG_B {"Sense", } +// #define DUP_CAP_A {"Both", } +// #define DUP_CAP_B {"Both", } +// #define FLOW_CTRL_A {"SymOrRem", } +// #define FLOW_CTRL_B {"SymOrRem", } +// #define ROLE_A {"Auto", } +// #define ROLE_B {"Auto", } +// #define PREF_PORT {"A", } +// #define CON_TYPE {"Auto", } +// #define RLMT_MODE {"CheckLinkState", } + +#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb) +#define DEV_KFREE_SKB_IRQ(skb) dev_kfree_skb_irq(skb) +#define DEV_KFREE_SKB_ANY(skb) dev_kfree_skb_any(skb) + + +/* Set blink mode*/ +#define OEM_CONFIG_VALUE ( SK_ACT_LED_BLINK | \ + SK_DUP_LED_NORMAL | \ + SK_LED_LINK100_ON) + + +/* Isr return value */ +#define SkIsrRetVar irqreturn_t +#define SkIsrRetNone IRQ_NONE +#define SkIsrRetHandled IRQ_HANDLED + + +/******************************************************************************* + * + * Local Function Prototypes + * + ******************************************************************************/ + +static void FreeResources(struct SK_NET_DEVICE *dev); +static int SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC); +static SK_BOOL BoardAllocMem(SK_AC *pAC); +static void BoardFreeMem(SK_AC *pAC); +static void BoardInitMem(SK_AC *pAC); +static void SetupRing(SK_AC*, void*, uintptr_t, RXD**, RXD**, RXD**, int*, SK_BOOL); +static SkIsrRetVar SkGeIsr(int irq, void *dev_id); +static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id); +static int SkGeOpen(struct SK_NET_DEVICE *dev); +static int SkGeClose(struct SK_NET_DEVICE *dev); +static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev); +static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p); +static void SkGeSetRxMode(struct SK_NET_DEVICE *dev); +static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev); +static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd); +static void GetConfiguration(SK_AC*); +static int XmitFrame(SK_AC*, TX_PORT*, struct sk_buff*); +static void FreeTxDescriptors(SK_AC*pAC, TX_PORT*); +static void FillRxRing(SK_AC*, RX_PORT*); +static SK_BOOL FillRxDescriptor(SK_AC*, RX_PORT*); +static void ReceiveIrq(SK_AC*, RX_PORT*, SK_BOOL); +static void ClearAndStartRx(SK_AC*, int); +static void ClearTxIrq(SK_AC*, int, int); +static void ClearRxRing(SK_AC*, RX_PORT*); +static void ClearTxRing(SK_AC*, TX_PORT*); +static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int new_mtu); +static void PortReInitBmu(SK_AC*, int); +static int SkGeIocMib(DEV_NET*, unsigned int, int); +static int SkGeInitPCI(SK_AC *pAC); +static void StartDrvCleanupTimer(SK_AC *pAC); +static void StopDrvCleanupTimer(SK_AC *pAC); +static int XmitFrameSG(SK_AC*, TX_PORT*, struct sk_buff*); + +#ifdef SK_DIAG_SUPPORT +static SK_U32 ParseDeviceNbrFromSlotName(const char *SlotName); +static int SkDrvInitAdapter(SK_AC *pAC, int devNbr); +static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr); +#endif + +/******************************************************************************* + * + * Extern Function Prototypes + * + ******************************************************************************/ +extern void SkDimEnableModerationIfNeeded(SK_AC *pAC); +extern void SkDimDisplayModerationSettings(SK_AC *pAC); +extern void SkDimStartModerationTimer(SK_AC *pAC); +extern void SkDimModerate(SK_AC *pAC); +extern void SkGeBlinkTimer(unsigned long data); + +#ifdef DEBUG +static void DumpMsg(struct sk_buff*, char*); +static void DumpData(char*, int); +static void DumpLong(char*, int); +#endif + +/* global variables 
*********************************************************/ +static SK_BOOL DoPrintInterfaceChange = SK_TRUE; +extern const struct ethtool_ops SkGeEthtoolOps; + +/* local variables **********************************************************/ +static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}}; +static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480}; + +/***************************************************************************** + * + * SkPciWriteCfgDWord - write a 32 bit value to pci config space + * + * Description: + * This routine writes a 32 bit value to the pci configuration + * space. + * + * Returns: + * 0 - indicate everything worked ok. + * != 0 - error indication + */ +static inline int SkPciWriteCfgDWord( +SK_AC *pAC, /* Adapter Control structure pointer */ +int PciAddr, /* PCI register address */ +SK_U32 Val) /* pointer to store the read value */ +{ + pci_write_config_dword(pAC->PciDev, PciAddr, Val); + return(0); +} /* SkPciWriteCfgDWord */ + +/***************************************************************************** + * + * SkGeInitPCI - Init the PCI resources + * + * Description: + * This function initialize the PCI resources and IO + * + * Returns: + * 0 - indicate everything worked ok. + * != 0 - error indication + */ +static __devinit int SkGeInitPCI(SK_AC *pAC) +{ + struct SK_NET_DEVICE *dev = pAC->dev[0]; + struct pci_dev *pdev = pAC->PciDev; + int retval; + + dev->mem_start = pci_resource_start (pdev, 0); + pci_set_master(pdev); + + retval = pci_request_regions(pdev, "sk98lin"); + if (retval) + goto out; + +#ifdef SK_BIG_ENDIAN + /* + * On big endian machines, we use the adapter's aibility of + * reading the descriptors as big endian. + */ + { + SK_U32 our2; + SkPciReadCfgDWord(pAC, PCI_OUR_REG_2, &our2); + our2 |= PCI_REV_DESC; + SkPciWriteCfgDWord(pAC, PCI_OUR_REG_2, our2); + } +#endif + + /* + * Remap the regs into kernel space. + */ + pAC->IoBase = ioremap_nocache(dev->mem_start, 0x4000); + if (!pAC->IoBase) { + retval = -EIO; + goto out_release; + } + + return 0; + + out_release: + pci_release_regions(pdev); + out: + return retval; +} + + +/***************************************************************************** + * + * FreeResources - release resources allocated for adapter + * + * Description: + * This function releases the IRQ, unmaps the IO and + * frees the desriptor ring. 
+ * + * Returns: N/A + * + */ +static void FreeResources(struct SK_NET_DEVICE *dev) +{ +SK_U32 AllocFlag; +DEV_NET *pNet; +SK_AC *pAC; + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + AllocFlag = pAC->AllocFlag; + if (pAC->PciDev) { + pci_release_regions(pAC->PciDev); + } + if (AllocFlag & SK_ALLOC_IRQ) { + free_irq(dev->irq, dev); + } + if (pAC->IoBase) { + iounmap(pAC->IoBase); + } + if (pAC->pDescrMem) { + BoardFreeMem(pAC); + } + +} /* FreeResources */ + +MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>"); +MODULE_DESCRIPTION("SysKonnect SK-NET Gigabit Ethernet SK-98xx driver"); +MODULE_LICENSE("GPL"); + +#ifdef LINK_SPEED_A +static char *Speed_A[SK_MAX_CARD_PARAM] = LINK_SPEED; +#else +static char *Speed_A[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef LINK_SPEED_B +static char *Speed_B[SK_MAX_CARD_PARAM] = LINK_SPEED; +#else +static char *Speed_B[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef AUTO_NEG_A +static char *AutoNeg_A[SK_MAX_CARD_PARAM] = AUTO_NEG_A; +#else +static char *AutoNeg_A[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef DUP_CAP_A +static char *DupCap_A[SK_MAX_CARD_PARAM] = DUP_CAP_A; +#else +static char *DupCap_A[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef FLOW_CTRL_A +static char *FlowCtrl_A[SK_MAX_CARD_PARAM] = FLOW_CTRL_A; +#else +static char *FlowCtrl_A[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef ROLE_A +static char *Role_A[SK_MAX_CARD_PARAM] = ROLE_A; +#else +static char *Role_A[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef AUTO_NEG_B +static char *AutoNeg_B[SK_MAX_CARD_PARAM] = AUTO_NEG_B; +#else +static char *AutoNeg_B[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef DUP_CAP_B +static char *DupCap_B[SK_MAX_CARD_PARAM] = DUP_CAP_B; +#else +static char *DupCap_B[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef FLOW_CTRL_B +static char *FlowCtrl_B[SK_MAX_CARD_PARAM] = FLOW_CTRL_B; +#else +static char *FlowCtrl_B[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef ROLE_B +static char *Role_B[SK_MAX_CARD_PARAM] = ROLE_B; +#else +static char *Role_B[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef CON_TYPE +static char *ConType[SK_MAX_CARD_PARAM] = CON_TYPE; +#else +static char *ConType[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef PREF_PORT +static char *PrefPort[SK_MAX_CARD_PARAM] = PREF_PORT; +#else +static char *PrefPort[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef RLMT_MODE +static char *RlmtMode[SK_MAX_CARD_PARAM] = RLMT_MODE; +#else +static char *RlmtMode[SK_MAX_CARD_PARAM] = {"", }; +#endif + +static int IntsPerSec[SK_MAX_CARD_PARAM]; +static char *Moderation[SK_MAX_CARD_PARAM]; +static char *ModerationMask[SK_MAX_CARD_PARAM]; +static char *AutoSizing[SK_MAX_CARD_PARAM]; +static char *Stats[SK_MAX_CARD_PARAM]; + +module_param_array(Speed_A, charp, NULL, 0); +module_param_array(Speed_B, charp, NULL, 0); +module_param_array(AutoNeg_A, charp, NULL, 0); +module_param_array(AutoNeg_B, charp, NULL, 0); +module_param_array(DupCap_A, charp, NULL, 0); +module_param_array(DupCap_B, charp, NULL, 0); +module_param_array(FlowCtrl_A, charp, NULL, 0); +module_param_array(FlowCtrl_B, charp, NULL, 0); +module_param_array(Role_A, charp, NULL, 0); +module_param_array(Role_B, charp, NULL, 0); +module_param_array(ConType, charp, NULL, 0); +module_param_array(PrefPort, charp, NULL, 0); +module_param_array(RlmtMode, charp, NULL, 0); +/* used for interrupt moderation */ +module_param_array(IntsPerSec, int, NULL, 0); +module_param_array(Moderation, charp, NULL, 0); +module_param_array(Stats, charp, NULL, 0); +module_param_array(ModerationMask, charp, NULL, 0); 
+module_param_array(AutoSizing, charp, NULL, 0); + +/***************************************************************************** + * + * SkGeBoardInit - do level 0 and 1 initialization + * + * Description: + * This function prepares the board hardware for running. The desriptor + * ring is set up, the IRQ is allocated and the configuration settings + * are examined. + * + * Returns: + * 0, if everything is ok + * !=0, on error + */ +static int __devinit SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC) +{ +short i; +unsigned long Flags; +char *DescrString = "sk98lin: Driver for Linux"; /* this is given to PNMI */ +char *VerStr = VER_STRING; +int Ret; /* return code of request_irq */ +SK_BOOL DualNet; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("IoBase: %08lX\n", (unsigned long)pAC->IoBase)); + for (i=0; i<SK_MAX_MACS; i++) { + pAC->TxPort[i][0].HwAddr = pAC->IoBase + TxQueueAddr[i][0]; + pAC->TxPort[i][0].PortIndex = i; + pAC->RxPort[i].HwAddr = pAC->IoBase + RxQueueAddr[i]; + pAC->RxPort[i].PortIndex = i; + } + + /* Initialize the mutexes */ + for (i=0; i<SK_MAX_MACS; i++) { + spin_lock_init(&pAC->TxPort[i][0].TxDesRingLock); + spin_lock_init(&pAC->RxPort[i].RxDesRingLock); + } + spin_lock_init(&pAC->SlowPathLock); + + /* setup phy_id blink timer */ + pAC->BlinkTimer.function = SkGeBlinkTimer; + pAC->BlinkTimer.data = (unsigned long) dev; + init_timer(&pAC->BlinkTimer); + + /* level 0 init common modules here */ + + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + /* Does a RESET on board ...*/ + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) { + printk("HWInit (0) failed.\n"); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + return -EIO; + } + SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_DATA); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_DATA); + + pAC->BoardLevel = SK_INIT_DATA; + pAC->RxBufSize = ETH_BUF_SIZE; + + SK_PNMI_SET_DRIVER_DESCR(pAC, DescrString); + SK_PNMI_SET_DRIVER_VER(pAC, VerStr); + + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + + /* level 1 init common modules here (HW init) */ + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) { + printk("sk98lin: HWInit (1) failed.\n"); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + return -EIO; + } + SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO); + SkEventInit(pAC, pAC->IoBase, SK_INIT_IO); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO); + + /* Set chipset type support */ + pAC->ChipsetType = 0; + if ((pAC->GIni.GIChipId == CHIP_ID_YUKON) || + (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE)) { + pAC->ChipsetType = 1; + } + + GetConfiguration(pAC); + if (pAC->RlmtNets == 2) { + pAC->GIni.GIPortUsage = SK_MUL_LINK; + } + + pAC->BoardLevel = SK_INIT_IO; + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + + if (pAC->GIni.GIMacsFound == 2) { + Ret = request_irq(dev->irq, SkGeIsr, IRQF_SHARED, "sk98lin", dev); + } else if (pAC->GIni.GIMacsFound == 1) { + Ret = request_irq(dev->irq, SkGeIsrOnePort, IRQF_SHARED, + "sk98lin", dev); + } else { + printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n", + pAC->GIni.GIMacsFound); + return -EIO; + } + + if (Ret) { + printk(KERN_WARNING "sk98lin: Requested IRQ %d is busy.\n", + dev->irq); + return Ret; + } 
+ pAC->AllocFlag |= SK_ALLOC_IRQ; + + /* Alloc memory for this board (Mem for RxD/TxD) : */ + if(!BoardAllocMem(pAC)) { + printk("No memory for descriptor rings.\n"); + return -ENOMEM; + } + + BoardInitMem(pAC); + /* tschilling: New common function with minimum size check. */ + DualNet = SK_FALSE; + if (pAC->RlmtNets == 2) { + DualNet = SK_TRUE; + } + + if (SkGeInitAssignRamToQueues( + pAC, + pAC->ActivePort, + DualNet)) { + BoardFreeMem(pAC); + printk("sk98lin: SkGeInitAssignRamToQueues failed.\n"); + return -EIO; + } + + return (0); +} /* SkGeBoardInit */ + + +/***************************************************************************** + * + * BoardAllocMem - allocate the memory for the descriptor rings + * + * Description: + * This function allocates the memory for all descriptor rings. + * Each ring is aligned for the desriptor alignment and no ring + * has a 4 GByte boundary in it (because the upper 32 bit must + * be constant for all descriptiors in one rings). + * + * Returns: + * SK_TRUE, if all memory could be allocated + * SK_FALSE, if not + */ +static __devinit SK_BOOL BoardAllocMem(SK_AC *pAC) +{ +caddr_t pDescrMem; /* pointer to descriptor memory area */ +size_t AllocLength; /* length of complete descriptor area */ +int i; /* loop counter */ +unsigned long BusAddr; + + + /* rings plus one for alignment (do not cross 4 GB boundary) */ + /* RX_RING_SIZE is assumed bigger than TX_RING_SIZE */ +#if (BITS_PER_LONG == 32) + AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + 8; +#else + AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + + RX_RING_SIZE + 8; +#endif + + pDescrMem = pci_alloc_consistent(pAC->PciDev, AllocLength, + &pAC->pDescrMemDMA); + + if (pDescrMem == NULL) { + return (SK_FALSE); + } + pAC->pDescrMem = pDescrMem; + BusAddr = (unsigned long) pAC->pDescrMemDMA; + + /* Descriptors need 8 byte alignment, and this is ensured + * by pci_alloc_consistent. + */ + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, + ("TX%d/A: pDescrMem: %lX, PhysDescrMem: %lX\n", + i, (unsigned long) pDescrMem, + BusAddr)); + pAC->TxPort[i][0].pTxDescrRing = pDescrMem; + pAC->TxPort[i][0].VTxDescrRing = BusAddr; + pDescrMem += TX_RING_SIZE; + BusAddr += TX_RING_SIZE; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, + ("RX%d: pDescrMem: %lX, PhysDescrMem: %lX\n", + i, (unsigned long) pDescrMem, + (unsigned long)BusAddr)); + pAC->RxPort[i].pRxDescrRing = pDescrMem; + pAC->RxPort[i].VRxDescrRing = BusAddr; + pDescrMem += RX_RING_SIZE; + BusAddr += RX_RING_SIZE; + } /* for */ + + return (SK_TRUE); +} /* BoardAllocMem */ + + +/**************************************************************************** + * + * BoardFreeMem - reverse of BoardAllocMem + * + * Description: + * Free all memory allocated in BoardAllocMem: adapter context, + * descriptor rings, locks. 
+ * + * Returns: N/A + */ +static void BoardFreeMem( +SK_AC *pAC) +{ +size_t AllocLength; /* length of complete descriptor area */ + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("BoardFreeMem\n")); +#if (BITS_PER_LONG == 32) + AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + 8; +#else + AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + + RX_RING_SIZE + 8; +#endif + + pci_free_consistent(pAC->PciDev, AllocLength, + pAC->pDescrMem, pAC->pDescrMemDMA); + pAC->pDescrMem = NULL; +} /* BoardFreeMem */ + + +/***************************************************************************** + * + * BoardInitMem - initiate the descriptor rings + * + * Description: + * This function sets the descriptor rings up in memory. + * The adapter is initialized with the descriptor start addresses. + * + * Returns: N/A + */ +static __devinit void BoardInitMem(SK_AC *pAC) +{ +int i; /* loop counter */ +int RxDescrSize; /* the size of a rx descriptor rounded up to alignment*/ +int TxDescrSize; /* the size of a tx descriptor rounded up to alignment*/ + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("BoardInitMem\n")); + + RxDescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN; + pAC->RxDescrPerRing = RX_RING_SIZE / RxDescrSize; + TxDescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN; + pAC->TxDescrPerRing = TX_RING_SIZE / RxDescrSize; + + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + SetupRing( + pAC, + pAC->TxPort[i][0].pTxDescrRing, + pAC->TxPort[i][0].VTxDescrRing, + (RXD**)&pAC->TxPort[i][0].pTxdRingHead, + (RXD**)&pAC->TxPort[i][0].pTxdRingTail, + (RXD**)&pAC->TxPort[i][0].pTxdRingPrev, + &pAC->TxPort[i][0].TxdRingFree, + SK_TRUE); + SetupRing( + pAC, + pAC->RxPort[i].pRxDescrRing, + pAC->RxPort[i].VRxDescrRing, + &pAC->RxPort[i].pRxdRingHead, + &pAC->RxPort[i].pRxdRingTail, + &pAC->RxPort[i].pRxdRingPrev, + &pAC->RxPort[i].RxdRingFree, + SK_FALSE); + } +} /* BoardInitMem */ + + +/***************************************************************************** + * + * SetupRing - create one descriptor ring + * + * Description: + * This function creates one descriptor ring in the given memory area. + * The head, tail and number of free descriptors in the ring are set. + * + * Returns: + * none + */ +static void SetupRing( +SK_AC *pAC, +void *pMemArea, /* a pointer to the memory area for the ring */ +uintptr_t VMemArea, /* the virtual bus address of the memory area */ +RXD **ppRingHead, /* address where the head should be written */ +RXD **ppRingTail, /* address where the tail should be written */ +RXD **ppRingPrev, /* address where the tail should be written */ +int *pRingFree, /* address where the # of free descr. 
goes */ +SK_BOOL IsTx) /* flag: is this a tx ring */ +{ +int i; /* loop counter */ +int DescrSize; /* the size of a descriptor rounded up to alignment*/ +int DescrNum; /* number of descriptors per ring */ +RXD *pDescr; /* pointer to a descriptor (receive or transmit) */ +RXD *pNextDescr; /* pointer to the next descriptor */ +RXD *pPrevDescr; /* pointer to the previous descriptor */ +uintptr_t VNextDescr; /* the virtual bus address of the next descriptor */ + + if (IsTx == SK_TRUE) { + DescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) * + DESCR_ALIGN; + DescrNum = TX_RING_SIZE / DescrSize; + } else { + DescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) * + DESCR_ALIGN; + DescrNum = RX_RING_SIZE / DescrSize; + } + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, + ("Descriptor size: %d Descriptor Number: %d\n", + DescrSize,DescrNum)); + + pDescr = (RXD*) pMemArea; + pPrevDescr = NULL; + pNextDescr = (RXD*) (((char*)pDescr) + DescrSize); + VNextDescr = VMemArea + DescrSize; + for(i=0; i<DescrNum; i++) { + /* set the pointers right */ + pDescr->VNextRxd = VNextDescr & 0xffffffffULL; + pDescr->pNextRxd = pNextDescr; + if (!IsTx) pDescr->TcpSumStarts = ETH_HLEN << 16 | ETH_HLEN; + + /* advance one step */ + pPrevDescr = pDescr; + pDescr = pNextDescr; + pNextDescr = (RXD*) (((char*)pDescr) + DescrSize); + VNextDescr += DescrSize; + } + pPrevDescr->pNextRxd = (RXD*) pMemArea; + pPrevDescr->VNextRxd = VMemArea; + pDescr = (RXD*) pMemArea; + *ppRingHead = (RXD*) pMemArea; + *ppRingTail = *ppRingHead; + *ppRingPrev = pPrevDescr; + *pRingFree = DescrNum; +} /* SetupRing */ + + +/***************************************************************************** + * + * PortReInitBmu - re-initiate the descriptor rings for one port + * + * Description: + * This function reinitializes the descriptor rings of one port + * in memory. The port must be stopped before. + * The HW is initialized with the descriptor start addresses. + * + * Returns: + * none + */ +static void PortReInitBmu( +SK_AC *pAC, /* pointer to adapter context */ +int PortIndex) /* index of the port for which to re-init */ +{ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("PortReInitBmu ")); + + /* set address of first descriptor of ring in BMU */ + SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_L, + (uint32_t)(((caddr_t) + (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) - + pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing + + pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) & + 0xFFFFFFFF)); + SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_H, + (uint32_t)(((caddr_t) + (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) - + pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing + + pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) >> 32)); + SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_L, + (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) - + pAC->RxPort[PortIndex].pRxDescrRing + + pAC->RxPort[PortIndex].VRxDescrRing) & 0xFFFFFFFF)); + SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_H, + (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) - + pAC->RxPort[PortIndex].pRxDescrRing + + pAC->RxPort[PortIndex].VRxDescrRing) >> 32)); +} /* PortReInitBmu */ + + +/**************************************************************************** + * + * SkGeIsr - handle adapter interrupts + * + * Description: + * The interrupt routine is called when the network adapter + * generates an interrupt. 
It may also be called if another device + * shares this interrupt vector with the driver. + * + * Returns: N/A + * + */ +static SkIsrRetVar SkGeIsr(int irq, void *dev_id) +{ +struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id; +DEV_NET *pNet; +SK_AC *pAC; +SK_U32 IntSrc; /* interrupts source register contents */ + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + + /* + * Check and process if its our interrupt + */ + SK_IN32(pAC->IoBase, B0_SP_ISRC, &IntSrc); + if (IntSrc == 0) { + return SkIsrRetNone; + } + + while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { +#if 0 /* software irq currently not used */ + if (IntSrc & IS_IRQ_SW) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("Software IRQ\n")); + } +#endif + if (IntSrc & IS_R1_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF RX1 IRQ\n")); + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + SK_PNMI_CNT_RX_INTR(pAC, 0); + } + if (IntSrc & IS_R2_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF RX2 IRQ\n")); + ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE); + SK_PNMI_CNT_RX_INTR(pAC, 1); + } +#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ + if (IntSrc & IS_XA1_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF AS TX1 IRQ\n")); + SK_PNMI_CNT_TX_INTR(pAC, 0); + spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); + FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); + spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); + } + if (IntSrc & IS_XA2_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF AS TX2 IRQ\n")); + SK_PNMI_CNT_TX_INTR(pAC, 1); + spin_lock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); + FreeTxDescriptors(pAC, &pAC->TxPort[1][TX_PRIO_LOW]); + spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); + } +#if 0 /* only if sync. 
queues used */ + if (IntSrc & IS_XS1_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF SY TX1 IRQ\n")); + SK_PNMI_CNT_TX_INTR(pAC, 1); + spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); + FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH); + spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); + ClearTxIrq(pAC, 0, TX_PRIO_HIGH); + } + if (IntSrc & IS_XS2_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF SY TX2 IRQ\n")); + SK_PNMI_CNT_TX_INTR(pAC, 1); + spin_lock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock); + FreeTxDescriptors(pAC, 1, TX_PRIO_HIGH); + spin_unlock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock); + ClearTxIrq(pAC, 1, TX_PRIO_HIGH); + } +#endif +#endif + + /* do all IO at once */ + if (IntSrc & IS_R1_F) + ClearAndStartRx(pAC, 0); + if (IntSrc & IS_R2_F) + ClearAndStartRx(pAC, 1); +#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ + if (IntSrc & IS_XA1_F) + ClearTxIrq(pAC, 0, TX_PRIO_LOW); + if (IntSrc & IS_XA2_F) + ClearTxIrq(pAC, 1, TX_PRIO_LOW); +#endif + SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc); + } /* while (IntSrc & IRQ_MASK != 0) */ + + IntSrc &= pAC->GIni.GIValIrqMask; + if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, + ("SPECIAL IRQ DP-Cards => %x\n", IntSrc)); + pAC->CheckQueue = SK_FALSE; + spin_lock(&pAC->SlowPathLock); + if (IntSrc & SPECIAL_IRQS) + SkGeSirqIsr(pAC, pAC->IoBase, IntSrc); + + SkEventDispatcher(pAC, pAC->IoBase); + spin_unlock(&pAC->SlowPathLock); + } + /* + * do it all again is case we cleared an interrupt that + * came in after handling the ring (OUTs may be delayed + * in hardware buffers, but are through after IN) + * + * rroesler: has been commented out and shifted to + * SkGeDrvEvent(), because it is timer + * guarded now + * + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE); + */ + + if (pAC->CheckQueue) { + pAC->CheckQueue = SK_FALSE; + spin_lock(&pAC->SlowPathLock); + SkEventDispatcher(pAC, pAC->IoBase); + spin_unlock(&pAC->SlowPathLock); + } + + /* IRQ is processed - Enable IRQs again*/ + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); + + return SkIsrRetHandled; +} /* SkGeIsr */ + + +/**************************************************************************** + * + * SkGeIsrOnePort - handle adapter interrupts for single port adapter + * + * Description: + * The interrupt routine is called when the network adapter + * generates an interrupt. It may also be called if another device + * shares this interrupt vector with the driver. + * This is the same as above, but handles only one port. 
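 *	Like SkGeIsr() above, the handler loops on B0_SP_ISRC, servicing the
 *	Rx/Tx ring events until no unmasked source bits remain, then processes
 *	any SPECIAL_IRQS under the SlowPathLock and finally re-enables the
 *	interrupt sources by writing GIValIrqMask back to B0_IMSK.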
+ * + * Returns: N/A + * + */ +static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id) +{ +struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id; +DEV_NET *pNet; +SK_AC *pAC; +SK_U32 IntSrc; /* interrupts source register contents */ + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + + /* + * Check and process if its our interrupt + */ + SK_IN32(pAC->IoBase, B0_SP_ISRC, &IntSrc); + if (IntSrc == 0) { + return SkIsrRetNone; + } + + while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { +#if 0 /* software irq currently not used */ + if (IntSrc & IS_IRQ_SW) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("Software IRQ\n")); + } +#endif + if (IntSrc & IS_R1_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF RX1 IRQ\n")); + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + SK_PNMI_CNT_RX_INTR(pAC, 0); + } +#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ + if (IntSrc & IS_XA1_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF AS TX1 IRQ\n")); + SK_PNMI_CNT_TX_INTR(pAC, 0); + spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); + FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); + spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); + } +#if 0 /* only if sync. queues used */ + if (IntSrc & IS_XS1_F) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_INT_SRC, + ("EOF SY TX1 IRQ\n")); + SK_PNMI_CNT_TX_INTR(pAC, 0); + spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); + FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH); + spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); + ClearTxIrq(pAC, 0, TX_PRIO_HIGH); + } +#endif +#endif + + /* do all IO at once */ + if (IntSrc & IS_R1_F) + ClearAndStartRx(pAC, 0); +#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ + if (IntSrc & IS_XA1_F) + ClearTxIrq(pAC, 0, TX_PRIO_LOW); +#endif + SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc); + } /* while (IntSrc & IRQ_MASK != 0) */ + + IntSrc &= pAC->GIni.GIValIrqMask; + if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, + ("SPECIAL IRQ SP-Cards => %x\n", IntSrc)); + pAC->CheckQueue = SK_FALSE; + spin_lock(&pAC->SlowPathLock); + if (IntSrc & SPECIAL_IRQS) + SkGeSirqIsr(pAC, pAC->IoBase, IntSrc); + + SkEventDispatcher(pAC, pAC->IoBase); + spin_unlock(&pAC->SlowPathLock); + } + /* + * do it all again is case we cleared an interrupt that + * came in after handling the ring (OUTs may be delayed + * in hardware buffers, but are through after IN) + * + * rroesler: has been commented out and shifted to + * SkGeDrvEvent(), because it is timer + * guarded now + * + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + */ + + /* IRQ is processed - Enable IRQs again*/ + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); + + return SkIsrRetHandled; +} /* SkGeIsrOnePort */ + +#ifdef CONFIG_NET_POLL_CONTROLLER +/**************************************************************************** + * + * SkGePollController - polling receive, for netconsole + * + * Description: + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + * + * Returns: N/A + */ +static void SkGePollController(struct net_device *dev) +{ + disable_irq(dev->irq); + SkGeIsr(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/**************************************************************************** + * + * SkGeOpen - handle start of initialized adapter + * + * Description: + * This function starts the initialized adapter. 
+ * The board level variable is set and the adapter is + * brought to full functionality. + * The device flags are set for operation. + * Do all necessary level 2 initialization, enable interrupts and + * give start command to RLMT. + * + * Returns: + * 0 on success + * != 0 on error + */ +static int SkGeOpen( +struct SK_NET_DEVICE *dev) +{ + DEV_NET *pNet; + SK_AC *pAC; + unsigned long Flags; /* for spin lock */ + int i; + SK_EVPARA EvPara; /* an event parameter union */ + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeOpen: pAC=0x%lX:\n", (unsigned long)pAC)); + +#ifdef SK_DIAG_SUPPORT + if (pAC->DiagModeActive == DIAG_ACTIVE) { + if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) { + return (-1); /* still in use by diag; deny actions */ + } + } +#endif + + /* Set blink mode */ + if ((pAC->PciDev->vendor == 0x1186) || (pAC->PciDev->vendor == 0x11ab )) + pAC->GIni.GILedBlinkCtrl = OEM_CONFIG_VALUE; + + if (pAC->BoardLevel == SK_INIT_DATA) { + /* level 1 init common modules here */ + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) { + printk("%s: HWInit (1) failed.\n", pAC->dev[pNet->PortNr]->name); + return (-1); + } + SkI2cInit (pAC, pAC->IoBase, SK_INIT_IO); + SkEventInit (pAC, pAC->IoBase, SK_INIT_IO); + SkPnmiInit (pAC, pAC->IoBase, SK_INIT_IO); + SkAddrInit (pAC, pAC->IoBase, SK_INIT_IO); + SkRlmtInit (pAC, pAC->IoBase, SK_INIT_IO); + SkTimerInit (pAC, pAC->IoBase, SK_INIT_IO); + pAC->BoardLevel = SK_INIT_IO; + } + + if (pAC->BoardLevel != SK_INIT_RUN) { + /* tschilling: Level 2 init modules here, check return value. */ + if (SkGeInit(pAC, pAC->IoBase, SK_INIT_RUN) != 0) { + printk("%s: HWInit (2) failed.\n", pAC->dev[pNet->PortNr]->name); + return (-1); + } + SkI2cInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkEventInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkPnmiInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkAddrInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkRlmtInit (pAC, pAC->IoBase, SK_INIT_RUN); + SkTimerInit (pAC, pAC->IoBase, SK_INIT_RUN); + pAC->BoardLevel = SK_INIT_RUN; + } + + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + /* Enable transmit descriptor polling. */ + SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE); + FillRxRing(pAC, &pAC->RxPort[i]); + } + SkGeYellowLED(pAC, pAC->IoBase, 1); + + StartDrvCleanupTimer(pAC); + SkDimEnableModerationIfNeeded(pAC); + SkDimDisplayModerationSettings(pAC); + + pAC->GIni.GIValIrqMask &= IRQ_MASK; + + /* enable Interrupts */ + SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask); + SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK); + + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + + if ((pAC->RlmtMode != 0) && (pAC->MaxPorts == 0)) { + EvPara.Para32[0] = pAC->RlmtNets; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, + EvPara); + EvPara.Para32[0] = pAC->RlmtMode; + EvPara.Para32[1] = 0; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_MODE_CHANGE, + EvPara); + } + + EvPara.Para32[0] = pNet->NetNr; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara); + SkEventDispatcher(pAC, pAC->IoBase); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + + pAC->MaxPorts++; + + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeOpen suceeded\n")); + + return (0); +} /* SkGeOpen */ + + +/**************************************************************************** + * + * SkGeClose - Stop initialized adapter + * + * Description: + * Close initialized adapter. 
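+ * In outline (condensed from the body below): stop the transmit
+ * queue, tell RLMT to stop the port, mask the interrupt sources and,
+ * for the last open port, de-initialize the hardware before the
+ * descriptor rings are cleared and the PNMI counters are backed up:
+ *	netif_stop_queue(dev);
+ *	SK_OUT32(pAC->IoBase, B0_IMSK, 0);	/* mask IRQs */
+ *	SkGeDeInit(pAC, pAC->IoBase);		/* last open port only */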
+ * + * Returns: + * 0 - on success + * error code - on error + */ +static int SkGeClose( +struct SK_NET_DEVICE *dev) +{ + DEV_NET *pNet; + DEV_NET *newPtrNet; + SK_AC *pAC; + + unsigned long Flags; /* for spin lock */ + int i; + int PortIdx; + SK_EVPARA EvPara; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeClose: pAC=0x%lX ", (unsigned long)pAC)); + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + +#ifdef SK_DIAG_SUPPORT + if (pAC->DiagModeActive == DIAG_ACTIVE) { + if (pAC->DiagFlowCtrl == SK_FALSE) { + /* + ** notify that the interface which has been closed + ** by operator interaction must not be started up + ** again when the DIAG has finished. + */ + newPtrNet = netdev_priv(pAC->dev[0]); + if (newPtrNet == pNet) { + pAC->WasIfUp[0] = SK_FALSE; + } else { + pAC->WasIfUp[1] = SK_FALSE; + } + return 0; /* return to system everything is fine... */ + } else { + pAC->DiagFlowCtrl = SK_FALSE; + } + } +#endif + + netif_stop_queue(dev); + + if (pAC->RlmtNets == 1) + PortIdx = pAC->ActivePort; + else + PortIdx = pNet->NetNr; + + StopDrvCleanupTimer(pAC); + + /* + * Clear multicast table, promiscuous mode .... + */ + SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0); + SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx, + SK_PROM_MODE_NONE); + + if (pAC->MaxPorts == 1) { + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + /* disable interrupts */ + SK_OUT32(pAC->IoBase, B0_IMSK, 0); + EvPara.Para32[0] = pNet->NetNr; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + SkEventDispatcher(pAC, pAC->IoBase); + SK_OUT32(pAC->IoBase, B0_IMSK, 0); + /* stop the hardware */ + SkGeDeInit(pAC, pAC->IoBase); + pAC->BoardLevel = SK_INIT_DATA; + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + } else { + + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + EvPara.Para32[0] = pNet->NetNr; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + SkPnmiEvent(pAC, pAC->IoBase, SK_PNMI_EVT_XMAC_RESET, EvPara); + SkEventDispatcher(pAC, pAC->IoBase); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + + /* Stop port */ + spin_lock_irqsave(&pAC->TxPort[pNet->PortNr] + [TX_PRIO_LOW].TxDesRingLock, Flags); + SkGeStopPort(pAC, pAC->IoBase, pNet->PortNr, + SK_STOP_ALL, SK_HARD_RST); + spin_unlock_irqrestore(&pAC->TxPort[pNet->PortNr] + [TX_PRIO_LOW].TxDesRingLock, Flags); + } + + if (pAC->RlmtNets == 1) { + /* clear all descriptor rings */ + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE); + ClearRxRing(pAC, &pAC->RxPort[i]); + ClearTxRing(pAC, &pAC->TxPort[i][TX_PRIO_LOW]); + } + } else { + /* clear port descriptor rings */ + ReceiveIrq(pAC, &pAC->RxPort[pNet->PortNr], SK_TRUE); + ClearRxRing(pAC, &pAC->RxPort[pNet->PortNr]); + ClearTxRing(pAC, &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW]); + } + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeClose: done ")); + + SK_MEMSET(&(pAC->PnmiBackup), 0, sizeof(SK_PNMI_STRUCT_DATA)); + SK_MEMCPY(&(pAC->PnmiBackup), &(pAC->PnmiStruct), + sizeof(SK_PNMI_STRUCT_DATA)); + + pAC->MaxPorts--; + + return (0); +} /* SkGeClose */ + + +/***************************************************************************** + * + * SkGeXmit - Linux frame transmit function + * + * Description: + * The system calls this function to send frames onto the wire. + * It puts the frame in the tx descriptor ring. If the ring is + * full then, the 'tbusy' flag is set. 
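+ * The convention used below (sketch of the code that follows): when
+ * XmitFrame()/XmitFrameSG() cannot queue the frame (Rc <= 0) the
+ * queue is stopped, and only a frame that was not taken at all
+ * (Rc < 0) is handed back to the stack:
+ *	if (Rc <= 0)
+ *		netif_stop_queue(dev);
+ *	if (Rc < 0)
+ *		return (1);	/* upper layer requeues the skb */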
+ *
+ * Returns:
+ *	0, if everything is ok
+ *	!=0, on error
+ *	WARNING: returning 1 in the 'tbusy' case caused system crashes
+ *	(double allocated skb's) !!!
+ */
+static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev)
+{
+DEV_NET		*pNet;
+SK_AC		*pAC;
+int		Rc;	/* return code of XmitFrame */
+
+	pNet = netdev_priv(dev);
+	pAC = pNet->pAC;
+
+	if ((!skb_shinfo(skb)->nr_frags) ||
+	    (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) {
+		/* Don't activate scatter-gather and hardware checksum */
+
+		if (pAC->RlmtNets == 2)
+			Rc = XmitFrame(
+				pAC,
+				&pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
+				skb);
+		else
+			Rc = XmitFrame(
+				pAC,
+				&pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
+				skb);
+	} else {
+		/* scatter-gather and hardware TCP checksumming enabled */
+		if (pAC->RlmtNets == 2)
+			Rc = XmitFrameSG(
+				pAC,
+				&pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
+				skb);
+		else
+			Rc = XmitFrameSG(
+				pAC,
+				&pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
+				skb);
+	}
+
+	/* Transmitter out of resources? */
+	if (Rc <= 0) {
+		netif_stop_queue(dev);
+	}
+
+	/* If not taken, give buffer ownership back to the
+	 * queueing layer.
+	 */
+	if (Rc < 0)
+		return (1);
+
+	dev->trans_start = jiffies;
+	return (0);
+} /* SkGeXmit */
+
+
+/*****************************************************************************
+ *
+ *	XmitFrame - fill one socket buffer into the transmit ring
+ *
+ * Description:
+ *	This function puts a message into the transmit descriptor ring
+ *	if there is a descriptor left.
+ *	Linux skb's consist of only one continuous buffer.
+ *	The first step locks the ring. It is held locked
+ *	the whole time to avoid problems with SWITCH_../PORT_RESET.
+ *	Then the descriptor is allocated.
+ *	The second part is linking the buffer to the descriptor.
+ *	At the very end, the Control field of the descriptor
+ *	is made valid for the BMU and a start TX command is given
+ *	if necessary.
+ *
+ * Returns:
+ *	> 0 - on success: the number of bytes in the message
+ *	= 0 - on resource shortage: this frame sent or dropped, now
+ *	      the ring is full ( -> set tbusy)
+ *	< 0 - on failure: other problems ( -> return failure to upper layers)
+ */
+static int XmitFrame(
+SK_AC		*pAC,		/* pointer to adapter context           */
+TX_PORT		*pTxPort,	/* pointer to struct of port to send to */
+struct sk_buff	*pMessage)	/* pointer to send-message              */
+{
+	TXD		*pTxd;		/* the txd to fill */
+	TXD		*pOldTxd;
+	unsigned long	Flags;
+	SK_U64		PhysAddr;
+	int		BytesSend = pMessage->len;
+
+	SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X"));
+
+	spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
+#ifndef USE_TX_COMPLETE
+	FreeTxDescriptors(pAC, pTxPort);
+#endif
+	if (pTxPort->TxdRingFree == 0) {
+		/*
+		** Not enough free descriptors in the ring at the moment.
+		** Maybe freeing some old ones helps.
+		*/
+		FreeTxDescriptors(pAC, pTxPort);
+		if (pTxPort->TxdRingFree == 0) {
+			spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+			SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
+			SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
+				SK_DBGCAT_DRV_TX_PROGRESS,
+				("XmitFrame failed\n"));
+			/*
+			** The desired message can not be sent.
+			** Because tbusy seems to be set, the message
+			** should not be freed here. It will be re-used
+			** by the scheduler of the ethernet handler.
+			*/
+			return (-1);
+		}
+	}
+
+	/*
+	** If the passed socket buffer is shorter than the 60-byte
+	** Ethernet minimum, pad it to 60 bytes and fill the added
+	** bytes with 0x00.
+	** This is to resolve faulty padding by the HW with 0xaa bytes.
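+	** (60 bytes is the 64-byte Ethernet minimum frame length minus
+	** the 4-byte FCS that the MAC appends itself; skb_padto() below
+	** zero-fills the buffer up to that length.)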
+ */ + if (BytesSend < C_LEN_ETHERNET_MINSIZE) { + if (skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) { + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + return 0; + } + pMessage->len = C_LEN_ETHERNET_MINSIZE; + } + + /* + ** advance head counter behind descriptor needed for this frame, + ** so that needed descriptor is reserved from that on. The next + ** action will be to add the passed buffer to the TX-descriptor + */ + pTxd = pTxPort->pTxdRingHead; + pTxPort->pTxdRingHead = pTxd->pNextTxd; + pTxPort->TxdRingFree--; + +#ifdef SK_DUMP_TX + DumpMsg(pMessage, "XmitFrame"); +#endif + + /* + ** First step is to map the data to be sent via the adapter onto + ** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4 + ** and 2.6 need to use pci_map_page() for that mapping. + */ + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + virt_to_page(pMessage->data), + ((unsigned long) pMessage->data & ~PAGE_MASK), + pMessage->len, + PCI_DMA_TODEVICE); + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); + pTxd->pMBuf = pMessage; + + if (pMessage->ip_summed == CHECKSUM_PARTIAL) { + u16 hdrlen = skb_transport_offset(pMessage); + u16 offset = hdrlen + pMessage->csum_offset; + + if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && + (pAC->GIni.GIChipRev == 0) && + (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { + pTxd->TBControl = BMU_TCP_CHECK; + } else { + pTxd->TBControl = BMU_UDP_CHECK; + } + + pTxd->TcpSumOfs = 0; + pTxd->TcpSumSt = hdrlen; + pTxd->TcpSumWr = offset; + + pTxd->TBControl |= BMU_OWN | BMU_STF | + BMU_SW | BMU_EOF | +#ifdef USE_TX_COMPLETE + BMU_IRQ_EOF | +#endif + pMessage->len; + } else { + pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK | + BMU_SW | BMU_EOF | +#ifdef USE_TX_COMPLETE + BMU_IRQ_EOF | +#endif + pMessage->len; + } + + /* + ** If previous descriptor already done, give TX start cmd + */ + pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd); + if ((pOldTxd->TBControl & BMU_OWN) == 0) { + SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START); + } + + /* + ** after releasing the lock, the skb may immediately be free'd + */ + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + if (pTxPort->TxdRingFree != 0) { + return (BytesSend); + } else { + return (0); + } + +} /* XmitFrame */ + +/***************************************************************************** + * + * XmitFrameSG - fill one socket buffer into the transmit ring + * (use SG and TCP/UDP hardware checksumming) + * + * Description: + * This function puts a message into the transmit descriptor ring + * if there is a descriptors left. 
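+ * One descriptor is consumed for the linear header and one for each
+ * page fragment, so the ring must provide at least (as checked below)
+ *	1 + skb_shinfo(pMessage)->nr_frags
+ * free descriptors before the frame is accepted.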
+ * + * Returns: + * > 0 - on succes: the number of bytes in the message + * = 0 - on resource shortage: this frame sent or dropped, now + * the ring is full ( -> set tbusy) + * < 0 - on failure: other problems ( -> return failure to upper layers) + */ +static int XmitFrameSG( +SK_AC *pAC, /* pointer to adapter context */ +TX_PORT *pTxPort, /* pointer to struct of port to send to */ +struct sk_buff *pMessage) /* pointer to send-message */ +{ + + TXD *pTxd; + TXD *pTxdFst; + TXD *pTxdLst; + int CurrFrag; + int BytesSend; + skb_frag_t *sk_frag; + SK_U64 PhysAddr; + unsigned long Flags; + SK_U32 Control; + + spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); +#ifndef USE_TX_COMPLETE + FreeTxDescriptors(pAC, pTxPort); +#endif + if ((skb_shinfo(pMessage)->nr_frags +1) > pTxPort->TxdRingFree) { + FreeTxDescriptors(pAC, pTxPort); + if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) { + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_TX_PROGRESS, + ("XmitFrameSG failed - Ring full\n")); + /* this message can not be sent now */ + return(-1); + } + } + + pTxd = pTxPort->pTxdRingHead; + pTxdFst = pTxd; + pTxdLst = pTxd; + BytesSend = 0; + + /* + ** Map the first fragment (header) into the DMA-space + */ + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + virt_to_page(pMessage->data), + ((unsigned long) pMessage->data & ~PAGE_MASK), + skb_headlen(pMessage), + PCI_DMA_TODEVICE); + + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); + + /* + ** Does the HW need to evaluate checksum for TCP or UDP packets? + */ + if (pMessage->ip_summed == CHECKSUM_PARTIAL) { + u16 hdrlen = skb_transport_offset(pMessage); + u16 offset = hdrlen + pMessage->csum_offset; + + Control = BMU_STFWD; + + /* + ** We have to use the opcode for tcp here, because the + ** opcode for udp is not working in the hardware yet + ** (Revision 2.0) + */ + if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) && + (pAC->GIni.GIChipRev == 0) && + (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { + Control |= BMU_TCP_CHECK; + } else { + Control |= BMU_UDP_CHECK; + } + + pTxd->TcpSumOfs = 0; + pTxd->TcpSumSt = hdrlen; + pTxd->TcpSumWr = offset; + } else + Control = BMU_CHECK | BMU_SW; + + pTxd->TBControl = BMU_STF | Control | skb_headlen(pMessage); + + pTxd = pTxd->pNextTxd; + pTxPort->TxdRingFree--; + BytesSend += skb_headlen(pMessage); + + /* + ** Browse over all SG fragments and map each of them into the DMA space + */ + for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) { + sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag]; + /* + ** we already have the proper value in entry + */ + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + sk_frag->page, + sk_frag->page_offset, + sk_frag->size, + PCI_DMA_TODEVICE); + + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); + pTxd->pMBuf = pMessage; + + pTxd->TBControl = Control | BMU_OWN | sk_frag->size; + + /* + ** Do we have the last fragment? 
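+		** Only the last fragment gets BMU_EOF (plus BMU_IRQ_EOF when
+		** tx-complete interrupts are used), and only then is the first
+		** descriptor handed to the BMU with BMU_OWN | BMU_SW, so the
+		** BMU never sees a half-built chain.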
+ */ + if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) { +#ifdef USE_TX_COMPLETE + pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF; +#else + pTxd->TBControl |= BMU_EOF; +#endif + pTxdFst->TBControl |= BMU_OWN | BMU_SW; + } + pTxdLst = pTxd; + pTxd = pTxd->pNextTxd; + pTxPort->TxdRingFree--; + BytesSend += sk_frag->size; + } + + /* + ** If previous descriptor already done, give TX start cmd + */ + if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) { + SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START); + } + + pTxPort->pTxdRingPrev = pTxdLst; + pTxPort->pTxdRingHead = pTxd; + + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + + if (pTxPort->TxdRingFree > 0) { + return (BytesSend); + } else { + return (0); + } +} + +/***************************************************************************** + * + * FreeTxDescriptors - release descriptors from the descriptor ring + * + * Description: + * This function releases descriptors from a transmit ring if they + * have been sent by the BMU. + * If a descriptors is sent, it can be freed and the message can + * be freed, too. + * The SOFTWARE controllable bit is used to prevent running around a + * completely free ring for ever. If this bit is no set in the + * frame (by XmitFrame), this frame has never been sent or is + * already freed. + * The Tx descriptor ring lock must be held while calling this function !!! + * + * Returns: + * none + */ +static void FreeTxDescriptors( +SK_AC *pAC, /* pointer to the adapter context */ +TX_PORT *pTxPort) /* pointer to destination port structure */ +{ +TXD *pTxd; /* pointer to the checked descriptor */ +TXD *pNewTail; /* pointer to 'end' of the ring */ +SK_U32 Control; /* TBControl field of descriptor */ +SK_U64 PhysAddr; /* address of DMA mapping */ + + pNewTail = pTxPort->pTxdRingTail; + pTxd = pNewTail; + /* + ** loop forever; exits if BMU_SW bit not set in start frame + ** or BMU_OWN bit set in any frame + */ + while (1) { + Control = pTxd->TBControl; + if ((Control & BMU_SW) == 0) { + /* + ** software controllable bit is set in first + ** fragment when given to BMU. Not set means that + ** this fragment was never sent or is already + ** freed ( -> ring completely free now). + */ + pTxPort->pTxdRingTail = pTxd; + netif_wake_queue(pAC->dev[pTxPort->PortIndex]); + return; + } + if (Control & BMU_OWN) { + pTxPort->pTxdRingTail = pTxd; + if (pTxPort->TxdRingFree > 0) { + netif_wake_queue(pAC->dev[pTxPort->PortIndex]); + } + return; + } + + /* + ** release the DMA mapping, because until not unmapped + ** this buffer is considered being under control of the + ** adapter card! + */ + PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32; + PhysAddr |= (SK_U64) pTxd->VDataLow; + pci_unmap_page(pAC->PciDev, PhysAddr, + pTxd->pMBuf->len, + PCI_DMA_TODEVICE); + + if (Control & BMU_EOF) + DEV_KFREE_SKB_ANY(pTxd->pMBuf); /* free message */ + + pTxPort->TxdRingFree++; + pTxd->TBControl &= ~BMU_SW; + pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */ + } /* while(forever) */ +} /* FreeTxDescriptors */ + +/***************************************************************************** + * + * FillRxRing - fill the receive ring with valid descriptors + * + * Description: + * This function fills the receive ring descriptors with data + * segments and makes them valid for the BMU. + * The active ring is filled completely, if possible. + * The non-active ring is filled only partial to save memory. 
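+ * "Partial" means that descriptors are only allocated while
+ * RxdRingFree stays above RxFillLimit; simplified, the loop below is
+ *	while (pRxPort->RxdRingFree > pRxPort->RxFillLimit)
+ *		FillRxDescriptor(pAC, pRxPort);
+ * (it stops early when no receive buffer can be allocated), and the
+ * limit is raised for the port that is not active.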
+ * + * Description of rx ring structure: + * head - points to the descriptor which will be used next by the BMU + * tail - points to the next descriptor to give to the BMU + * + * Returns: N/A + */ +static void FillRxRing( +SK_AC *pAC, /* pointer to the adapter context */ +RX_PORT *pRxPort) /* ptr to port struct for which the ring + should be filled */ +{ +unsigned long Flags; + + spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags); + while (pRxPort->RxdRingFree > pRxPort->RxFillLimit) { + if(!FillRxDescriptor(pAC, pRxPort)) + break; + } + spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags); +} /* FillRxRing */ + + +/***************************************************************************** + * + * FillRxDescriptor - fill one buffer into the receive ring + * + * Description: + * The function allocates a new receive buffer and + * puts it into the next descriptor. + * + * Returns: + * SK_TRUE - a buffer was added to the ring + * SK_FALSE - a buffer could not be added + */ +static SK_BOOL FillRxDescriptor( +SK_AC *pAC, /* pointer to the adapter context struct */ +RX_PORT *pRxPort) /* ptr to port struct of ring to fill */ +{ +struct sk_buff *pMsgBlock; /* pointer to a new message block */ +RXD *pRxd; /* the rxd to fill */ +SK_U16 Length; /* data fragment length */ +SK_U64 PhysAddr; /* physical address of a rx buffer */ + + pMsgBlock = alloc_skb(pAC->RxBufSize, GFP_ATOMIC); + if (pMsgBlock == NULL) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_ENTRY, + ("%s: Allocation of rx buffer failed !\n", + pAC->dev[pRxPort->PortIndex]->name)); + SK_PNMI_CNT_NO_RX_BUF(pAC, pRxPort->PortIndex); + return(SK_FALSE); + } + skb_reserve(pMsgBlock, 2); /* to align IP frames */ + /* skb allocated ok, so add buffer */ + pRxd = pRxPort->pRxdRingTail; + pRxPort->pRxdRingTail = pRxd->pNextRxd; + pRxPort->RxdRingFree--; + Length = pAC->RxBufSize; + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + virt_to_page(pMsgBlock->data), + ((unsigned long) pMsgBlock->data & + ~PAGE_MASK), + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); + + pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pRxd->VDataHigh = (SK_U32) (PhysAddr >> 32); + pRxd->pMBuf = pMsgBlock; + pRxd->RBControl = BMU_OWN | + BMU_STF | + BMU_IRQ_EOF | + BMU_TCP_CHECK | + Length; + return (SK_TRUE); + +} /* FillRxDescriptor */ + + +/***************************************************************************** + * + * ReQueueRxBuffer - fill one buffer back into the receive ring + * + * Description: + * Fill a given buffer back into the rx ring. The buffer + * has been previously allocated and aligned, and its phys. + * address calculated, so this is no more necessary. 
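+ * Typical use, as in ReceiveIrq() below: a frame that is copied out
+ * or dropped keeps its DMA mapping, and the old buffer is simply
+ * armed again:
+ *	ReQueueRxBuffer(pAC, pRxPort, pMsg,
+ *		pRxd->VDataHigh, pRxd->VDataLow);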
+ * + * Returns: N/A + */ +static void ReQueueRxBuffer( +SK_AC *pAC, /* pointer to the adapter context struct */ +RX_PORT *pRxPort, /* ptr to port struct of ring to fill */ +struct sk_buff *pMsg, /* pointer to the buffer */ +SK_U32 PhysHigh, /* phys address high dword */ +SK_U32 PhysLow) /* phys address low dword */ +{ +RXD *pRxd; /* the rxd to fill */ +SK_U16 Length; /* data fragment length */ + + pRxd = pRxPort->pRxdRingTail; + pRxPort->pRxdRingTail = pRxd->pNextRxd; + pRxPort->RxdRingFree--; + Length = pAC->RxBufSize; + + pRxd->VDataLow = PhysLow; + pRxd->VDataHigh = PhysHigh; + pRxd->pMBuf = pMsg; + pRxd->RBControl = BMU_OWN | + BMU_STF | + BMU_IRQ_EOF | + BMU_TCP_CHECK | + Length; + return; +} /* ReQueueRxBuffer */ + +/***************************************************************************** + * + * ReceiveIrq - handle a receive IRQ + * + * Description: + * This function is called when a receive IRQ is set. + * It walks the receive descriptor ring and sends up all + * frames that are complete. + * + * Returns: N/A + */ +static void ReceiveIrq( + SK_AC *pAC, /* pointer to adapter context */ + RX_PORT *pRxPort, /* pointer to receive port struct */ + SK_BOOL SlowPathLock) /* indicates if SlowPathLock is needed */ +{ +RXD *pRxd; /* pointer to receive descriptors */ +SK_U32 Control; /* control field of descriptor */ +struct sk_buff *pMsg; /* pointer to message holding frame */ +struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */ +int FrameLength; /* total length of received frame */ +SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */ +SK_EVPARA EvPara; /* an event parameter union */ +unsigned long Flags; /* for spin lock */ +int PortIndex = pRxPort->PortIndex; +unsigned int Offset; +unsigned int NumBytes; +unsigned int ForRlmt; +SK_BOOL IsBc; +SK_BOOL IsMc; +SK_BOOL IsBadFrame; /* Bad frame */ + +SK_U32 FrameStat; +SK_U64 PhysAddr; + +rx_start: + /* do forever; exit if BMU_OWN found */ + for ( pRxd = pRxPort->pRxdRingHead ; + pRxPort->RxdRingFree < pAC->RxDescrPerRing ; + pRxd = pRxd->pNextRxd, + pRxPort->pRxdRingHead = pRxd, + pRxPort->RxdRingFree ++) { + + /* + * For a better understanding of this loop + * Go through every descriptor beginning at the head + * Please note: the ring might be completely received so the OWN bit + * set is not a good crirteria to leave that loop. + * Therefore the RingFree counter is used. + * On entry of this loop pRxd is a pointer to the Rxd that needs + * to be checked next. 
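+	 * The loop therefore terminates either because a descriptor is
+	 * still owned by the BMU (BMU_OWN set, the normal case) or
+	 * because RxdRingFree has reached RxDescrPerRing, i.e. every
+	 * descriptor has already been handed back.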
+ */ + + Control = pRxd->RBControl; + + /* check if this descriptor is ready */ + if ((Control & BMU_OWN) != 0) { + /* this descriptor is not yet ready */ + /* This is the usual end of the loop */ + /* We don't need to start the ring again */ + FillRxRing(pAC, pRxPort); + return; + } + pAC->DynIrqModInfo.NbrProcessedDescr++; + + /* get length of frame and check it */ + FrameLength = Control & BMU_BBC; + if (FrameLength > pAC->RxBufSize) { + goto rx_failed; + } + + /* check for STF and EOF */ + if ((Control & (BMU_STF | BMU_EOF)) != (BMU_STF | BMU_EOF)) { + goto rx_failed; + } + + /* here we have a complete frame in the ring */ + pMsg = pRxd->pMBuf; + + FrameStat = pRxd->FrameStat; + + /* check for frame length mismatch */ +#define XMR_FS_LEN_SHIFT 18 +#define GMR_FS_LEN_SHIFT 16 + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + if (FrameLength != (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT)) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: Frame length mismatch (%u/%u).\n", + FrameLength, + (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT))); + goto rx_failed; + } + } + else { + if (FrameLength != (SK_U32) (FrameStat >> GMR_FS_LEN_SHIFT)) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: Frame length mismatch (%u/%u).\n", + FrameLength, + (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT))); + goto rx_failed; + } + } + + /* Set Rx Status */ + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + IsBc = (FrameStat & XMR_FS_BC) != 0; + IsMc = (FrameStat & XMR_FS_MC) != 0; + IsBadFrame = (FrameStat & + (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0; + } else { + IsBc = (FrameStat & GMR_FS_BC) != 0; + IsMc = (FrameStat & GMR_FS_MC) != 0; + IsBadFrame = (((FrameStat & GMR_FS_ANY_ERR) != 0) || + ((FrameStat & GMR_FS_RX_OK) == 0)); + } + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0, + ("Received frame of length %d on port %d\n", + FrameLength, PortIndex)); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0, + ("Number of free rx descriptors: %d\n", + pRxPort->RxdRingFree)); +/* DumpMsg(pMsg, "Rx"); */ + + if ((Control & BMU_STAT_VAL) != BMU_STAT_VAL || (IsBadFrame)) { +#if 0 + (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0) { +#endif + /* there is a receive error in this frame */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: Error in received frame, dropped!\n" + "Control: %x\nRxStat: %x\n", + Control, FrameStat)); + + ReQueueRxBuffer(pAC, pRxPort, pMsg, + pRxd->VDataHigh, pRxd->VDataLow); + + continue; + } + + /* + * if short frame then copy data to reduce memory waste + */ + if ((FrameLength < SK_COPY_THRESHOLD) && + ((pNewMsg = alloc_skb(FrameLength+2, GFP_ATOMIC)) != NULL)) { + /* + * Short frame detected and allocation successfull + */ + /* use new skb and copy data */ + skb_reserve(pNewMsg, 2); + skb_put(pNewMsg, FrameLength); + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; + PhysAddr |= (SK_U64) pRxd->VDataLow; + + pci_dma_sync_single_for_cpu(pAC->PciDev, + (dma_addr_t) PhysAddr, + FrameLength, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength); + + pci_dma_sync_single_for_device(pAC->PciDev, + (dma_addr_t) PhysAddr, + FrameLength, + PCI_DMA_FROMDEVICE); + ReQueueRxBuffer(pAC, pRxPort, pMsg, + pRxd->VDataHigh, pRxd->VDataLow); + + pMsg = pNewMsg; + + } + else { + /* + * if large frame, or SKB allocation failed, pass + * the SKB directly to the networking + */ + + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; + PhysAddr |= (SK_U64) pRxd->VDataLow; + + /* release the DMA mapping */ + pci_unmap_single(pAC->PciDev, + PhysAddr, + 
pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); + + /* set length in message */ + skb_put(pMsg, FrameLength); + } /* frame > SK_COPY_TRESHOLD */ + +#ifdef USE_SK_RX_CHECKSUM + pMsg->csum = pRxd->TcpSums & 0xffff; + pMsg->ip_summed = CHECKSUM_COMPLETE; +#else + pMsg->ip_summed = CHECKSUM_NONE; +#endif + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V")); + ForRlmt = SK_RLMT_RX_PROTOCOL; +#if 0 + IsBc = (FrameStat & XMR_FS_BC)==XMR_FS_BC; +#endif + SK_RLMT_PRE_LOOKAHEAD(pAC, PortIndex, FrameLength, + IsBc, &Offset, &NumBytes); + if (NumBytes != 0) { +#if 0 + IsMc = (FrameStat & XMR_FS_MC)==XMR_FS_MC; +#endif + SK_RLMT_LOOKAHEAD(pAC, PortIndex, + &pMsg->data[Offset], + IsBc, IsMc, &ForRlmt); + } + if (ForRlmt == SK_RLMT_RX_PROTOCOL) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("W")); + /* send up only frames from active port */ + if ((PortIndex == pAC->ActivePort) || + (pAC->RlmtNets == 2)) { + /* frame for upper layer */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("U")); +#ifdef xDEBUG + DumpMsg(pMsg, "Rx"); +#endif + SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC, + FrameLength, pRxPort->PortIndex); + + pMsg->protocol = eth_type_trans(pMsg, + pAC->dev[pRxPort->PortIndex]); + netif_rx(pMsg); + pAC->dev[pRxPort->PortIndex]->last_rx = jiffies; + } + else { + /* drop frame */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("D")); + DEV_KFREE_SKB(pMsg); + } + + } /* if not for rlmt */ + else { + /* packet for rlmt */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, ("R")); + pRlmtMbuf = SkDrvAllocRlmtMbuf(pAC, + pAC->IoBase, FrameLength); + if (pRlmtMbuf != NULL) { + pRlmtMbuf->pNext = NULL; + pRlmtMbuf->Length = FrameLength; + pRlmtMbuf->PortIdx = PortIndex; + EvPara.pParaPtr = pRlmtMbuf; + memcpy((char*)(pRlmtMbuf->pData), + (char*)(pMsg->data), + FrameLength); + + /* SlowPathLock needed? */ + if (SlowPathLock == SK_TRUE) { + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + SkEventQueue(pAC, SKGE_RLMT, + SK_RLMT_PACKET_RECEIVED, + EvPara); + pAC->CheckQueue = SK_TRUE; + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + } else { + SkEventQueue(pAC, SKGE_RLMT, + SK_RLMT_PACKET_RECEIVED, + EvPara); + pAC->CheckQueue = SK_TRUE; + } + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("Q")); + } + if ((pAC->dev[pRxPort->PortIndex]->flags & + (IFF_PROMISC | IFF_ALLMULTI)) != 0 || + (ForRlmt & SK_RLMT_RX_PROTOCOL) == + SK_RLMT_RX_PROTOCOL) { + pMsg->protocol = eth_type_trans(pMsg, + pAC->dev[pRxPort->PortIndex]); + netif_rx(pMsg); + pAC->dev[pRxPort->PortIndex]->last_rx = jiffies; + } + else { + DEV_KFREE_SKB(pMsg); + } + + } /* if packet for rlmt */ + } /* for ... 
scanning the RXD ring */ + + /* RXD ring is empty -> fill and restart */ + FillRxRing(pAC, pRxPort); + /* do not start if called from Close */ + if (pAC->BoardLevel > SK_INIT_DATA) { + ClearAndStartRx(pAC, PortIndex); + } + return; + +rx_failed: + /* remove error frame */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ERROR, + ("Schrottdescriptor, length: 0x%x\n", FrameLength)); + + /* release the DMA mapping */ + + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; + PhysAddr |= (SK_U64) pRxd->VDataLow; + pci_unmap_page(pAC->PciDev, + PhysAddr, + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); + DEV_KFREE_SKB_IRQ(pRxd->pMBuf); + pRxd->pMBuf = NULL; + pRxPort->RxdRingFree++; + pRxPort->pRxdRingHead = pRxd->pNextRxd; + goto rx_start; + +} /* ReceiveIrq */ + + +/***************************************************************************** + * + * ClearAndStartRx - give a start receive command to BMU, clear IRQ + * + * Description: + * This function sends a start command and a clear interrupt + * command for one receive queue to the BMU. + * + * Returns: N/A + * none + */ +static void ClearAndStartRx( +SK_AC *pAC, /* pointer to the adapter context */ +int PortIndex) /* index of the receive port (XMAC) */ +{ + SK_OUT8(pAC->IoBase, + RxQueueAddr[PortIndex]+Q_CSR, + CSR_START | CSR_IRQ_CL_F); +} /* ClearAndStartRx */ + + +/***************************************************************************** + * + * ClearTxIrq - give a clear transmit IRQ command to BMU + * + * Description: + * This function sends a clear tx IRQ command for one + * transmit queue to the BMU. + * + * Returns: N/A + */ +static void ClearTxIrq( +SK_AC *pAC, /* pointer to the adapter context */ +int PortIndex, /* index of the transmit port (XMAC) */ +int Prio) /* priority or normal queue */ +{ + SK_OUT8(pAC->IoBase, + TxQueueAddr[PortIndex][Prio]+Q_CSR, + CSR_IRQ_CL_F); +} /* ClearTxIrq */ + + +/***************************************************************************** + * + * ClearRxRing - remove all buffers from the receive ring + * + * Description: + * This function removes all receive buffers from the ring. + * The receive BMU must be stopped before calling this function. + * + * Returns: N/A + */ +static void ClearRxRing( +SK_AC *pAC, /* pointer to adapter context */ +RX_PORT *pRxPort) /* pointer to rx port struct */ +{ +RXD *pRxd; /* pointer to the current descriptor */ +unsigned long Flags; +SK_U64 PhysAddr; + + if (pRxPort->RxdRingFree == pAC->RxDescrPerRing) { + return; + } + spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags); + pRxd = pRxPort->pRxdRingHead; + do { + if (pRxd->pMBuf != NULL) { + + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; + PhysAddr |= (SK_U64) pRxd->VDataLow; + pci_unmap_page(pAC->PciDev, + PhysAddr, + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); + DEV_KFREE_SKB(pRxd->pMBuf); + pRxd->pMBuf = NULL; + } + pRxd->RBControl &= BMU_OWN; + pRxd = pRxd->pNextRxd; + pRxPort->RxdRingFree++; + } while (pRxd != pRxPort->pRxdRingTail); + pRxPort->pRxdRingTail = pRxPort->pRxdRingHead; + spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags); +} /* ClearRxRing */ + +/***************************************************************************** + * + * ClearTxRing - remove all buffers from the transmit ring + * + * Description: + * This function removes all transmit buffers from the ring. + * The transmit BMU must be stopped before calling this function + * and transmitting at the upper level must be disabled. 
+ * The BMU own bit of all descriptors is cleared, the rest is
+ * done by calling FreeTxDescriptors.
+ *
+ * Returns: N/A
+ */
+static void ClearTxRing(
+SK_AC	*pAC,		/* pointer to adapter context */
+TX_PORT	*pTxPort)	/* pointer to tx port struct  */
+{
+TXD		*pTxd;	/* pointer to the current descriptor */
+int		i;
+unsigned long	Flags;
+
+	spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
+	pTxd = pTxPort->pTxdRingHead;
+	for (i=0; i<pAC->TxDescrPerRing; i++) {
+		pTxd->TBControl &= ~BMU_OWN;
+		pTxd = pTxd->pNextTxd;
+	}
+	FreeTxDescriptors(pAC, pTxPort);
+	spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
+} /* ClearTxRing */
+
+/*****************************************************************************
+ *
+ *	SkGeSetMacAddr - Set the hardware MAC address
+ *
+ * Description:
+ *	This function sets the MAC address used by the adapter.
+ *
+ * Returns:
+ *	0, if everything is ok
+ *	!=0, on error
+ */
+static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p)
+{
+
+DEV_NET	*pNet = netdev_priv(dev);
+SK_AC	*pAC = pNet->pAC;
+
+struct sockaddr	*addr = p;
+unsigned long	Flags;
+
+	SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+		("SkGeSetMacAddr starts now...\n"));
+	if(netif_running(dev))
+		return -EBUSY;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+
+	if (pAC->RlmtNets == 2)
+		SkAddrOverride(pAC, pAC->IoBase, pNet->NetNr,
+			(SK_MAC_ADDR*)dev->dev_addr, SK_ADDR_VIRTUAL_ADDRESS);
+	else
+		SkAddrOverride(pAC, pAC->IoBase, pAC->ActivePort,
+			(SK_MAC_ADDR*)dev->dev_addr, SK_ADDR_VIRTUAL_ADDRESS);
+
+	spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+	return 0;
+} /* SkGeSetMacAddr */
+
+
+/*****************************************************************************
+ *
+ *	SkGeSetRxMode - set receive mode
+ *
+ * Description:
+ *	This function sets the receive mode of an adapter. The adapter
+ *	supports promiscuous mode, allmulticast mode and a number of
+ *	multicast addresses. If more multicast addresses than available
+ *	are selected, a hash function in the hardware is used.
+ *
+ * Returns: N/A
+ */
+static void SkGeSetRxMode(struct SK_NET_DEVICE *dev)
+{
+
+DEV_NET		*pNet;
+SK_AC		*pAC;
+
+struct dev_mc_list	*pMcList;
+int			i;
+int			PortIdx;
+unsigned long		Flags;
+
+	SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
+		("SkGeSetRxMode starts now...
")); + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + if (pAC->RlmtNets == 1) + PortIdx = pAC->ActivePort; + else + PortIdx = pNet->NetNr; + + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + if (dev->flags & IFF_PROMISC) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("PROMISCUOUS mode\n")); + SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx, + SK_PROM_MODE_LLC); + } else if (dev->flags & IFF_ALLMULTI) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("ALLMULTI mode\n")); + SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx, + SK_PROM_MODE_ALL_MC); + } else { + SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx, + SK_PROM_MODE_NONE); + SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0); + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("Number of MC entries: %d ", dev->mc_count)); + + pMcList = dev->mc_list; + for (i=0; i<dev->mc_count; i++, pMcList = pMcList->next) { + SkAddrMcAdd(pAC, pAC->IoBase, PortIdx, + (SK_MAC_ADDR*)pMcList->dmi_addr, 0); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_MCA, + ("%02x:%02x:%02x:%02x:%02x:%02x\n", + pMcList->dmi_addr[0], + pMcList->dmi_addr[1], + pMcList->dmi_addr[2], + pMcList->dmi_addr[3], + pMcList->dmi_addr[4], + pMcList->dmi_addr[5])); + } + SkAddrMcUpdate(pAC, pAC->IoBase, PortIdx); + } + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + + return; +} /* SkGeSetRxMode */ + + +/***************************************************************************** + * + * SkGeChangeMtu - set the MTU to another value + * + * Description: + * This function sets is called whenever the MTU size is changed + * (ifconfig mtu xxx dev ethX). If the MTU is bigger than standard + * ethernet MTU size, long frame support is activated. + * + * Returns: + * 0, if everything is ok + * !=0, on error + */ +static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int NewMtu) +{ +DEV_NET *pNet; +struct net_device *pOtherDev; +SK_AC *pAC; +unsigned long Flags; +int i; +SK_EVPARA EvPara; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeChangeMtu starts now...\n")); + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + + if ((NewMtu < 68) || (NewMtu > SK_JUMBO_MTU)) { + return -EINVAL; + } + + if(pAC->BoardLevel != SK_INIT_RUN) { + return -EINVAL; + } + +#ifdef SK_DIAG_SUPPORT + if (pAC->DiagModeActive == DIAG_ACTIVE) { + if (pAC->DiagFlowCtrl == SK_FALSE) { + return -1; /* still in use, deny any actions of MTU */ + } else { + pAC->DiagFlowCtrl = SK_FALSE; + } + } +#endif + + pOtherDev = pAC->dev[1 - pNet->NetNr]; + + if ( netif_running(pOtherDev) && (pOtherDev->mtu > 1500) + && (NewMtu <= 1500)) + return 0; + + pAC->RxBufSize = NewMtu + 32; + dev->mtu = NewMtu; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("New MTU: %d\n", NewMtu)); + + /* + ** Prevent any reconfiguration while changing the MTU + ** by disabling any interrupts + */ + SK_OUT32(pAC->IoBase, B0_IMSK, 0); + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + + /* + ** Notify RLMT that any ports are to be stopped + */ + EvPara.Para32[0] = 0; + EvPara.Para32[1] = -1; + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + EvPara.Para32[0] = 1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + } else { + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + } + + /* + ** After calling the SkEventDispatcher(), RLMT is aware about + ** the stopped ports -> configuration can take place! 
+ */ + SkEventDispatcher(pAC, pAC->IoBase); + + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + spin_lock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock); + netif_stop_queue(pAC->dev[i]); + + } + + /* + ** Depending on the desired MTU size change, a different number of + ** RX buffers need to be allocated + */ + if (NewMtu > 1500) { + /* + ** Use less rx buffers + */ + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } else { + if (i == pAC->ActivePort) { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } else { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 10); + } + } + } + } else { + /* + ** Use the normal amount of rx buffers + */ + for (i=0; i<pAC->GIni.GIMacsFound; i++) { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->RxPort[i].RxFillLimit = 1; + } else { + if (i == pAC->ActivePort) { + pAC->RxPort[i].RxFillLimit = 1; + } else { + pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing - + (pAC->RxDescrPerRing / 4); + } + } + } + } + + SkGeDeInit(pAC, pAC->IoBase); + + /* + ** enable/disable hardware support for long frames + */ + if (NewMtu > 1500) { +// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */ + pAC->GIni.GIPortUsage = SK_JUMBO_LINK; + } else { + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + pAC->GIni.GIPortUsage = SK_MUL_LINK; + } else { + pAC->GIni.GIPortUsage = SK_RED_LINK; + } + } + + SkGeInit( pAC, pAC->IoBase, SK_INIT_IO); + SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO); + SkEventInit(pAC, pAC->IoBase, SK_INIT_IO); + SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO); + SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO); + SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO); + SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO); + + /* + ** tschilling: + ** Speed and others are set back to default in level 1 init! 
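+	** GetConfiguration() is therefore called again right below, so
+	** that the module parameters (ConType, Speed_A, ...) are applied
+	** on top of those defaults before the SK_INIT_RUN pass.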
+	*/
+	GetConfiguration(pAC);
+
+	SkGeInit(   pAC, pAC->IoBase, SK_INIT_RUN);
+	SkI2cInit(  pAC, pAC->IoBase, SK_INIT_RUN);
+	SkEventInit(pAC, pAC->IoBase, SK_INIT_RUN);
+	SkPnmiInit( pAC, pAC->IoBase, SK_INIT_RUN);
+	SkAddrInit( pAC, pAC->IoBase, SK_INIT_RUN);
+	SkRlmtInit( pAC, pAC->IoBase, SK_INIT_RUN);
+	SkTimerInit(pAC, pAC->IoBase, SK_INIT_RUN);
+
+	/*
+	** clear and reinit the rx rings here
+	*/
+	for (i=0; i<pAC->GIni.GIMacsFound; i++) {
+		ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE);
+		ClearRxRing(pAC, &pAC->RxPort[i]);
+		FillRxRing(pAC, &pAC->RxPort[i]);
+
+		/*
+		** Enable transmit descriptor polling
+		*/
+		SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE);
+		FillRxRing(pAC, &pAC->RxPort[i]);
+	}
+
+	SkGeYellowLED(pAC, pAC->IoBase, 1);
+	SkDimEnableModerationIfNeeded(pAC);
+	SkDimDisplayModerationSettings(pAC);
+
+	netif_start_queue(pAC->dev[pNet->PortNr]);
+	for (i=pAC->GIni.GIMacsFound-1; i>=0; i--) {
+		spin_unlock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock);
+	}
+
+	/*
+	** Enable Interrupts again
+	*/
+	SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
+	SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK);
+
+	SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+	SkEventDispatcher(pAC, pAC->IoBase);
+
+	/*
+	** Notify RLMT about the change and restart one (or more) ports
+	*/
+	if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
+		EvPara.Para32[0] = pAC->RlmtNets;
+		EvPara.Para32[1] = -1;
+		SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, EvPara);
+		EvPara.Para32[0] = pNet->PortNr;
+		EvPara.Para32[1] = -1;
+		SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+
+		if (netif_running(pOtherDev)) {
+			DEV_NET *pOtherNet = netdev_priv(pOtherDev);
+			EvPara.Para32[0] = pOtherNet->PortNr;
+			SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+		}
+	} else {
+		SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
+	}
+
+	SkEventDispatcher(pAC, pAC->IoBase);
+	spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+	/*
+	** While testing this driver with kernel 2.5 (2.5.70), it seemed
+	** as if the upper layers had a problem handling a successful
+	** return value of '0'. If such a zero is returned, the complete
+	** system hangs for several minutes (!), which is unacceptable.
+	**
+	** Currently it is not clear what the exact reason for this problem
+	** is. The implemented workaround for 2.5 is to return the desired
+	** new MTU size if all needed changes for the new MTU size were
+	** performed. In kernels 2.2 and 2.4, a zero value is returned,
+	** which indicates the successful change of the mtu-size.
+	*/
+	return NewMtu;
+
+} /* SkGeChangeMtu */
+
+
+/*****************************************************************************
+ *
+ *	SkGeStats - return ethernet device statistics
+ *
+ * Description:
+ *	This function returns statistics about the ethernet device
+ *	to the operating system.
+ *
+ * Returns:
+ *	pointer to the statistics structure.
+ */
+static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev)
+{
+DEV_NET	*pNet = netdev_priv(dev);
+SK_AC	*pAC = pNet->pAC;
+SK_PNMI_STRUCT_DATA	*pPnmiStruct;	/* structure for all Pnmi-Data */
+SK_PNMI_STAT		*pPnmiStat;	/* pointer to virtual XMAC stat. data */
+SK_PNMI_CONF		*pPnmiConf;	/* pointer to virtual link config.
*/ +unsigned int Size; /* size of pnmi struct */ +unsigned long Flags; /* for spin lock */ + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeStats starts now...\n")); + pPnmiStruct = &pAC->PnmiStruct; + +#ifdef SK_DIAG_SUPPORT + if ((pAC->DiagModeActive == DIAG_NOTACTIVE) && + (pAC->BoardLevel == SK_INIT_RUN)) { +#endif + SK_MEMSET(pPnmiStruct, 0, sizeof(SK_PNMI_STRUCT_DATA)); + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + Size = SK_PNMI_STRUCT_SIZE; + SkPnmiGetStruct(pAC, pAC->IoBase, pPnmiStruct, &Size, pNet->NetNr); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); +#ifdef SK_DIAG_SUPPORT + } +#endif + + pPnmiStat = &pPnmiStruct->Stat[0]; + pPnmiConf = &pPnmiStruct->Conf[0]; + + pAC->stats.rx_packets = (SK_U32) pPnmiStruct->RxDeliveredCts & 0xFFFFFFFF; + pAC->stats.tx_packets = (SK_U32) pPnmiStat->StatTxOkCts & 0xFFFFFFFF; + pAC->stats.rx_bytes = (SK_U32) pPnmiStruct->RxOctetsDeliveredCts; + pAC->stats.tx_bytes = (SK_U32) pPnmiStat->StatTxOctetsOkCts; + + if (dev->mtu <= 1500) { + pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF; + } else { + pAC->stats.rx_errors = (SK_U32) ((pPnmiStruct->InErrorsCts - + pPnmiStat->StatRxTooLongCts) & 0xFFFFFFFF); + } + + + if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC && pAC->HWRevision < 12) + pAC->stats.rx_errors = pAC->stats.rx_errors - pPnmiStat->StatRxShortsCts; + + pAC->stats.tx_errors = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF; + pAC->stats.rx_dropped = (SK_U32) pPnmiStruct->RxNoBufCts & 0xFFFFFFFF; + pAC->stats.tx_dropped = (SK_U32) pPnmiStruct->TxNoBufCts & 0xFFFFFFFF; + pAC->stats.multicast = (SK_U32) pPnmiStat->StatRxMulticastOkCts & 0xFFFFFFFF; + pAC->stats.collisions = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF; + + /* detailed rx_errors: */ + pAC->stats.rx_length_errors = (SK_U32) pPnmiStat->StatRxRuntCts & 0xFFFFFFFF; + pAC->stats.rx_over_errors = (SK_U32) pPnmiStat->StatRxFifoOverflowCts & 0xFFFFFFFF; + pAC->stats.rx_crc_errors = (SK_U32) pPnmiStat->StatRxFcsCts & 0xFFFFFFFF; + pAC->stats.rx_frame_errors = (SK_U32) pPnmiStat->StatRxFramingCts & 0xFFFFFFFF; + pAC->stats.rx_fifo_errors = (SK_U32) pPnmiStat->StatRxFifoOverflowCts & 0xFFFFFFFF; + pAC->stats.rx_missed_errors = (SK_U32) pPnmiStat->StatRxMissedCts & 0xFFFFFFFF; + + /* detailed tx_errors */ + pAC->stats.tx_aborted_errors = (SK_U32) 0; + pAC->stats.tx_carrier_errors = (SK_U32) pPnmiStat->StatTxCarrierCts & 0xFFFFFFFF; + pAC->stats.tx_fifo_errors = (SK_U32) pPnmiStat->StatTxFifoUnderrunCts & 0xFFFFFFFF; + pAC->stats.tx_heartbeat_errors = (SK_U32) pPnmiStat->StatTxCarrierCts & 0xFFFFFFFF; + pAC->stats.tx_window_errors = (SK_U32) 0; + + return(&pAC->stats); +} /* SkGeStats */ + +/* + * Basic MII register access + */ +static int SkGeMiiIoctl(struct net_device *dev, + struct mii_ioctl_data *data, int cmd) +{ + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + SK_IOC IoC = pAC->IoBase; + int Port = pNet->PortNr; + SK_GEPORT *pPrt = &pAC->GIni.GP[Port]; + unsigned long Flags; + int err = 0; + int reg = data->reg_num & 0x1f; + SK_U16 val = data->val_in; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + switch(cmd) { + case SIOCGMIIPHY: + data->phy_id = pPrt->PhyAddr; + + /* fallthru */ + case SIOCGMIIREG: + if (pAC->GIni.GIGenesis) + SkXmPhyRead(pAC, IoC, Port, reg, &val); + else + SkGmPhyRead(pAC, IoC, Port, reg, &val); + + data->val_out = val; + break; + + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + err = -EPERM; + + else 
if (pAC->GIni.GIGenesis) + SkXmPhyWrite(pAC, IoC, Port, reg, val); + else + SkGmPhyWrite(pAC, IoC, Port, reg, val); + break; + default: + err = -EOPNOTSUPP; + } + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + return err; +} + + +/***************************************************************************** + * + * SkGeIoctl - IO-control function + * + * Description: + * This function is called if an ioctl is issued on the device. + * There are three subfunction for reading, writing and test-writing + * the private MIB data structure (useful for SysKonnect-internal tools). + * + * Returns: + * 0, if everything is ok + * !=0, on error + */ +static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd) +{ +DEV_NET *pNet; +SK_AC *pAC; +void *pMemBuf; +struct pci_dev *pdev = NULL; +SK_GE_IOCTL Ioctl; +unsigned int Err = 0; +int Size = 0; +int Ret = 0; +unsigned int Length = 0; +int HeaderLength = sizeof(SK_U32) + sizeof(SK_U32); + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeIoctl starts now...\n")); + + pNet = netdev_priv(dev); + pAC = pNet->pAC; + + if (cmd == SIOCGMIIPHY || cmd == SIOCSMIIREG || cmd == SIOCGMIIREG) + return SkGeMiiIoctl(dev, if_mii(rq), cmd); + + if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) { + return -EFAULT; + } + + switch(cmd) { + case SK_IOCTL_SETMIB: + case SK_IOCTL_PRESETMIB: + if (!capable(CAP_NET_ADMIN)) return -EPERM; + case SK_IOCTL_GETMIB: + if(copy_from_user(&pAC->PnmiStruct, Ioctl.pData, + Ioctl.Len<sizeof(pAC->PnmiStruct)? + Ioctl.Len : sizeof(pAC->PnmiStruct))) { + return -EFAULT; + } + Size = SkGeIocMib(pNet, Ioctl.Len, cmd); + if(copy_to_user(Ioctl.pData, &pAC->PnmiStruct, + Ioctl.Len<Size? Ioctl.Len : Size)) { + return -EFAULT; + } + Ioctl.Len = Size; + if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) { + return -EFAULT; + } + break; + case SK_IOCTL_GEN: + if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) { + Length = Ioctl.Len; + } else { + Length = sizeof(pAC->PnmiStruct) + HeaderLength; + } + if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) { + return -ENOMEM; + } + if(copy_from_user(pMemBuf, Ioctl.pData, Length)) { + Err = -EFAULT; + goto fault_gen; + } + if ((Ret = SkPnmiGenIoctl(pAC, pAC->IoBase, pMemBuf, &Length, 0)) < 0) { + Err = -EFAULT; + goto fault_gen; + } + if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) { + Err = -EFAULT; + goto fault_gen; + } + Ioctl.Len = Length; + if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) { + Err = -EFAULT; + goto fault_gen; + } +fault_gen: + kfree(pMemBuf); /* cleanup everything */ + break; +#ifdef SK_DIAG_SUPPORT + case SK_IOCTL_DIAG: + if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) { + Length = Ioctl.Len; + } else { + Length = sizeof(pAC->PnmiStruct) + HeaderLength; + } + if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) { + return -ENOMEM; + } + if(copy_from_user(pMemBuf, Ioctl.pData, Length)) { + Err = -EFAULT; + goto fault_diag; + } + pdev = pAC->PciDev; + Length = 3 * sizeof(SK_U32); /* Error, Bus and Device */ + /* + ** While coding this new IOCTL interface, only a few lines of code + ** are to to be added. Therefore no dedicated function has been + ** added. If more functionality is added, a separate function + ** should be used... 
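+		** The reply handed back below consists of three SK_U32 words:
+		** an error code (always 0 here), the PCI bus number, and the
+		** device number parsed from the slot name.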
+ */ + * ((SK_U32 *)pMemBuf) = 0; + * ((SK_U32 *)pMemBuf + 1) = pdev->bus->number; + * ((SK_U32 *)pMemBuf + 2) = ParseDeviceNbrFromSlotName(pci_name(pdev)); + if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) { + Err = -EFAULT; + goto fault_diag; + } + Ioctl.Len = Length; + if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) { + Err = -EFAULT; + goto fault_diag; + } +fault_diag: + kfree(pMemBuf); /* cleanup everything */ + break; +#endif + default: + Err = -EOPNOTSUPP; + } + + return(Err); + +} /* SkGeIoctl */ + + +/***************************************************************************** + * + * SkGeIocMib - handle a GetMib, SetMib- or PresetMib-ioctl message + * + * Description: + * This function reads/writes the MIB data using PNMI (Private Network + * Management Interface). + * The destination for the data must be provided with the + * ioctl call and is given to the driver in the form of + * a user space address. + * Copying from the user-provided data area into kernel messages + * and back is done by copy_from_user and copy_to_user calls in + * SkGeIoctl. + * + * Returns: + * returned size from PNMI call + */ +static int SkGeIocMib( +DEV_NET *pNet, /* pointer to the adapter context */ +unsigned int Size, /* length of ioctl data */ +int mode) /* flag for set/preset */ +{ +unsigned long Flags; /* for spin lock */ +SK_AC *pAC; + + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("SkGeIocMib starts now...\n")); + pAC = pNet->pAC; + /* access MIB */ + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + switch(mode) { + case SK_IOCTL_GETMIB: + SkPnmiGetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size, + pNet->NetNr); + break; + case SK_IOCTL_PRESETMIB: + SkPnmiPreSetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size, + pNet->NetNr); + break; + case SK_IOCTL_SETMIB: + SkPnmiSetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size, + pNet->NetNr); + break; + default: + break; + } + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, + ("MIB data access succeeded\n")); + return (Size); +} /* SkGeIocMib */ + + +/***************************************************************************** + * + * GetConfiguration - read configuration information + * + * Description: + * This function reads per-adapter configuration information from + * the options provided on the command line. + * + * Returns: + * none + */ +static void GetConfiguration( +SK_AC *pAC) /* pointer to the adapter context structure */ +{ +SK_I32 Port; /* preferred port */ +SK_BOOL AutoSet; +SK_BOOL DupSet; +int LinkSpeed = SK_LSPEED_AUTO; /* Link speed */ +int AutoNeg = 1; /* autoneg off (0) or on (1) */ +int DuplexCap = 0; /* 0=both,1=full,2=half */ +int FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; /* FlowControl */ +int MSMode = SK_MS_MODE_AUTO; /* master/slave mode */ + +SK_BOOL IsConTypeDefined = SK_TRUE; +SK_BOOL IsLinkSpeedDefined = SK_TRUE; +SK_BOOL IsFlowCtrlDefined = SK_TRUE; +SK_BOOL IsRoleDefined = SK_TRUE; +SK_BOOL IsModeDefined = SK_TRUE; +/* + * The two parameters AutoNeg. and DuplexCap. map to one configuration + * parameter. 
The mapping is described by this table: + * DuplexCap -> | both | full | half | + * AutoNeg | | | | + * ----------------------------------------------------------------- + * Off | illegal | Full | Half | + * ----------------------------------------------------------------- + * On | AutoBoth | AutoFull | AutoHalf | + * ----------------------------------------------------------------- + * Sense | AutoSense | AutoSense | AutoSense | + */ +int Capabilities[3][3] = + { { -1, SK_LMODE_FULL , SK_LMODE_HALF }, + {SK_LMODE_AUTOBOTH , SK_LMODE_AUTOFULL , SK_LMODE_AUTOHALF }, + {SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE} }; + +#define DC_BOTH 0 +#define DC_FULL 1 +#define DC_HALF 2 +#define AN_OFF 0 +#define AN_ON 1 +#define AN_SENS 2 +#define M_CurrPort pAC->GIni.GP[Port] + + + /* + ** Set the default values first for both ports! + */ + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO; + } + + /* + ** Check merged parameter ConType. If it has not been used, + ** verify any other parameter (e.g. AutoNeg) and use default values. + ** + ** Stating both ConType and other lowlevel link parameters is also + ** possible. If this is the case, the passed ConType-parameter is + ** overwritten by the lowlevel link parameter. + ** + ** The following settings are used for a merged ConType-parameter: + ** + ** ConType DupCap AutoNeg FlowCtrl Role Speed + ** ------- ------ ------- -------- ---------- ----- + ** Auto Both On SymOrRem Auto Auto + ** 100FD Full Off None <ignored> 100 + ** 100HD Half Off None <ignored> 100 + ** 10FD Full Off None <ignored> 10 + ** 10HD Half Off None <ignored> 10 + ** + ** This ConType parameter is used for all ports of the adapter! + */ + if ( (ConType != NULL) && + (pAC->Index < SK_MAX_CARD_PARAM) && + (ConType[pAC->Index] != NULL) ) { + + /* Check chipset family */ + if ((!pAC->ChipsetType) && + (strcmp(ConType[pAC->Index],"Auto")!=0) && + (strcmp(ConType[pAC->Index],"")!=0)) { + /* Set the speed parameter back */ + printk("sk98lin: Illegal value \"%s\" " + "for ConType." 
+ " Using Auto.\n", + ConType[pAC->Index]); + + sprintf(ConType[pAC->Index], "Auto"); + } + + if (strcmp(ConType[pAC->Index],"")==0) { + IsConTypeDefined = SK_FALSE; /* No ConType defined */ + } else if (strcmp(ConType[pAC->Index],"Auto")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO; + } + } else if (strcmp(ConType[pAC->Index],"100FD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS; + } + } else if (strcmp(ConType[pAC->Index],"100HD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS; + } + } else if (strcmp(ConType[pAC->Index],"10FD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS; + } + } else if (strcmp(ConType[pAC->Index],"10HD")==0) { + for (Port = 0; Port < SK_MAX_MACS; Port++) { + M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF]; + M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE; + M_CurrPort.PMSMode = SK_MS_MODE_AUTO; + M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS; + } + } else { + printk("sk98lin: Illegal value \"%s\" for ConType\n", + ConType[pAC->Index]); + IsConTypeDefined = SK_FALSE; /* Wrong ConType defined */ + } + } else { + IsConTypeDefined = SK_FALSE; /* No ConType defined */ + } + + /* + ** Parse any parameter settings for port A: + ** a) any LinkSpeed stated? + */ + if (Speed_A != NULL && pAC->Index<SK_MAX_CARD_PARAM && + Speed_A[pAC->Index] != NULL) { + if (strcmp(Speed_A[pAC->Index],"")==0) { + IsLinkSpeedDefined = SK_FALSE; + } else if (strcmp(Speed_A[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } else if (strcmp(Speed_A[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } else if (strcmp(Speed_A[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } else if (strcmp(Speed_A[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } else { + printk("sk98lin: Illegal value \"%s\" for Speed_A\n", + Speed_A[pAC->Index]); + IsLinkSpeedDefined = SK_FALSE; + } + } else { + IsLinkSpeedDefined = SK_FALSE; + } + + /* + ** Check speed parameter: + ** Only copper type adapter and GE V2 cards + */ + if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) && + ((LinkSpeed != SK_LSPEED_AUTO) && + (LinkSpeed != SK_LSPEED_1000MBPS))) { + printk("sk98lin: Illegal value for Speed_A. " + "Not a copper card or GE V2 card\n Using " + "speed 1000\n"); + LinkSpeed = SK_LSPEED_1000MBPS; + } + + /* + ** Decide whether to set new config value if somethig valid has + ** been received. + */ + if (IsLinkSpeedDefined) { + pAC->GIni.GP[0].PLinkSpeed = LinkSpeed; + } + + /* + ** b) Any Autonegotiation and DuplexCapabilities set? + ** Please note that both belong together... + */ + AutoNeg = AN_ON; /* tschilling: Default: Autonegotiation on! 
*/ + AutoSet = SK_FALSE; + if (AutoNeg_A != NULL && pAC->Index<SK_MAX_CARD_PARAM && + AutoNeg_A[pAC->Index] != NULL) { + AutoSet = SK_TRUE; + if (strcmp(AutoNeg_A[pAC->Index],"")==0) { + AutoSet = SK_FALSE; + } else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) { + AutoNeg = AN_ON; + } else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) { + AutoNeg = AN_OFF; + } else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) { + AutoNeg = AN_SENS; + } else { + printk("sk98lin: Illegal value \"%s\" for AutoNeg_A\n", + AutoNeg_A[pAC->Index]); + } + } + + DuplexCap = DC_BOTH; + DupSet = SK_FALSE; + if (DupCap_A != NULL && pAC->Index<SK_MAX_CARD_PARAM && + DupCap_A[pAC->Index] != NULL) { + DupSet = SK_TRUE; + if (strcmp(DupCap_A[pAC->Index],"")==0) { + DupSet = SK_FALSE; + } else if (strcmp(DupCap_A[pAC->Index],"Both")==0) { + DuplexCap = DC_BOTH; + } else if (strcmp(DupCap_A[pAC->Index],"Full")==0) { + DuplexCap = DC_FULL; + } else if (strcmp(DupCap_A[pAC->Index],"Half")==0) { + DuplexCap = DC_HALF; + } else { + printk("sk98lin: Illegal value \"%s\" for DupCap_A\n", + DupCap_A[pAC->Index]); + } + } + + /* + ** Check for illegal combinations + */ + if ((LinkSpeed == SK_LSPEED_1000MBPS) && + ((DuplexCap == SK_LMODE_STAT_AUTOHALF) || + (DuplexCap == SK_LMODE_STAT_HALF)) && + (pAC->ChipsetType)) { + printk("sk98lin: Half Duplex not possible with Gigabit speed!\n" + " Using Full Duplex.\n"); + DuplexCap = DC_FULL; + } + + if ( AutoSet && AutoNeg==AN_SENS && DupSet) { + printk("sk98lin, Port A: DuplexCapabilities" + " ignored using Sense mode\n"); + } + + if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){ + printk("sk98lin: Port A: Illegal combination" + " of values AutoNeg. and DuplexCap.\n Using " + "Full Duplex\n"); + DuplexCap = DC_FULL; + } + + if (AutoSet && AutoNeg==AN_OFF && !DupSet) { + DuplexCap = DC_FULL; + } + + if (!AutoSet && DupSet) { + printk("sk98lin: Port A: Duplex setting not" + " possible in\n default AutoNegotiation mode" + " (Sense).\n Using AutoNegotiation On\n"); + AutoNeg = AN_ON; + } + + /* + ** set the desired mode + */ + if (AutoSet || DupSet) { + pAC->GIni.GP[0].PLinkModeConf = Capabilities[AutoNeg][DuplexCap]; + } + + /* + ** c) Any Flowcontrol-parameter set? + */ + if (FlowCtrl_A != NULL && pAC->Index<SK_MAX_CARD_PARAM && + FlowCtrl_A[pAC->Index] != NULL) { + if (strcmp(FlowCtrl_A[pAC->Index],"") == 0) { + IsFlowCtrlDefined = SK_FALSE; + } else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) { + FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; + } else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) { + FlowCtrl = SK_FLOW_MODE_SYMMETRIC; + } else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) { + FlowCtrl = SK_FLOW_MODE_LOC_SEND; + } else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) { + FlowCtrl = SK_FLOW_MODE_NONE; + } else { + printk("sk98lin: Illegal value \"%s\" for FlowCtrl_A\n", + FlowCtrl_A[pAC->Index]); + IsFlowCtrlDefined = SK_FALSE; + } + } else { + IsFlowCtrlDefined = SK_FALSE; + } + + if (IsFlowCtrlDefined) { + if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) { + printk("sk98lin: Port A: FlowControl" + " impossible without AutoNegotiation," + " disabled\n"); + FlowCtrl = SK_FLOW_MODE_NONE; + } + pAC->GIni.GP[0].PFlowCtrlMode = FlowCtrl; + } + + /* + ** d) What is with the RoleParameter? 
+ */ + if (Role_A != NULL && pAC->Index<SK_MAX_CARD_PARAM && + Role_A[pAC->Index] != NULL) { + if (strcmp(Role_A[pAC->Index],"")==0) { + IsRoleDefined = SK_FALSE; + } else if (strcmp(Role_A[pAC->Index],"Auto")==0) { + MSMode = SK_MS_MODE_AUTO; + } else if (strcmp(Role_A[pAC->Index],"Master")==0) { + MSMode = SK_MS_MODE_MASTER; + } else if (strcmp(Role_A[pAC->Index],"Slave")==0) { + MSMode = SK_MS_MODE_SLAVE; + } else { + printk("sk98lin: Illegal value \"%s\" for Role_A\n", + Role_A[pAC->Index]); + IsRoleDefined = SK_FALSE; + } + } else { + IsRoleDefined = SK_FALSE; + } + + if (IsRoleDefined == SK_TRUE) { + pAC->GIni.GP[0].PMSMode = MSMode; + } + + + + /* + ** Parse any parameter settings for port B: + ** a) any LinkSpeed stated? + */ + IsConTypeDefined = SK_TRUE; + IsLinkSpeedDefined = SK_TRUE; + IsFlowCtrlDefined = SK_TRUE; + IsModeDefined = SK_TRUE; + + if (Speed_B != NULL && pAC->Index<SK_MAX_CARD_PARAM && + Speed_B[pAC->Index] != NULL) { + if (strcmp(Speed_B[pAC->Index],"")==0) { + IsLinkSpeedDefined = SK_FALSE; + } else if (strcmp(Speed_B[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } else if (strcmp(Speed_B[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } else if (strcmp(Speed_B[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } else if (strcmp(Speed_B[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } else { + printk("sk98lin: Illegal value \"%s\" for Speed_B\n", + Speed_B[pAC->Index]); + IsLinkSpeedDefined = SK_FALSE; + } + } else { + IsLinkSpeedDefined = SK_FALSE; + } + + /* + ** Check speed parameter: + ** Only copper type adapter and GE V2 cards + */ + if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) && + ((LinkSpeed != SK_LSPEED_AUTO) && + (LinkSpeed != SK_LSPEED_1000MBPS))) { + printk("sk98lin: Illegal value for Speed_B. " + "Not a copper card or GE V2 card\n Using " + "speed 1000\n"); + LinkSpeed = SK_LSPEED_1000MBPS; + } + + /* + ** Decide whether to set new config value if somethig valid has + ** been received. + */ + if (IsLinkSpeedDefined) { + pAC->GIni.GP[1].PLinkSpeed = LinkSpeed; + } + + /* + ** b) Any Autonegotiation and DuplexCapabilities set? + ** Please note that both belong together... 
+ */ + AutoNeg = AN_SENS; /* default: do auto Sense */ + AutoSet = SK_FALSE; + if (AutoNeg_B != NULL && pAC->Index<SK_MAX_CARD_PARAM && + AutoNeg_B[pAC->Index] != NULL) { + AutoSet = SK_TRUE; + if (strcmp(AutoNeg_B[pAC->Index],"")==0) { + AutoSet = SK_FALSE; + } else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) { + AutoNeg = AN_ON; + } else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) { + AutoNeg = AN_OFF; + } else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) { + AutoNeg = AN_SENS; + } else { + printk("sk98lin: Illegal value \"%s\" for AutoNeg_B\n", + AutoNeg_B[pAC->Index]); + } + } + + DuplexCap = DC_BOTH; + DupSet = SK_FALSE; + if (DupCap_B != NULL && pAC->Index<SK_MAX_CARD_PARAM && + DupCap_B[pAC->Index] != NULL) { + DupSet = SK_TRUE; + if (strcmp(DupCap_B[pAC->Index],"")==0) { + DupSet = SK_FALSE; + } else if (strcmp(DupCap_B[pAC->Index],"Both")==0) { + DuplexCap = DC_BOTH; + } else if (strcmp(DupCap_B[pAC->Index],"Full")==0) { + DuplexCap = DC_FULL; + } else if (strcmp(DupCap_B[pAC->Index],"Half")==0) { + DuplexCap = DC_HALF; + } else { + printk("sk98lin: Illegal value \"%s\" for DupCap_B\n", + DupCap_B[pAC->Index]); + } + } + + + /* + ** Check for illegal combinations + */ + if ((LinkSpeed == SK_LSPEED_1000MBPS) && + ((DuplexCap == SK_LMODE_STAT_AUTOHALF) || + (DuplexCap == SK_LMODE_STAT_HALF)) && + (pAC->ChipsetType)) { + printk("sk98lin: Half Duplex not possible with Gigabit speed!\n" + " Using Full Duplex.\n"); + DuplexCap = DC_FULL; + } + + if (AutoSet && AutoNeg==AN_SENS && DupSet) { + printk("sk98lin, Port B: DuplexCapabilities" + " ignored using Sense mode\n"); + } + + if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){ + printk("sk98lin: Port B: Illegal combination" + " of values AutoNeg. and DuplexCap.\n Using " + "Full Duplex\n"); + DuplexCap = DC_FULL; + } + + if (AutoSet && AutoNeg==AN_OFF && !DupSet) { + DuplexCap = DC_FULL; + } + + if (!AutoSet && DupSet) { + printk("sk98lin: Port B: Duplex setting not" + " possible in\n default AutoNegotiation mode" + " (Sense).\n Using AutoNegotiation On\n"); + AutoNeg = AN_ON; + } + + /* + ** set the desired mode + */ + if (AutoSet || DupSet) { + pAC->GIni.GP[1].PLinkModeConf = Capabilities[AutoNeg][DuplexCap]; + } + + /* + ** c) Any FlowCtrl parameter set? + */ + if (FlowCtrl_B != NULL && pAC->Index<SK_MAX_CARD_PARAM && + FlowCtrl_B[pAC->Index] != NULL) { + if (strcmp(FlowCtrl_B[pAC->Index],"") == 0) { + IsFlowCtrlDefined = SK_FALSE; + } else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) { + FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; + } else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) { + FlowCtrl = SK_FLOW_MODE_SYMMETRIC; + } else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) { + FlowCtrl = SK_FLOW_MODE_LOC_SEND; + } else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) { + FlowCtrl = SK_FLOW_MODE_NONE; + } else { + printk("sk98lin: Illegal value \"%s\" for FlowCtrl_B\n", + FlowCtrl_B[pAC->Index]); + IsFlowCtrlDefined = SK_FALSE; + } + } else { + IsFlowCtrlDefined = SK_FALSE; + } + + if (IsFlowCtrlDefined) { + if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) { + printk("sk98lin: Port B: FlowControl" + " impossible without AutoNegotiation," + " disabled\n"); + FlowCtrl = SK_FLOW_MODE_NONE; + } + pAC->GIni.GP[1].PFlowCtrlMode = FlowCtrl; + } + + /* + ** d) What is the RoleParameter? 
+ */ + if (Role_B != NULL && pAC->Index<SK_MAX_CARD_PARAM && + Role_B[pAC->Index] != NULL) { + if (strcmp(Role_B[pAC->Index],"")==0) { + IsRoleDefined = SK_FALSE; + } else if (strcmp(Role_B[pAC->Index],"Auto")==0) { + MSMode = SK_MS_MODE_AUTO; + } else if (strcmp(Role_B[pAC->Index],"Master")==0) { + MSMode = SK_MS_MODE_MASTER; + } else if (strcmp(Role_B[pAC->Index],"Slave")==0) { + MSMode = SK_MS_MODE_SLAVE; + } else { + printk("sk98lin: Illegal value \"%s\" for Role_B\n", + Role_B[pAC->Index]); + IsRoleDefined = SK_FALSE; + } + } else { + IsRoleDefined = SK_FALSE; + } + + if (IsRoleDefined) { + pAC->GIni.GP[1].PMSMode = MSMode; + } + + /* + ** Evaluate settings for both ports + */ + pAC->ActivePort = 0; + if (PrefPort != NULL && pAC->Index<SK_MAX_CARD_PARAM && + PrefPort[pAC->Index] != NULL) { + if (strcmp(PrefPort[pAC->Index],"") == 0) { /* Auto */ + pAC->ActivePort = 0; + pAC->Rlmt.Net[0].Preference = -1; /* auto */ + pAC->Rlmt.Net[0].PrefPort = 0; + } else if (strcmp(PrefPort[pAC->Index],"A") == 0) { + /* + ** do not set ActivePort here, thus a port + ** switch is issued after net up. + */ + Port = 0; + pAC->Rlmt.Net[0].Preference = Port; + pAC->Rlmt.Net[0].PrefPort = Port; + } else if (strcmp(PrefPort[pAC->Index],"B") == 0) { + /* + ** do not set ActivePort here, thus a port + ** switch is issued after net up. + */ + if (pAC->GIni.GIMacsFound == 1) { + printk("sk98lin: Illegal value \"B\" for PrefPort.\n" + " Port B not available on single port adapters.\n"); + + pAC->ActivePort = 0; + pAC->Rlmt.Net[0].Preference = -1; /* auto */ + pAC->Rlmt.Net[0].PrefPort = 0; + } else { + Port = 1; + pAC->Rlmt.Net[0].Preference = Port; + pAC->Rlmt.Net[0].PrefPort = Port; + } + } else { + printk("sk98lin: Illegal value \"%s\" for PrefPort\n", + PrefPort[pAC->Index]); + } + } + + pAC->RlmtNets = 1; + + if (RlmtMode != NULL && pAC->Index<SK_MAX_CARD_PARAM && + RlmtMode[pAC->Index] != NULL) { + if (strcmp(RlmtMode[pAC->Index], "") == 0) { + pAC->RlmtMode = 0; + } else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) { + pAC->RlmtMode = SK_RLMT_CHECK_LINK; + } else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) { + pAC->RlmtMode = SK_RLMT_CHECK_LINK | + SK_RLMT_CHECK_LOC_LINK; + } else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) { + pAC->RlmtMode = SK_RLMT_CHECK_LINK | + SK_RLMT_CHECK_LOC_LINK | + SK_RLMT_CHECK_SEG; + } else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) && + (pAC->GIni.GIMacsFound == 2)) { + pAC->RlmtMode = SK_RLMT_CHECK_LINK; + pAC->RlmtNets = 2; + } else { + printk("sk98lin: Illegal value \"%s\" for" + " RlmtMode, using default\n", + RlmtMode[pAC->Index]); + pAC->RlmtMode = 0; + } + } else { + pAC->RlmtMode = 0; + } + + /* + ** Check the interrupt moderation parameters + */ + if (Moderation[pAC->Index] != NULL) { + if (strcmp(Moderation[pAC->Index], "") == 0) { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE; + } else if (strcmp(Moderation[pAC->Index], "Static") == 0) { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_STATIC; + } else if (strcmp(Moderation[pAC->Index], "Dynamic") == 0) { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_DYNAMIC; + } else if (strcmp(Moderation[pAC->Index], "None") == 0) { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE; + } else { + printk("sk98lin: Illegal value \"%s\" for Moderation.\n" + " Disable interrupt moderation.\n", + Moderation[pAC->Index]); + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE; + } + } else { + pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE; + } + + if (Stats[pAC->Index] 
!= NULL) { + if (strcmp(Stats[pAC->Index], "Yes") == 0) { + pAC->DynIrqModInfo.DisplayStats = SK_TRUE; + } else { + pAC->DynIrqModInfo.DisplayStats = SK_FALSE; + } + } else { + pAC->DynIrqModInfo.DisplayStats = SK_FALSE; + } + + if (ModerationMask[pAC->Index] != NULL) { + if (strcmp(ModerationMask[pAC->Index], "Rx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY; + } else if (strcmp(ModerationMask[pAC->Index], "Tx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_ONLY; + } else if (strcmp(ModerationMask[pAC->Index], "Sp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_ONLY; + } else if (strcmp(ModerationMask[pAC->Index], "RxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX; + } else if (strcmp(ModerationMask[pAC->Index], "SpRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX; + } else if (strcmp(ModerationMask[pAC->Index], "RxTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX; + } else if (strcmp(ModerationMask[pAC->Index], "TxRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX; + } else if (strcmp(ModerationMask[pAC->Index], "TxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX; + } else if (strcmp(ModerationMask[pAC->Index], "SpTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX; + } else if (strcmp(ModerationMask[pAC->Index], "RxTxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "RxSpTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "TxRxSp") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "TxSpRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "SpTxRx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else if (strcmp(ModerationMask[pAC->Index], "SpRxTx") == 0) { + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP; + } else { /* some rubbish */ + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY; + } + } else { /* operator has stated nothing */ + pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX; + } + + if (AutoSizing[pAC->Index] != NULL) { + if (strcmp(AutoSizing[pAC->Index], "On") == 0) { + pAC->DynIrqModInfo.AutoSizing = SK_FALSE; + } else { + pAC->DynIrqModInfo.AutoSizing = SK_FALSE; + } + } else { /* operator has stated nothing */ + pAC->DynIrqModInfo.AutoSizing = SK_FALSE; + } + + if (IntsPerSec[pAC->Index] != 0) { + if ((IntsPerSec[pAC->Index]< C_INT_MOD_IPS_LOWER_RANGE) || + (IntsPerSec[pAC->Index] > C_INT_MOD_IPS_UPPER_RANGE)) { + printk("sk98lin: Illegal value \"%d\" for IntsPerSec. 
(Range: %d - %d)\n" + " Using default value of %i.\n", + IntsPerSec[pAC->Index], + C_INT_MOD_IPS_LOWER_RANGE, + C_INT_MOD_IPS_UPPER_RANGE, + C_INTS_PER_SEC_DEFAULT); + pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT; + } else { + pAC->DynIrqModInfo.MaxModIntsPerSec = IntsPerSec[pAC->Index]; + } + } else { + pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT; + } + + /* + ** Evaluate upper and lower moderation threshold + */ + pAC->DynIrqModInfo.MaxModIntsPerSecUpperLimit = + pAC->DynIrqModInfo.MaxModIntsPerSec + + (pAC->DynIrqModInfo.MaxModIntsPerSec / 2); + + pAC->DynIrqModInfo.MaxModIntsPerSecLowerLimit = + pAC->DynIrqModInfo.MaxModIntsPerSec - + (pAC->DynIrqModInfo.MaxModIntsPerSec / 2); + + pAC->DynIrqModInfo.PrevTimeVal = jiffies; /* initial value */ + + +} /* GetConfiguration */ + + +/***************************************************************************** + * + * ProductStr - return a adapter identification string from vpd + * + * Description: + * This function reads the product name string from the vpd area + * and puts it the field pAC->DeviceString. + * + * Returns: N/A + */ +static inline int ProductStr( + SK_AC *pAC, /* pointer to adapter context */ + char *DeviceStr, /* result string */ + int StrLen /* length of the string */ +) +{ +char Keyword[] = VPD_NAME; /* vpd productname identifier */ +int ReturnCode; /* return code from vpd_read */ +unsigned long Flags; + + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + ReturnCode = VpdRead(pAC, pAC->IoBase, Keyword, DeviceStr, &StrLen); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + + return ReturnCode; +} /* ProductStr */ + +/***************************************************************************** + * + * StartDrvCleanupTimer - Start timer to check for descriptors which + * might be placed in descriptor ring, but + * havent been handled up to now + * + * Description: + * This function requests a HW-timer fo the Yukon card. The actions to + * perform when this timer expires, are located in the SkDrvEvent(). + * + * Returns: N/A + */ +static void +StartDrvCleanupTimer(SK_AC *pAC) { + SK_EVPARA EventParam; /* Event struct for timer event */ + + SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); + EventParam.Para32[0] = SK_DRV_RX_CLEANUP_TIMER; + SkTimerStart(pAC, pAC->IoBase, &pAC->DrvCleanupTimer, + SK_DRV_RX_CLEANUP_TIMER_LENGTH, + SKGE_DRV, SK_DRV_TIMER, EventParam); +} + +/***************************************************************************** + * + * StopDrvCleanupTimer - Stop timer to check for descriptors + * + * Description: + * This function requests a HW-timer fo the Yukon card. The actions to + * perform when this timer expires, are located in the SkDrvEvent(). + * + * Returns: N/A + */ +static void +StopDrvCleanupTimer(SK_AC *pAC) { + SkTimerStop(pAC, pAC->IoBase, &pAC->DrvCleanupTimer); + SK_MEMSET((char *) &pAC->DrvCleanupTimer, 0, sizeof(SK_TIMER)); +} + +/****************************************************************************/ +/* functions for common modules *********************************************/ +/****************************************************************************/ + + +/***************************************************************************** + * + * SkDrvAllocRlmtMbuf - allocate an RLMT mbuf + * + * Description: + * This routine returns an RLMT mbuf or NULL. The RLMT Mbuf structure + * is embedded into a socket buff data area. + * + * Context: + * runtime + * + * Returns: + * NULL or pointer to Mbuf. 
+ */ +SK_MBUF *SkDrvAllocRlmtMbuf( +SK_AC *pAC, /* pointer to adapter context */ +SK_IOC IoC, /* the IO-context */ +unsigned BufferSize) /* size of the requested buffer */ +{ +SK_MBUF *pRlmtMbuf; /* pointer to a new rlmt-mbuf structure */ +struct sk_buff *pMsgBlock; /* pointer to a new message block */ + + pMsgBlock = alloc_skb(BufferSize + sizeof(SK_MBUF), GFP_ATOMIC); + if (pMsgBlock == NULL) { + return (NULL); + } + pRlmtMbuf = (SK_MBUF*) pMsgBlock->data; + skb_reserve(pMsgBlock, sizeof(SK_MBUF)); + pRlmtMbuf->pNext = NULL; + pRlmtMbuf->pOs = pMsgBlock; + pRlmtMbuf->pData = pMsgBlock->data; /* Data buffer. */ + pRlmtMbuf->Size = BufferSize; /* Data buffer size. */ + pRlmtMbuf->Length = 0; /* Length of packet (<= Size). */ + return (pRlmtMbuf); + +} /* SkDrvAllocRlmtMbuf */ + + +/***************************************************************************** + * + * SkDrvFreeRlmtMbuf - free an RLMT mbuf + * + * Description: + * This routine frees one or more RLMT mbuf(s). + * + * Context: + * runtime + * + * Returns: + * Nothing + */ +void SkDrvFreeRlmtMbuf( +SK_AC *pAC, /* pointer to adapter context */ +SK_IOC IoC, /* the IO-context */ +SK_MBUF *pMbuf) /* size of the requested buffer */ +{ +SK_MBUF *pFreeMbuf; +SK_MBUF *pNextMbuf; + + pFreeMbuf = pMbuf; + do { + pNextMbuf = pFreeMbuf->pNext; + DEV_KFREE_SKB_ANY(pFreeMbuf->pOs); + pFreeMbuf = pNextMbuf; + } while ( pFreeMbuf != NULL ); +} /* SkDrvFreeRlmtMbuf */ + + +/***************************************************************************** + * + * SkOsGetTime - provide a time value + * + * Description: + * This routine provides a time value. The unit is 1/HZ (defined by Linux). + * It is not used for absolute time, but only for time differences. + * + * + * Returns: + * Time value + */ +SK_U64 SkOsGetTime(SK_AC *pAC) +{ + SK_U64 PrivateJiffies; + SkOsGetTimeCurrent(pAC, &PrivateJiffies); + return PrivateJiffies; +} /* SkOsGetTime */ + + +/***************************************************************************** + * + * SkPciReadCfgDWord - read a 32 bit value from pci config space + * + * Description: + * This routine reads a 32 bit value from the pci configuration + * space. + * + * Returns: + * 0 - indicate everything worked ok. + * != 0 - error indication + */ +int SkPciReadCfgDWord( +SK_AC *pAC, /* Adapter Control structure pointer */ +int PciAddr, /* PCI register address */ +SK_U32 *pVal) /* pointer to store the read value */ +{ + pci_read_config_dword(pAC->PciDev, PciAddr, pVal); + return(0); +} /* SkPciReadCfgDWord */ + + +/***************************************************************************** + * + * SkPciReadCfgWord - read a 16 bit value from pci config space + * + * Description: + * This routine reads a 16 bit value from the pci configuration + * space. + * + * Returns: + * 0 - indicate everything worked ok. + * != 0 - error indication + */ +int SkPciReadCfgWord( +SK_AC *pAC, /* Adapter Control structure pointer */ +int PciAddr, /* PCI register address */ +SK_U16 *pVal) /* pointer to store the read value */ +{ + pci_read_config_word(pAC->PciDev, PciAddr, pVal); + return(0); +} /* SkPciReadCfgWord */ + + +/***************************************************************************** + * + * SkPciReadCfgByte - read a 8 bit value from pci config space + * + * Description: + * This routine reads a 8 bit value from the pci configuration + * space. + * + * Returns: + * 0 - indicate everything worked ok. 
+ * != 0 - error indication + */ +int SkPciReadCfgByte( +SK_AC *pAC, /* Adapter Control structure pointer */ +int PciAddr, /* PCI register address */ +SK_U8 *pVal) /* pointer to store the read value */ +{ + pci_read_config_byte(pAC->PciDev, PciAddr, pVal); + return(0); +} /* SkPciReadCfgByte */ + + +/***************************************************************************** + * + * SkPciWriteCfgWord - write a 16 bit value to pci config space + * + * Description: + * This routine writes a 16 bit value to the pci configuration + * space. The flag PciConfigUp indicates whether the config space + * is accesible or must be set up first. + * + * Returns: + * 0 - indicate everything worked ok. + * != 0 - error indication + */ +int SkPciWriteCfgWord( +SK_AC *pAC, /* Adapter Control structure pointer */ +int PciAddr, /* PCI register address */ +SK_U16 Val) /* pointer to store the read value */ +{ + pci_write_config_word(pAC->PciDev, PciAddr, Val); + return(0); +} /* SkPciWriteCfgWord */ + + +/***************************************************************************** + * + * SkPciWriteCfgWord - write a 8 bit value to pci config space + * + * Description: + * This routine writes a 8 bit value to the pci configuration + * space. The flag PciConfigUp indicates whether the config space + * is accesible or must be set up first. + * + * Returns: + * 0 - indicate everything worked ok. + * != 0 - error indication + */ +int SkPciWriteCfgByte( +SK_AC *pAC, /* Adapter Control structure pointer */ +int PciAddr, /* PCI register address */ +SK_U8 Val) /* pointer to store the read value */ +{ + pci_write_config_byte(pAC->PciDev, PciAddr, Val); + return(0); +} /* SkPciWriteCfgByte */ + + +/***************************************************************************** + * + * SkDrvEvent - handle driver events + * + * Description: + * This function handles events from all modules directed to the driver + * + * Context: + * Is called under protection of slow path lock. 
+ * + * Returns: + * 0 if everything ok + * < 0 on error + * + */ +int SkDrvEvent( +SK_AC *pAC, /* pointer to adapter context */ +SK_IOC IoC, /* io-context */ +SK_U32 Event, /* event-id */ +SK_EVPARA Param) /* event-parameter */ +{ +SK_MBUF *pRlmtMbuf; /* pointer to a rlmt-mbuf structure */ +struct sk_buff *pMsg; /* pointer to a message block */ +int FromPort; /* the port from which we switch away */ +int ToPort; /* the port we switch to */ +SK_EVPARA NewPara; /* parameter for further events */ +int Stat; +unsigned long Flags; +SK_BOOL DualNet; + + switch (Event) { + case SK_DRV_ADAP_FAIL: + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("ADAPTER FAIL EVENT\n")); + printk("%s: Adapter failed.\n", pAC->dev[0]->name); + /* disable interrupts */ + SK_OUT32(pAC->IoBase, B0_IMSK, 0); + /* cgoos */ + break; + case SK_DRV_PORT_FAIL: + FromPort = Param.Para32[0]; + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("PORT FAIL EVENT, Port: %d\n", FromPort)); + if (FromPort == 0) { + printk("%s: Port A failed.\n", pAC->dev[0]->name); + } else { + printk("%s: Port B failed.\n", pAC->dev[1]->name); + } + /* cgoos */ + break; + case SK_DRV_PORT_RESET: /* SK_U32 PortIdx */ + /* action list 4 */ + FromPort = Param.Para32[0]; + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("PORT RESET EVENT, Port: %d ", FromPort)); + NewPara.Para64 = FromPort; + SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara); + spin_lock_irqsave( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + + SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST); + netif_carrier_off(pAC->dev[Param.Para32[0]]); + spin_unlock_irqrestore( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + + /* clear rx ring from received frames */ + ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE); + + ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]); + spin_lock_irqsave( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + + /* tschilling: Handling of return value inserted. */ + if (SkGeInitPort(pAC, IoC, FromPort)) { + if (FromPort == 0) { + printk("%s: SkGeInitPort A failed.\n", pAC->dev[0]->name); + } else { + printk("%s: SkGeInitPort B failed.\n", pAC->dev[1]->name); + } + } + SkAddrMcUpdate(pAC,IoC, FromPort); + PortReInitBmu(pAC, FromPort); + SkGePollTxD(pAC, IoC, FromPort, SK_TRUE); + ClearAndStartRx(pAC, FromPort); + spin_unlock_irqrestore( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + break; + case SK_DRV_NET_UP: /* SK_U32 PortIdx */ + { struct net_device *dev = pAC->dev[Param.Para32[0]]; + /* action list 5 */ + FromPort = Param.Para32[0]; + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("NET UP EVENT, Port: %d ", Param.Para32[0])); + /* Mac update */ + SkAddrMcUpdate(pAC,IoC, FromPort); + + if (DoPrintInterfaceChange) { + printk("%s: network connection up using" + " port %c\n", pAC->dev[Param.Para32[0]]->name, 'A'+Param.Para32[0]); + + /* tschilling: Values changed according to LinkSpeedUsed. 
*/ + Stat = pAC->GIni.GP[FromPort].PLinkSpeedUsed; + if (Stat == SK_LSPEED_STAT_10MBPS) { + printk(" speed: 10\n"); + } else if (Stat == SK_LSPEED_STAT_100MBPS) { + printk(" speed: 100\n"); + } else if (Stat == SK_LSPEED_STAT_1000MBPS) { + printk(" speed: 1000\n"); + } else { + printk(" speed: unknown\n"); + } + + + Stat = pAC->GIni.GP[FromPort].PLinkModeStatus; + if (Stat == SK_LMODE_STAT_AUTOHALF || + Stat == SK_LMODE_STAT_AUTOFULL) { + printk(" autonegotiation: yes\n"); + } + else { + printk(" autonegotiation: no\n"); + } + if (Stat == SK_LMODE_STAT_AUTOHALF || + Stat == SK_LMODE_STAT_HALF) { + printk(" duplex mode: half\n"); + } + else { + printk(" duplex mode: full\n"); + } + Stat = pAC->GIni.GP[FromPort].PFlowCtrlStatus; + if (Stat == SK_FLOW_STAT_REM_SEND ) { + printk(" flowctrl: remote send\n"); + } + else if (Stat == SK_FLOW_STAT_LOC_SEND ){ + printk(" flowctrl: local send\n"); + } + else if (Stat == SK_FLOW_STAT_SYMMETRIC ){ + printk(" flowctrl: symmetric\n"); + } + else { + printk(" flowctrl: none\n"); + } + + /* tschilling: Check against CopperType now. */ + if ((pAC->GIni.GICopperType == SK_TRUE) && + (pAC->GIni.GP[FromPort].PLinkSpeedUsed == + SK_LSPEED_STAT_1000MBPS)) { + Stat = pAC->GIni.GP[FromPort].PMSStatus; + if (Stat == SK_MS_STAT_MASTER ) { + printk(" role: master\n"); + } + else if (Stat == SK_MS_STAT_SLAVE ) { + printk(" role: slave\n"); + } + else { + printk(" role: ???\n"); + } + } + + /* + Display dim (dynamic interrupt moderation) + informations + */ + if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC) + printk(" irq moderation: static (%d ints/sec)\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC) + printk(" irq moderation: dynamic (%d ints/sec)\n", + pAC->DynIrqModInfo.MaxModIntsPerSec); + else + printk(" irq moderation: disabled\n"); + + + printk(" scatter-gather: %s\n", + (dev->features & NETIF_F_SG) ? "enabled" : "disabled"); + printk(" tx-checksum: %s\n", + (dev->features & NETIF_F_IP_CSUM) ? "enabled" : "disabled"); + printk(" rx-checksum: %s\n", + pAC->RxPort[Param.Para32[0]].RxCsum ? "enabled" : "disabled"); + + } else { + DoPrintInterfaceChange = SK_TRUE; + } + + if ((Param.Para32[0] != pAC->ActivePort) && + (pAC->RlmtNets == 1)) { + NewPara.Para32[0] = pAC->ActivePort; + NewPara.Para32[1] = Param.Para32[0]; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_INTERN, + NewPara); + } + + /* Inform the world that link protocol is up. 
*/ + netif_carrier_on(dev); + break; + } + case SK_DRV_NET_DOWN: /* SK_U32 Reason */ + /* action list 7 */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("NET DOWN EVENT ")); + if (DoPrintInterfaceChange) { + printk("%s: network connection down\n", + pAC->dev[Param.Para32[1]]->name); + } else { + DoPrintInterfaceChange = SK_TRUE; + } + netif_carrier_off(pAC->dev[Param.Para32[1]]); + break; + case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("PORT SWITCH HARD ")); + case SK_DRV_SWITCH_SOFT: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */ + /* action list 6 */ + printk("%s: switching to port %c\n", pAC->dev[0]->name, + 'A'+Param.Para32[1]); + case SK_DRV_SWITCH_INTERN: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */ + FromPort = Param.Para32[0]; + ToPort = Param.Para32[1]; + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("PORT SWITCH EVENT, From: %d To: %d (Pref %d) ", + FromPort, ToPort, pAC->Rlmt.Net[0].PrefPort)); + NewPara.Para64 = FromPort; + SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara); + NewPara.Para64 = ToPort; + SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara); + spin_lock_irqsave( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); + SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_SOFT_RST); + SkGeStopPort(pAC, IoC, ToPort, SK_STOP_ALL, SK_SOFT_RST); + spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); + spin_unlock_irqrestore( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + + ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE); /* clears rx ring */ + ReceiveIrq(pAC, &pAC->RxPort[ToPort], SK_FALSE); /* clears rx ring */ + + ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]); + ClearTxRing(pAC, &pAC->TxPort[ToPort][TX_PRIO_LOW]); + spin_lock_irqsave( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); + pAC->ActivePort = ToPort; +#if 0 + SetQueueSizes(pAC); +#else + /* tschilling: New common function with minimum size check. */ + DualNet = SK_FALSE; + if (pAC->RlmtNets == 2) { + DualNet = SK_TRUE; + } + + if (SkGeInitAssignRamToQueues( + pAC, + pAC->ActivePort, + DualNet)) { + spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); + spin_unlock_irqrestore( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + printk("SkGeInitAssignRamToQueues failed.\n"); + break; + } +#endif + /* tschilling: Handling of return values inserted. 
*/ + if (SkGeInitPort(pAC, IoC, FromPort) || + SkGeInitPort(pAC, IoC, ToPort)) { + printk("%s: SkGeInitPort failed.\n", pAC->dev[0]->name); + } + if (Event == SK_DRV_SWITCH_SOFT) { + SkMacRxTxEnable(pAC, IoC, FromPort); + } + SkMacRxTxEnable(pAC, IoC, ToPort); + SkAddrSwap(pAC, IoC, FromPort, ToPort); + SkAddrMcUpdate(pAC, IoC, FromPort); + SkAddrMcUpdate(pAC, IoC, ToPort); + PortReInitBmu(pAC, FromPort); + PortReInitBmu(pAC, ToPort); + SkGePollTxD(pAC, IoC, FromPort, SK_TRUE); + SkGePollTxD(pAC, IoC, ToPort, SK_TRUE); + ClearAndStartRx(pAC, FromPort); + ClearAndStartRx(pAC, ToPort); + spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock); + spin_unlock_irqrestore( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + break; + case SK_DRV_RLMT_SEND: /* SK_MBUF *pMb */ + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("RLS ")); + pRlmtMbuf = (SK_MBUF*) Param.pParaPtr; + pMsg = (struct sk_buff*) pRlmtMbuf->pOs; + skb_put(pMsg, pRlmtMbuf->Length); + if (XmitFrame(pAC, &pAC->TxPort[pRlmtMbuf->PortIdx][TX_PRIO_LOW], + pMsg) < 0) + + DEV_KFREE_SKB_ANY(pMsg); + break; + case SK_DRV_TIMER: + if (Param.Para32[0] == SK_DRV_MODERATION_TIMER) { + /* + ** expiration of the moderation timer implies that + ** dynamic moderation is to be applied + */ + SkDimStartModerationTimer(pAC); + SkDimModerate(pAC); + if (pAC->DynIrqModInfo.DisplayStats) { + SkDimDisplayModerationSettings(pAC); + } + } else if (Param.Para32[0] == SK_DRV_RX_CLEANUP_TIMER) { + /* + ** check if we need to check for descriptors which + ** haven't been handled the last millisecs + */ + StartDrvCleanupTimer(pAC); + if (pAC->GIni.GIMacsFound == 2) { + ReceiveIrq(pAC, &pAC->RxPort[1], SK_FALSE); + } + ReceiveIrq(pAC, &pAC->RxPort[0], SK_FALSE); + } else { + printk("Expiration of unknown timer\n"); + } + break; + default: + break; + } + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, + ("END EVENT ")); + + return (0); +} /* SkDrvEvent */ + + +/***************************************************************************** + * + * SkErrorLog - log errors + * + * Description: + * This function logs errors to the system buffer and to the console + * + * Returns: + * 0 if everything ok + * < 0 on error + * + */ +void SkErrorLog( +SK_AC *pAC, +int ErrClass, +int ErrNum, +char *pErrorMsg) +{ +char ClassStr[80]; + + switch (ErrClass) { + case SK_ERRCL_OTHER: + strcpy(ClassStr, "Other error"); + break; + case SK_ERRCL_CONFIG: + strcpy(ClassStr, "Configuration error"); + break; + case SK_ERRCL_INIT: + strcpy(ClassStr, "Initialization error"); + break; + case SK_ERRCL_NORES: + strcpy(ClassStr, "Out of resources error"); + break; + case SK_ERRCL_SW: + strcpy(ClassStr, "internal Software error"); + break; + case SK_ERRCL_HW: + strcpy(ClassStr, "Hardware failure"); + break; + case SK_ERRCL_COMM: + strcpy(ClassStr, "Communication error"); + break; + } + printk(KERN_INFO "%s: -- ERROR --\n Class: %s\n" + " Nr: 0x%x\n Msg: %s\n", pAC->dev[0]->name, + ClassStr, ErrNum, pErrorMsg); + +} /* SkErrorLog */ + +#ifdef SK_DIAG_SUPPORT + +/***************************************************************************** + * + * SkDrvEnterDiagMode - handles DIAG attach request + * + * Description: + * Notify the kernel to NOT access the card any longer due to DIAG + * Deinitialize the Card + * + * Returns: + * int + */ +int SkDrvEnterDiagMode( +SK_AC *pAc) /* pointer to adapter context */ +{ + DEV_NET *pNet = netdev_priv(pAc->dev[0]); + SK_AC *pAC = pNet->pAC; + + SK_MEMCPY(&(pAc->PnmiBackup), &(pAc->PnmiStruct), + 
sizeof(SK_PNMI_STRUCT_DATA)); + + pAC->DiagModeActive = DIAG_ACTIVE; + if (pAC->BoardLevel > SK_INIT_DATA) { + if (netif_running(pAC->dev[0])) { + pAC->WasIfUp[0] = SK_TRUE; + pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */ + DoPrintInterfaceChange = SK_FALSE; + SkDrvDeInitAdapter(pAC, 0); /* performs SkGeClose */ + } else { + pAC->WasIfUp[0] = SK_FALSE; + } + if (pNet != netdev_priv(pAC->dev[1])) { + pNet = netdev_priv(pAC->dev[1]); + if (netif_running(pAC->dev[1])) { + pAC->WasIfUp[1] = SK_TRUE; + pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */ + DoPrintInterfaceChange = SK_FALSE; + SkDrvDeInitAdapter(pAC, 1); /* do SkGeClose */ + } else { + pAC->WasIfUp[1] = SK_FALSE; + } + } + pAC->BoardLevel = SK_INIT_DATA; + } + return(0); +} + +/***************************************************************************** + * + * SkDrvLeaveDiagMode - handles DIAG detach request + * + * Description: + * Notify the kernel to may access the card again after use by DIAG + * Initialize the Card + * + * Returns: + * int + */ +int SkDrvLeaveDiagMode( +SK_AC *pAc) /* pointer to adapter control context */ +{ + SK_MEMCPY(&(pAc->PnmiStruct), &(pAc->PnmiBackup), + sizeof(SK_PNMI_STRUCT_DATA)); + pAc->DiagModeActive = DIAG_NOTACTIVE; + pAc->Pnmi.DiagAttached = SK_DIAG_IDLE; + if (pAc->WasIfUp[0] == SK_TRUE) { + pAc->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */ + DoPrintInterfaceChange = SK_FALSE; + SkDrvInitAdapter(pAc, 0); /* first device */ + } + if (pAc->WasIfUp[1] == SK_TRUE) { + pAc->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */ + DoPrintInterfaceChange = SK_FALSE; + SkDrvInitAdapter(pAc, 1); /* second device */ + } + return(0); +} + +/***************************************************************************** + * + * ParseDeviceNbrFromSlotName - Evaluate PCI device number + * + * Description: + * This function parses the PCI slot name information string and will + * retrieve the devcie number out of it. The slot_name maintianed by + * linux is in the form of '02:0a.0', whereas the first two characters + * represent the bus number in hex (in the sample above this is + * pci bus 0x02) and the next two characters the device number (0x0a). + * + * Returns: + * SK_U32: The device number from the PCI slot name + */ + +static SK_U32 ParseDeviceNbrFromSlotName( +const char *SlotName) /* pointer to pci slot name eg. '02:0a.0' */ +{ + char *CurrCharPos = (char *) SlotName; + int FirstNibble = -1; + int SecondNibble = -1; + SK_U32 Result = 0; + + while (*CurrCharPos != '\0') { + if (*CurrCharPos == ':') { + while (*CurrCharPos != '.') { + CurrCharPos++; + if ( (*CurrCharPos >= '0') && + (*CurrCharPos <= '9')) { + if (FirstNibble == -1) { + /* dec. value for '0' */ + FirstNibble = *CurrCharPos - 48; + } else { + SecondNibble = *CurrCharPos - 48; + } + } else if ( (*CurrCharPos >= 'a') && + (*CurrCharPos <= 'f') ) { + if (FirstNibble == -1) { + FirstNibble = *CurrCharPos - 87; + } else { + SecondNibble = *CurrCharPos - 87; + } + } else { + Result = 0; + } + } + + Result = FirstNibble; + Result = Result << 4; /* first nibble is higher one */ + Result = Result | SecondNibble; + } + CurrCharPos++; /* next character */ + } + return (Result); +} + +/**************************************************************************** + * + * SkDrvDeInitAdapter - deinitialize adapter (this function is only + * called if Diag attaches to that card) + * + * Description: + * Close initialized adapter. 
+ * + * Returns: + * 0 - on success + * error code - on error + */ +static int SkDrvDeInitAdapter( +SK_AC *pAC, /* pointer to adapter context */ +int devNbr) /* what device is to be handled */ +{ + struct SK_NET_DEVICE *dev; + + dev = pAC->dev[devNbr]; + + /* On Linux 2.6 the network driver does NOT mess with reference + ** counts. The driver MUST be able to be unloaded at any time + ** due to the possibility of hotplug. + */ + if (SkGeClose(dev) != 0) { + return (-1); + } + return (0); + +} /* SkDrvDeInitAdapter() */ + +/**************************************************************************** + * + * SkDrvInitAdapter - Initialize adapter (this function is only + * called if Diag deattaches from that card) + * + * Description: + * Close initialized adapter. + * + * Returns: + * 0 - on success + * error code - on error + */ +static int SkDrvInitAdapter( +SK_AC *pAC, /* pointer to adapter context */ +int devNbr) /* what device is to be handled */ +{ + struct SK_NET_DEVICE *dev; + + dev = pAC->dev[devNbr]; + + if (SkGeOpen(dev) != 0) { + return (-1); + } + + /* + ** Use correct MTU size and indicate to kernel TX queue can be started + */ + if (SkGeChangeMtu(dev, dev->mtu) != 0) { + return (-1); + } + return (0); + +} /* SkDrvInitAdapter */ + +#endif + +#ifdef DEBUG +/****************************************************************************/ +/* "debug only" section *****************************************************/ +/****************************************************************************/ + + +/***************************************************************************** + * + * DumpMsg - print a frame + * + * Description: + * This function prints frames to the system logfile/to the console. + * + * Returns: N/A + * + */ +static void DumpMsg(struct sk_buff *skb, char *str) +{ + int msglen; + + if (skb == NULL) { + printk("DumpMsg(): NULL-Message\n"); + return; + } + + if (skb->data == NULL) { + printk("DumpMsg(): Message empty\n"); + return; + } + + msglen = skb->len; + if (msglen > 64) + msglen = 64; + + printk("--- Begin of message from %s , len %d (from %d) ----\n", str, msglen, skb->len); + + DumpData((char *)skb->data, msglen); + + printk("------- End of message ---------\n"); +} /* DumpMsg */ + + + +/***************************************************************************** + * + * DumpData - print a data area + * + * Description: + * This function prints a area of data to the system logfile/to the + * console. + * + * Returns: N/A + * + */ +static void DumpData(char *p, int size) +{ +register int i; +int haddr, addr; +char hex_buffer[180]; +char asc_buffer[180]; +char HEXCHAR[] = "0123456789ABCDEF"; + + addr = 0; + haddr = 0; + hex_buffer[0] = 0; + asc_buffer[0] = 0; + for (i=0; i < size; ) { + if (*p >= '0' && *p <='z') + asc_buffer[addr] = *p; + else + asc_buffer[addr] = '.'; + addr++; + asc_buffer[addr] = 0; + hex_buffer[haddr] = HEXCHAR[(*p & 0xf0) >> 4]; + haddr++; + hex_buffer[haddr] = HEXCHAR[*p & 0x0f]; + haddr++; + hex_buffer[haddr] = ' '; + haddr++; + hex_buffer[haddr] = 0; + p++; + i++; + if (i%16 == 0) { + printk("%s %s\n", hex_buffer, asc_buffer); + addr = 0; + haddr = 0; + } + } +} /* DumpData */ + + +/***************************************************************************** + * + * DumpLong - print a data area as long values + * + * Description: + * This function prints a area of data to the system logfile/to the + * console. 
+ * + * Returns: N/A + * + */ +static void DumpLong(char *pc, int size) +{ +register int i; +int haddr, addr; +char hex_buffer[180]; +char asc_buffer[180]; +char HEXCHAR[] = "0123456789ABCDEF"; +long *p; +int l; + + addr = 0; + haddr = 0; + hex_buffer[0] = 0; + asc_buffer[0] = 0; + p = (long*) pc; + for (i=0; i < size; ) { + l = (long) *p; + hex_buffer[haddr] = HEXCHAR[(l >> 28) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[(l >> 24) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[(l >> 20) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[(l >> 16) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[(l >> 12) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[(l >> 8) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[(l >> 4) & 0xf]; + haddr++; + hex_buffer[haddr] = HEXCHAR[l & 0x0f]; + haddr++; + hex_buffer[haddr] = ' '; + haddr++; + hex_buffer[haddr] = 0; + p++; + i++; + if (i%8 == 0) { + printk("%4x %s\n", (i-8)*4, hex_buffer); + haddr = 0; + } + } + printk("------------------------\n"); +} /* DumpLong */ + +#endif + +static int __devinit skge_probe_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + SK_AC *pAC; + DEV_NET *pNet = NULL; + struct net_device *dev = NULL; + static int boards_found = 0; + int error = -ENODEV; + int using_dac = 0; + char DeviceStr[80]; + + if (pci_enable_device(pdev)) + goto out; + + /* Configure DMA attributes. */ + if (sizeof(dma_addr_t) > sizeof(u32) && + !(error = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { + using_dac = 1; + error = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (error < 0) { + printk(KERN_ERR "sk98lin %s unable to obtain 64 bit DMA " + "for consistent allocations\n", pci_name(pdev)); + goto out_disable_device; + } + } else { + error = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (error) { + printk(KERN_ERR "sk98lin %s no usable DMA configuration\n", + pci_name(pdev)); + goto out_disable_device; + } + } + + error = -ENOMEM; + dev = alloc_etherdev(sizeof(DEV_NET)); + if (!dev) { + printk(KERN_ERR "sk98lin: unable to allocate etherdev " + "structure!\n"); + goto out_disable_device; + } + + pNet = netdev_priv(dev); + pNet->pAC = kzalloc(sizeof(SK_AC), GFP_KERNEL); + if (!pNet->pAC) { + printk(KERN_ERR "sk98lin: unable to allocate adapter " + "structure!\n"); + goto out_free_netdev; + } + + pAC = pNet->pAC; + pAC->PciDev = pdev; + + pAC->dev[0] = dev; + pAC->dev[1] = dev; + pAC->CheckQueue = SK_FALSE; + + dev->irq = pdev->irq; + + error = SkGeInitPCI(pAC); + if (error) { + printk(KERN_ERR "sk98lin: PCI setup failed: %i\n", error); + goto out_free_netdev; + } + + dev->open = &SkGeOpen; + dev->stop = &SkGeClose; + dev->hard_start_xmit = &SkGeXmit; + dev->get_stats = &SkGeStats; + dev->set_multicast_list = &SkGeSetRxMode; + dev->set_mac_address = &SkGeSetMacAddr; + dev->do_ioctl = &SkGeIoctl; + dev->change_mtu = &SkGeChangeMtu; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = &SkGePollController; +#endif + SET_NETDEV_DEV(dev, &pdev->dev); + SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps); + + /* Use only if yukon hardware */ + if (pAC->ChipsetType) { +#ifdef USE_SK_TX_CHECKSUM + dev->features |= NETIF_F_IP_CSUM; +#endif +#ifdef SK_ZEROCOPY + dev->features |= NETIF_F_SG; +#endif +#ifdef USE_SK_RX_CHECKSUM + pAC->RxPort[0].RxCsum = 1; +#endif + } + + if (using_dac) + dev->features |= NETIF_F_HIGHDMA; + + pAC->Index = boards_found++; + + error = SkGeBoardInit(dev, pAC); + if (error) + goto out_free_netdev; + + /* Read Adapter name from VPD */ + if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) != 0) { + error = -EIO; + 
printk(KERN_ERR "sk98lin: Could not read VPD data.\n"); + goto out_free_resources; + } + + /* Register net device */ + error = register_netdev(dev); + if (error) { + printk(KERN_ERR "sk98lin: Could not register device.\n"); + goto out_free_resources; + } + + /* Print adapter specific string from vpd */ + printk("%s: %s\n", dev->name, DeviceStr); + + /* Print configuration settings */ + printk(" PrefPort:%c RlmtMode:%s\n", + 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber, + (pAC->RlmtMode==0) ? "Check Link State" : + ((pAC->RlmtMode==1) ? "Check Link State" : + ((pAC->RlmtMode==3) ? "Check Local Port" : + ((pAC->RlmtMode==7) ? "Check Segmentation" : + ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error"))))); + + SkGeYellowLED(pAC, pAC->IoBase, 1); + + memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + pNet->PortNr = 0; + pNet->NetNr = 0; + + boards_found++; + + pci_set_drvdata(pdev, dev); + + /* More then one port found */ + if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) { + dev = alloc_etherdev(sizeof(DEV_NET)); + if (!dev) { + printk(KERN_ERR "sk98lin: unable to allocate etherdev " + "structure!\n"); + goto single_port; + } + + pNet = netdev_priv(dev); + pNet->PortNr = 1; + pNet->NetNr = 1; + pNet->pAC = pAC; + + dev->open = &SkGeOpen; + dev->stop = &SkGeClose; + dev->hard_start_xmit = &SkGeXmit; + dev->get_stats = &SkGeStats; + dev->set_multicast_list = &SkGeSetRxMode; + dev->set_mac_address = &SkGeSetMacAddr; + dev->do_ioctl = &SkGeIoctl; + dev->change_mtu = &SkGeChangeMtu; + SET_NETDEV_DEV(dev, &pdev->dev); + SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps); + + if (pAC->ChipsetType) { +#ifdef USE_SK_TX_CHECKSUM + dev->features |= NETIF_F_IP_CSUM; +#endif +#ifdef SK_ZEROCOPY + dev->features |= NETIF_F_SG; +#endif +#ifdef USE_SK_RX_CHECKSUM + pAC->RxPort[1].RxCsum = 1; +#endif + } + + if (using_dac) + dev->features |= NETIF_F_HIGHDMA; + + error = register_netdev(dev); + if (error) { + printk(KERN_ERR "sk98lin: Could not register device" + " for second port. 
(%d)\n", error); + free_netdev(dev); + goto single_port; + } + + pAC->dev[1] = dev; + memcpy(&dev->dev_addr, + &pAC->Addr.Net[1].CurrentMacAddress, 6); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + printk("%s: %s\n", dev->name, DeviceStr); + printk(" PrefPort:B RlmtMode:Dual Check Link State\n"); + } + +single_port: + + /* Save the hardware revision */ + pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) + + (pAC->GIni.GIPciHwRev & 0x0F); + + /* Set driver globals */ + pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME; + pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE; + + memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA)); + memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA)); + + return 0; + + out_free_resources: + FreeResources(dev); + out_free_netdev: + free_netdev(dev); + out_disable_device: + pci_disable_device(pdev); + out: + return error; +} + +static void __devexit skge_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + struct net_device *otherdev = pAC->dev[1]; + + unregister_netdev(dev); + + SkGeYellowLED(pAC, pAC->IoBase, 0); + + if (pAC->BoardLevel == SK_INIT_RUN) { + SK_EVPARA EvPara; + unsigned long Flags; + + /* board is still alive */ + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + EvPara.Para32[0] = 0; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + EvPara.Para32[0] = 1; + EvPara.Para32[1] = -1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara); + SkEventDispatcher(pAC, pAC->IoBase); + /* disable interrupts */ + SK_OUT32(pAC->IoBase, B0_IMSK, 0); + SkGeDeInit(pAC, pAC->IoBase); + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + pAC->BoardLevel = SK_INIT_DATA; + /* We do NOT check here, if IRQ was pending, of course*/ + } + + if (pAC->BoardLevel == SK_INIT_IO) { + /* board is still alive */ + SkGeDeInit(pAC, pAC->IoBase); + pAC->BoardLevel = SK_INIT_DATA; + } + + FreeResources(dev); + free_netdev(dev); + if (otherdev != dev) + free_netdev(otherdev); + kfree(pAC); +} + +#ifdef CONFIG_PM +static int skge_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + struct net_device *otherdev = pAC->dev[1]; + + if (netif_running(dev)) { + netif_carrier_off(dev); + DoPrintInterfaceChange = SK_FALSE; + SkDrvDeInitAdapter(pAC, 0); /* performs SkGeClose */ + netif_device_detach(dev); + } + if (otherdev != dev) { + if (netif_running(otherdev)) { + netif_carrier_off(otherdev); + DoPrintInterfaceChange = SK_FALSE; + SkDrvDeInitAdapter(pAC, 1); /* performs SkGeClose */ + netif_device_detach(otherdev); + } + } + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + if (pAC->AllocFlag & SK_ALLOC_IRQ) { + free_irq(dev->irq, dev); + } + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int skge_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + DEV_NET *pNet = netdev_priv(dev); + SK_AC *pAC = pNet->pAC; + struct net_device *otherdev = pAC->dev[1]; + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) { + printk(KERN_WARNING "sk98lin: unable to enable device %s " + "in resume\n", dev->name); + goto err_out; + } + pci_set_master(pdev); + if (pAC->GIni.GIMacsFound == 2) + ret = request_irq(dev->irq, SkGeIsr, 
IRQF_SHARED, "sk98lin", dev); + else + ret = request_irq(dev->irq, SkGeIsrOnePort, IRQF_SHARED, "sk98lin", dev); + if (ret) { + printk(KERN_WARNING "sk98lin: unable to acquire IRQ %d\n", dev->irq); + ret = -EBUSY; + goto err_out_disable_pdev; + } + + netif_device_attach(dev); + if (netif_running(dev)) { + DoPrintInterfaceChange = SK_FALSE; + SkDrvInitAdapter(pAC, 0); /* first device */ + } + if (otherdev != dev) { + netif_device_attach(otherdev); + if (netif_running(otherdev)) { + DoPrintInterfaceChange = SK_FALSE; + SkDrvInitAdapter(pAC, 1); /* second device */ + } + } + + return 0; + +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + pAC->AllocFlag &= ~SK_ALLOC_IRQ; + dev->irq = 0; + return ret; +} +#else +#define skge_suspend NULL +#define skge_resume NULL +#endif + +static struct pci_device_id skge_pci_tbl[] = { +#ifdef SK98LIN_ALL_DEVICES + { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, +#endif +#ifdef GENESIS + /* Generic SysKonnect SK-98xx Gigabit Ethernet Server Adapter */ + { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, +#endif + /* Generic SysKonnect SK-98xx V2.0 Gigabit Ethernet Adapter */ + { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, +#ifdef SK98LIN_ALL_DEVICES +/* DLink card does not have valid VPD so this driver gags + * { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + */ + { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, }, + { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, +#endif + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, skge_pci_tbl); + +static struct pci_driver skge_driver = { + .name = "sk98lin", + .id_table = skge_pci_tbl, + .probe = skge_probe_one, + .remove = __devexit_p(skge_remove_one), + .suspend = skge_suspend, + .resume = skge_resume, +}; + +static int __init skge_init(void) +{ + printk(KERN_NOTICE "sk98lin: driver has been replaced by the skge driver" + " and is scheduled for removal\n"); + + return pci_register_driver(&skge_driver); +} + +static void __exit skge_exit(void) +{ + pci_unregister_driver(&skge_driver); +} + +module_init(skge_init); +module_exit(skge_exit); diff --git a/drivers/net/sk98lin/skgehwt.c b/drivers/net/sk98lin/skgehwt.c new file mode 100644 index 000000000000..db670993c2df --- /dev/null +++ b/drivers/net/sk98lin/skgehwt.c @@ -0,0 +1,171 @@ +/****************************************************************************** + * + * Name: skgehwt.c + * Project: Gigabit Ethernet Adapters, Event Scheduler Module + * Version: $Revision: 1.15 $ + * Date: $Date: 2003/09/16 13:41:23 $ + * Purpose: Hardware Timer + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +/* + * Event queue and dispatcher + */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: skgehwt.c,v 1.15 2003/09/16 13:41:23 rschmidt Exp $ (C) Marvell."; +#endif + +#include "h/skdrv1st.h" /* Driver Specific Definitions */ +#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ + +#ifdef __C2MAN__ +/* + * Hardware Timer function queue management. + */ +intro() +{} +#endif + +/* + * Prototypes of local functions. + */ +#define SK_HWT_MAX (65000) + +/* correction factor */ +#define SK_HWT_FAC (1000 * (SK_U32)pAC->GIni.GIHstClkFact / 100) + +/* + * Initialize hardware timer. + * + * Must be called during init level 1. + */ +void SkHwtInit( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc) /* IoContext */ +{ + pAC->Hwt.TStart = 0 ; + pAC->Hwt.TStop = 0 ; + pAC->Hwt.TActive = SK_FALSE; + + SkHwtStop(pAC, Ioc); +} + +/* + * + * Start hardware timer (clock ticks are 16us). + * + */ +void SkHwtStart( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc, /* IoContext */ +SK_U32 Time) /* Time in units of 16us to load the timer with. */ +{ + SK_U32 Cnt; + + if (Time > SK_HWT_MAX) + Time = SK_HWT_MAX; + + pAC->Hwt.TStart = Time; + pAC->Hwt.TStop = 0L; + + Cnt = Time; + + /* + * if time < 16 us + * time = 16 us + */ + if (!Cnt) { + Cnt++; + } + + SK_OUT32(Ioc, B2_TI_INI, Cnt * SK_HWT_FAC); + + SK_OUT16(Ioc, B2_TI_CTRL, TIM_START); /* Start timer. */ + + pAC->Hwt.TActive = SK_TRUE; +} + +/* + * Stop hardware timer. + * and clear the timer IRQ + */ +void SkHwtStop( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc) /* IoContext */ +{ + SK_OUT16(Ioc, B2_TI_CTRL, TIM_STOP); + + SK_OUT16(Ioc, B2_TI_CTRL, TIM_CLR_IRQ); + + pAC->Hwt.TActive = SK_FALSE; +} + + +/* + * Stop hardware timer and read time elapsed since last start. + * + * returns + * The elapsed time since last start in units of 16us. + * + */ +SK_U32 SkHwtRead( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc) /* IoContext */ +{ + SK_U32 TRead; + SK_U32 IStatus; + + if (pAC->Hwt.TActive) { + + SkHwtStop(pAC, Ioc); + + SK_IN32(Ioc, B2_TI_VAL, &TRead); + TRead /= SK_HWT_FAC; + + SK_IN32(Ioc, B0_ISRC, &IStatus); + + /* Check if timer expired (or wraped around) */ + if ((TRead > pAC->Hwt.TStart) || (IStatus & IS_TIMINT)) { + + SkHwtStop(pAC, Ioc); + + pAC->Hwt.TStop = pAC->Hwt.TStart; + } + else { + + pAC->Hwt.TStop = pAC->Hwt.TStart - TRead; + } + } + return(pAC->Hwt.TStop); +} + +/* + * interrupt source= timer + */ +void SkHwtIsr( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc) /* IoContext */ +{ + SkHwtStop(pAC, Ioc); + + pAC->Hwt.TStop = pAC->Hwt.TStart; + + SkTimerDone(pAC, Ioc); +} + +/* End of file */ diff --git a/drivers/net/sk98lin/skgeinit.c b/drivers/net/sk98lin/skgeinit.c new file mode 100644 index 000000000000..67f1d6a5c15d --- /dev/null +++ b/drivers/net/sk98lin/skgeinit.c @@ -0,0 +1,2005 @@ +/****************************************************************************** + * + * Name: skgeinit.c + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.97 $ + * Date: $Date: 2003/10/02 16:45:31 $ + * Purpose: Contains functions to initialize the adapter + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. 
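A note on the unit used by the timer module above (skgehwt.c): SkHwtStart() takes its Time argument in 16 us steps and clamps it to SK_HWT_MAX (65000), so the longest programmable interval is 65000 * 16 us, roughly 1.04 s. SkHwtRead() reports the elapsed time as TStart - TRead in the same 16 us units (or the full TStart if the timer already expired), so a caller that wants microseconds has to scale the result itself; the helper below is only an illustration, not part of the driver:

    /* illustrative only: convert skgehwt 16-us ticks to microseconds */
    static inline unsigned long sk_hwt_ticks_to_us(unsigned long ticks)
    {
    	return ticks * 16;	/* e.g. SK_HWT_MAX (65000) -> 1040000 us, about 1.04 s */
    }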
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +#include "h/skdrv1st.h" +#include "h/skdrv2nd.h" + +/* global variables ***********************************************************/ + +/* local variables ************************************************************/ + +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: skgeinit.c,v 1.97 2003/10/02 16:45:31 rschmidt Exp $ (C) Marvell."; +#endif + +struct s_QOffTab { + int RxQOff; /* Receive Queue Address Offset */ + int XsQOff; /* Sync Tx Queue Address Offset */ + int XaQOff; /* Async Tx Queue Address Offset */ +}; +static struct s_QOffTab QOffTab[] = { + {Q_R1, Q_XS1, Q_XA1}, {Q_R2, Q_XS2, Q_XA2} +}; + +struct s_Config { + char ScanString[8]; + SK_U32 Value; +}; + +static struct s_Config OemConfig = { + {'O','E','M','_','C','o','n','f'}, +#ifdef SK_OEM_CONFIG + OEM_CONFIG_VALUE, +#else + 0, +#endif +}; + +/****************************************************************************** + * + * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings + * + * Description: + * Enable or disable the descriptor polling of the transmit descriptor + * ring(s) (TxD) for port 'Port'. + * The new configuration is *not* saved over any SkGeStopPort() and + * SkGeInitPort() calls. + * + * Returns: + * nothing + */ +void SkGePollTxD( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL PollTxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */ +{ + SK_GEPORT *pPrt; + SK_U32 DWord; + + pPrt = &pAC->GIni.GP[Port]; + + DWord = (SK_U32)(PollTxD ? CSR_ENA_POL : CSR_DIS_POL); + + if (pPrt->PXSQSize != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), DWord); + } + + if (pPrt->PXAQSize != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), DWord); + } +} /* SkGePollTxD */ + + +/****************************************************************************** + * + * SkGeYellowLED() - Switch the yellow LED on or off. + * + * Description: + * Switch the yellow LED on or off. + * + * Note: + * This function may be called any time after SkGeInit(Level 1). + * + * Returns: + * nothing + */ +void SkGeYellowLED( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int State) /* yellow LED state, 0 = OFF, 0 != ON */ +{ + if (State == 0) { + /* Switch yellow LED OFF */ + SK_OUT8(IoC, B0_LED, LED_STAT_OFF); + } + else { + /* Switch yellow LED ON */ + SK_OUT8(IoC, B0_LED, LED_STAT_ON); + } +} /* SkGeYellowLED */ + + +#if (!defined(SK_SLIM) || defined(GENESIS)) +/****************************************************************************** + * + * SkGeXmitLED() - Modify the Operational Mode of a transmission LED. + * + * Description: + * The Rx or Tx LED which is specified by 'Led' will be + * enabled, disabled or switched on in test mode. + * + * Note: + * 'Led' must contain the address offset of the LEDs INI register. 
+ * + * Usage: + * SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA); + * + * Returns: + * nothing + */ +void SkGeXmitLED( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Led, /* offset to the LED Init Value register */ +int Mode) /* Mode may be SK_LED_DIS, SK_LED_ENA, SK_LED_TST */ +{ + SK_U32 LedIni; + + switch (Mode) { + case SK_LED_ENA: + LedIni = SK_XMIT_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100; + SK_OUT32(IoC, Led + XMIT_LED_INI, LedIni); + SK_OUT8(IoC, Led + XMIT_LED_CTRL, LED_START); + break; + case SK_LED_TST: + SK_OUT8(IoC, Led + XMIT_LED_TST, LED_T_ON); + SK_OUT32(IoC, Led + XMIT_LED_CNT, 100); + SK_OUT8(IoC, Led + XMIT_LED_CTRL, LED_START); + break; + case SK_LED_DIS: + default: + /* + * Do NOT stop the LED Timer here. The LED might be + * in on state. But it needs to go off. + */ + SK_OUT32(IoC, Led + XMIT_LED_CNT, 0); + SK_OUT8(IoC, Led + XMIT_LED_TST, LED_T_OFF); + break; + } + + /* + * 1000BT: The Transmit LED is driven by the PHY. + * But the default LED configuration is used for + * Level One and Broadcom PHYs. + * (Broadcom: It may be that PHY_B_PEC_EN_LTR has to be set.) + * (In this case it has to be added here. But we will see. XXX) + */ +} /* SkGeXmitLED */ +#endif /* !SK_SLIM || GENESIS */ + + +/****************************************************************************** + * + * DoCalcAddr() - Calculates the start and the end address of a queue. + * + * Description: + * This function calculates the start and the end address of a queue. + * Afterwards the 'StartVal' is incremented to the next start position. + * If the port is already initialized the calculated values + * will be checked against the configured values and an + * error will be returned, if they are not equal. + * If the port is not initialized the values will be written to + * *StartAdr and *EndAddr. + * + * Returns: + * 0: success + * 1: configuration error + */ +static int DoCalcAddr( +SK_AC *pAC, /* adapter context */ +SK_GEPORT SK_FAR *pPrt, /* port index */ +int QuSize, /* size of the queue to configure in kB */ +SK_U32 SK_FAR *StartVal, /* start value for address calculation */ +SK_U32 SK_FAR *QuStartAddr,/* start addr to calculate */ +SK_U32 SK_FAR *QuEndAddr) /* end address to calculate */ +{ + SK_U32 EndVal; + SK_U32 NextStart; + int Rtv; + + Rtv = 0; + if (QuSize == 0) { + EndVal = *StartVal; + NextStart = EndVal; + } + else { + EndVal = *StartVal + ((SK_U32)QuSize * 1024) - 1; + NextStart = EndVal + 1; + } + + if (pPrt->PState >= SK_PRT_INIT) { + if (*StartVal != *QuStartAddr || EndVal != *QuEndAddr) { + Rtv = 1; + } + } + else { + *QuStartAddr = *StartVal; + *QuEndAddr = EndVal; + } + + *StartVal = NextStart; + return(Rtv); +} /* DoCalcAddr */ + +/****************************************************************************** + * + * SkGeInitAssignRamToQueues() - allocate default queue sizes + * + * Description: + * This function assigns the memory to the different queues and ports. + * When DualNet is set to SK_TRUE all ports get the same amount of memory. + * Otherwise the first port gets most of the memory and all the + * other ports just the required minimum. 
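As a concrete (hypothetical) example of the split described above, take an adapter with 1024 kB of packet RAM, two MACs and port 0 active in RLMT mode, assuming 16 kB minimum Rx/Tx queue sizes and an 80 % receive quota (the real numbers come from SK_MIN_RXQ_SIZE, SK_MIN_TXQ_SIZE and RAM_QUOTA_RX, and ROUND_QUEUE_SIZE_KB rounding is ignored here):

    standby port 1:  PRxQSize = 16 kB, PXAQSize = 16 kB, PXSQSize = 0   ->  32 kB used
    active port 0:   1024 - 32 - (16 + 16) = 960 kB left to distribute
                     PRxQSize = 80% of 960 + 16 = 784 kB
                     PXAQSize = (960 - 768) + 16 = 208 kB, PXSQSize = 0
    total assigned:  32 + 784 + 208 = 1024 kB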
+ * This function can only be called when pAC->GIni.GIRamSize and + * pAC->GIni.GIMacsFound have been initialized, usually this happens + * at init level 1 + * + * Returns: + * 0 - ok + * 1 - invalid input values + * 2 - not enough memory + */ + +int SkGeInitAssignRamToQueues( +SK_AC *pAC, /* Adapter context */ +int ActivePort, /* Active Port in RLMT mode */ +SK_BOOL DualNet) /* adapter context */ +{ + int i; + int UsedKilobytes; /* memory already assigned */ + int ActivePortKilobytes; /* memory available for active port */ + SK_GEPORT *pGePort; + + UsedKilobytes = 0; + + if (ActivePort >= pAC->GIni.GIMacsFound) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("SkGeInitAssignRamToQueues: ActivePort (%d) invalid\n", + ActivePort)); + return(1); + } + if (((pAC->GIni.GIMacsFound * (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE)) + + ((RAM_QUOTA_SYNC == 0) ? 0 : SK_MIN_TXQ_SIZE)) > pAC->GIni.GIRamSize) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("SkGeInitAssignRamToQueues: Not enough memory (%d)\n", + pAC->GIni.GIRamSize)); + return(2); + } + + if (DualNet) { + /* every port gets the same amount of memory */ + ActivePortKilobytes = pAC->GIni.GIRamSize / pAC->GIni.GIMacsFound; + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + + pGePort = &pAC->GIni.GP[i]; + + /* take away the minimum memory for active queues */ + ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE); + + /* receive queue gets the minimum + 80% of the rest */ + pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB(( + ActivePortKilobytes * (unsigned long) RAM_QUOTA_RX) / 100)) + + SK_MIN_RXQ_SIZE; + + ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE); + + /* synchronous transmit queue */ + pGePort->PXSQSize = 0; + + /* asynchronous transmit queue */ + pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes + + SK_MIN_TXQ_SIZE); + } + } + else { + /* Rlmt Mode or single link adapter */ + + /* Set standby queue size defaults for all standby ports */ + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + + if (i != ActivePort) { + pGePort = &pAC->GIni.GP[i]; + + pGePort->PRxQSize = SK_MIN_RXQ_SIZE; + pGePort->PXAQSize = SK_MIN_TXQ_SIZE; + pGePort->PXSQSize = 0; + + /* Count used RAM */ + UsedKilobytes += pGePort->PRxQSize + pGePort->PXAQSize; + } + } + /* what's left? */ + ActivePortKilobytes = pAC->GIni.GIRamSize - UsedKilobytes; + + /* assign it to the active port */ + /* first take away the minimum memory */ + ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE); + pGePort = &pAC->GIni.GP[ActivePort]; + + /* receive queue get's the minimum + 80% of the rest */ + pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB((ActivePortKilobytes * + (unsigned long) RAM_QUOTA_RX) / 100)) + SK_MIN_RXQ_SIZE; + + ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE); + + /* synchronous transmit queue */ + pGePort->PXSQSize = 0; + + /* asynchronous transmit queue */ + pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes) + + SK_MIN_TXQ_SIZE; + } +#ifdef VCPU + VCPUprintf(0, "PRxQSize=%u, PXSQSize=%u, PXAQSize=%u\n", + pGePort->PRxQSize, pGePort->PXSQSize, pGePort->PXAQSize); +#endif /* VCPU */ + + return(0); +} /* SkGeInitAssignRamToQueues */ + +/****************************************************************************** + * + * SkGeCheckQSize() - Checks the Adapters Queue Size Configuration + * + * Description: + * This function verifies the Queue Size Configuration specified + * in the variables PRxQSize, PXSQSize, and PXAQSize of all + * used ports. 
These requirements must be fulfilled to have a valid configuration: + * - The size of all queues must not exceed GIRamSize. + * - The queue sizes must be specified in units of 8 kB. + * - The size of Rx queues of available ports must not be + * smaller than 16 kB. + * - The size of at least one Tx queue (synch. or asynch.) + * of available ports must not be smaller than 16 kB + * when Jumbo Frames are used. + * - The RAM start and end addresses must not be changed + * for ports which are already initialized. + * Furthermore SkGeCheckQSize() defines the Start and End Addresses + * of all ports and stores them into the HWAC port structure. + * + * Returns: + * 0: Queue Size Configuration valid + * 1: Queue Size Configuration invalid + */ +static int SkGeCheckQSize( +SK_AC *pAC, /* adapter context */ +int Port) /* port index */ +{ + SK_GEPORT *pPrt; + int i; + int Rtv; + int Rtv2; + SK_U32 StartAddr; +#ifndef SK_SLIM + int UsedMem; /* total memory used (max. found ports) */ +#endif + + Rtv = 0; + +#ifndef SK_SLIM + + UsedMem = 0; + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + pPrt = &pAC->GIni.GP[i]; + + if ((pPrt->PRxQSize & QZ_UNITS) != 0 || + (pPrt->PXSQSize & QZ_UNITS) != 0 || + (pPrt->PXAQSize & QZ_UNITS) != 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG); + return(1); + } + + if (i == Port && pPrt->PRxQSize < SK_MIN_RXQ_SIZE) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E011, SKERR_HWI_E011MSG); + return(1); + } + + /* + * the size of at least one Tx queue (synch. or asynch.) has to be > 0. + * if Jumbo Frames are used, this size has to be >= 16 kB. + */ + if ((i == Port && pPrt->PXSQSize == 0 && pPrt->PXAQSize == 0) || + (pAC->GIni.GIPortUsage == SK_JUMBO_LINK && + ((pPrt->PXSQSize > 0 && pPrt->PXSQSize < SK_MIN_TXQ_SIZE) || + (pPrt->PXAQSize > 0 && pPrt->PXAQSize < SK_MIN_TXQ_SIZE)))) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E023, SKERR_HWI_E023MSG); + return(1); + } + + UsedMem += pPrt->PRxQSize + pPrt->PXSQSize + pPrt->PXAQSize; + } + + if (UsedMem > pAC->GIni.GIRamSize) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG); + return(1); + } +#endif /* !SK_SLIM */ + + /* Now start address calculation */ + StartAddr = pAC->GIni.GIRamOffs; + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + pPrt = &pAC->GIni.GP[i]; + + /* Calculate/Check values for the receive queue */ + Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PRxQSize, &StartAddr, + &pPrt->PRxQRamStart, &pPrt->PRxQRamEnd); + Rtv |= Rtv2; + + /* Calculate/Check values for the synchronous Tx queue */ + Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXSQSize, &StartAddr, + &pPrt->PXsQRamStart, &pPrt->PXsQRamEnd); + Rtv |= Rtv2; + + /* Calculate/Check values for the asynchronous Tx queue */ + Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXAQSize, &StartAddr, + &pPrt->PXaQRamStart, &pPrt->PXaQRamEnd); + Rtv |= Rtv2; + + if (Rtv) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E013, SKERR_HWI_E013MSG); + return(1); + } + } + + return(0); +} /* SkGeCheckQSize */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkGeInitMacArb() - Initialize the MAC Arbiter + * + * Description: + * This function initializes the MAC Arbiter. + * It must not be called if there is still an + * initialized or active port. 
+ * + * Returns: + * nothing + */ +static void SkGeInitMacArb( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ + /* release local reset */ + SK_OUT16(IoC, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure timeout values */ + SK_OUT8(IoC, B3_MA_TOINI_RX1, SK_MAC_TO_53); + SK_OUT8(IoC, B3_MA_TOINI_RX2, SK_MAC_TO_53); + SK_OUT8(IoC, B3_MA_TOINI_TX1, SK_MAC_TO_53); + SK_OUT8(IoC, B3_MA_TOINI_TX2, SK_MAC_TO_53); + + SK_OUT8(IoC, B3_MA_RCINI_RX1, 0); + SK_OUT8(IoC, B3_MA_RCINI_RX2, 0); + SK_OUT8(IoC, B3_MA_RCINI_TX1, 0); + SK_OUT8(IoC, B3_MA_RCINI_TX2, 0); + + /* recovery values are needed for XMAC II Rev. B2 only */ + /* Fast Output Enable Mode was intended to use with Rev. B2, but now? */ + + /* + * There is no start or enable button to push, therefore + * the MAC arbiter is configured and enabled now. + */ +} /* SkGeInitMacArb */ + + +/****************************************************************************** + * + * SkGeInitPktArb() - Initialize the Packet Arbiter + * + * Description: + * This function initializes the Packet Arbiter. + * It must not be called if there is still an + * initialized or active port. + * + * Returns: + * nothing + */ +static void SkGeInitPktArb( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ + /* release local reset */ + SK_OUT16(IoC, B3_PA_CTRL, PA_RST_CLR); + + /* configure timeout values */ + SK_OUT16(IoC, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); + SK_OUT16(IoC, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); + SK_OUT16(IoC, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); + SK_OUT16(IoC, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); + + /* + * enable timeout timers if jumbo frames not used + * NOTE: the packet arbiter timeout interrupt is needed for + * half duplex hangup workaround + */ + if (pAC->GIni.GIPortUsage != SK_JUMBO_LINK) { + if (pAC->GIni.GIMacsFound == 1) { + SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1); + } + else { + SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1 | PA_ENA_TO_TX2); + } + } +} /* SkGeInitPktArb */ +#endif /* GENESIS */ + + +/****************************************************************************** + * + * SkGeInitMacFifo() - Initialize the MAC FIFOs + * + * Description: + * Initialize all MAC FIFOs of the specified port + * + * Returns: + * nothing + */ +static void SkGeInitMacFifo( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 Word; +#ifdef VCPU + SK_U32 DWord; +#endif /* VCPU */ + /* + * For each FIFO: + * - release local reset + * - use default value for MAC FIFO size + * - setup defaults for the control register + * - enable the FIFO + */ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* Configure Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_RX_CTRL_DEF); + SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Enable frame flushing if jumbo frames used */ + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* set Rx GMAC FIFO Flush Mask */ + SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_MSK), (SK_U16)RX_FF_FL_DEF_MSK); + + Word = (SK_U16)GMF_RX_CTRL_DEF; + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. 
A0 only */ + if (pAC->GIni.GIYukonLite && pAC->GIni.GIChipId == CHIP_ID_YUKON) { + + Word &= ~GMF_RX_F_FL_ON; + } + + /* Configure Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), Word); + + /* set Rx GMAC FIFO Flush Threshold (default: 0x0a -> 56 bytes) */ + SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); + + /* Configure Tx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U16)GMF_TX_CTRL_DEF); + +#ifdef VCPU + SK_IN32(IoC, MR_ADDR(Port, RX_GMF_AF_THR), &DWord); + SK_IN32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), &DWord); +#endif /* VCPU */ + + /* set Tx GMAC FIFO Almost Empty Threshold */ +/* SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), 0); */ + } +#endif /* YUKON */ + +} /* SkGeInitMacFifo */ + +#ifdef SK_LNK_SYNC_CNT +/****************************************************************************** + * + * SkGeLoadLnkSyncCnt() - Load the Link Sync Counter and starts counting + * + * Description: + * This function starts the Link Sync Counter of the specified + * port and enables the generation of an Link Sync IRQ. + * The Link Sync Counter may be used to detect an active link, + * if autonegotiation is not used. + * + * Note: + * o To ensure receiving the Link Sync Event the LinkSyncCounter + * should be initialized BEFORE clearing the XMAC's reset! + * o Enable IS_LNK_SYNC_M1 and IS_LNK_SYNC_M2 after calling this + * function. + * + * Returns: + * nothing + */ +void SkGeLoadLnkSyncCnt( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U32 CntVal) /* Counter value */ +{ + SK_U32 OrgIMsk; + SK_U32 NewIMsk; + SK_U32 ISrc; + SK_BOOL IrqPend; + + /* stop counter */ + SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_STOP); + + /* + * ASIC problem: + * Each time starting the Link Sync Counter an IRQ is generated + * by the adapter. See problem report entry from 21.07.98 + * + * Workaround: Disable Link Sync IRQ and clear the unexpeced IRQ + * if no IRQ is already pending. + */ + IrqPend = SK_FALSE; + SK_IN32(IoC, B0_ISRC, &ISrc); + SK_IN32(IoC, B0_IMSK, &OrgIMsk); + if (Port == MAC_1) { + NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M1; + if ((ISrc & IS_LNK_SYNC_M1) != 0) { + IrqPend = SK_TRUE; + } + } + else { + NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M2; + if ((ISrc & IS_LNK_SYNC_M2) != 0) { + IrqPend = SK_TRUE; + } + } + if (!IrqPend) { + SK_OUT32(IoC, B0_IMSK, NewIMsk); + } + + /* load counter */ + SK_OUT32(IoC, MR_ADDR(Port, LNK_SYNC_INI), CntVal); + + /* start counter */ + SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_START); + + if (!IrqPend) { + /* clear the unexpected IRQ, and restore the interrupt mask */ + SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_CLR_IRQ); + SK_OUT32(IoC, B0_IMSK, OrgIMsk); + } +} /* SkGeLoadLnkSyncCnt*/ +#endif /* SK_LNK_SYNC_CNT */ + +#if defined(SK_DIAG) || defined(SK_CFG_SYNC) +/****************************************************************************** + * + * SkGeCfgSync() - Configure synchronous bandwidth for this port. + * + * Description: + * This function may be used to configure synchronous bandwidth + * to the specified port. This may be done any time after + * initializing the port. The configuration values are NOT saved + * in the HWAC port structure and will be overwritten any + * time when stopping and starting the port. + * Any values for the synchronous configuration will be ignored + * if the size of the synchronous queue is zero! 
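Because 'IntTime' is expressed in 8 ns units and 'LimCount' in bytes transferred per interval, the synchronous bandwidth that results from a call is simply LimCount / (IntTime * 8 ns). A worked example with made-up values (they only have to satisfy the LimCount <= IntTime check in the function, and a synchronous queue must have been configured on the port):

    /* hypothetical values: 10000 * 8 ns = 80 us interval, 1000 bytes per interval */
    /* -> about 12.5 MB/s of reserved synchronous bandwidth on this port           */
    SkGeCfgSync(pAC, IoC, Port, 10000, 1000, TXA_ENA_ALLOC);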
+ * + * The default configuration for the synchronous service is + * TXA_ENA_FSYNC. This means if the size of + * the synchronous queue is not zero but no specific + * synchronous bandwidth is configured, the synchronous queue + * will always have the 'unlimited' transmit priority! + * + * This mode will be restored if the synchronous bandwidth is + * deallocated ('IntTime' = 0 and 'LimCount' = 0). + * + * Returns: + * 0: success + * 1: parameter configuration error + * 2: try to configure quality of service although no + * synchronous queue is configured + */ +int SkGeCfgSync( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U32 IntTime, /* Interval Timer Value in units of 8ns */ +SK_U32 LimCount, /* Number of bytes to transfer during IntTime */ +int SyncMode) /* Sync Mode: TXA_ENA_ALLOC | TXA_DIS_ALLOC | 0 */ +{ + int Rtv; + + Rtv = 0; + + /* check the parameters */ + if (LimCount > IntTime || + (LimCount == 0 && IntTime != 0) || + (LimCount != 0 && IntTime == 0)) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG); + return(1); + } + + if (pAC->GIni.GP[Port].PXSQSize == 0) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E009, SKERR_HWI_E009MSG); + return(2); + } + + /* calculate register values */ + IntTime = (IntTime / 2) * pAC->GIni.GIHstClkFact / 100; + LimCount = LimCount / 8; + + if (IntTime > TXA_MAX_VAL || LimCount > TXA_MAX_VAL) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG); + return(1); + } + + /* + * - Enable 'Force Sync' to ensure the synchronous queue + * has the priority while configuring the new values. + * - Also 'disable alloc' to ensure the settings comply + * with the SyncMode parameter. + * - Disable 'Rate Control' to configure the new values. + * - write IntTime and LimCount + * - start 'Rate Control' and disable 'Force Sync' + * if Interval Timer or Limit Counter not zero. + */ + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), + TXA_ENA_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), IntTime); + SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), LimCount); + + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), + (SK_U8)(SyncMode & (TXA_ENA_ALLOC | TXA_DIS_ALLOC))); + + if (IntTime != 0 || LimCount != 0) { + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_DIS_FSYNC | TXA_START_RC); + } + + return(0); +} /* SkGeCfgSync */ +#endif /* SK_DIAG || SK_CFG_SYNC*/ + + +/****************************************************************************** + * + * DoInitRamQueue() - Initialize the RAM Buffer Address of a single Queue + * + * Description: + * If the queue is used, enable and initialize it. + * Make sure the queue is still reset, if it is not used. 
+ * + * Returns: + * nothing + */ +static void DoInitRamQueue( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int QuIoOffs, /* Queue IO Address Offset */ +SK_U32 QuStartAddr, /* Queue Start Address */ +SK_U32 QuEndAddr, /* Queue End Address */ +int QuType) /* Queue Type (SK_RX_SRAM_Q|SK_RX_BRAM_Q|SK_TX_RAM_Q) */ +{ + SK_U32 RxUpThresVal; + SK_U32 RxLoThresVal; + + if (QuStartAddr != QuEndAddr) { + /* calculate thresholds, assume we have a big Rx queue */ + RxUpThresVal = (QuEndAddr + 1 - QuStartAddr - SK_RB_ULPP) / 8; + RxLoThresVal = (QuEndAddr + 1 - QuStartAddr - SK_RB_LLPP_B)/8; + + /* build HW address format */ + QuStartAddr = QuStartAddr / 8; + QuEndAddr = QuEndAddr / 8; + + /* release local reset */ + SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_CLR); + + /* configure addresses */ + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_START), QuStartAddr); + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_END), QuEndAddr); + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_WP), QuStartAddr); + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RP), QuStartAddr); + + switch (QuType) { + case SK_RX_SRAM_Q: + /* configure threshold for small Rx Queue */ + RxLoThresVal += (SK_RB_LLPP_B - SK_RB_LLPP_S) / 8; + + /* continue with SK_RX_BRAM_Q */ + case SK_RX_BRAM_Q: + /* write threshold for Rx Queue */ + + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_UTPP), RxUpThresVal); + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_LTPP), RxLoThresVal); + + /* the high priority threshold not used */ + break; + case SK_TX_RAM_Q: + /* + * Do NOT use Store & Forward under normal operation due to + * performance optimization (GENESIS only). + * But if Jumbo Frames are configured (XMAC Tx FIFO is only 4 kB) + * or YUKON is used ((GMAC Tx FIFO is only 1 kB) + * we NEED Store & Forward of the RAM buffer. + */ + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK || + pAC->GIni.GIYukon) { + /* enable Store & Forward Mode for the Tx Side */ + SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_STFWD); + } + break; + } + + /* set queue operational */ + SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_OP_MD); + } + else { + /* ensure the queue is still disabled */ + SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_SET); + } +} /* DoInitRamQueue */ + + +/****************************************************************************** + * + * SkGeInitRamBufs() - Initialize the RAM Buffer Queues + * + * Description: + * Initialize all RAM Buffer Queues of the specified port + * + * Returns: + * nothing + */ +static void SkGeInitRamBufs( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + int RxQType; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PRxQSize == SK_MIN_RXQ_SIZE) { + RxQType = SK_RX_SRAM_Q; /* small Rx Queue */ + } + else { + RxQType = SK_RX_BRAM_Q; /* big Rx Queue */ + } + + DoInitRamQueue(pAC, IoC, pPrt->PRxQOff, pPrt->PRxQRamStart, + pPrt->PRxQRamEnd, RxQType); + + DoInitRamQueue(pAC, IoC, pPrt->PXsQOff, pPrt->PXsQRamStart, + pPrt->PXsQRamEnd, SK_TX_RAM_Q); + + DoInitRamQueue(pAC, IoC, pPrt->PXaQOff, pPrt->PXaQRamStart, + pPrt->PXaQRamEnd, SK_TX_RAM_Q); + +} /* SkGeInitRamBufs */ + + +/****************************************************************************** + * + * SkGeInitRamIface() - Initialize the RAM Interface + * + * Description: + * This function initializes the Adapters RAM Interface. + * + * Note: + * This function is used in the diagnostics. 
+ * + * Returns: + * nothing + */ +static void SkGeInitRamIface( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ + /* release local reset */ + SK_OUT16(IoC, B3_RI_CTRL, RI_RST_CLR); + + /* configure timeout values */ + SK_OUT8(IoC, B3_RI_WTO_R1, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_WTO_XA1, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_WTO_XS1, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_RTO_R1, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_RTO_XA1, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_RTO_XS1, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_WTO_R2, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_WTO_XA2, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_WTO_XS2, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_RTO_R2, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_RTO_XA2, SK_RI_TO_53); + SK_OUT8(IoC, B3_RI_RTO_XS2, SK_RI_TO_53); + +} /* SkGeInitRamIface */ + + +/****************************************************************************** + * + * SkGeInitBmu() - Initialize the BMU state machines + * + * Description: + * Initialize all BMU state machines of the specified port + * + * Returns: + * nothing + */ +static void SkGeInitBmu( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U32 RxWm; + SK_U32 TxWm; + + pPrt = &pAC->GIni.GP[Port]; + + RxWm = SK_BMU_RX_WM; + TxWm = SK_BMU_TX_WM; + + if (!pAC->GIni.GIPciSlot64 && !pAC->GIni.GIPciClock66) { + /* for better performance */ + RxWm /= 2; + TxWm /= 2; + } + + /* Rx Queue: Release all local resets and set the watermark */ + SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_CLR_RESET); + SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_F), RxWm); + + /* + * Tx Queue: Release all local resets if the queue is used ! + * set watermark + */ + if (pPrt->PXSQSize != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_CLR_RESET); + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_F), TxWm); + } + + if (pPrt->PXAQSize != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_CLR_RESET); + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_F), TxWm); + } + /* + * Do NOT enable the descriptor poll timers here, because + * the descriptor addresses are not specified yet. + */ +} /* SkGeInitBmu */ + + +/****************************************************************************** + * + * TestStopBit() - Test the stop bit of the queue + * + * Description: + * Stopping a queue is not as simple as it seems to be. + * If descriptor polling is enabled, it may happen + * that RX/TX stop is done and SV idle is NOT set. + * In this case we have to issue another stop command. + * + * Returns: + * The queues control status register + */ +static SK_U32 TestStopBit( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int QuIoOffs) /* Queue IO Address Offset */ +{ + SK_U32 QuCsr; /* CSR contents */ + + SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr); + + if ((QuCsr & (CSR_STOP | CSR_SV_IDLE)) == 0) { + /* Stop Descriptor overridden by start command */ + SK_OUT32(IoC, Q_ADDR(QuIoOffs, Q_CSR), CSR_STOP); + + SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr); + } + + return(QuCsr); +} /* TestStopBit */ + + +/****************************************************************************** + * + * SkGeStopPort() - Stop the Rx/Tx activity of the port 'Port'. + * + * Description: + * After calling this function the descriptor rings and Rx and Tx + * queues of this port may be reconfigured. + * + * It is possible to stop the receive and transmit path separate or + * both together. + * + * Dir = SK_STOP_TX Stops the transmit path only and resets the MAC. 
+ * The receive queue is still active and + * the pending Rx frames may be still transferred + * into the RxD. + * SK_STOP_RX Stop the receive path. The tansmit path + * has to be stopped once before. + * SK_STOP_ALL SK_STOP_TX + SK_STOP_RX + * + * RstMode = SK_SOFT_RST Resets the MAC. The PHY is still alive. + * SK_HARD_RST Resets the MAC and the PHY. + * + * Example: + * 1) A Link Down event was signaled for a port. Therefore the activity + * of this port should be stopped and a hardware reset should be issued + * to enable the workaround of XMAC Errata #2. But the received frames + * should not be discarded. + * ... + * SkGeStopPort(pAC, IoC, Port, SK_STOP_TX, SK_HARD_RST); + * (transfer all pending Rx frames) + * SkGeStopPort(pAC, IoC, Port, SK_STOP_RX, SK_HARD_RST); + * ... + * + * 2) An event was issued which request the driver to switch + * the 'virtual active' link to an other already active port + * as soon as possible. The frames in the receive queue of this + * port may be lost. But the PHY must not be reset during this + * event. + * ... + * SkGeStopPort(pAC, IoC, Port, SK_STOP_ALL, SK_SOFT_RST); + * ... + * + * Extended Description: + * If SK_STOP_TX is set, + * o disable the MAC's receive and transmitter to prevent + * from sending incomplete frames + * o stop the port's transmit queues before terminating the + * BMUs to prevent from performing incomplete PCI cycles + * on the PCI bus + * - The network Rx and Tx activity and PCI Tx transfer is + * disabled now. + * o reset the MAC depending on the RstMode + * o Stop Interval Timer and Limit Counter of Tx Arbiter, + * also disable Force Sync bit and Enable Alloc bit. + * o perform a local reset of the port's Tx path + * - reset the PCI FIFO of the async Tx queue + * - reset the PCI FIFO of the sync Tx queue + * - reset the RAM Buffer async Tx queue + * - reset the RAM Buffer sync Tx queue + * - reset the MAC Tx FIFO + * o switch Link and Tx LED off, stop the LED counters + * + * If SK_STOP_RX is set, + * o stop the port's receive queue + * - The path data transfer activity is fully stopped now. + * o perform a local reset of the port's Rx path + * - reset the PCI FIFO of the Rx queue + * - reset the RAM Buffer receive queue + * - reset the MAC Rx FIFO + * o switch Rx LED off, stop the LED counter + * + * If all ports are stopped, + * o reset the RAM Interface. + * + * Notes: + * o This function may be called during the driver states RESET_PORT and + * SWITCH_PORT. + */ +void SkGeStopPort( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +int Port, /* port to stop (MAC_1 + n) */ +int Dir, /* Direction to Stop (SK_STOP_RX, SK_STOP_TX, SK_STOP_ALL) */ +int RstMode)/* Reset Mode (SK_SOFT_RST, SK_HARD_RST) */ +{ +#ifndef SK_DIAG + SK_EVPARA Para; +#endif /* !SK_DIAG */ + SK_GEPORT *pPrt; + SK_U32 DWord; + SK_U32 XsCsr; + SK_U32 XaCsr; + SK_U64 ToutStart; + int i; + int ToutCnt; + + pPrt = &pAC->GIni.GP[Port]; + + if ((Dir & SK_STOP_TX) != 0) { + /* disable receiver and transmitter */ + SkMacRxTxDisable(pAC, IoC, Port); + + /* stop both transmit queues */ + /* + * If the BMU is in the reset state CSR_STOP will terminate + * immediately. + */ + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_STOP); + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_STOP); + + ToutStart = SkOsGetTime(pAC); + ToutCnt = 0; + do { + /* + * Clear packet arbiter timeout to make sure + * this loop will terminate. + */ + SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ? 
+ PA_CLR_TO_TX1 : PA_CLR_TO_TX2)); + + /* + * If the transfer stucks at the MAC the STOP command will not + * terminate if we don't flush the XMAC's transmit FIFO ! + */ + SkMacFlushTxFifo(pAC, IoC, Port); + + XsCsr = TestStopBit(pAC, IoC, pPrt->PXsQOff); + XaCsr = TestStopBit(pAC, IoC, pPrt->PXaQOff); + + if (SkOsGetTime(pAC) - ToutStart > (SK_TICKS_PER_SEC / 18)) { + /* + * Timeout of 1/18 second reached. + * This needs to be checked at 1/18 sec only. + */ + ToutCnt++; + if (ToutCnt > 1) { + /* Might be a problem when the driver event handler + * calls StopPort again. XXX. + */ + + /* Fatal Error, Loop aborted */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E018, + SKERR_HWI_E018MSG); +#ifndef SK_DIAG + Para.Para64 = Port; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); +#endif /* !SK_DIAG */ + return; + } + /* + * Cache incoherency workaround: Assume a start command + * has been lost while sending the frame. + */ + ToutStart = SkOsGetTime(pAC); + + if ((XsCsr & CSR_STOP) != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_START); + } + if ((XaCsr & CSR_STOP) != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_START); + } + } + + /* + * Because of the ASIC problem report entry from 21.08.1998 it is + * required to wait until CSR_STOP is reset and CSR_SV_IDLE is set. + */ + } while ((XsCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE || + (XaCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE); + + /* Reset the MAC depending on the RstMode */ + if (RstMode == SK_SOFT_RST) { + SkMacSoftRst(pAC, IoC, Port); + } + else { + SkMacHardRst(pAC, IoC, Port); + } + + /* Disable Force Sync bit and Enable Alloc bit */ + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), 0L); + SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), 0L); + + /* Perform a local reset of the port's Tx path */ + + /* Reset the PCI FIFO of the async Tx queue */ + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_SET_RESET); + /* Reset the PCI FIFO of the sync Tx queue */ + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_SET_RESET); + /* Reset the RAM Buffer async Tx queue */ + SK_OUT8(IoC, RB_ADDR(pPrt->PXaQOff, RB_CTRL), RB_RST_SET); + /* Reset the RAM Buffer sync Tx queue */ + SK_OUT8(IoC, RB_ADDR(pPrt->PXsQOff, RB_CTRL), RB_RST_SET); + + /* Reset Tx MAC FIFO */ +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* Note: MFF_RST_SET does NOT reset the XMAC ! */ + SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_SET); + + /* switch Link and Tx LED off, stop the LED counters */ + /* Link LED is switched off by the RLMT and the Diag itself */ + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_DIS); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* Reset TX MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_SET); + } +#endif /* YUKON */ + } + + if ((Dir & SK_STOP_RX) != 0) { + /* + * The RX Stop Command will not terminate if no buffers + * are queued in the RxD ring. But it will always reach + * the Idle state. Therefore we can use this feature to + * stop the transfer of received packets. + */ + /* stop the port's receive queue */ + SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_STOP); + + i = 100; + do { + /* + * Clear packet arbiter timeout to make sure + * this loop will terminate + */ + SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ? 
+ PA_CLR_TO_RX1 : PA_CLR_TO_RX2)); + + DWord = TestStopBit(pAC, IoC, pPrt->PRxQOff); + + /* timeout if i==0 (bug fix for #10748) */ + if (--i == 0) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E024, + SKERR_HWI_E024MSG); + break; + } + /* + * because of the ASIC problem report entry from 21.08.98 + * it is required to wait until CSR_STOP is reset and + * CSR_SV_IDLE is set. + */ + } while ((DWord & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE); + + /* The path data transfer activity is fully stopped now */ + + /* Perform a local reset of the port's Rx path */ + + /* Reset the PCI FIFO of the Rx queue */ + SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_SET_RESET); + /* Reset the RAM Buffer receive queue */ + SK_OUT8(IoC, RB_ADDR(pPrt->PRxQOff, RB_CTRL), RB_RST_SET); + + /* Reset Rx MAC FIFO */ +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_SET); + + /* switch Rx LED off, stop the LED counter */ + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_DIS); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* Reset Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_SET); + } +#endif /* YUKON */ + } +} /* SkGeStopPort */ + + +/****************************************************************************** + * + * SkGeInit0() - Level 0 Initialization + * + * Description: + * - Initialize the BMU address offsets + * + * Returns: + * nothing + */ +static void SkGeInit0( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ + int i; + SK_GEPORT *pPrt; + + for (i = 0; i < SK_MAX_MACS; i++) { + pPrt = &pAC->GIni.GP[i]; + + pPrt->PState = SK_PRT_RESET; + pPrt->PRxQOff = QOffTab[i].RxQOff; + pPrt->PXsQOff = QOffTab[i].XsQOff; + pPrt->PXaQOff = QOffTab[i].XaQOff; + pPrt->PCheckPar = SK_FALSE; + pPrt->PIsave = 0; + pPrt->PPrevShorts = 0; + pPrt->PLinkResCt = 0; + pPrt->PAutoNegTOCt = 0; + pPrt->PPrevRx = 0; + pPrt->PPrevFcs = 0; + pPrt->PRxLim = SK_DEF_RX_WA_LIM; + pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL; + pPrt->PLinkSpeedCap = (SK_U8)SK_LSPEED_CAP_1000MBPS; + pPrt->PLinkSpeed = (SK_U8)SK_LSPEED_1000MBPS; + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_UNKNOWN; + pPrt->PLinkModeConf = (SK_U8)SK_LMODE_AUTOSENSE; + pPrt->PFlowCtrlMode = (SK_U8)SK_FLOW_MODE_SYM_OR_REM; + pPrt->PLinkCap = (SK_U8)(SK_LMODE_CAP_HALF | SK_LMODE_CAP_FULL | + SK_LMODE_CAP_AUTOHALF | SK_LMODE_CAP_AUTOFULL); + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; + pPrt->PFlowCtrlCap = (SK_U8)SK_FLOW_MODE_SYM_OR_REM; + pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE; + pPrt->PMSCap = 0; + pPrt->PMSMode = (SK_U8)SK_MS_MODE_AUTO; + pPrt->PMSStatus = (SK_U8)SK_MS_STAT_UNSET; + pPrt->PLipaAutoNeg = (SK_U8)SK_LIPA_UNKNOWN; + pPrt->PAutoNegFail = SK_FALSE; + pPrt->PHWLinkUp = SK_FALSE; + pPrt->PLinkBroken = SK_TRUE; /* See WA code */ + pPrt->PPhyPowerState = PHY_PM_OPERATIONAL_MODE; + pPrt->PMacColThres = TX_COL_DEF; + pPrt->PMacJamLen = TX_JAM_LEN_DEF; + pPrt->PMacJamIpgVal = TX_JAM_IPG_DEF; + pPrt->PMacJamIpgData = TX_IPG_JAM_DEF; + pPrt->PMacIpgData = IPG_DATA_DEF; + pPrt->PMacLimit4 = SK_FALSE; + } + + pAC->GIni.GIPortUsage = SK_RED_LINK; + pAC->GIni.GILedBlinkCtrl = (SK_U16)OemConfig.Value; + pAC->GIni.GIValIrqMask = IS_ALL_MSK; + +} /* SkGeInit0*/ + + +/****************************************************************************** + * + * SkGeInit1() - Level 1 Initialization + * + * Description: + * o Do a software reset. + * o Clear all reset bits. + * o Verify that the detected hardware is present. 
+ * Return an error if not. + * o Get the hardware configuration + * + Read the number of MACs/Ports. + * + Read the RAM size. + * + Read the PCI Revision Id. + * + Find out the adapters host clock speed + * + Read and check the PHY type + * + * Returns: + * 0: success + * 5: Unexpected PHY type detected + * 6: HW self test failed + */ +static int SkGeInit1( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ + SK_U8 Byte; + SK_U16 Word; + SK_U16 CtrlStat; + SK_U32 DWord; + int RetVal; + int i; + + RetVal = 0; + + /* save CLK_RUN bits (YUKON-Lite) */ + SK_IN16(IoC, B0_CTST, &CtrlStat); + + /* do the SW-reset */ + SK_OUT8(IoC, B0_CTST, CS_RST_SET); + + /* release the SW-reset */ + SK_OUT8(IoC, B0_CTST, CS_RST_CLR); + + /* reset all error bits in the PCI STATUS register */ + /* + * Note: PCI Cfg cycles cannot be used, because they are not + * available on some platforms after 'boot time'. + */ + SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); + + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); + SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS)); + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* release Master Reset */ + SK_OUT8(IoC, B0_CTST, CS_MRST_CLR); + +#ifdef CLK_RUN + CtrlStat |= CS_CLK_RUN_ENA; +#endif /* CLK_RUN */ + + /* restore CLK_RUN bits */ + SK_OUT16(IoC, B0_CTST, (SK_U16)(CtrlStat & + (CS_CLK_RUN_HOT | CS_CLK_RUN_RST | CS_CLK_RUN_ENA))); + + /* read Chip Identification Number */ + SK_IN8(IoC, B2_CHIP_ID, &Byte); + pAC->GIni.GIChipId = Byte; + + /* read number of MACs */ + SK_IN8(IoC, B2_MAC_CFG, &Byte); + pAC->GIni.GIMacsFound = (Byte & CFG_SNG_MAC) ? 1 : 2; + + /* get Chip Revision Number */ + pAC->GIni.GIChipRev = (SK_U8)((Byte & CFG_CHIP_R_MSK) >> 4); + + /* get diff. PCI parameters */ + SK_IN16(IoC, B0_CTST, &CtrlStat); + + /* read the adapters RAM size */ + SK_IN8(IoC, B2_E_0, &Byte); + + pAC->GIni.GIGenesis = SK_FALSE; + pAC->GIni.GIYukon = SK_FALSE; + pAC->GIni.GIYukonLite = SK_FALSE; + +#ifdef GENESIS + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + + pAC->GIni.GIGenesis = SK_TRUE; + + if (Byte == (SK_U8)3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + pAC->GIni.GIRamSize = 1024; + pAC->GIni.GIRamOffs = (SK_U32)512 * 1024; + } + else { + pAC->GIni.GIRamSize = (int)Byte * 512; + pAC->GIni.GIRamOffs = 0; + } + /* all GE adapters work with 53.125 MHz host clock */ + pAC->GIni.GIHstClkFact = SK_FACT_53; + + /* set Descr. Poll Timer Init Value to 250 ms */ + pAC->GIni.GIPollTimerVal = + SK_DPOLL_DEF * (SK_U32)pAC->GIni.GIHstClkFact / 100; + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIChipId != CHIP_ID_GENESIS) { + + pAC->GIni.GIYukon = SK_TRUE; + + pAC->GIni.GIRamSize = (Byte == (SK_U8)0) ? 128 : (int)Byte * 4; + + pAC->GIni.GIRamOffs = 0; + + /* WA for chip Rev. A */ + pAC->GIni.GIWolOffs = (pAC->GIni.GIChipId == CHIP_ID_YUKON && + pAC->GIni.GIChipRev == 0) ? WOL_REG_OFFS : 0; + + /* get PM Capabilities of PCI config space */ + SK_IN16(IoC, PCI_C(PCI_PM_CAP_REG), &Word); + + /* check if VAUX is available */ + if (((CtrlStat & CS_VAUX_AVAIL) != 0) && + /* check also if PME from D3cold is set */ + ((Word & PCI_PME_D3C_SUP) != 0)) { + /* set entry in GE init struct */ + pAC->GIni.GIVauxAvail = SK_TRUE; + } + + if (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE) { + /* this is Rev. 
A1 */ + pAC->GIni.GIYukonLite = SK_TRUE; + } + else { + /* save Flash-Address Register */ + SK_IN32(IoC, B2_FAR, &DWord); + + /* test Flash-Address Register */ + SK_OUT8(IoC, B2_FAR + 3, 0xff); + SK_IN8(IoC, B2_FAR + 3, &Byte); + + if (Byte != 0) { + /* this is Rev. A0 */ + pAC->GIni.GIYukonLite = SK_TRUE; + + /* restore Flash-Address Register */ + SK_OUT32(IoC, B2_FAR, DWord); + } + } + + /* switch power to VCC (WA for VAUX problem) */ + SK_OUT8(IoC, B0_POWER_CTRL, (SK_U8)(PC_VAUX_ENA | PC_VCC_ENA | + PC_VAUX_OFF | PC_VCC_ON)); + + /* read the Interrupt source */ + SK_IN32(IoC, B0_ISRC, &DWord); + + if ((DWord & IS_HW_ERR) != 0) { + /* read the HW Error Interrupt source */ + SK_IN32(IoC, B0_HWE_ISRC, &DWord); + + if ((DWord & IS_IRQ_SENSOR) != 0) { + /* disable HW Error IRQ */ + pAC->GIni.GIValIrqMask &= ~IS_HW_ERR; + } + } + + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + /* set GMAC Link Control reset */ + SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_SET); + + /* clear GMAC Link Control reset */ + SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } + /* all YU chips work with 78.125 MHz host clock */ + pAC->GIni.GIHstClkFact = SK_FACT_78; + + pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX; /* 215 ms */ + } +#endif /* YUKON */ + + /* check if 64-bit PCI Slot is present */ + pAC->GIni.GIPciSlot64 = (SK_BOOL)((CtrlStat & CS_BUS_SLOT_SZ) != 0); + + /* check if 66 MHz PCI Clock is active */ + pAC->GIni.GIPciClock66 = (SK_BOOL)((CtrlStat & CS_BUS_CLOCK) != 0); + + /* read PCI HW Revision Id. */ + SK_IN8(IoC, PCI_C(PCI_REV_ID), &Byte); + pAC->GIni.GIPciHwRev = Byte; + + /* read the PMD type */ + SK_IN8(IoC, B2_PMD_TYP, &Byte); + pAC->GIni.GICopperType = (SK_U8)(Byte == 'T'); + + /* read the PHY type */ + SK_IN8(IoC, B2_E_1, &Byte); + + Byte &= 0x0f; /* the PHY type is stored in the lower nibble */ + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + switch (Byte) { + case SK_PHY_XMAC: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_BCOM; + pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO | + SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_LONE; + break; + case SK_PHY_NAT: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_NAT; + break; +#endif /* OTHER_PHY */ + default: + /* ERROR: unexpected PHY type detected */ + RetVal = 5; + break; + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + if (Byte < (SK_U8)SK_PHY_MARV_COPPER) { + /* if this field is not initialized */ + Byte = (SK_U8)SK_PHY_MARV_COPPER; + + pAC->GIni.GICopperType = SK_TRUE; + } + + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_MARV; + + if (pAC->GIni.GICopperType) { + + pAC->GIni.GP[i].PLinkSpeedCap = (SK_U8)(SK_LSPEED_CAP_AUTO | + SK_LSPEED_CAP_10MBPS | SK_LSPEED_CAP_100MBPS | + SK_LSPEED_CAP_1000MBPS); + + pAC->GIni.GP[i].PLinkSpeed = (SK_U8)SK_LSPEED_AUTO; + + pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO | + SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE); + } + else { + Byte = (SK_U8)SK_PHY_MARV_FIBER; + } + } +#endif /* YUKON */ + + pAC->GIni.GP[i].PhyType = (int)Byte; + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("PHY type: %d PHY addr: %04x\n", Byte, + pAC->GIni.GP[i].PhyAddr)); + } + + /* get MAC Type & set function pointers dependent on */ +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + pAC->GIni.GIMacType = SK_MAC_XMAC; + + pAC->GIni.GIFunc.pFnMacUpdateStats = SkXmUpdateStats; + pAC->GIni.GIFunc.pFnMacStatistic = SkXmMacStatistic; + 
pAC->GIni.GIFunc.pFnMacResetCounter = SkXmResetCounter; + pAC->GIni.GIFunc.pFnMacOverflow = SkXmOverflowStatus; + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + pAC->GIni.GIMacType = SK_MAC_GMAC; + + pAC->GIni.GIFunc.pFnMacUpdateStats = SkGmUpdateStats; + pAC->GIni.GIFunc.pFnMacStatistic = SkGmMacStatistic; + pAC->GIni.GIFunc.pFnMacResetCounter = SkGmResetCounter; + pAC->GIni.GIFunc.pFnMacOverflow = SkGmOverflowStatus; + +#ifdef SPECIAL_HANDLING + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + /* check HW self test result */ + SK_IN8(IoC, B2_E_3, &Byte); + if (Byte & B2_E3_RES_MASK) { + RetVal = 6; + } + } +#endif + } +#endif /* YUKON */ + + return(RetVal); +} /* SkGeInit1 */ + + +/****************************************************************************** + * + * SkGeInit2() - Level 2 Initialization + * + * Description: + * - start the Blink Source Counter + * - start the Descriptor Poll Timer + * - configure the MAC-Arbiter + * - configure the Packet-Arbiter + * - enable the Tx Arbiters + * - enable the RAM Interface Arbiter + * + * Returns: + * nothing + */ +static void SkGeInit2( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ +#ifdef GENESIS + SK_U32 DWord; +#endif /* GENESIS */ + int i; + + /* start the Descriptor Poll Timer */ + if (pAC->GIni.GIPollTimerVal != 0) { + if (pAC->GIni.GIPollTimerVal > SK_DPOLL_MAX) { + pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX; + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E017, SKERR_HWI_E017MSG); + } + SK_OUT32(IoC, B28_DPT_INI, pAC->GIni.GIPollTimerVal); + SK_OUT8(IoC, B28_DPT_CTRL, DPT_START); + } + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* start the Blink Source Counter */ + DWord = SK_BLK_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100; + + SK_OUT32(IoC, B2_BSC_INI, DWord); + SK_OUT8(IoC, B2_BSC_CTRL, BSC_START); + + /* + * Configure the MAC Arbiter and the Packet Arbiter. + * They will be started once and never be stopped. + */ + SkGeInitMacArb(pAC, IoC); + + SkGeInitPktArb(pAC, IoC); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* start Time Stamp Timer */ + SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_START); + } +#endif /* YUKON */ + + /* enable the Tx Arbiters */ + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + SK_OUT8(IoC, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB); + } + + /* enable the RAM Interface Arbiter */ + SkGeInitRamIface(pAC, IoC); + +} /* SkGeInit2 */ + +/****************************************************************************** + * + * SkGeInit() - Initialize the GE Adapter with the specified level. + * + * Description: + * Level 0: Initialize the Module structures. + * Level 1: Generic Hardware Initialization. The IOP/MemBase pointer has + * to be set before calling this level. + * + * o Do a software reset. + * o Clear all reset bits. + * o Verify that the detected hardware is present. + * Return an error if not. + * o Get the hardware configuration + * + Set GIMacsFound with the number of MACs. + * + Store the RAM size in GIRamSize. + * + Save the PCI Revision ID in GIPciHwRev. + * o return an error + * if Number of MACs > SK_MAX_MACS + * + * After returning from Level 0 the adapter + * may be accessed with IO operations. 
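In practice the three levels are walked strictly in order and every non-zero return value is treated as fatal; a condensed sketch of that call sequence (not a verbatim copy of the probe path, error handling shortened):

    /* sketch only: bring the adapter up through all three init levels */
    static int sk98lin_bring_up(SK_AC *pAC)	/* hypothetical helper name */
    {
    	int Rtv;
    
    	if ((Rtv = SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA)) != 0)	/* level 0: data structures  */
    		return Rtv;
    	if ((Rtv = SkGeInit(pAC, pAC->IoBase, SK_INIT_IO)) != 0)	/* level 1: reset, HW config */
    		return Rtv;
    	return SkGeInit(pAC, pAC->IoBase, SK_INIT_RUN);			/* level 2: timers, arbiters */
    }

SkGeDeInit(pAC, pAC->IoBase) is the matching teardown; it stops any still-running ports and issues the software reset.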
+ * + * Level 2: start the Blink Source Counter + * + * Returns: + * 0: success + * 1: Number of MACs exceeds SK_MAX_MACS (after level 1) + * 2: Adapter not present or not accessible + * 3: Illegal initialization level + * 4: Initialization Level 1 Call missing + * 5: Unexpected PHY type detected + * 6: HW self test failed + */ +int SkGeInit( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Level) /* initialization level */ +{ + int RetVal; /* return value */ + SK_U32 DWord; + + RetVal = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("SkGeInit(Level %d)\n", Level)); + + switch (Level) { + case SK_INIT_DATA: + /* Initialization Level 0 */ + SkGeInit0(pAC, IoC); + pAC->GIni.GILevel = SK_INIT_DATA; + break; + + case SK_INIT_IO: + /* Initialization Level 1 */ + RetVal = SkGeInit1(pAC, IoC); + if (RetVal != 0) { + break; + } + + /* check if the adapter seems to be accessible */ + SK_OUT32(IoC, B2_IRQM_INI, SK_TEST_VAL); + SK_IN32(IoC, B2_IRQM_INI, &DWord); + SK_OUT32(IoC, B2_IRQM_INI, 0L); + + if (DWord != SK_TEST_VAL) { + RetVal = 2; + break; + } + + /* check if the number of GIMacsFound matches SK_MAX_MACS */ + if (pAC->GIni.GIMacsFound > SK_MAX_MACS) { + RetVal = 1; + break; + } + + /* Level 1 successfully passed */ + pAC->GIni.GILevel = SK_INIT_IO; + break; + + case SK_INIT_RUN: + /* Initialization Level 2 */ + if (pAC->GIni.GILevel != SK_INIT_IO) { +#ifndef SK_DIAG + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E002, SKERR_HWI_E002MSG); +#endif /* !SK_DIAG */ + RetVal = 4; + break; + } + SkGeInit2(pAC, IoC); + + /* Level 2 successfully passed */ + pAC->GIni.GILevel = SK_INIT_RUN; + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E003, SKERR_HWI_E003MSG); + RetVal = 3; + break; + } + + return(RetVal); +} /* SkGeInit */ + + +/****************************************************************************** + * + * SkGeDeInit() - Deinitialize the adapter + * + * Description: + * All ports of the adapter will be stopped if not already done. + * Do a software reset and switch off all LEDs. + * + * Returns: + * nothing + */ +void SkGeDeInit( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC) /* IO context */ +{ + int i; + SK_U16 Word; + +#if (!defined(SK_SLIM) && !defined(VCPU)) + /* ensure I2C is ready */ + SkI2cWaitIrq(pAC, IoC); +#endif + + /* stop all current transfer activity */ + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + if (pAC->GIni.GP[i].PState != SK_PRT_STOP && + pAC->GIni.GP[i].PState != SK_PRT_RESET) { + + SkGeStopPort(pAC, IoC, i, SK_STOP_ALL, SK_HARD_RST); + } + } + + /* Reset all bits in the PCI STATUS register */ + /* + * Note: PCI Cfg cycles cannot be used, because they are not + * available on some platforms after 'boot time'. + */ + SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); + + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); + SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS)); + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* do the reset, all LEDs are switched off now */ + SK_OUT8(IoC, B0_CTST, CS_RST_SET); + + pAC->GIni.GILevel = SK_INIT_DATA; +} /* SkGeDeInit */ + + +/****************************************************************************** + * + * SkGeInitPort() Initialize the specified port. + * + * Description: + * PRxQSize, PXSQSize, and PXAQSize has to be + * configured for the specified port before calling this function. + * The descriptor rings has to be initialized too. + * + * o (Re)configure queues of the specified port. + * o configure the MAC of the specified port. 
+ * o put ASIC and MAC(s) in operational mode. + * o initialize Rx/Tx and Sync LED + * o initialize RAM Buffers and MAC FIFOs + * + * The port is ready to connect when returning. + * + * Note: + * The MAC's Rx and Tx state machine is still disabled when returning. + * + * Returns: + * 0: success + * 1: Queue size initialization error. The configured values + * for PRxQSize, PXSQSize, or PXAQSize are invalid for one + * or more queues. The specified port was NOT initialized. + * An error log entry was generated. + * 2: The port has to be stopped before it can be initialized again. + */ +int SkGeInitPort( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port to configure */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (SkGeCheckQSize(pAC, Port) != 0) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E004, SKERR_HWI_E004MSG); + return(1); + } + + if (pPrt->PState == SK_PRT_INIT || pPrt->PState == SK_PRT_RUN) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E005, SKERR_HWI_E005MSG); + return(2); + } + + /* configuration ok, initialize the Port now */ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* initialize Rx, Tx and Link LED */ + /* + * If 1000BT Phy needs LED initialization than swap + * LED and XMAC initialization order + */ + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA); + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_ENA); + /* The Link LED is initialized by RLMT or Diagnostics itself */ + + SkXmInitMac(pAC, IoC, Port); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + SkGmInitMac(pAC, IoC, Port); + } +#endif /* YUKON */ + + /* do NOT initialize the Link Sync Counter */ + + SkGeInitMacFifo(pAC, IoC, Port); + + SkGeInitRamBufs(pAC, IoC, Port); + + if (pPrt->PXSQSize != 0) { + /* enable Force Sync bit if synchronous queue available */ + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_ENA_FSYNC); + } + + SkGeInitBmu(pAC, IoC, Port); + + /* mark port as initialized */ + pPrt->PState = SK_PRT_INIT; + + return(0); +} /* SkGeInitPort */ diff --git a/drivers/net/sk98lin/skgemib.c b/drivers/net/sk98lin/skgemib.c new file mode 100644 index 000000000000..0a6f67a7a395 --- /dev/null +++ b/drivers/net/sk98lin/skgemib.c @@ -0,0 +1,1075 @@ +/***************************************************************************** + * + * Name: skgemib.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.11 $ + * Date: $Date: 2003/09/15 13:38:12 $ + * Purpose: Private Network Management Interface Management Database + * + ****************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +/* + * PRIVATE OID handler function prototypes + */ +PNMI_STATIC int Addr(SK_AC *pAC, SK_IOC IoC, int action, + SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int CsumStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int General(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Mac8023Stat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int MacPrivateConf(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int MacPrivateStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Monitor(SK_AC *pAC, SK_IOC IoC, int action, + SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int OidStruct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Perform(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int* pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Rlmt(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int RlmtStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int SensorStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Vpd(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); + +#ifdef SK_POWER_MGMT +PNMI_STATIC int PowerManagement(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +#endif /* SK_POWER_MGMT */ + +#ifdef SK_DIAG_SUPPORT +PNMI_STATIC int DiagActions(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +#endif /* SK_DIAG_SUPPORT */ + + +/* defines *******************************************************************/ +#define ID_TABLE_SIZE (sizeof(IdTable)/sizeof(IdTable[0])) + + +/* global variables **********************************************************/ + +/* + * Table to correlate OID with handler function and index to + * hardware register stored in StatAddress if applicable. 
+ */ +PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = { + {OID_GEN_XMIT_OK, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX}, + {OID_GEN_RCV_OK, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX}, + {OID_GEN_XMIT_ERROR, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_GEN_RCV_ERROR, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_GEN_RCV_NO_BUFFER, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_GEN_DIRECTED_FRAMES_XMIT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNICAST}, + {OID_GEN_MULTICAST_FRAMES_XMIT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTICAST}, + {OID_GEN_BROADCAST_FRAMES_XMIT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_BROADCAST}, + {OID_GEN_DIRECTED_FRAMES_RCV, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_UNICAST}, + {OID_GEN_MULTICAST_FRAMES_RCV, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_MULTICAST}, + {OID_GEN_BROADCAST_FRAMES_RCV, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_BROADCAST}, + {OID_GEN_RCV_CRC_ERROR, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FCS}, + {OID_GEN_TRANSMIT_QUEUE_LENGTH, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_802_3_PERMANENT_ADDRESS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, 0}, + {OID_802_3_CURRENT_ADDRESS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, 0}, + {OID_802_3_RCV_ERROR_ALIGNMENT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FRAMING}, + {OID_802_3_XMIT_ONE_COLLISION, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_SINGLE_COL}, + {OID_802_3_XMIT_MORE_COLLISIONS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTI_COL}, + {OID_802_3_XMIT_DEFERRED, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_DEFFERAL}, + {OID_802_3_XMIT_MAX_COLLISIONS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_EXCESS_COL}, + {OID_802_3_RCV_OVERRUN, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_OVERFLOW}, + {OID_802_3_XMIT_UNDERRUN, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNDERRUN}, + {OID_802_3_XMIT_TIMES_CRS_LOST, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_CARRIER}, + {OID_802_3_XMIT_LATE_COLLISIONS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_LATE_COL}, +#ifdef SK_POWER_MGMT + {OID_PNP_CAPABILITIES, + 0, + 0, + 0, + SK_PNMI_RO, PowerManagement, 0}, + {OID_PNP_SET_POWER, + 0, + 0, + 0, + SK_PNMI_WO, PowerManagement, 0}, + {OID_PNP_QUERY_POWER, + 0, + 0, + 0, + SK_PNMI_RO, PowerManagement, 0}, + {OID_PNP_ADD_WAKE_UP_PATTERN, + 0, + 0, + 0, + SK_PNMI_WO, PowerManagement, 0}, + {OID_PNP_REMOVE_WAKE_UP_PATTERN, + 0, + 0, + 0, + SK_PNMI_WO, PowerManagement, 0}, + {OID_PNP_ENABLE_WAKE_UP, + 0, + 0, + 0, + SK_PNMI_RW, PowerManagement, 0}, +#endif /* SK_POWER_MGMT */ +#ifdef SK_DIAG_SUPPORT + {OID_SKGE_DIAG_MODE, + 0, + 0, + 0, + SK_PNMI_RW, DiagActions, 0}, +#endif /* SK_DIAG_SUPPORT */ + {OID_SKGE_MDB_VERSION, + 1, + 0, + SK_PNMI_MAI_OFF(MgmtDBVersion), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SUPPORTED_LIST, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_SKGE_ALL_DATA, + 0, + 0, + 0, + SK_PNMI_RW, OidStruct, 0}, + {OID_SKGE_VPD_FREE_BYTES, + 1, + 0, + SK_PNMI_MAI_OFF(VpdFreeBytes), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ENTRIES_LIST, + 1, + 0, + SK_PNMI_MAI_OFF(VpdEntriesList), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ENTRIES_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(VpdEntriesNumber), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_KEY, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdKey), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_VALUE, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), 
+ SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdValue), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ACCESS, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAccess), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ACTION, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAction), + SK_PNMI_RW, Vpd, 0}, + {OID_SKGE_PORT_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(PortNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DEVICE_TYPE, + 1, + 0, + SK_PNMI_MAI_OFF(DeviceType), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DRIVER_DESCR, + 1, + 0, + SK_PNMI_MAI_OFF(DriverDescr), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DRIVER_VERSION, + 1, + 0, + SK_PNMI_MAI_OFF(DriverVersion), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DRIVER_RELDATE, + 1, + 0, + SK_PNMI_MAI_OFF(DriverReleaseDate), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DRIVER_FILENAME, + 1, + 0, + SK_PNMI_MAI_OFF(DriverFileName), + SK_PNMI_RO, General, 0}, + {OID_SKGE_HW_DESCR, + 1, + 0, + SK_PNMI_MAI_OFF(HwDescr), + SK_PNMI_RO, General, 0}, + {OID_SKGE_HW_VERSION, + 1, + 0, + SK_PNMI_MAI_OFF(HwVersion), + SK_PNMI_RO, General, 0}, + {OID_SKGE_CHIPSET, + 1, + 0, + SK_PNMI_MAI_OFF(Chipset), + SK_PNMI_RO, General, 0}, + {OID_SKGE_CHIPID, + 1, + 0, + SK_PNMI_MAI_OFF(ChipId), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RAMSIZE, + 1, + 0, + SK_PNMI_MAI_OFF(RamSize), + SK_PNMI_RO, General, 0}, + {OID_SKGE_VAUXAVAIL, + 1, + 0, + SK_PNMI_MAI_OFF(VauxAvail), + SK_PNMI_RO, General, 0}, + {OID_SKGE_ACTION, + 1, + 0, + SK_PNMI_MAI_OFF(Action), + SK_PNMI_RW, Perform, 0}, + {OID_SKGE_RESULT, + 1, + 0, + SK_PNMI_MAI_OFF(TestResult), + SK_PNMI_RO, General, 0}, + {OID_SKGE_BUS_TYPE, + 1, + 0, + SK_PNMI_MAI_OFF(BusType), + SK_PNMI_RO, General, 0}, + {OID_SKGE_BUS_SPEED, + 1, + 0, + SK_PNMI_MAI_OFF(BusSpeed), + SK_PNMI_RO, General, 0}, + {OID_SKGE_BUS_WIDTH, + 1, + 0, + SK_PNMI_MAI_OFF(BusWidth), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_SW_QUEUE_LEN, + 1, + 0, + SK_PNMI_MAI_OFF(TxSwQueueLen), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_SW_QUEUE_MAX, + 1, + 0, + SK_PNMI_MAI_OFF(TxSwQueueMax), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_RETRY, + 1, + 0, + SK_PNMI_MAI_OFF(TxRetryCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_INTR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxIntrCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_INTR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(TxIntrCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_NO_BUF_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxNoBufCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_NO_BUF_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(TxNoBufCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_USED_DESCR_NO, + 1, + 0, + SK_PNMI_MAI_OFF(TxUsedDescrNo), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_DELIVERED_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxDeliveredCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_OCTETS_DELIV_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxOctetsDeliveredCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_HW_ERROR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxHwErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_HW_ERROR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(TxHwErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_IN_ERRORS_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(InErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_OUT_ERROR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(OutErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_ERR_RECOVERY_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(ErrRecoveryCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SYSUPTIME, + 1, + 0, + SK_PNMI_MAI_OFF(SysUpTime), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SENSOR_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(SensorNumber), + SK_PNMI_RO, General, 
0}, + {OID_SKGE_SENSOR_INDEX, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorIndex), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_DESCR, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorDescr), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_TYPE, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorType), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_VALUE, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorValue), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_THRES_LOW, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdLow), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_THRES_UPP, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdHigh), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_THRES_LOW, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdLow), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_THRES_UPP, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdHigh), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_STATUS, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorStatus), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_CTS, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningCts), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_CTS, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorCts), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_TIME, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningTimestamp), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_TIME, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorTimestamp), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_CHKSM_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(ChecksumNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_CHKSM_RX_OK_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxOkCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_RX_UNABLE_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxUnableCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_RX_ERR_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxErrCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_TX_OK_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxOkCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_TX_UNABLE_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxUnableCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_STAT_TX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX}, + {OID_SKGE_STAT_TX_OCTETS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOctetsOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_OCTET}, + 
{OID_SKGE_STAT_TX_BROADCAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBroadcastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BROADCAST}, + {OID_SKGE_STAT_TX_MULTICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMulticastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTICAST}, + {OID_SKGE_STAT_TX_UNICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUnicastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNICAST}, + {OID_SKGE_STAT_TX_LONGFRAMES, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLongFramesCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LONGFRAMES}, + {OID_SKGE_STAT_TX_BURST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBurstCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BURST}, + {OID_SKGE_STAT_TX_PFLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxPauseMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_PMACC}, + {OID_SKGE_STAT_TX_FLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MACC}, + {OID_SKGE_STAT_TX_SINGLE_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSingleCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SINGLE_COL}, + {OID_SKGE_STAT_TX_MULTI_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMultipleCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTI_COL}, + {OID_SKGE_STAT_TX_EXCESS_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_COL}, + {OID_SKGE_STAT_TX_LATE_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLateCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LATE_COL}, + {OID_SKGE_STAT_TX_DEFFERAL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxDeferralCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_DEFFERAL}, + {OID_SKGE_STAT_TX_EXCESS_DEF, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveDeferralCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_DEF}, + {OID_SKGE_STAT_TX_UNDERRUN, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxFifoUnderrunCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNDERRUN}, + {OID_SKGE_STAT_TX_CARRIER, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxCarrierCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_CARRIER}, +/* {OID_SKGE_STAT_TX_UTIL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUtilization), + SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */ + {OID_SKGE_STAT_TX_64, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx64Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_64}, + {OID_SKGE_STAT_TX_127, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx127Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_127}, + {OID_SKGE_STAT_TX_255, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx255Cts), + SK_PNMI_RO, MacPrivateStat, 
SK_PNMI_HTX_255}, + {OID_SKGE_STAT_TX_511, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx511Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_511}, + {OID_SKGE_STAT_TX_1023, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx1023Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_1023}, + {OID_SKGE_STAT_TX_MAX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMaxCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MAX}, + {OID_SKGE_STAT_TX_SYNC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC}, + {OID_SKGE_STAT_TX_SYNC_OCTETS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncOctetsCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC_OCTET}, + {OID_SKGE_STAT_RX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX}, + {OID_SKGE_STAT_RX_OCTETS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOctetsOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OCTET}, + {OID_SKGE_STAT_RX_BROADCAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBroadcastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BROADCAST}, + {OID_SKGE_STAT_RX_MULTICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMulticastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MULTICAST}, + {OID_SKGE_STAT_RX_UNICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUnicastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_UNICAST}, + {OID_SKGE_STAT_RX_LONGFRAMES, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxLongFramesCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_LONGFRAMES}, + {OID_SKGE_STAT_RX_PFLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC}, + {OID_SKGE_STAT_RX_FLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC}, + {OID_SKGE_STAT_RX_PFLOWC_ERR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlErrorCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC_ERR}, + {OID_SKGE_STAT_RX_FLOWC_UNKWN, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlUnknownCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC_UNKWN}, + {OID_SKGE_STAT_RX_BURST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBurstCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BURST}, + {OID_SKGE_STAT_RX_MISSED, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMissedCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MISSED}, + {OID_SKGE_STAT_RX_FRAMING, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFramingCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FRAMING}, + {OID_SKGE_STAT_RX_OVERFLOW, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFifoOverflowCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OVERFLOW}, + 
{OID_SKGE_STAT_RX_JABBER, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxJabberCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_JABBER}, + {OID_SKGE_STAT_RX_CARRIER, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCarrierCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CARRIER}, + {OID_SKGE_STAT_RX_IR_LENGTH, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxIRLengthCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_IRLENGTH}, + {OID_SKGE_STAT_RX_SYMBOL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxSymbolCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SYMBOL}, + {OID_SKGE_STAT_RX_SHORTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxShortsCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SHORTS}, + {OID_SKGE_STAT_RX_RUNT, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxRuntCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_RUNT}, + {OID_SKGE_STAT_RX_CEXT, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCextCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CEXT}, + {OID_SKGE_STAT_RX_TOO_LONG, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxTooLongCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_TOO_LONG}, + {OID_SKGE_STAT_RX_FCS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFcsCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FCS}, +/* {OID_SKGE_STAT_RX_UTIL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUtilization), + SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */ + {OID_SKGE_STAT_RX_64, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx64Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_64}, + {OID_SKGE_STAT_RX_127, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx127Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_127}, + {OID_SKGE_STAT_RX_255, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx255Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_255}, + {OID_SKGE_STAT_RX_511, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx511Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_511}, + {OID_SKGE_STAT_RX_1023, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx1023Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_1023}, + {OID_SKGE_STAT_RX_MAX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMaxCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MAX}, + {OID_SKGE_PHYS_CUR_ADDR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacCurrentAddr), + SK_PNMI_RW, Addr, 0}, + {OID_SKGE_PHYS_FAC_ADDR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacFactoryAddr), + SK_PNMI_RO, Addr, 0}, + {OID_SKGE_PMD, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPMD), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_CONNECTOR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfConnector), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_PHY_TYPE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + 
SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_LINK_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_LINK_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_LINK_MODE_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkModeStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_LINK_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_FLOWCTRL_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_FLOWCTRL_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_FLOWCTRL_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_PHY_OPERATION_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_PHY_OPERATION_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_PHY_OPERATION_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_SPEED_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_SPEED_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_SPEED_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_TRAP, + 1, + 0, + SK_PNMI_MAI_OFF(Trap), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TRAP_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(TrapNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RLMT_MODE, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtMode), + SK_PNMI_RW, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtPortNumber), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_ACTIVE, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtPortActive), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_PREFERRED, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtPortPreferred), + SK_PNMI_RW, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeCts), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_TIME, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeTime), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_ESTIM, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeEstimate), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_THRES, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeThreshold), + SK_PNMI_RW, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_INDEX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtIndex), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtStatus), + SK_PNMI_RO, RlmtStat, 0}, + 
{OID_SKGE_RLMT_TX_HELLO_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxHelloCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_RX_HELLO_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxHelloCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_TX_SP_REQ_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxSpHelloReqCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_RX_SP_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxSpHelloCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_MONITOR_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtMonitorNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RLMT_MONITOR_INDEX, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorIndex), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_ADDR, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAddr), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_ERRS, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorErrorCts), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_TIMESTAMP, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorTimestamp), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_ADMIN, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAdmin), + SK_PNMI_RW, Monitor, 0}, + {OID_SKGE_MTU, + 1, + 0, + SK_PNMI_MAI_OFF(MtuSize), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_VCT_GET, + 0, + 0, + 0, + SK_PNMI_RO, Vct, 0}, + {OID_SKGE_VCT_SET, + 0, + 0, + 0, + SK_PNMI_WO, Vct, 0}, + {OID_SKGE_VCT_STATUS, + 0, + 0, + 0, + SK_PNMI_RO, Vct, 0}, + {OID_SKGE_BOARDLEVEL, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, +}; + diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c new file mode 100644 index 000000000000..b36dd9ac6b29 --- /dev/null +++ b/drivers/net/sk98lin/skgepnmi.c @@ -0,0 +1,8210 @@ +/***************************************************************************** + * + * Name: skgepnmi.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.111 $ + * Date: $Date: 2003/09/15 13:35:35 $ + * Purpose: Private Network Management Interface + * + ****************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + + +#ifndef _lint +static const char SysKonnectFileId[] = + "@(#) $Id: skgepnmi.c,v 1.111 2003/09/15 13:35:35 tschilli Exp $ (C) Marvell."; +#endif /* !_lint */ + +#include "h/skdrv1st.h" +#include "h/sktypes.h" +#include "h/xmac_ii.h" +#include "h/skdebug.h" +#include "h/skqueue.h" +#include "h/skgepnmi.h" +#include "h/skgesirq.h" +#include "h/skcsum.h" +#include "h/skvpd.h" +#include "h/skgehw.h" +#include "h/skgeinit.h" +#include "h/skdrv2nd.h" +#include "h/skgepnm2.h" +#ifdef SK_POWER_MGMT +#include "h/skgepmgt.h" +#endif +/* defines *******************************************************************/ + +#ifndef DEBUG +#define PNMI_STATIC static +#else /* DEBUG */ +#define PNMI_STATIC +#endif /* DEBUG */ + +/* + * Public Function prototypes + */ +int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level); +int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf, + unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); +int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, + unsigned int *pLen, SK_U32 NetIndex); +int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, + unsigned int *pLen, SK_U32 NetIndex); +int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf, + unsigned int *pLen, SK_U32 NetIndex); +int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Param); +int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf, + unsigned int * pLen, SK_U32 NetIndex); + + +/* + * Private Function prototypes + */ + +PNMI_STATIC SK_U8 CalculateLinkModeStatus(SK_AC *pAC, SK_IOC IoC, unsigned int + PhysPortIndex); +PNMI_STATIC SK_U8 CalculateLinkStatus(SK_AC *pAC, SK_IOC IoC, unsigned int + PhysPortIndex); +PNMI_STATIC void CopyMac(char *pDst, SK_MAC_ADDR *pMac); +PNMI_STATIC void CopyTrapQueue(SK_AC *pAC, char *pDstBuf); +PNMI_STATIC SK_U64 GetPhysStatVal(SK_AC *pAC, SK_IOC IoC, + unsigned int PhysPortIndex, unsigned int StatIndex); +PNMI_STATIC SK_U64 GetStatVal(SK_AC *pAC, SK_IOC IoC, unsigned int LogPortIndex, + unsigned int StatIndex, SK_U32 NetIndex); +PNMI_STATIC char* GetTrapEntry(SK_AC *pAC, SK_U32 TrapId, unsigned int Size); +PNMI_STATIC void GetTrapQueueLen(SK_AC *pAC, unsigned int *pLen, + unsigned int *pEntries); +PNMI_STATIC int GetVpdKeyArr(SK_AC *pAC, SK_IOC IoC, char *pKeyArr, + unsigned int KeyArrLen, unsigned int *pKeyNo); +PNMI_STATIC int LookupId(SK_U32 Id); +PNMI_STATIC int MacUpdate(SK_AC *pAC, SK_IOC IoC, unsigned int FirstMac, + unsigned int LastMac); +PNMI_STATIC int PnmiStruct(SK_AC *pAC, SK_IOC IoC, int Action, char *pBuf, + unsigned int *pLen, SK_U32 NetIndex); +PNMI_STATIC int PnmiVar(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); +PNMI_STATIC void QueueRlmtNewMacTrap(SK_AC *pAC, unsigned int ActiveMac); +PNMI_STATIC void QueueRlmtPortTrap(SK_AC *pAC, SK_U32 TrapId, + unsigned int PortIndex); +PNMI_STATIC void QueueSensorTrap(SK_AC *pAC, SK_U32 TrapId, + unsigned int SensorIndex); +PNMI_STATIC void QueueSimpleTrap(SK_AC *pAC, SK_U32 TrapId); +PNMI_STATIC void ResetCounter(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex); +PNMI_STATIC int RlmtUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex); +PNMI_STATIC int SirqUpdate(SK_AC *pAC, SK_IOC IoC); +PNMI_STATIC void VirtualConf(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, char *pBuf); +PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, char *pBuf, + unsigned int *pLen, SK_U32 Instance, unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC void 
CheckVctStatus(SK_AC *, SK_IOC, char *, SK_U32, SK_U32); + +/* + * Table to correlate OID with handler function and index to + * hardware register stored in StatAddress if applicable. + */ +#include "skgemib.c" + +/* global variables **********************************************************/ + +/* + * Overflow status register bit table and corresponding counter + * dependent on MAC type - the number relates to the size of overflow + * mask returned by the pFnMacOverflow function + */ +PNMI_STATIC const SK_U16 StatOvrflwBit[][SK_PNMI_MAC_TYPES] = { +/* Bit0 */ { SK_PNMI_HTX, SK_PNMI_HTX_UNICAST}, +/* Bit1 */ { SK_PNMI_HTX_OCTETHIGH, SK_PNMI_HTX_BROADCAST}, +/* Bit2 */ { SK_PNMI_HTX_OCTETLOW, SK_PNMI_HTX_PMACC}, +/* Bit3 */ { SK_PNMI_HTX_BROADCAST, SK_PNMI_HTX_MULTICAST}, +/* Bit4 */ { SK_PNMI_HTX_MULTICAST, SK_PNMI_HTX_OCTETLOW}, +/* Bit5 */ { SK_PNMI_HTX_UNICAST, SK_PNMI_HTX_OCTETHIGH}, +/* Bit6 */ { SK_PNMI_HTX_LONGFRAMES, SK_PNMI_HTX_64}, +/* Bit7 */ { SK_PNMI_HTX_BURST, SK_PNMI_HTX_127}, +/* Bit8 */ { SK_PNMI_HTX_PMACC, SK_PNMI_HTX_255}, +/* Bit9 */ { SK_PNMI_HTX_MACC, SK_PNMI_HTX_511}, +/* Bit10 */ { SK_PNMI_HTX_SINGLE_COL, SK_PNMI_HTX_1023}, +/* Bit11 */ { SK_PNMI_HTX_MULTI_COL, SK_PNMI_HTX_MAX}, +/* Bit12 */ { SK_PNMI_HTX_EXCESS_COL, SK_PNMI_HTX_LONGFRAMES}, +/* Bit13 */ { SK_PNMI_HTX_LATE_COL, SK_PNMI_HTX_RESERVED}, +/* Bit14 */ { SK_PNMI_HTX_DEFFERAL, SK_PNMI_HTX_COL}, +/* Bit15 */ { SK_PNMI_HTX_EXCESS_DEF, SK_PNMI_HTX_LATE_COL}, +/* Bit16 */ { SK_PNMI_HTX_UNDERRUN, SK_PNMI_HTX_EXCESS_COL}, +/* Bit17 */ { SK_PNMI_HTX_CARRIER, SK_PNMI_HTX_MULTI_COL}, +/* Bit18 */ { SK_PNMI_HTX_UTILUNDER, SK_PNMI_HTX_SINGLE_COL}, +/* Bit19 */ { SK_PNMI_HTX_UTILOVER, SK_PNMI_HTX_UNDERRUN}, +/* Bit20 */ { SK_PNMI_HTX_64, SK_PNMI_HTX_RESERVED}, +/* Bit21 */ { SK_PNMI_HTX_127, SK_PNMI_HTX_RESERVED}, +/* Bit22 */ { SK_PNMI_HTX_255, SK_PNMI_HTX_RESERVED}, +/* Bit23 */ { SK_PNMI_HTX_511, SK_PNMI_HTX_RESERVED}, +/* Bit24 */ { SK_PNMI_HTX_1023, SK_PNMI_HTX_RESERVED}, +/* Bit25 */ { SK_PNMI_HTX_MAX, SK_PNMI_HTX_RESERVED}, +/* Bit26 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit27 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit28 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit29 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit30 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit31 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit32 */ { SK_PNMI_HRX, SK_PNMI_HRX_UNICAST}, +/* Bit33 */ { SK_PNMI_HRX_OCTETHIGH, SK_PNMI_HRX_BROADCAST}, +/* Bit34 */ { SK_PNMI_HRX_OCTETLOW, SK_PNMI_HRX_PMACC}, +/* Bit35 */ { SK_PNMI_HRX_BROADCAST, SK_PNMI_HRX_MULTICAST}, +/* Bit36 */ { SK_PNMI_HRX_MULTICAST, SK_PNMI_HRX_FCS}, +/* Bit37 */ { SK_PNMI_HRX_UNICAST, SK_PNMI_HRX_RESERVED}, +/* Bit38 */ { SK_PNMI_HRX_PMACC, SK_PNMI_HRX_OCTETLOW}, +/* Bit39 */ { SK_PNMI_HRX_MACC, SK_PNMI_HRX_OCTETHIGH}, +/* Bit40 */ { SK_PNMI_HRX_PMACC_ERR, SK_PNMI_HRX_BADOCTETLOW}, +/* Bit41 */ { SK_PNMI_HRX_MACC_UNKWN, SK_PNMI_HRX_BADOCTETHIGH}, +/* Bit42 */ { SK_PNMI_HRX_BURST, SK_PNMI_HRX_UNDERSIZE}, +/* Bit43 */ { SK_PNMI_HRX_MISSED, SK_PNMI_HRX_RUNT}, +/* Bit44 */ { SK_PNMI_HRX_FRAMING, SK_PNMI_HRX_64}, +/* Bit45 */ { SK_PNMI_HRX_OVERFLOW, SK_PNMI_HRX_127}, +/* Bit46 */ { SK_PNMI_HRX_JABBER, SK_PNMI_HRX_255}, +/* Bit47 */ { SK_PNMI_HRX_CARRIER, SK_PNMI_HRX_511}, +/* Bit48 */ { SK_PNMI_HRX_IRLENGTH, SK_PNMI_HRX_1023}, +/* Bit49 */ { SK_PNMI_HRX_SYMBOL, SK_PNMI_HRX_MAX}, +/* Bit50 */ { SK_PNMI_HRX_SHORTS, SK_PNMI_HRX_LONGFRAMES}, +/* Bit51 */ { SK_PNMI_HRX_RUNT, SK_PNMI_HRX_TOO_LONG}, +/* Bit52 */ { 
SK_PNMI_HRX_TOO_LONG, SK_PNMI_HRX_JABBER}, +/* Bit53 */ { SK_PNMI_HRX_FCS, SK_PNMI_HRX_RESERVED}, +/* Bit54 */ { SK_PNMI_HRX_RESERVED, SK_PNMI_HRX_OVERFLOW}, +/* Bit55 */ { SK_PNMI_HRX_CEXT, SK_PNMI_HRX_RESERVED}, +/* Bit56 */ { SK_PNMI_HRX_UTILUNDER, SK_PNMI_HRX_RESERVED}, +/* Bit57 */ { SK_PNMI_HRX_UTILOVER, SK_PNMI_HRX_RESERVED}, +/* Bit58 */ { SK_PNMI_HRX_64, SK_PNMI_HRX_RESERVED}, +/* Bit59 */ { SK_PNMI_HRX_127, SK_PNMI_HRX_RESERVED}, +/* Bit60 */ { SK_PNMI_HRX_255, SK_PNMI_HRX_RESERVED}, +/* Bit61 */ { SK_PNMI_HRX_511, SK_PNMI_HRX_RESERVED}, +/* Bit62 */ { SK_PNMI_HRX_1023, SK_PNMI_HRX_RESERVED}, +/* Bit63 */ { SK_PNMI_HRX_MAX, SK_PNMI_HRX_RESERVED} +}; + +/* + * Table for hardware register saving on resets and port switches + */ +PNMI_STATIC const SK_PNMI_STATADDR StatAddr[SK_PNMI_MAX_IDX][SK_PNMI_MAC_TYPES] = { + /* SK_PNMI_HTX */ + {{XM_TXF_OK, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_OCTETHIGH */ + {{XM_TXO_OK_HI, SK_TRUE}, {GM_TXO_OK_HI, SK_TRUE}}, + /* SK_PNMI_HTX_OCTETLOW */ + {{XM_TXO_OK_LO, SK_FALSE}, {GM_TXO_OK_LO, SK_FALSE}}, + /* SK_PNMI_HTX_BROADCAST */ + {{XM_TXF_BC_OK, SK_TRUE}, {GM_TXF_BC_OK, SK_TRUE}}, + /* SK_PNMI_HTX_MULTICAST */ + {{XM_TXF_MC_OK, SK_TRUE}, {GM_TXF_MC_OK, SK_TRUE}}, + /* SK_PNMI_HTX_UNICAST */ + {{XM_TXF_UC_OK, SK_TRUE}, {GM_TXF_UC_OK, SK_TRUE}}, + /* SK_PNMI_HTX_BURST */ + {{XM_TXE_BURST, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_PMACC */ + {{XM_TXF_MPAUSE, SK_TRUE}, {GM_TXF_MPAUSE, SK_TRUE}}, + /* SK_PNMI_HTX_MACC */ + {{XM_TXF_MCTRL, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_COL */ + {{0, SK_FALSE}, {GM_TXF_COL, SK_TRUE}}, + /* SK_PNMI_HTX_SINGLE_COL */ + {{XM_TXF_SNG_COL, SK_TRUE}, {GM_TXF_SNG_COL, SK_TRUE}}, + /* SK_PNMI_HTX_MULTI_COL */ + {{XM_TXF_MUL_COL, SK_TRUE}, {GM_TXF_MUL_COL, SK_TRUE}}, + /* SK_PNMI_HTX_EXCESS_COL */ + {{XM_TXF_ABO_COL, SK_TRUE}, {GM_TXF_ABO_COL, SK_TRUE}}, + /* SK_PNMI_HTX_LATE_COL */ + {{XM_TXF_LAT_COL, SK_TRUE}, {GM_TXF_LAT_COL, SK_TRUE}}, + /* SK_PNMI_HTX_DEFFERAL */ + {{XM_TXF_DEF, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_EXCESS_DEF */ + {{XM_TXF_EX_DEF, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_UNDERRUN */ + {{XM_TXE_FIFO_UR, SK_TRUE}, {GM_TXE_FIFO_UR, SK_TRUE}}, + /* SK_PNMI_HTX_CARRIER */ + {{XM_TXE_CS_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_UTILUNDER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_UTILOVER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_64 */ + {{XM_TXF_64B, SK_TRUE}, {GM_TXF_64B, SK_TRUE}}, + /* SK_PNMI_HTX_127 */ + {{XM_TXF_127B, SK_TRUE}, {GM_TXF_127B, SK_TRUE}}, + /* SK_PNMI_HTX_255 */ + {{XM_TXF_255B, SK_TRUE}, {GM_TXF_255B, SK_TRUE}}, + /* SK_PNMI_HTX_511 */ + {{XM_TXF_511B, SK_TRUE}, {GM_TXF_511B, SK_TRUE}}, + /* SK_PNMI_HTX_1023 */ + {{XM_TXF_1023B, SK_TRUE}, {GM_TXF_1023B, SK_TRUE}}, + /* SK_PNMI_HTX_MAX */ + {{XM_TXF_MAX_SZ, SK_TRUE}, {GM_TXF_1518B, SK_TRUE}}, + /* SK_PNMI_HTX_LONGFRAMES */ + {{XM_TXF_LONG, SK_TRUE}, {GM_TXF_MAX_SZ, SK_TRUE}}, + /* SK_PNMI_HTX_SYNC */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_SYNC_OCTET */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_RESERVED */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX */ + {{XM_RXF_OK, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_OCTETHIGH */ + {{XM_RXO_OK_HI, SK_TRUE}, {GM_RXO_OK_HI, SK_TRUE}}, + /* SK_PNMI_HRX_OCTETLOW */ + {{XM_RXO_OK_LO, SK_FALSE}, {GM_RXO_OK_LO, SK_FALSE}}, + /* SK_PNMI_HRX_BADOCTETHIGH */ + {{0, SK_FALSE}, {GM_RXO_ERR_HI, SK_TRUE}}, + /* SK_PNMI_HRX_BADOCTETLOW */ + {{0, SK_FALSE}, {GM_RXO_ERR_LO, SK_TRUE}}, + /* SK_PNMI_HRX_BROADCAST */ + {{XM_RXF_BC_OK, 
SK_TRUE}, {GM_RXF_BC_OK, SK_TRUE}}, + /* SK_PNMI_HRX_MULTICAST */ + {{XM_RXF_MC_OK, SK_TRUE}, {GM_RXF_MC_OK, SK_TRUE}}, + /* SK_PNMI_HRX_UNICAST */ + {{XM_RXF_UC_OK, SK_TRUE}, {GM_RXF_UC_OK, SK_TRUE}}, + /* SK_PNMI_HRX_PMACC */ + {{XM_RXF_MPAUSE, SK_TRUE}, {GM_RXF_MPAUSE, SK_TRUE}}, + /* SK_PNMI_HRX_MACC */ + {{XM_RXF_MCTRL, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_PMACC_ERR */ + {{XM_RXF_INV_MP, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_MACC_UNKWN */ + {{XM_RXF_INV_MOC, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_BURST */ + {{XM_RXE_BURST, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_MISSED */ + {{XM_RXE_FMISS, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_FRAMING */ + {{XM_RXF_FRA_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_UNDERSIZE */ + {{0, SK_FALSE}, {GM_RXF_SHT, SK_TRUE}}, + /* SK_PNMI_HRX_OVERFLOW */ + {{XM_RXE_FIFO_OV, SK_TRUE}, {GM_RXE_FIFO_OV, SK_TRUE}}, + /* SK_PNMI_HRX_JABBER */ + {{XM_RXF_JAB_PKT, SK_TRUE}, {GM_RXF_JAB_PKT, SK_TRUE}}, + /* SK_PNMI_HRX_CARRIER */ + {{XM_RXE_CAR_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_IRLENGTH */ + {{XM_RXF_LEN_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_SYMBOL */ + {{XM_RXE_SYM_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_SHORTS */ + {{XM_RXE_SHT_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_RUNT */ + {{XM_RXE_RUNT, SK_TRUE}, {GM_RXE_FRAG, SK_TRUE}}, + /* SK_PNMI_HRX_TOO_LONG */ + {{XM_RXF_LNG_ERR, SK_TRUE}, {GM_RXF_LNG_ERR, SK_TRUE}}, + /* SK_PNMI_HRX_FCS */ + {{XM_RXF_FCS_ERR, SK_TRUE}, {GM_RXF_FCS_ERR, SK_TRUE}}, + /* SK_PNMI_HRX_CEXT */ + {{XM_RXF_CEX_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_UTILUNDER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_UTILOVER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_64 */ + {{XM_RXF_64B, SK_TRUE}, {GM_RXF_64B, SK_TRUE}}, + /* SK_PNMI_HRX_127 */ + {{XM_RXF_127B, SK_TRUE}, {GM_RXF_127B, SK_TRUE}}, + /* SK_PNMI_HRX_255 */ + {{XM_RXF_255B, SK_TRUE}, {GM_RXF_255B, SK_TRUE}}, + /* SK_PNMI_HRX_511 */ + {{XM_RXF_511B, SK_TRUE}, {GM_RXF_511B, SK_TRUE}}, + /* SK_PNMI_HRX_1023 */ + {{XM_RXF_1023B, SK_TRUE}, {GM_RXF_1023B, SK_TRUE}}, + /* SK_PNMI_HRX_MAX */ + {{XM_RXF_MAX_SZ, SK_TRUE}, {GM_RXF_1518B, SK_TRUE}}, + /* SK_PNMI_HRX_LONGFRAMES */ + {{0, SK_FALSE}, {GM_RXF_MAX_SZ, SK_TRUE}}, + /* SK_PNMI_HRX_RESERVED */ + {{0, SK_FALSE}, {0, SK_FALSE}} +}; + + +/***************************************************************************** + * + * Public functions + * + */ + +/***************************************************************************** + * + * SkPnmiInit - Init function of PNMI + * + * Description: + * SK_INIT_DATA: Initialises the data structures + * SK_INIT_IO: Resets the XMAC statistics, determines the device and + * connector type. + * SK_INIT_RUN: Starts a timer event for port switch per hour + * calculation. 
+ * + * Returns: + * Always 0 + */ +int SkPnmiInit( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Level) /* Initialization level */ +{ + unsigned int PortMax; /* Number of ports */ + unsigned int PortIndex; /* Current port index in loop */ + SK_U16 Val16; /* Multiple purpose 16 bit variable */ + SK_U8 Val8; /* Mulitple purpose 8 bit variable */ + SK_EVPARA EventParam; /* Event struct for timer event */ + SK_PNMI_VCT *pVctBackupData; + + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiInit: Called, level=%d\n", Level)); + + switch (Level) { + + case SK_INIT_DATA: + SK_MEMSET((char *)&pAC->Pnmi, 0, sizeof(pAC->Pnmi)); + pAC->Pnmi.TrapBufFree = SK_PNMI_TRAP_QUEUE_LEN; + pAC->Pnmi.StartUpTime = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC)); + pAC->Pnmi.RlmtChangeThreshold = SK_PNMI_DEF_RLMT_CHG_THRES; + for (PortIndex = 0; PortIndex < SK_MAX_MACS; PortIndex ++) { + + pAC->Pnmi.Port[PortIndex].ActiveFlag = SK_FALSE; + pAC->Pnmi.DualNetActiveFlag = SK_FALSE; + } + +#ifdef SK_PNMI_CHECK + if (SK_PNMI_MAX_IDX != SK_PNMI_CNT_NO) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR049, SK_PNMI_ERR049MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL, + ("CounterOffset struct size (%d) differs from" + "SK_PNMI_MAX_IDX (%d)\n", + SK_PNMI_CNT_NO, SK_PNMI_MAX_IDX)); + } + + if (SK_PNMI_MAX_IDX != + (sizeof(StatAddr) / (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES))) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR050, SK_PNMI_ERR050MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL, + ("StatAddr table size (%d) differs from " + "SK_PNMI_MAX_IDX (%d)\n", + (sizeof(StatAddr) / + (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES)), + SK_PNMI_MAX_IDX)); + } +#endif /* SK_PNMI_CHECK */ + break; + + case SK_INIT_IO: + /* + * Reset MAC counters + */ + PortMax = pAC->GIni.GIMacsFound; + + for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) { + + pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PortIndex); + } + + /* Initialize DSP variables for Vct() to 0xff => Never written! 
*/ + for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) { + pAC->GIni.GP[PortIndex].PCableLen = 0xff; + pVctBackupData = &pAC->Pnmi.VctBackup[PortIndex]; + pVctBackupData->PCableLen = 0xff; + } + + /* + * Get pci bus speed + */ + SK_IN16(IoC, B0_CTST, &Val16); + if ((Val16 & CS_BUS_CLOCK) == 0) { + + pAC->Pnmi.PciBusSpeed = 33; + } + else { + pAC->Pnmi.PciBusSpeed = 66; + } + + /* + * Get pci bus width + */ + SK_IN16(IoC, B0_CTST, &Val16); + if ((Val16 & CS_BUS_SLOT_SZ) == 0) { + + pAC->Pnmi.PciBusWidth = 32; + } + else { + pAC->Pnmi.PciBusWidth = 64; + } + + /* + * Get chipset + */ + switch (pAC->GIni.GIChipId) { + case CHIP_ID_GENESIS: + pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_XMAC; + break; + + case CHIP_ID_YUKON: + pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_YUKON; + break; + + default: + break; + } + + /* + * Get PMD and DeviceType + */ + SK_IN8(IoC, B2_PMD_TYP, &Val8); + switch (Val8) { + case 'S': + pAC->Pnmi.PMD = 3; + if (pAC->GIni.GIMacsFound > 1) { + + pAC->Pnmi.DeviceType = 0x00020002; + } + else { + pAC->Pnmi.DeviceType = 0x00020001; + } + break; + + case 'L': + pAC->Pnmi.PMD = 2; + if (pAC->GIni.GIMacsFound > 1) { + + pAC->Pnmi.DeviceType = 0x00020004; + } + else { + pAC->Pnmi.DeviceType = 0x00020003; + } + break; + + case 'C': + pAC->Pnmi.PMD = 4; + if (pAC->GIni.GIMacsFound > 1) { + + pAC->Pnmi.DeviceType = 0x00020006; + } + else { + pAC->Pnmi.DeviceType = 0x00020005; + } + break; + + case 'T': + pAC->Pnmi.PMD = 5; + if (pAC->GIni.GIMacsFound > 1) { + + pAC->Pnmi.DeviceType = 0x00020008; + } + else { + pAC->Pnmi.DeviceType = 0x00020007; + } + break; + + default : + pAC->Pnmi.PMD = 1; + pAC->Pnmi.DeviceType = 0; + break; + } + + /* + * Get connector + */ + SK_IN8(IoC, B2_CONN_TYP, &Val8); + switch (Val8) { + case 'C': + pAC->Pnmi.Connector = 2; + break; + + case 'D': + pAC->Pnmi.Connector = 3; + break; + + case 'F': + pAC->Pnmi.Connector = 4; + break; + + case 'J': + pAC->Pnmi.Connector = 5; + break; + + case 'V': + pAC->Pnmi.Connector = 6; + break; + + default: + pAC->Pnmi.Connector = 1; + break; + } + break; + + case SK_INIT_RUN: + /* + * Start timer for RLMT change counter + */ + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer, + 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER, + EventParam); + break; + + default: + break; /* Nothing todo */ + } + + return (0); +} + +/***************************************************************************** + * + * SkPnmiGetVar - Retrieves the value of a single OID + * + * Description: + * Calls a general sub-function for all this stuff. If the instance + * -1 is passed, the values of all instances are returned in an + * array of values. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed + * SK_PNMI_ERR_GENERAL A general severe internal error occured + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take + * the data. + * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +static int SkPnmiGetVar( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 Id, /* Object ID that is to be processed */ +void *pBuf, /* Buffer to which the management data will be copied */ +unsigned int *pLen, /* On call: buffer length. 
On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiGetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n", + Id, *pLen, Instance, NetIndex)); + + return (PnmiVar(pAC, IoC, SK_PNMI_GET, Id, (char *)pBuf, pLen, + Instance, NetIndex)); +} + +/***************************************************************************** + * + * SkPnmiPreSetVar - Presets the value of a single OID + * + * Description: + * Calls a general sub-function for all this stuff. The preset does + * the same as a set, but returns just before finally setting the + * new value. This is useful to check if a set might be successfull. + * If the instance -1 is passed, an array of values is supposed and + * all instances of the OID will be set. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +static int SkPnmiPreSetVar( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 Id, /* Object ID that is to be processed */ +void *pBuf, /* Buffer to which the management data will be copied */ +unsigned int *pLen, /* Total length of management data */ +SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiPreSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n", + Id, *pLen, Instance, NetIndex)); + + + return (PnmiVar(pAC, IoC, SK_PNMI_PRESET, Id, (char *)pBuf, pLen, + Instance, NetIndex)); +} + +/***************************************************************************** + * + * SkPnmiSetVar - Sets the value of a single OID + * + * Description: + * Calls a general sub-function for all this stuff. The preset does + * the same as a set, but returns just before finally setting the + * new value. This is useful to check if a set might be successfull. + * If the instance -1 is passed, an array of values is supposed and + * all instances of the OID will be set. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. 
+ */ +int SkPnmiSetVar( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 Id, /* Object ID that is to be processed */ +void *pBuf, /* Buffer to which the management data will be copied */ +unsigned int *pLen, /* Total length of management data */ +SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n", + Id, *pLen, Instance, NetIndex)); + + return (PnmiVar(pAC, IoC, SK_PNMI_SET, Id, (char *)pBuf, pLen, + Instance, NetIndex)); +} + +/***************************************************************************** + * + * SkPnmiGetStruct - Retrieves the management database in SK_PNMI_STRUCT_DATA + * + * Description: + * Runs through the IdTable, queries the single OIDs and stores the + * returned data into the management database structure + * SK_PNMI_STRUCT_DATA. The offset of the OID in the structure + * is stored in the IdTable. The return value of the function will also + * be stored in SK_PNMI_STRUCT_DATA if the passed buffer has the + * minimum size of SK_PNMI_MIN_STRUCT_SIZE. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed + * SK_PNMI_ERR_GENERAL A general severe internal error occured + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take + * the data. + * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist + */ +int SkPnmiGetStruct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +void *pBuf, /* Buffer to which the management data will be copied. */ +unsigned int *pLen, /* Length of buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + unsigned int TableIndex; + unsigned int DstOffset; + unsigned int InstanceNo; + unsigned int InstanceCnt; + SK_U32 Instance; + unsigned int TmpLen; + char KeyArr[SK_PNMI_VPD_ENTRIES][SK_PNMI_VPD_KEY_SIZE]; + + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiGetStruct: Called, BufLen=%d, NetIndex=%d\n", + *pLen, NetIndex)); + + if (*pLen < SK_PNMI_STRUCT_SIZE) { + + if (*pLen >= SK_PNMI_MIN_STRUCT_SIZE) { + + SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_TOO_SHORT, + (SK_U32)(-1)); + } + + *pLen = SK_PNMI_STRUCT_SIZE; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * Check NetIndex + */ + if (NetIndex >= pAC->Rlmt.NumNets) { + return (SK_PNMI_ERR_UNKNOWN_NET); + } + + /* Update statistic */ + SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On call"); + + if ((Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1)) != + SK_PNMI_ERR_OK) { + + SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1)); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (Ret); + } + + if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) { + + SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1)); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (Ret); + } + + if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) { + + SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1)); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (Ret); + } + + /* + * Increment semaphores to indicate that an update was + * already done + */ + pAC->Pnmi.MacUpdatedFlag ++; + pAC->Pnmi.RlmtUpdatedFlag ++; + pAC->Pnmi.SirqUpdatedFlag ++; + + /* Get vpd keys for instance calculation */ + Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &TmpLen); + if (Ret != SK_PNMI_ERR_OK) { + + pAC->Pnmi.MacUpdatedFlag --; + pAC->Pnmi.RlmtUpdatedFlag --; + pAC->Pnmi.SirqUpdatedFlag --; + + 
SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return"); + SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1)); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (SK_PNMI_ERR_GENERAL); + } + + /* Retrieve values */ + SK_MEMSET((char *)pBuf, 0, SK_PNMI_STRUCT_SIZE); + for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) { + + InstanceNo = IdTable[TableIndex].InstanceNo; + for (InstanceCnt = 1; InstanceCnt <= InstanceNo; + InstanceCnt ++) { + + DstOffset = IdTable[TableIndex].Offset + + (InstanceCnt - 1) * + IdTable[TableIndex].StructSize; + + /* + * For the VPD the instance is not an index number + * but the key itself. Determin with the instance + * counter the VPD key to be used. + */ + if (IdTable[TableIndex].Id == OID_SKGE_VPD_KEY || + IdTable[TableIndex].Id == OID_SKGE_VPD_VALUE || + IdTable[TableIndex].Id == OID_SKGE_VPD_ACCESS || + IdTable[TableIndex].Id == OID_SKGE_VPD_ACTION) { + + SK_STRNCPY((char *)&Instance, KeyArr[InstanceCnt - 1], 4); + } + else { + Instance = (SK_U32)InstanceCnt; + } + + TmpLen = *pLen - DstOffset; + Ret = IdTable[TableIndex].Func(pAC, IoC, SK_PNMI_GET, + IdTable[TableIndex].Id, (char *)pBuf + + DstOffset, &TmpLen, Instance, TableIndex, NetIndex); + + /* + * An unknown instance error means that we reached + * the last instance of that variable. Proceed with + * the next OID in the table and ignore the return + * code. + */ + if (Ret == SK_PNMI_ERR_UNKNOWN_INST) { + + break; + } + + if (Ret != SK_PNMI_ERR_OK) { + + pAC->Pnmi.MacUpdatedFlag --; + pAC->Pnmi.RlmtUpdatedFlag --; + pAC->Pnmi.SirqUpdatedFlag --; + + SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return"); + SK_PNMI_SET_STAT(pBuf, Ret, DstOffset); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (Ret); + } + } + } + + pAC->Pnmi.MacUpdatedFlag --; + pAC->Pnmi.RlmtUpdatedFlag --; + pAC->Pnmi.SirqUpdatedFlag --; + + *pLen = SK_PNMI_STRUCT_SIZE; + SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return"); + SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_OK, (SK_U32)(-1)); + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * SkPnmiPreSetStruct - Presets the management database in SK_PNMI_STRUCT_DATA + * + * Description: + * Calls a general sub-function for all this set stuff. The preset does + * the same as a set, but returns just before finally setting the + * new value. This is useful to check if a set might be successfull. + * The sub-function runs through the IdTable, checks which OIDs are able + * to set, and calls the handler function of the OID to perform the + * preset. The return value of the function will also be stored in + * SK_PNMI_STRUCT_DATA if the passed buffer has the minimum size of + * SK_PNMI_MIN_STRUCT_SIZE. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. 
+ */ +int SkPnmiPreSetStruct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +void *pBuf, /* Buffer which contains the data to be set */ +unsigned int *pLen, /* Length of buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiPreSetStruct: Called, BufLen=%d, NetIndex=%d\n", + *pLen, NetIndex)); + + return (PnmiStruct(pAC, IoC, SK_PNMI_PRESET, (char *)pBuf, + pLen, NetIndex)); +} + +/***************************************************************************** + * + * SkPnmiSetStruct - Sets the management database in SK_PNMI_STRUCT_DATA + * + * Description: + * Calls a general sub-function for all this set stuff. The return value + * of the function will also be stored in SK_PNMI_STRUCT_DATA if the + * passed buffer has the minimum size of SK_PNMI_MIN_STRUCT_SIZE. + * The sub-function runs through the IdTable, checks which OIDs are able + * to set, and calls the handler function of the OID to perform the + * set. The return value of the function will also be stored in + * SK_PNMI_STRUCT_DATA if the passed buffer has the minimum size of + * SK_PNMI_MIN_STRUCT_SIZE. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + */ +int SkPnmiSetStruct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +void *pBuf, /* Buffer which contains the data to be set */ +unsigned int *pLen, /* Length of buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiSetStruct: Called, BufLen=%d, NetIndex=%d\n", + *pLen, NetIndex)); + + return (PnmiStruct(pAC, IoC, SK_PNMI_SET, (char *)pBuf, + pLen, NetIndex)); +} + +/***************************************************************************** + * + * SkPnmiEvent - Event handler + * + * Description: + * Handles the following events: + * SK_PNMI_EVT_SIRQ_OVERFLOW When a hardware counter overflows an + * interrupt will be generated which is + * first handled by SIRQ which generates a + * this event. The event increments the + * upper 32 bit of the 64 bit counter. + * SK_PNMI_EVT_SEN_XXX The event is generated by the I2C module + * when a sensor reports a warning or + * error. The event will store a trap + * message in the trap buffer. + * SK_PNMI_EVT_CHG_EST_TIMER The timer event was initiated by this + * module and is used to calculate the + * port switches per hour. + * SK_PNMI_EVT_CLEAR_COUNTER The event clears all counters and + * timestamps. + * SK_PNMI_EVT_XMAC_RESET The event is generated by the driver + * before a hard reset of the XMAC is + * performed. All counters will be saved + * and added to the hardware counter + * values after reset to grant continuous + * counter values. + * SK_PNMI_EVT_RLMT_PORT_UP Generated by RLMT to notify that a port + * went logically up. A trap message will + * be stored to the trap buffer. + * SK_PNMI_EVT_RLMT_PORT_DOWN Generated by RLMT to notify that a port + * went logically down. A trap message will + * be stored to the trap buffer. + * SK_PNMI_EVT_RLMT_SEGMENTATION Generated by RLMT to notify that two + * spanning tree root bridges were + * detected. 
A trap message will be stored + * to the trap buffer. + * SK_PNMI_EVT_RLMT_ACTIVE_DOWN Notifies PNMI that an active port went + * down. PNMI will not further add the + * statistic values to the virtual port. + * SK_PNMI_EVT_RLMT_ACTIVE_UP Notifies PNMI that a port went up and + * is now an active port. PNMI will now + * add the statistic data of this port to + * the virtual port. + * SK_PNMI_EVT_RLMT_SET_NETS Notifies PNMI about the net mode. The first parameter + * contains the number of nets. 1 means single net, 2 means + * dual net. The second parameter is -1 + * + * Returns: + * Always 0 + */ +int SkPnmiEvent( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 Event, /* Event-Id */ +SK_EVPARA Param) /* Event dependent parameter */ +{ + unsigned int PhysPortIndex; + unsigned int MaxNetNumber; + int CounterIndex; + int Ret; + SK_U16 MacStatus; + SK_U64 OverflowStatus; + SK_U64 Mask; + int MacType; + SK_U64 Value; + SK_U32 Val32; + SK_U16 Register; + SK_EVPARA EventParam; + SK_U64 NewestValue; + SK_U64 OldestValue; + SK_U64 Delta; + SK_PNMI_ESTIMATE *pEst; + SK_U32 NetIndex; + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctBackupData; + SK_U32 RetCode; + int i; + SK_U32 CableLength; + + +#ifdef DEBUG + if (Event != SK_PNMI_EVT_XMAC_RESET) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: SkPnmiEvent: Called, Event=0x%x, Param=0x%x\n", + (unsigned int)Event, (unsigned int)Param.Para64)); + } +#endif /* DEBUG */ + SK_PNMI_CHECKFLAGS("SkPnmiEvent: On call"); + + MacType = pAC->GIni.GIMacType; + + switch (Event) { + + case SK_PNMI_EVT_SIRQ_OVERFLOW: + PhysPortIndex = (int)Param.Para32[0]; + MacStatus = (SK_U16)Param.Para32[1]; +#ifdef DEBUG + if (PhysPortIndex >= SK_MAX_MACS) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SIRQ_OVERFLOW parameter" + " wrong, PhysPortIndex=0x%x\n", + PhysPortIndex)); + return (0); + } +#endif /* DEBUG */ + OverflowStatus = 0; + + /* + * Check which source caused an overflow interrupt. + */ + if ((pAC->GIni.GIFunc.pFnMacOverflow(pAC, IoC, PhysPortIndex, + MacStatus, &OverflowStatus) != 0) || + (OverflowStatus == 0)) { + + SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); + return (0); + } + + /* + * Check the overflow status register and increment + * the upper dword of corresponding counter. + */ + for (CounterIndex = 0; CounterIndex < sizeof(Mask) * 8; + CounterIndex ++) { + + Mask = (SK_U64)1 << CounterIndex; + if ((OverflowStatus & Mask) == 0) { + + continue; + } + + switch (StatOvrflwBit[CounterIndex][MacType]) { + + case SK_PNMI_HTX_UTILUNDER: + case SK_PNMI_HTX_UTILOVER: + if (MacType == SK_MAC_XMAC) { + XM_IN16(IoC, PhysPortIndex, XM_TX_CMD, &Register); + Register |= XM_TX_SAM_LINE; + XM_OUT16(IoC, PhysPortIndex, XM_TX_CMD, Register); + } + break; + + case SK_PNMI_HRX_UTILUNDER: + case SK_PNMI_HRX_UTILOVER: + if (MacType == SK_MAC_XMAC) { + XM_IN16(IoC, PhysPortIndex, XM_RX_CMD, &Register); + Register |= XM_RX_SAM_LINE; + XM_OUT16(IoC, PhysPortIndex, XM_RX_CMD, Register); + } + break; + + case SK_PNMI_HTX_OCTETHIGH: + case SK_PNMI_HTX_OCTETLOW: + case SK_PNMI_HTX_RESERVED: + case SK_PNMI_HRX_OCTETHIGH: + case SK_PNMI_HRX_OCTETLOW: + case SK_PNMI_HRX_IRLENGTH: + case SK_PNMI_HRX_RESERVED: + + /* + * the following counters aren't be handled (id > 63) + */ + case SK_PNMI_HTX_SYNC: + case SK_PNMI_HTX_SYNC_OCTET: + break; + + case SK_PNMI_HRX_LONGFRAMES: + if (MacType == SK_MAC_GMAC) { + pAC->Pnmi.Port[PhysPortIndex]. 
+ CounterHigh[CounterIndex] ++; + } + break; + + default: + pAC->Pnmi.Port[PhysPortIndex]. + CounterHigh[CounterIndex] ++; + } + } + break; + + case SK_PNMI_EVT_SEN_WAR_LOW: +#ifdef DEBUG + if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_LOW parameter wrong, SensorIndex=%d\n", + (unsigned int)Param.Para64)); + return (0); + } +#endif /* DEBUG */ + + /* + * Store a trap message in the trap buffer and generate + * an event for user space applications with the + * SK_DRIVER_SENDEVENT macro. + */ + QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_WAR_LOW, + (unsigned int)Param.Para64); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + break; + + case SK_PNMI_EVT_SEN_WAR_UPP: +#ifdef DEBUG + if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_UPP parameter wrong, SensorIndex=%d\n", + (unsigned int)Param.Para64)); + return (0); + } +#endif /* DEBUG */ + + /* + * Store a trap message in the trap buffer and generate + * an event for user space applications with the + * SK_DRIVER_SENDEVENT macro. + */ + QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_WAR_UPP, + (unsigned int)Param.Para64); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + break; + + case SK_PNMI_EVT_SEN_ERR_LOW: +#ifdef DEBUG + if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_ERR_LOW parameter wrong, SensorIndex=%d\n", + (unsigned int)Param.Para64)); + return (0); + } +#endif /* DEBUG */ + + /* + * Store a trap message in the trap buffer and generate + * an event for user space applications with the + * SK_DRIVER_SENDEVENT macro. + */ + QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_ERR_LOW, + (unsigned int)Param.Para64); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + break; + + case SK_PNMI_EVT_SEN_ERR_UPP: +#ifdef DEBUG + if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_ERR_UPP parameter wrong, SensorIndex=%d\n", + (unsigned int)Param.Para64)); + return (0); + } +#endif /* DEBUG */ + + /* + * Store a trap message in the trap buffer and generate + * an event for user space applications with the + * SK_DRIVER_SENDEVENT macro. + */ + QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_ERR_UPP, + (unsigned int)Param.Para64); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + break; + + case SK_PNMI_EVT_CHG_EST_TIMER: + /* + * Calculate port switch average on a per hour basis + * Time interval for check : 28125 ms + * Number of values for average : 8 + * + * Be careful in changing these values, on change check + * - typedef of SK_PNMI_ESTIMATE (Size of EstValue + * array one less than value number) + * - Timer initialization SkTimerStart() in SkPnmiInit + * - Delta value below must be multiplicated with + * power of 2 + * + */ + pEst = &pAC->Pnmi.RlmtChangeEstimate; + CounterIndex = pEst->EstValueIndex + 1; + if (CounterIndex == 7) { + + CounterIndex = 0; + } + pEst->EstValueIndex = CounterIndex; + + NewestValue = pAC->Pnmi.RlmtChangeCts; + OldestValue = pEst->EstValue[CounterIndex]; + pEst->EstValue[CounterIndex] = NewestValue; + + /* + * Calculate average. 
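For example, three switches counted in one sample window extrapolate to 3 * 16 = 48 switches per hour below.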
Delta stores the number of + * port switches per 28125 * 8 = 225000 ms + */ + if (NewestValue >= OldestValue) { + + Delta = NewestValue - OldestValue; + } + else { + /* Overflow situation */ + Delta = (SK_U64)(0 - OldestValue) + NewestValue; + } + + /* + * Extrapolate delta to port switches per hour. + * Estimate = Delta * (3600000 / 225000) + * = Delta * 16 + * = Delta << 4 + */ + pAC->Pnmi.RlmtChangeEstimate.Estimate = Delta << 4; + + /* + * Check if threshold is exceeded. If the threshold is + * permanently exceeded every 28125 ms an event will be + * generated to remind the user of this condition. + */ + if ((pAC->Pnmi.RlmtChangeThreshold != 0) && + (pAC->Pnmi.RlmtChangeEstimate.Estimate >= + pAC->Pnmi.RlmtChangeThreshold)) { + + QueueSimpleTrap(pAC, OID_SKGE_TRAP_RLMT_CHANGE_THRES); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + } + + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer, + 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER, + EventParam); + break; + + case SK_PNMI_EVT_CLEAR_COUNTER: + /* + * Param.Para32[0] contains the NetIndex (0 ..1). + * Param.Para32[1] is reserved, contains -1. + */ + NetIndex = (SK_U32)Param.Para32[0]; + +#ifdef DEBUG + if (NetIndex >= pAC->Rlmt.NumNets) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_CLEAR_COUNTER parameter wrong, NetIndex=%d\n", + NetIndex)); + + return (0); + } +#endif /* DEBUG */ + + /* + * Set all counters and timestamps to zero. + * The according NetIndex is required as a + * parameter of the event. + */ + ResetCounter(pAC, IoC, NetIndex); + break; + + case SK_PNMI_EVT_XMAC_RESET: + /* + * To grant continuous counter values store the current + * XMAC statistic values to the entries 1..n of the + * CounterOffset array. XMAC Errata #2 + */ +#ifdef DEBUG + if ((unsigned int)Param.Para64 >= SK_MAX_MACS) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_XMAC_RESET parameter wrong, PhysPortIndex=%d\n", + (unsigned int)Param.Para64)); + return (0); + } +#endif + PhysPortIndex = (unsigned int)Param.Para64; + + /* + * Update XMAC statistic to get fresh values + */ + Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1); + if (Ret != SK_PNMI_ERR_OK) { + + SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); + return (0); + } + /* + * Increment semaphore to indicate that an update was + * already done + */ + pAC->Pnmi.MacUpdatedFlag ++; + + for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX; + CounterIndex ++) { + + if (!StatAddr[CounterIndex][MacType].GetOffset) { + + continue; + } + + pAC->Pnmi.Port[PhysPortIndex].CounterOffset[CounterIndex] = + GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex); + + pAC->Pnmi.Port[PhysPortIndex].CounterHigh[CounterIndex] = 0; + } + + pAC->Pnmi.MacUpdatedFlag --; + break; + + case SK_PNMI_EVT_RLMT_PORT_UP: + PhysPortIndex = (unsigned int)Param.Para32[0]; +#ifdef DEBUG + if (PhysPortIndex >= SK_MAX_MACS) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_UP parameter" + " wrong, PhysPortIndex=%d\n", PhysPortIndex)); + + return (0); + } +#endif /* DEBUG */ + + /* + * Store a trap message in the trap buffer and generate an event for + * user space applications with the SK_DRIVER_SENDEVENT macro. 
+ */ + QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_UP, PhysPortIndex); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + + /* Bugfix for XMAC errata (#10620)*/ + if (MacType == SK_MAC_XMAC) { + /* Add incremental difference to offset (#10620)*/ + (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex, + XM_RXE_SHT_ERR, &Val32); + + Value = (((SK_U64)pAC->Pnmi.Port[PhysPortIndex]. + CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32); + pAC->Pnmi.Port[PhysPortIndex].CounterOffset[SK_PNMI_HRX_SHORTS] += + Value - pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark; + } + + /* Tell VctStatus() that a link was up meanwhile. */ + pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_LINK; + break; + + case SK_PNMI_EVT_RLMT_PORT_DOWN: + PhysPortIndex = (unsigned int)Param.Para32[0]; + +#ifdef DEBUG + if (PhysPortIndex >= SK_MAX_MACS) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_DOWN parameter" + " wrong, PhysPortIndex=%d\n", PhysPortIndex)); + + return (0); + } +#endif /* DEBUG */ + + /* + * Store a trap message in the trap buffer and generate an event for + * user space applications with the SK_DRIVER_SENDEVENT macro. + */ + QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_DOWN, PhysPortIndex); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + + /* Bugfix #10620 - get zero level for incremental difference */ + if (MacType == SK_MAC_XMAC) { + + (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex, + XM_RXE_SHT_ERR, &Val32); + + pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark = + (((SK_U64)pAC->Pnmi.Port[PhysPortIndex]. + CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32); + } + break; + + case SK_PNMI_EVT_RLMT_ACTIVE_DOWN: + PhysPortIndex = (unsigned int)Param.Para32[0]; + NetIndex = (SK_U32)Param.Para32[1]; + +#ifdef DEBUG + if (PhysPortIndex >= SK_MAX_MACS) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, PhysPort=%d\n", + PhysPortIndex)); + } + + if (NetIndex >= pAC->Rlmt.NumNets) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, NetIndex=%d\n", + NetIndex)); + } +#endif /* DEBUG */ + + /* + * For now, ignore event if NetIndex != 0. + */ + if (Param.Para32[1] != 0) { + + return (0); + } + + /* + * Nothing to do if port is already inactive + */ + if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + return (0); + } + + /* + * Update statistic counters to calculate new offset for the virtual + * port and increment semaphore to indicate that an update was already + * done. + */ + if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) != + SK_PNMI_ERR_OK) { + + SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); + return (0); + } + pAC->Pnmi.MacUpdatedFlag ++; + + /* + * Calculate new counter offset for virtual port to grant continous + * counting on port switches. The virtual port consists of all currently + * active ports. The port down event indicates that a port is removed + * from the virtual port. Therefore add the counter value of the removed + * port to the CounterOffset for the virtual port to grant the same + * counter value. 
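+ * (Illustration: if the departing port's counter currently reads
+ * 1000, the virtual offset grows by 1000, so the value reported for
+ * the virtual port does not drop after the switch.)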
+ */ + for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX; + CounterIndex ++) { + + if (!StatAddr[CounterIndex][MacType].GetOffset) { + + continue; + } + + Value = GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex); + + pAC->Pnmi.VirtualCounterOffset[CounterIndex] += Value; + } + + /* + * Set port to inactive + */ + pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_FALSE; + + pAC->Pnmi.MacUpdatedFlag --; + break; + + case SK_PNMI_EVT_RLMT_ACTIVE_UP: + PhysPortIndex = (unsigned int)Param.Para32[0]; + NetIndex = (SK_U32)Param.Para32[1]; + +#ifdef DEBUG + if (PhysPortIndex >= SK_MAX_MACS) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, PhysPort=%d\n", + PhysPortIndex)); + } + + if (NetIndex >= pAC->Rlmt.NumNets) { + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, NetIndex=%d\n", + NetIndex)); + } +#endif /* DEBUG */ + + /* + * For now, ignore event if NetIndex != 0. + */ + if (Param.Para32[1] != 0) { + + return (0); + } + + /* + * Nothing to do if port is already active + */ + if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + return (0); + } + + /* + * Statistic maintenance + */ + pAC->Pnmi.RlmtChangeCts ++; + pAC->Pnmi.RlmtChangeTime = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC)); + + /* + * Store a trap message in the trap buffer and generate an event for + * user space applications with the SK_DRIVER_SENDEVENT macro. + */ + QueueRlmtNewMacTrap(pAC, PhysPortIndex); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + + /* + * Update statistic counters to calculate new offset for the virtual + * port and increment semaphore to indicate that an update was + * already done. + */ + if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) != + SK_PNMI_ERR_OK) { + + SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); + return (0); + } + pAC->Pnmi.MacUpdatedFlag ++; + + /* + * Calculate new counter offset for virtual port to grant continous + * counting on port switches. A new port is added to the virtual port. + * Therefore substract the counter value of the new port from the + * CounterOffset for the virtual port to grant the same value. + */ + for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX; + CounterIndex ++) { + + if (!StatAddr[CounterIndex][MacType].GetOffset) { + + continue; + } + + Value = GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex); + + pAC->Pnmi.VirtualCounterOffset[CounterIndex] -= Value; + } + + /* Set port to active */ + pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_TRUE; + + pAC->Pnmi.MacUpdatedFlag --; + break; + + case SK_PNMI_EVT_RLMT_SEGMENTATION: + /* + * Para.Para32[0] contains the NetIndex. + */ + + /* + * Store a trap message in the trap buffer and generate an event for + * user space applications with the SK_DRIVER_SENDEVENT macro. + */ + QueueSimpleTrap(pAC, OID_SKGE_TRAP_RLMT_SEGMENTATION); + (void)SK_DRIVER_SENDEVENT(pAC, IoC); + break; + + case SK_PNMI_EVT_RLMT_SET_NETS: + /* + * Param.Para32[0] contains the number of Nets. + * Param.Para32[1] is reserved, contains -1. 
+ */ + /* + * Check number of nets + */ + MaxNetNumber = pAC->GIni.GIMacsFound; + if (((unsigned int)Param.Para32[0] < 1) + || ((unsigned int)Param.Para32[0] > MaxNetNumber)) { + return (SK_PNMI_ERR_UNKNOWN_NET); + } + + if ((unsigned int)Param.Para32[0] == 1) { /* single net mode */ + pAC->Pnmi.DualNetActiveFlag = SK_FALSE; + } + else { /* dual net mode */ + pAC->Pnmi.DualNetActiveFlag = SK_TRUE; + } + break; + + case SK_PNMI_EVT_VCT_RESET: + PhysPortIndex = Param.Para32[0]; + pPrt = &pAC->GIni.GP[PhysPortIndex]; + pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; + + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) { + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 2) { + /* + * VCT test is still running. + * Start VCT timer counter again. + */ + SK_MEMSET((char *) &Param, 0, sizeof(Param)); + Param.Para32[0] = PhysPortIndex; + Param.Para32[1] = -1; + SkTimerStart(pAC, IoC, + &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer, + 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Param); + break; + } + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] |= + (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE); + + /* Copy results for later use to PNMI struct. */ + for (i = 0; i < 4; i++) { + if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) { + if ((pPrt->PMdiPairLen[i] > 35) && + (pPrt->PMdiPairLen[i] < 0xff)) { + pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH; + } + } + if ((pPrt->PMdiPairLen[i] > 35) && + (pPrt->PMdiPairLen[i] != 0xff)) { + CableLength = 1000 * + (((175 * pPrt->PMdiPairLen[i]) / 210) - 28); + } + else { + CableLength = 0; + } + pVctBackupData->PMdiPairLen[i] = CableLength; + pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i]; + } + + Param.Para32[0] = PhysPortIndex; + Param.Para32[1] = -1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Param); + SkEventDispatcher(pAC, IoC); + } + + break; + + default: + break; + } + + SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); + return (0); +} + + +/****************************************************************************** + * + * Private functions + * + */ + +/***************************************************************************** + * + * PnmiVar - Gets, presets, and sets single OIDs + * + * Description: + * Looks up the requested OID, calls the corresponding handler + * function, and passes the parameters with the get, preset, or + * set command. The function is called by SkGePnmiGetVar, + * SkGePnmiPreSetVar, or SkGePnmiSetVar. + * + * Returns: + * SK_PNMI_ERR_XXX. For details have a look at the description of the + * calling functions. 
+ * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist + */ +PNMI_STATIC int PnmiVar( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* Total length of pBuf management data */ +SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int TableIndex; + int Ret; + + + if ((TableIndex = LookupId(Id)) == (unsigned int)(-1)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_OID); + } + + /* Check NetIndex */ + if (NetIndex >= pAC->Rlmt.NumNets) { + return (SK_PNMI_ERR_UNKNOWN_NET); + } + + SK_PNMI_CHECKFLAGS("PnmiVar: On call"); + + Ret = IdTable[TableIndex].Func(pAC, IoC, Action, Id, pBuf, pLen, + Instance, TableIndex, NetIndex); + + SK_PNMI_CHECKFLAGS("PnmiVar: On return"); + + return (Ret); +} + +/***************************************************************************** + * + * PnmiStruct - Presets and Sets data in structure SK_PNMI_STRUCT_DATA + * + * Description: + * The return value of the function will also be stored in + * SK_PNMI_STRUCT_DATA if the passed buffer has the minimum size of + * SK_PNMI_MIN_STRUCT_SIZE. The sub-function runs through the IdTable, + * checks which OIDs are able to set, and calls the handler function of + * the OID to perform the set. The return value of the function will + * also be stored in SK_PNMI_STRUCT_DATA if the passed buffer has the + * minimum size of SK_PNMI_MIN_STRUCT_SIZE. The function is called + * by SkGePnmiPreSetStruct and SkGePnmiSetStruct. + * + * Returns: + * SK_PNMI_ERR_XXX. The codes are described in the calling functions. + * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist + */ +PNMI_STATIC int PnmiStruct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* PRESET/SET action to be performed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* Length of pBuf management data buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + unsigned int TableIndex; + unsigned int DstOffset; + unsigned int Len; + unsigned int InstanceNo; + unsigned int InstanceCnt; + SK_U32 Instance; + SK_U32 Id; + + + /* Check if the passed buffer has the right size */ + if (*pLen < SK_PNMI_STRUCT_SIZE) { + + /* Check if we can return the error within the buffer */ + if (*pLen >= SK_PNMI_MIN_STRUCT_SIZE) { + + SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_TOO_SHORT, + (SK_U32)(-1)); + } + + *pLen = SK_PNMI_STRUCT_SIZE; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* Check NetIndex */ + if (NetIndex >= pAC->Rlmt.NumNets) { + return (SK_PNMI_ERR_UNKNOWN_NET); + } + + SK_PNMI_CHECKFLAGS("PnmiStruct: On call"); + + /* + * Update the values of RLMT and SIRQ and increment semaphores to + * indicate that an update was already done. 
+ */ + if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) { + + SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1)); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (Ret); + } + + if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) { + + SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1)); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (Ret); + } + + pAC->Pnmi.RlmtUpdatedFlag ++; + pAC->Pnmi.SirqUpdatedFlag ++; + + /* Preset/Set values */ + for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) { + + if ((IdTable[TableIndex].Access != SK_PNMI_RW) && + (IdTable[TableIndex].Access != SK_PNMI_WO)) { + + continue; + } + + InstanceNo = IdTable[TableIndex].InstanceNo; + Id = IdTable[TableIndex].Id; + + for (InstanceCnt = 1; InstanceCnt <= InstanceNo; + InstanceCnt ++) { + + DstOffset = IdTable[TableIndex].Offset + + (InstanceCnt - 1) * + IdTable[TableIndex].StructSize; + + /* + * Because VPD multiple instance variables are + * not setable we do not need to evaluate VPD + * instances. Have a look to VPD instance + * calculation in SkPnmiGetStruct(). + */ + Instance = (SK_U32)InstanceCnt; + + /* + * Evaluate needed buffer length + */ + Len = 0; + Ret = IdTable[TableIndex].Func(pAC, IoC, + SK_PNMI_GET, IdTable[TableIndex].Id, + NULL, &Len, Instance, TableIndex, NetIndex); + + if (Ret == SK_PNMI_ERR_UNKNOWN_INST) { + + break; + } + if (Ret != SK_PNMI_ERR_TOO_SHORT) { + + pAC->Pnmi.RlmtUpdatedFlag --; + pAC->Pnmi.SirqUpdatedFlag --; + + SK_PNMI_CHECKFLAGS("PnmiStruct: On return"); + SK_PNMI_SET_STAT(pBuf, + SK_PNMI_ERR_GENERAL, DstOffset); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (SK_PNMI_ERR_GENERAL); + } + if (Id == OID_SKGE_VPD_ACTION) { + + switch (*(pBuf + DstOffset)) { + + case SK_PNMI_VPD_CREATE: + Len = 3 + *(pBuf + DstOffset + 3); + break; + + case SK_PNMI_VPD_DELETE: + Len = 3; + break; + + default: + Len = 1; + break; + } + } + + /* Call the OID handler function */ + Ret = IdTable[TableIndex].Func(pAC, IoC, Action, + IdTable[TableIndex].Id, pBuf + DstOffset, + &Len, Instance, TableIndex, NetIndex); + + if (Ret != SK_PNMI_ERR_OK) { + + pAC->Pnmi.RlmtUpdatedFlag --; + pAC->Pnmi.SirqUpdatedFlag --; + + SK_PNMI_CHECKFLAGS("PnmiStruct: On return"); + SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_BAD_VALUE, + DstOffset); + *pLen = SK_PNMI_MIN_STRUCT_SIZE; + return (SK_PNMI_ERR_BAD_VALUE); + } + } + } + + pAC->Pnmi.RlmtUpdatedFlag --; + pAC->Pnmi.SirqUpdatedFlag --; + + SK_PNMI_CHECKFLAGS("PnmiStruct: On return"); + SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_OK, (SK_U32)(-1)); + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * LookupId - Lookup an OID in the IdTable + * + * Description: + * Scans the IdTable to find the table entry of an OID. + * + * Returns: + * The table index or -1 if not found. + */ +PNMI_STATIC int LookupId( +SK_U32 Id) /* Object identifier to be searched */ +{ + int i; + + for (i = 0; i < ID_TABLE_SIZE; i++) { + + if (IdTable[i].Id == Id) { + + return i; + } + } + + return (-1); +} + +/***************************************************************************** + * + * OidStruct - Handler of OID_SKGE_ALL_DATA + * + * Description: + * This OID performs a Get/Preset/SetStruct call and returns all data + * in a SK_PNMI_STRUCT_DATA structure. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. 
a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int OidStruct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + if (Id != OID_SKGE_ALL_DATA) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR003, + SK_PNMI_ERR003MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Check instance. We only handle single instance variables + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + switch (Action) { + + case SK_PNMI_GET: + return (SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex)); + + case SK_PNMI_PRESET: + return (SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex)); + + case SK_PNMI_SET: + return (SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex)); + } + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR004, SK_PNMI_ERR004MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); +} + +/***************************************************************************** + * + * Perform - OID handler of OID_SKGE_ACTION + * + * Description: + * None. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int Perform( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + SK_U32 ActionOp; + + + /* + * Check instance. We only handle single instance variables + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* Check if a get should be performed */ + if (Action == SK_PNMI_GET) { + + /* A get is easy. 
We always return the same value */ + ActionOp = (SK_U32)SK_PNMI_ACT_IDLE; + SK_PNMI_STORE_U32(pBuf, ActionOp); + *pLen = sizeof(SK_U32); + + return (SK_PNMI_ERR_OK); + } + + /* Continue with PRESET/SET action */ + if (*pLen > sizeof(SK_U32)) { + + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* Check if the command is a known one */ + SK_PNMI_READ_U32(pBuf, ActionOp); + if (*pLen > sizeof(SK_U32) || + (ActionOp != SK_PNMI_ACT_IDLE && + ActionOp != SK_PNMI_ACT_RESET && + ActionOp != SK_PNMI_ACT_SELFTEST && + ActionOp != SK_PNMI_ACT_RESETCNT)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* A preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + switch (ActionOp) { + + case SK_PNMI_ACT_IDLE: + /* Nothing to do */ + break; + + case SK_PNMI_ACT_RESET: + /* + * Perform a driver reset or something that comes near + * to this. + */ + Ret = SK_DRIVER_RESET(pAC, IoC); + if (Ret != 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR005, + SK_PNMI_ERR005MSG); + + return (SK_PNMI_ERR_GENERAL); + } + break; + + case SK_PNMI_ACT_SELFTEST: + /* + * Perform a driver selftest or something similar to this. + * Currently this feature is not used and will probably + * implemented in another way. + */ + Ret = SK_DRIVER_SELFTEST(pAC, IoC); + pAC->Pnmi.TestResult = Ret; + break; + + case SK_PNMI_ACT_RESETCNT: + /* Set all counters and timestamps to zero */ + ResetCounter(pAC, IoC, NetIndex); + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR006, + SK_PNMI_ERR006MSG); + + return (SK_PNMI_ERR_GENERAL); + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * Mac8023Stat - OID handler of OID_GEN_XXX and OID_802_3_XXX + * + * Description: + * Retrieves the statistic values of the virtual port (logical + * index 0). Only special OIDs of NDIS are handled which consist + * of a 32 bit instead of a 64 bit value. The OIDs are public + * because perhaps some other platform can use them too. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int Mac8023Stat( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. 
On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + SK_U64 StatVal; + SK_U32 StatVal32; + SK_BOOL Is64BitReq = SK_FALSE; + + /* + * Only the active Mac is returned + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* + * Check action type + */ + if (Action != SK_PNMI_GET) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* Check length */ + switch (Id) { + + case OID_802_3_PERMANENT_ADDRESS: + case OID_802_3_CURRENT_ADDRESS: + if (*pLen < sizeof(SK_MAC_ADDR)) { + + *pLen = sizeof(SK_MAC_ADDR); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: +#ifndef SK_NDIS_64BIT_CTR + if (*pLen < sizeof(SK_U32)) { + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + +#else /* SK_NDIS_64BIT_CTR */ + + /* for compatibility, at least 32bit are required for OID */ + if (*pLen < sizeof(SK_U32)) { + /* + * but indicate handling for 64bit values, + * if insufficient space is provided + */ + *pLen = sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + + Is64BitReq = (*pLen < sizeof(SK_U64)) ? SK_FALSE : SK_TRUE; +#endif /* SK_NDIS_64BIT_CTR */ + break; + } + + /* + * Update all statistics, because we retrieve virtual MAC, which + * consists of multiple physical statistics and increment semaphore + * to indicate that an update was already done. + */ + Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1); + if ( Ret != SK_PNMI_ERR_OK) { + + *pLen = 0; + return (Ret); + } + pAC->Pnmi.MacUpdatedFlag ++; + + /* + * Get value (MAC Index 0 identifies the virtual MAC) + */ + switch (Id) { + + case OID_802_3_PERMANENT_ADDRESS: + CopyMac(pBuf, &pAC->Addr.Net[NetIndex].PermanentMacAddress); + *pLen = sizeof(SK_MAC_ADDR); + break; + + case OID_802_3_CURRENT_ADDRESS: + CopyMac(pBuf, &pAC->Addr.Net[NetIndex].CurrentMacAddress); + *pLen = sizeof(SK_MAC_ADDR); + break; + + default: + StatVal = GetStatVal(pAC, IoC, 0, IdTable[TableIndex].Param, NetIndex); + + /* by default 32bit values are evaluated */ + if (!Is64BitReq) { + StatVal32 = (SK_U32)StatVal; + SK_PNMI_STORE_U32(pBuf, StatVal32); + *pLen = sizeof(SK_U32); + } + else { + SK_PNMI_STORE_U64(pBuf, StatVal); + *pLen = sizeof(SK_U64); + } + break; + } + + pAC->Pnmi.MacUpdatedFlag --; + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * MacPrivateStat - OID handler function of OID_SKGE_STAT_XXX + * + * Description: + * Retrieves the MAC statistic data. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int MacPrivateStat( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. 
On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int LogPortMax; + unsigned int LogPortIndex; + unsigned int PhysPortMax; + unsigned int Limit; + unsigned int Offset; + int MacType; + int Ret; + SK_U64 StatVal; + + + + /* Calculate instance if wished. MAC index 0 is the virtual MAC */ + PhysPortMax = pAC->GIni.GIMacsFound; + LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); + + MacType = pAC->GIni.GIMacType; + + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ + LogPortMax--; + } + + if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */ + /* Check instance range */ + if ((Instance < 1) || (Instance > LogPortMax)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance); + Limit = LogPortIndex + 1; + } + + else { /* Instance == (SK_U32)(-1), get all Instances of that OID */ + + LogPortIndex = 0; + Limit = LogPortMax; + } + + /* Check action */ + if (Action != SK_PNMI_GET) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* Check length */ + if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U64)) { + + *pLen = (Limit - LogPortIndex) * sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * Update MAC statistic and increment semaphore to indicate that + * an update was already done. + */ + Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1); + if (Ret != SK_PNMI_ERR_OK) { + + *pLen = 0; + return (Ret); + } + pAC->Pnmi.MacUpdatedFlag ++; + + /* Get value */ + Offset = 0; + for (; LogPortIndex < Limit; LogPortIndex ++) { + + switch (Id) { + +/* XXX not yet implemented due to XMAC problems + case OID_SKGE_STAT_TX_UTIL: + return (SK_PNMI_ERR_GENERAL); +*/ +/* XXX not yet implemented due to XMAC problems + case OID_SKGE_STAT_RX_UTIL: + return (SK_PNMI_ERR_GENERAL); +*/ + case OID_SKGE_STAT_RX: + if (MacType == SK_MAC_GMAC) { + StatVal = + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_BROADCAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_MULTICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_UNICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_UNDERSIZE, NetIndex); + } + else { + StatVal = GetStatVal(pAC, IoC, LogPortIndex, + IdTable[TableIndex].Param, NetIndex); + } + break; + + case OID_SKGE_STAT_TX: + if (MacType == SK_MAC_GMAC) { + StatVal = + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_BROADCAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_MULTICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_UNICAST, NetIndex); + } + else { + StatVal = GetStatVal(pAC, IoC, LogPortIndex, + IdTable[TableIndex].Param, NetIndex); + } + break; + + default: + StatVal = GetStatVal(pAC, IoC, LogPortIndex, + IdTable[TableIndex].Param, NetIndex); + } + SK_PNMI_STORE_U64(pBuf + Offset, StatVal); + + Offset += sizeof(SK_U64); + } + *pLen = Offset; + + pAC->Pnmi.MacUpdatedFlag --; + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * Addr - OID handler function of OID_SKGE_PHYS_CUR_ADDR and _FAC_ADDR + * + * Description: + * Get/Presets/Sets the current and factory MAC address. The MAC + * address of the virtual port, which is reported to the OS, may + * not be changed, but the physical ones. A set to the virtual port + * will be ignored. 
No error should be reported because otherwise + * a multiple instance set (-1) would always fail. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int Addr( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + unsigned int LogPortMax; + unsigned int PhysPortMax; + unsigned int LogPortIndex; + unsigned int PhysPortIndex; + unsigned int Limit; + unsigned int Offset = 0; + + /* + * Calculate instance if wished. MAC index 0 is the virtual + * MAC. + */ + PhysPortMax = pAC->GIni.GIMacsFound; + LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); + + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ + LogPortMax--; + } + + if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */ + /* Check instance range */ + if ((Instance < 1) || (Instance > LogPortMax)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance); + Limit = LogPortIndex + 1; + } + else { /* Instance == (SK_U32)(-1), get all Instances of that OID */ + + LogPortIndex = 0; + Limit = LogPortMax; + } + + /* + * Perform Action + */ + if (Action == SK_PNMI_GET) { + + /* Check length */ + if (*pLen < (Limit - LogPortIndex) * 6) { + + *pLen = (Limit - LogPortIndex) * 6; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * Get value + */ + for (; LogPortIndex < Limit; LogPortIndex ++) { + + switch (Id) { + + case OID_SKGE_PHYS_CUR_ADDR: + if (LogPortIndex == 0) { + CopyMac(pBuf + Offset, &pAC->Addr.Net[NetIndex].CurrentMacAddress); + } + else { + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); + + CopyMac(pBuf + Offset, + &pAC->Addr.Port[PhysPortIndex].CurrentMacAddress); + } + Offset += 6; + break; + + case OID_SKGE_PHYS_FAC_ADDR: + if (LogPortIndex == 0) { + CopyMac(pBuf + Offset, + &pAC->Addr.Net[NetIndex].PermanentMacAddress); + } + else { + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + CopyMac(pBuf + Offset, + &pAC->Addr.Port[PhysPortIndex].PermanentMacAddress); + } + Offset += 6; + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR008, + SK_PNMI_ERR008MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + + *pLen = Offset; + } + else { + /* + * The logical MAC address may not be changed only + * the physical ones + */ + if (Id == OID_SKGE_PHYS_FAC_ADDR) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* + * Only the current address may be changed + */ + if (Id != OID_SKGE_PHYS_CUR_ADDR) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR009, + SK_PNMI_ERR009MSG); + + *pLen = 0; + return 
(SK_PNMI_ERR_GENERAL); + } + + /* Check length */ + if (*pLen < (Limit - LogPortIndex) * 6) { + + *pLen = (Limit - LogPortIndex) * 6; + return (SK_PNMI_ERR_TOO_SHORT); + } + if (*pLen > (Limit - LogPortIndex) * 6) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* + * Check Action + */ + if (Action == SK_PNMI_PRESET) { + + *pLen = 0; + return (SK_PNMI_ERR_OK); + } + + /* + * Set OID_SKGE_MAC_CUR_ADDR + */ + for (; LogPortIndex < Limit; LogPortIndex ++, Offset += 6) { + + /* + * A set to virtual port and set of broadcast + * address will be ignored + */ + if (LogPortIndex == 0 || SK_MEMCMP(pBuf + Offset, + "\xff\xff\xff\xff\xff\xff", 6) == 0) { + + continue; + } + + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, + LogPortIndex); + + Ret = SkAddrOverride(pAC, IoC, PhysPortIndex, + (SK_MAC_ADDR *)(pBuf + Offset), + (LogPortIndex == 0 ? SK_ADDR_VIRTUAL_ADDRESS : + SK_ADDR_PHYSICAL_ADDRESS)); + if (Ret != SK_ADDR_OVERRIDE_SUCCESS) { + + return (SK_PNMI_ERR_GENERAL); + } + } + *pLen = Offset; + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * CsumStat - OID handler function of OID_SKGE_CHKSM_XXX + * + * Description: + * Retrieves the statistic values of the CSUM module. The CSUM data + * structure must be available in the SK_AC even if the CSUM module + * is not included, because PNMI reads the statistic data from the + * CSUM part of SK_AC directly. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int CsumStat( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. 
On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int Index; + unsigned int Limit; + unsigned int Offset = 0; + SK_U64 StatVal; + + + /* + * Calculate instance if wished + */ + if (Instance != (SK_U32)(-1)) { + + if ((Instance < 1) || (Instance > SKCS_NUM_PROTOCOLS)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + Index = (unsigned int)Instance - 1; + Limit = Index + 1; + } + else { + Index = 0; + Limit = SKCS_NUM_PROTOCOLS; + } + + /* + * Check action + */ + if (Action != SK_PNMI_GET) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* Check length */ + if (*pLen < (Limit - Index) * sizeof(SK_U64)) { + + *pLen = (Limit - Index) * sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * Get value + */ + for (; Index < Limit; Index ++) { + + switch (Id) { + + case OID_SKGE_CHKSM_RX_OK_CTS: + StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxOkCts; + break; + + case OID_SKGE_CHKSM_RX_UNABLE_CTS: + StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxUnableCts; + break; + + case OID_SKGE_CHKSM_RX_ERR_CTS: + StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxErrCts; + break; + + case OID_SKGE_CHKSM_TX_OK_CTS: + StatVal = pAC->Csum.ProtoStats[NetIndex][Index].TxOkCts; + break; + + case OID_SKGE_CHKSM_TX_UNABLE_CTS: + StatVal = pAC->Csum.ProtoStats[NetIndex][Index].TxUnableCts; + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR010, + SK_PNMI_ERR010MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + SK_PNMI_STORE_U64(pBuf + Offset, StatVal); + Offset += sizeof(SK_U64); + } + + /* + * Store used buffer space + */ + *pLen = Offset; + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * SensorStat - OID handler function of OID_SKGE_SENSOR_XXX + * + * Description: + * Retrieves the statistic values of the I2C module, which handles + * the temperature and voltage sensors. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int SensorStat( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. 
On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int i; + unsigned int Index; + unsigned int Limit; + unsigned int Offset; + unsigned int Len; + SK_U32 Val32; + SK_U64 Val64; + + + /* + * Calculate instance if wished + */ + if ((Instance != (SK_U32)(-1))) { + + if ((Instance < 1) || (Instance > (SK_U32)pAC->I2c.MaxSens)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + Index = (unsigned int)Instance -1; + Limit = (unsigned int)Instance; + } + else { + Index = 0; + Limit = (unsigned int) pAC->I2c.MaxSens; + } + + /* + * Check action + */ + if (Action != SK_PNMI_GET) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* Check length */ + switch (Id) { + + case OID_SKGE_SENSOR_VALUE: + case OID_SKGE_SENSOR_WAR_THRES_LOW: + case OID_SKGE_SENSOR_WAR_THRES_UPP: + case OID_SKGE_SENSOR_ERR_THRES_LOW: + case OID_SKGE_SENSOR_ERR_THRES_UPP: + if (*pLen < (Limit - Index) * sizeof(SK_U32)) { + + *pLen = (Limit - Index) * sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_SENSOR_DESCR: + for (Offset = 0, i = Index; i < Limit; i ++) { + + Len = (unsigned int) + SK_STRLEN(pAC->I2c.SenTable[i].SenDesc) + 1; + if (Len >= SK_PNMI_STRINGLEN2) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR011, + SK_PNMI_ERR011MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + Offset += Len; + } + if (*pLen < Offset) { + + *pLen = Offset; + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_SENSOR_INDEX: + case OID_SKGE_SENSOR_TYPE: + case OID_SKGE_SENSOR_STATUS: + if (*pLen < Limit - Index) { + + *pLen = Limit - Index; + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_SENSOR_WAR_CTS: + case OID_SKGE_SENSOR_WAR_TIME: + case OID_SKGE_SENSOR_ERR_CTS: + case OID_SKGE_SENSOR_ERR_TIME: + if (*pLen < (Limit - Index) * sizeof(SK_U64)) { + + *pLen = (Limit - Index) * sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR012, + SK_PNMI_ERR012MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + + } + + /* + * Get value + */ + for (Offset = 0; Index < Limit; Index ++) { + + switch (Id) { + + case OID_SKGE_SENSOR_INDEX: + *(pBuf + Offset) = (char)Index; + Offset += sizeof(char); + break; + + case OID_SKGE_SENSOR_DESCR: + Len = SK_STRLEN(pAC->I2c.SenTable[Index].SenDesc); + SK_MEMCPY(pBuf + Offset + 1, + pAC->I2c.SenTable[Index].SenDesc, Len); + *(pBuf + Offset) = (char)Len; + Offset += Len + 1; + break; + + case OID_SKGE_SENSOR_TYPE: + *(pBuf + Offset) = + (char)pAC->I2c.SenTable[Index].SenType; + Offset += sizeof(char); + break; + + case OID_SKGE_SENSOR_VALUE: + Val32 = (SK_U32)pAC->I2c.SenTable[Index].SenValue; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_SENSOR_WAR_THRES_LOW: + Val32 = (SK_U32)pAC->I2c.SenTable[Index]. + SenThreWarnLow; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_SENSOR_WAR_THRES_UPP: + Val32 = (SK_U32)pAC->I2c.SenTable[Index]. + SenThreWarnHigh; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_SENSOR_ERR_THRES_LOW: + Val32 = (SK_U32)pAC->I2c.SenTable[Index]. 
+ SenThreErrLow; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_SENSOR_ERR_THRES_UPP: + Val32 = pAC->I2c.SenTable[Index].SenThreErrHigh; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_SENSOR_STATUS: + *(pBuf + Offset) = + (char)pAC->I2c.SenTable[Index].SenErrFlag; + Offset += sizeof(char); + break; + + case OID_SKGE_SENSOR_WAR_CTS: + Val64 = pAC->I2c.SenTable[Index].SenWarnCts; + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + case OID_SKGE_SENSOR_ERR_CTS: + Val64 = pAC->I2c.SenTable[Index].SenErrCts; + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + case OID_SKGE_SENSOR_WAR_TIME: + Val64 = SK_PNMI_HUNDREDS_SEC(pAC->I2c.SenTable[Index]. + SenBegWarnTS); + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + case OID_SKGE_SENSOR_ERR_TIME: + Val64 = SK_PNMI_HUNDREDS_SEC(pAC->I2c.SenTable[Index]. + SenBegErrTS); + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + default: + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("SensorStat: Unknown OID should be handled before")); + + return (SK_PNMI_ERR_GENERAL); + } + } + + /* + * Store used buffer space + */ + *pLen = Offset; + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * Vpd - OID handler function of OID_SKGE_VPD_XXX + * + * Description: + * Get/preset/set of VPD data. As instance the name of a VPD key + * can be passed. The Instance parameter is a SK_U32 and can be + * used as a string buffer for the VPD key, because their maximum + * length is 4 byte. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int Vpd( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_VPD_STATUS *pVpdStatus; + unsigned int BufLen; + char Buf[256]; + char KeyArr[SK_PNMI_VPD_ENTRIES][SK_PNMI_VPD_KEY_SIZE]; + char KeyStr[SK_PNMI_VPD_KEY_SIZE]; + unsigned int KeyNo; + unsigned int Offset; + unsigned int Index; + unsigned int FirstIndex; + unsigned int LastIndex; + unsigned int Len; + int Ret; + SK_U32 Val32; + + /* + * Get array of all currently stored VPD keys + */ + Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &KeyNo); + if (Ret != SK_PNMI_ERR_OK) { + *pLen = 0; + return (Ret); + } + + /* + * If instance is not -1, try to find the requested VPD key for + * the multiple instance variables. 
The other OIDs as for example + * OID VPD_ACTION are single instance variables and must be + * handled separatly. + */ + FirstIndex = 0; + LastIndex = KeyNo; + + if ((Instance != (SK_U32)(-1))) { + + if (Id == OID_SKGE_VPD_KEY || Id == OID_SKGE_VPD_VALUE || + Id == OID_SKGE_VPD_ACCESS) { + + SK_STRNCPY(KeyStr, (char *)&Instance, 4); + KeyStr[4] = 0; + + for (Index = 0; Index < KeyNo; Index ++) { + + if (SK_STRCMP(KeyStr, KeyArr[Index]) == 0) { + FirstIndex = Index; + LastIndex = Index+1; + break; + } + } + if (Index == KeyNo) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + } + else if (Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + } + + /* + * Get value, if a query should be performed + */ + if (Action == SK_PNMI_GET) { + + switch (Id) { + + case OID_SKGE_VPD_FREE_BYTES: + /* Check length of buffer */ + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + /* Get number of free bytes */ + pVpdStatus = VpdStat(pAC, IoC); + if (pVpdStatus == NULL) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR017, + SK_PNMI_ERR017MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + if ((pVpdStatus->vpd_status & VPD_VALID) == 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR018, + SK_PNMI_ERR018MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + Val32 = (SK_U32)pVpdStatus->vpd_free_rw; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_VPD_ENTRIES_LIST: + /* Check length */ + for (Len = 0, Index = 0; Index < KeyNo; Index ++) { + + Len += SK_STRLEN(KeyArr[Index]) + 1; + } + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* Get value */ + *(pBuf) = (char)Len - 1; + for (Offset = 1, Index = 0; Index < KeyNo; Index ++) { + + Len = SK_STRLEN(KeyArr[Index]); + SK_MEMCPY(pBuf + Offset, KeyArr[Index], Len); + + Offset += Len; + + if (Index < KeyNo - 1) { + + *(pBuf + Offset) = ' '; + Offset ++; + } + } + *pLen = Offset; + break; + + case OID_SKGE_VPD_ENTRIES_NUMBER: + /* Check length */ + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + + Val32 = (SK_U32)KeyNo; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_VPD_KEY: + /* Check buffer length, if it is large enough */ + for (Len = 0, Index = FirstIndex; + Index < LastIndex; Index ++) { + + Len += SK_STRLEN(KeyArr[Index]) + 1; + } + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * Get the key to an intermediate buffer, because + * we have to prepend a length byte. + */ + for (Offset = 0, Index = FirstIndex; + Index < LastIndex; Index ++) { + + Len = SK_STRLEN(KeyArr[Index]); + + *(pBuf + Offset) = (char)Len; + SK_MEMCPY(pBuf + Offset + 1, KeyArr[Index], + Len); + Offset += Len + 1; + } + *pLen = Offset; + break; + + case OID_SKGE_VPD_VALUE: + /* Check the buffer length if it is large enough */ + for (Offset = 0, Index = FirstIndex; + Index < LastIndex; Index ++) { + + BufLen = 256; + if (VpdRead(pAC, IoC, KeyArr[Index], Buf, + (int *)&BufLen) > 0 || + BufLen >= SK_PNMI_VPD_DATALEN) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR021, + SK_PNMI_ERR021MSG); + + return (SK_PNMI_ERR_GENERAL); + } + Offset += BufLen + 1; + } + if (*pLen < Offset) { + + *pLen = Offset; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * Get the value to an intermediate buffer, because + * we have to prepend a length byte. 
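 * The records returned for OID_SKGE_VPD_VALUE (and OID_SKGE_VPD_KEY
 * above) are therefore length-prefixed rather than NUL-terminated:
 * one byte holding the length, followed by the raw bytes, with the
 * entries simply concatenated and the total size written back to
 * *pLen. A failing VpdRead() or a value of SK_PNMI_VPD_DATALEN or
 * more is treated as a hard failure and mapped to SK_PNMI_ERR_GENERAL.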
+ */ + for (Offset = 0, Index = FirstIndex; + Index < LastIndex; Index ++) { + + BufLen = 256; + if (VpdRead(pAC, IoC, KeyArr[Index], Buf, + (int *)&BufLen) > 0 || + BufLen >= SK_PNMI_VPD_DATALEN) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR022, + SK_PNMI_ERR022MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + *(pBuf + Offset) = (char)BufLen; + SK_MEMCPY(pBuf + Offset + 1, Buf, BufLen); + Offset += BufLen + 1; + } + *pLen = Offset; + break; + + case OID_SKGE_VPD_ACCESS: + if (*pLen < LastIndex - FirstIndex) { + + *pLen = LastIndex - FirstIndex; + return (SK_PNMI_ERR_TOO_SHORT); + } + + for (Offset = 0, Index = FirstIndex; + Index < LastIndex; Index ++) { + + if (VpdMayWrite(KeyArr[Index])) { + + *(pBuf + Offset) = SK_PNMI_VPD_RW; + } + else { + *(pBuf + Offset) = SK_PNMI_VPD_RO; + } + Offset ++; + } + *pLen = Offset; + break; + + case OID_SKGE_VPD_ACTION: + Offset = LastIndex - FirstIndex; + if (*pLen < Offset) { + + *pLen = Offset; + return (SK_PNMI_ERR_TOO_SHORT); + } + SK_MEMSET(pBuf, 0, Offset); + *pLen = Offset; + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR023, + SK_PNMI_ERR023MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + else { + /* The only OID which can be set is VPD_ACTION */ + if (Id != OID_SKGE_VPD_ACTION) { + + if (Id == OID_SKGE_VPD_FREE_BYTES || + Id == OID_SKGE_VPD_ENTRIES_LIST || + Id == OID_SKGE_VPD_ENTRIES_NUMBER || + Id == OID_SKGE_VPD_KEY || + Id == OID_SKGE_VPD_VALUE || + Id == OID_SKGE_VPD_ACCESS) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR024, + SK_PNMI_ERR024MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * From this point we handle VPD_ACTION. Check the buffer + * length. It should at least have the size of one byte. + */ + if (*pLen < 1) { + + *pLen = 1; + return (SK_PNMI_ERR_TOO_SHORT); + } + + /* + * The first byte contains the VPD action type we should + * perform. + */ + switch (*pBuf) { + + case SK_PNMI_VPD_IGNORE: + /* Nothing to do */ + break; + + case SK_PNMI_VPD_CREATE: + /* + * We have to create a new VPD entry or we modify + * an existing one. Check first the buffer length. + */ + if (*pLen < 4) { + + *pLen = 4; + return (SK_PNMI_ERR_TOO_SHORT); + } + KeyStr[0] = pBuf[1]; + KeyStr[1] = pBuf[2]; + KeyStr[2] = 0; + + /* + * Is the entry writable or does it belong to the + * read-only area? + */ + if (!VpdMayWrite(KeyStr)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + Offset = (int)pBuf[3] & 0xFF; + + SK_MEMCPY(Buf, pBuf + 4, Offset); + Buf[Offset] = 0; + + /* A preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + /* Write the new entry or modify an existing one */ + Ret = VpdWrite(pAC, IoC, KeyStr, Buf); + if (Ret == SK_PNMI_VPD_NOWRITE ) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + else if (Ret != SK_PNMI_VPD_OK) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR025, + SK_PNMI_ERR025MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Perform an update of the VPD data. This is + * not mandantory, but just to be sure. 
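 * If either the VpdWrite() above or this update fails, the error is
 * reported as SK_PNMI_ERR_GENERAL with *pLen reset to 0, the
 * convention used for all hard failures in this handler. An
 * equivalent VpdDelete()/VpdUpdate() sequence is used for
 * SK_PNMI_VPD_DELETE below.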
+ */ + Ret = VpdUpdate(pAC, IoC); + if (Ret != SK_PNMI_VPD_OK) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR026, + SK_PNMI_ERR026MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + break; + + case SK_PNMI_VPD_DELETE: + /* Check if the buffer size is plausible */ + if (*pLen < 3) { + + *pLen = 3; + return (SK_PNMI_ERR_TOO_SHORT); + } + if (*pLen > 3) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + KeyStr[0] = pBuf[1]; + KeyStr[1] = pBuf[2]; + KeyStr[2] = 0; + + /* Find the passed key in the array */ + for (Index = 0; Index < KeyNo; Index ++) { + + if (SK_STRCMP(KeyStr, KeyArr[Index]) == 0) { + + break; + } + } + /* + * If we cannot find the key it is wrong, so we + * return an appropriate error value. + */ + if (Index == KeyNo) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + /* Ok, you wanted it and you will get it */ + Ret = VpdDelete(pAC, IoC, KeyStr); + if (Ret != SK_PNMI_VPD_OK) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR027, + SK_PNMI_ERR027MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Perform an update of the VPD data. This is + * not mandantory, but just to be sure. + */ + Ret = VpdUpdate(pAC, IoC); + if (Ret != SK_PNMI_VPD_OK) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR028, + SK_PNMI_ERR028MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * General - OID handler function of various single instance OIDs + * + * Description: + * The code is simple. No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int General( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + unsigned int Index; + unsigned int Len; + unsigned int Offset; + unsigned int Val; + SK_U8 Val8; + SK_U16 Val16; + SK_U32 Val32; + SK_U64 Val64; + SK_U64 Val64RxHwErrs = 0; + SK_U64 Val64TxHwErrs = 0; + SK_BOOL Is64BitReq = SK_FALSE; + char Buf[256]; + int MacType; + + /* + * Check instance. We only handle single instance variables. + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* + * Check action. We only allow get requests. 
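 * All OIDs served by General() are read-only, so a PRESET or SET
 * request is rejected here with SK_PNMI_ERR_READ_ONLY and *pLen
 * cleared; the per-OID buffer length checks follow further below.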
+ */ + if (Action != SK_PNMI_GET) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + MacType = pAC->GIni.GIMacType; + + /* + * Check length for the various supported OIDs + */ + switch (Id) { + + case OID_GEN_XMIT_ERROR: + case OID_GEN_RCV_ERROR: + case OID_GEN_RCV_NO_BUFFER: +#ifndef SK_NDIS_64BIT_CTR + if (*pLen < sizeof(SK_U32)) { + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + +#else /* SK_NDIS_64BIT_CTR */ + + /* + * for compatibility, at least 32bit are required for oid + */ + if (*pLen < sizeof(SK_U32)) { + /* + * but indicate handling for 64bit values, + * if insufficient space is provided + */ + *pLen = sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + + Is64BitReq = (*pLen < sizeof(SK_U64)) ? SK_FALSE : SK_TRUE; +#endif /* SK_NDIS_64BIT_CTR */ + break; + + case OID_SKGE_PORT_NUMBER: + case OID_SKGE_DEVICE_TYPE: + case OID_SKGE_RESULT: + case OID_SKGE_RLMT_MONITOR_NUMBER: + case OID_GEN_TRANSMIT_QUEUE_LENGTH: + case OID_SKGE_TRAP_NUMBER: + case OID_SKGE_MDB_VERSION: + case OID_SKGE_BOARDLEVEL: + case OID_SKGE_CHIPID: + case OID_SKGE_RAMSIZE: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_CHIPSET: + if (*pLen < sizeof(SK_U16)) { + + *pLen = sizeof(SK_U16); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_BUS_TYPE: + case OID_SKGE_BUS_SPEED: + case OID_SKGE_BUS_WIDTH: + case OID_SKGE_SENSOR_NUMBER: + case OID_SKGE_CHKSM_NUMBER: + case OID_SKGE_VAUXAVAIL: + if (*pLen < sizeof(SK_U8)) { + + *pLen = sizeof(SK_U8); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_TX_SW_QUEUE_LEN: + case OID_SKGE_TX_SW_QUEUE_MAX: + case OID_SKGE_TX_RETRY: + case OID_SKGE_RX_INTR_CTS: + case OID_SKGE_TX_INTR_CTS: + case OID_SKGE_RX_NO_BUF_CTS: + case OID_SKGE_TX_NO_BUF_CTS: + case OID_SKGE_TX_USED_DESCR_NO: + case OID_SKGE_RX_DELIVERED_CTS: + case OID_SKGE_RX_OCTETS_DELIV_CTS: + case OID_SKGE_RX_HW_ERROR_CTS: + case OID_SKGE_TX_HW_ERROR_CTS: + case OID_SKGE_IN_ERRORS_CTS: + case OID_SKGE_OUT_ERROR_CTS: + case OID_SKGE_ERR_RECOVERY_CTS: + case OID_SKGE_SYSUPTIME: + if (*pLen < sizeof(SK_U64)) { + + *pLen = sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + /* Checked later */ + break; + } + + /* Update statistic */ + if (Id == OID_SKGE_RX_HW_ERROR_CTS || + Id == OID_SKGE_TX_HW_ERROR_CTS || + Id == OID_SKGE_IN_ERRORS_CTS || + Id == OID_SKGE_OUT_ERROR_CTS || + Id == OID_GEN_XMIT_ERROR || + Id == OID_GEN_RCV_ERROR) { + + /* Force the XMAC to update its statistic counters and + * Increment semaphore to indicate that an update was + * already done. + */ + Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1); + if (Ret != SK_PNMI_ERR_OK) { + + *pLen = 0; + return (Ret); + } + pAC->Pnmi.MacUpdatedFlag ++; + + /* + * Some OIDs consist of multiple hardware counters. Those + * values which are contained in all of them will be added + * now. 
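 * Val64RxHwErrs adds up the twelve SK_PNMI_HRX_* receive error
 * counters and Val64TxHwErrs the four SK_PNMI_HTX_* transmit error
 * counters, each fetched through GetStatVal() for the queried net.
 * MacUpdate() above forced a fresh read of the MAC counters, and
 * MacUpdatedFlag records that this update has already been done.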
+ */ + switch (Id) { + + case OID_SKGE_RX_HW_ERROR_CTS: + case OID_SKGE_IN_ERRORS_CTS: + case OID_GEN_RCV_ERROR: + Val64RxHwErrs = + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_MISSED, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FRAMING, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_OVERFLOW, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_JABBER, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CARRIER, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_IRLENGTH, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SYMBOL, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SHORTS, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_RUNT, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_TOO_LONG, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FCS, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CEXT, NetIndex); + break; + + case OID_SKGE_TX_HW_ERROR_CTS: + case OID_SKGE_OUT_ERROR_CTS: + case OID_GEN_XMIT_ERROR: + Val64TxHwErrs = + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_EXCESS_COL, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_LATE_COL, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_UNDERRUN, NetIndex) + + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_CARRIER, NetIndex); + break; + } + } + + /* + * Retrieve value + */ + switch (Id) { + + case OID_SKGE_SUPPORTED_LIST: + Len = ID_TABLE_SIZE * sizeof(SK_U32); + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + for (Offset = 0, Index = 0; Offset < Len; + Offset += sizeof(SK_U32), Index ++) { + + Val32 = (SK_U32)IdTable[Index].Id; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + } + *pLen = Len; + break; + + case OID_SKGE_BOARDLEVEL: + Val32 = (SK_U32)pAC->GIni.GILevel; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_PORT_NUMBER: + Val32 = (SK_U32)pAC->GIni.GIMacsFound; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_DEVICE_TYPE: + Val32 = (SK_U32)pAC->Pnmi.DeviceType; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_DRIVER_DESCR: + if (pAC->Pnmi.pDriverDescription == NULL) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR007, + SK_PNMI_ERR007MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + Len = SK_STRLEN(pAC->Pnmi.pDriverDescription) + 1; + if (Len > SK_PNMI_STRINGLEN1) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR029, + SK_PNMI_ERR029MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + *pBuf = (char)(Len - 1); + SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverDescription, Len - 1); + *pLen = Len; + break; + + case OID_SKGE_DRIVER_VERSION: + if (pAC->Pnmi.pDriverVersion == NULL) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030, + SK_PNMI_ERR030MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + Len = SK_STRLEN(pAC->Pnmi.pDriverVersion) + 1; + if (Len > SK_PNMI_STRINGLEN1) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031, + SK_PNMI_ERR031MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + *pBuf = (char)(Len - 1); + SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverVersion, Len - 1); + *pLen = Len; + break; + + case OID_SKGE_DRIVER_RELDATE: + if (pAC->Pnmi.pDriverReleaseDate == NULL) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030, + SK_PNMI_ERR053MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + Len = SK_STRLEN(pAC->Pnmi.pDriverReleaseDate) + 1; + if (Len > SK_PNMI_STRINGLEN1) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031, + 
SK_PNMI_ERR054MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + *pBuf = (char)(Len - 1); + SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverReleaseDate, Len - 1); + *pLen = Len; + break; + + case OID_SKGE_DRIVER_FILENAME: + if (pAC->Pnmi.pDriverFileName == NULL) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030, + SK_PNMI_ERR055MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + Len = SK_STRLEN(pAC->Pnmi.pDriverFileName) + 1; + if (Len > SK_PNMI_STRINGLEN1) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031, + SK_PNMI_ERR056MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + *pBuf = (char)(Len - 1); + SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverFileName, Len - 1); + *pLen = Len; + break; + + case OID_SKGE_HW_DESCR: + /* + * The hardware description is located in the VPD. This + * query may move to the initialisation routine. But + * the VPD data is cached and therefore a call here + * will not make much difference. + */ + Len = 256; + if (VpdRead(pAC, IoC, VPD_NAME, Buf, (int *)&Len) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR032, + SK_PNMI_ERR032MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + Len ++; + if (Len > SK_PNMI_STRINGLEN1) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR033, + SK_PNMI_ERR033MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + *pBuf = (char)(Len - 1); + SK_MEMCPY(pBuf + 1, Buf, Len - 1); + *pLen = Len; + break; + + case OID_SKGE_HW_VERSION: + /* Oh, I love to do some string manipulation */ + if (*pLen < 5) { + + *pLen = 5; + return (SK_PNMI_ERR_TOO_SHORT); + } + Val8 = (SK_U8)pAC->GIni.GIPciHwRev; + pBuf[0] = 4; + pBuf[1] = 'v'; + pBuf[2] = (char)(0x30 | ((Val8 >> 4) & 0x0F)); + pBuf[3] = '.'; + pBuf[4] = (char)(0x30 | (Val8 & 0x0F)); + *pLen = 5; + break; + + case OID_SKGE_CHIPSET: + Val16 = pAC->Pnmi.Chipset; + SK_PNMI_STORE_U16(pBuf, Val16); + *pLen = sizeof(SK_U16); + break; + + case OID_SKGE_CHIPID: + Val32 = pAC->GIni.GIChipId; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_RAMSIZE: + Val32 = pAC->GIni.GIRamSize; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_VAUXAVAIL: + *pBuf = (char) pAC->GIni.GIVauxAvail; + *pLen = sizeof(char); + break; + + case OID_SKGE_BUS_TYPE: + *pBuf = (char) SK_PNMI_BUS_PCI; + *pLen = sizeof(char); + break; + + case OID_SKGE_BUS_SPEED: + *pBuf = pAC->Pnmi.PciBusSpeed; + *pLen = sizeof(char); + break; + + case OID_SKGE_BUS_WIDTH: + *pBuf = pAC->Pnmi.PciBusWidth; + *pLen = sizeof(char); + break; + + case OID_SKGE_RESULT: + Val32 = pAC->Pnmi.TestResult; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_SENSOR_NUMBER: + *pBuf = (char)pAC->I2c.MaxSens; + *pLen = sizeof(char); + break; + + case OID_SKGE_CHKSM_NUMBER: + *pBuf = SKCS_NUM_PROTOCOLS; + *pLen = sizeof(char); + break; + + case OID_SKGE_TRAP_NUMBER: + GetTrapQueueLen(pAC, &Len, &Val); + Val32 = (SK_U32)Val; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_TRAP: + GetTrapQueueLen(pAC, &Len, &Val); + if (*pLen < Len) { + + *pLen = Len; + return (SK_PNMI_ERR_TOO_SHORT); + } + CopyTrapQueue(pAC, pBuf); + *pLen = Len; + break; + + case OID_SKGE_RLMT_MONITOR_NUMBER: +/* XXX Not yet implemented by RLMT therefore we return zero elements */ + Val32 = 0; + SK_PNMI_STORE_U32(pBuf, 
Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_TX_SW_QUEUE_LEN: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueLen; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxSwQueueLen + + pAC->Pnmi.BufPort[1].TxSwQueueLen; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueLen; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxSwQueueLen + + pAC->Pnmi.Port[1].TxSwQueueLen; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + + case OID_SKGE_TX_SW_QUEUE_MAX: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueMax; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxSwQueueMax + + pAC->Pnmi.BufPort[1].TxSwQueueMax; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueMax; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxSwQueueMax + + pAC->Pnmi.Port[1].TxSwQueueMax; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_TX_RETRY: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxRetryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxRetryCts + + pAC->Pnmi.BufPort[1].TxRetryCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxRetryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxRetryCts + + pAC->Pnmi.Port[1].TxRetryCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RX_INTR_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxIntrCts + + pAC->Pnmi.BufPort[1].RxIntrCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxIntrCts + + pAC->Pnmi.Port[1].RxIntrCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_TX_INTR_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxIntrCts + + pAC->Pnmi.BufPort[1].TxIntrCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxIntrCts + + pAC->Pnmi.Port[1].TxIntrCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case 
OID_SKGE_RX_NO_BUF_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxNoBufCts + + pAC->Pnmi.BufPort[1].RxNoBufCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxNoBufCts + + pAC->Pnmi.Port[1].RxNoBufCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_TX_NO_BUF_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxNoBufCts + + pAC->Pnmi.BufPort[1].TxNoBufCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxNoBufCts + + pAC->Pnmi.Port[1].TxNoBufCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_TX_USED_DESCR_NO: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxUsedDescrNo; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxUsedDescrNo + + pAC->Pnmi.BufPort[1].TxUsedDescrNo; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxUsedDescrNo; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxUsedDescrNo + + pAC->Pnmi.Port[1].TxUsedDescrNo; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RX_DELIVERED_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxDeliveredCts + + pAC->Pnmi.BufPort[1].RxDeliveredCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxDeliveredCts + + pAC->Pnmi.Port[1].RxDeliveredCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RX_OCTETS_DELIV_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxOctetsDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxOctetsDeliveredCts + + pAC->Pnmi.BufPort[1].RxOctetsDeliveredCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxOctetsDeliveredCts + + pAC->Pnmi.Port[1].RxOctetsDeliveredCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + 
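/*
 * Note on the pattern shared by the OID_SKGE_*_CTS cases above and
 * below: for the XMAC the frozen software copies in Pnmi.BufPort[]
 * are used, for other MAC types the live Pnmi.Port[] counters; in
 * dual-net mode the counter of the queried net (NetIndex) is
 * returned, in single-net mode the two ports are summed. A
 * hypothetical helper capturing that shape:
 *
 *     Val64 = DualNet ? Cnt[NetIndex] : (Cnt[0] + Cnt[1]);
 *
 * The result is then stored with SK_PNMI_STORE_U64() and *pLen is
 * set to sizeof(SK_U64).
 */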
*pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RX_HW_ERROR_CTS: + SK_PNMI_STORE_U64(pBuf, Val64RxHwErrs); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_TX_HW_ERROR_CTS: + SK_PNMI_STORE_U64(pBuf, Val64TxHwErrs); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_IN_ERRORS_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64RxHwErrs + + pAC->Pnmi.BufPort[0].RxNoBufCts + + pAC->Pnmi.BufPort[1].RxNoBufCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64RxHwErrs + + pAC->Pnmi.Port[0].RxNoBufCts + + pAC->Pnmi.Port[1].RxNoBufCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_OUT_ERROR_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64TxHwErrs + + pAC->Pnmi.BufPort[0].TxNoBufCts + + pAC->Pnmi.BufPort[1].TxNoBufCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64TxHwErrs + + pAC->Pnmi.Port[0].TxNoBufCts + + pAC->Pnmi.Port[1].TxNoBufCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_ERR_RECOVERY_CTS: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].ErrRecoveryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].ErrRecoveryCts + + pAC->Pnmi.BufPort[1].ErrRecoveryCts; + } + } + else { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].ErrRecoveryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].ErrRecoveryCts + + pAC->Pnmi.Port[1].ErrRecoveryCts; + } + } + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_SYSUPTIME: + Val64 = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC)); + Val64 -= pAC->Pnmi.StartUpTime; + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_MDB_VERSION: + Val32 = SK_PNMI_MDB_VERSION; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_GEN_RCV_ERROR: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + else { + Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } + + /* + * by default 32bit values are evaluated + */ + if (!Is64BitReq) { + Val32 = (SK_U32)Val64; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + } + else { + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + } + break; + + case OID_GEN_XMIT_ERROR: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; + } + 
else { + Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts; + } + + /* + * by default 32bit values are evaluated + */ + if (!Is64BitReq) { + Val32 = (SK_U32)Val64; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + } + else { + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + } + break; + + case OID_GEN_RCV_NO_BUFFER: + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + else { + Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } + + /* + * by default 32bit values are evaluated + */ + if (!Is64BitReq) { + Val32 = (SK_U32)Val64; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + } + else { + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + } + break; + + case OID_GEN_TRANSMIT_QUEUE_LENGTH: + Val32 = (SK_U32)pAC->Pnmi.Port[NetIndex].TxSwQueueLen; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR034, + SK_PNMI_ERR034MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + if (Id == OID_SKGE_RX_HW_ERROR_CTS || + Id == OID_SKGE_TX_HW_ERROR_CTS || + Id == OID_SKGE_IN_ERRORS_CTS || + Id == OID_SKGE_OUT_ERROR_CTS || + Id == OID_GEN_XMIT_ERROR || + Id == OID_GEN_RCV_ERROR) { + + pAC->Pnmi.MacUpdatedFlag --; + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * Rlmt - OID handler function of OID_SKGE_RLMT_XXX single instance. + * + * Description: + * Get/Presets/Sets the RLMT OIDs. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int Rlmt( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + int Ret; + unsigned int PhysPortIndex; + unsigned int PhysPortMax; + SK_EVPARA EventParam; + SK_U32 Val32; + SK_U64 Val64; + + + /* + * Check instance. Only single instance OIDs are allowed here. + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* + * Perform the requested action. + */ + if (Action == SK_PNMI_GET) { + + /* + * Check if the buffer length is large enough. 
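 * As everywhere in this file, a buffer that is too small is not a
 * hard error: the required size is passed back in *pLen and
 * SK_PNMI_ERR_TOO_SHORT is returned so the caller can repeat the
 * request with a larger buffer, e.g. for a 64 bit counter:
 *
 *     if (*pLen < sizeof(SK_U64)) {
 *         *pLen = sizeof(SK_U64);
 *         return (SK_PNMI_ERR_TOO_SHORT);
 *     }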
+ */ + + switch (Id) { + + case OID_SKGE_RLMT_MODE: + case OID_SKGE_RLMT_PORT_ACTIVE: + case OID_SKGE_RLMT_PORT_PREFERRED: + if (*pLen < sizeof(SK_U8)) { + + *pLen = sizeof(SK_U8); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_RLMT_PORT_NUMBER: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_RLMT_CHANGE_CTS: + case OID_SKGE_RLMT_CHANGE_TIME: + case OID_SKGE_RLMT_CHANGE_ESTIM: + case OID_SKGE_RLMT_CHANGE_THRES: + if (*pLen < sizeof(SK_U64)) { + + *pLen = sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR035, + SK_PNMI_ERR035MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Update RLMT statistic and increment semaphores to indicate + * that an update was already done. Maybe RLMT will hold its + * statistic always up to date some time. Then we can + * remove this type of call. + */ + if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) { + + *pLen = 0; + return (Ret); + } + pAC->Pnmi.RlmtUpdatedFlag ++; + + /* + * Retrieve Value + */ + switch (Id) { + + case OID_SKGE_RLMT_MODE: + *pBuf = (char)pAC->Rlmt.Net[0].RlmtMode; + *pLen = sizeof(char); + break; + + case OID_SKGE_RLMT_PORT_NUMBER: + Val32 = (SK_U32)pAC->GIni.GIMacsFound; + SK_PNMI_STORE_U32(pBuf, Val32); + *pLen = sizeof(SK_U32); + break; + + case OID_SKGE_RLMT_PORT_ACTIVE: + *pBuf = 0; + /* + * If multiple ports may become active this OID + * doesn't make sense any more. A new variable in + * the port structure should be created. However, + * for this variable the first active port is + * returned. + */ + PhysPortMax = pAC->GIni.GIMacsFound; + + for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + *pBuf = (char)SK_PNMI_PORT_PHYS2LOG(PhysPortIndex); + break; + } + } + *pLen = sizeof(char); + break; + + case OID_SKGE_RLMT_PORT_PREFERRED: + *pBuf = (char)SK_PNMI_PORT_PHYS2LOG(pAC->Rlmt.Net[NetIndex].Preference); + *pLen = sizeof(char); + break; + + case OID_SKGE_RLMT_CHANGE_CTS: + Val64 = pAC->Pnmi.RlmtChangeCts; + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RLMT_CHANGE_TIME: + Val64 = pAC->Pnmi.RlmtChangeTime; + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RLMT_CHANGE_ESTIM: + Val64 = pAC->Pnmi.RlmtChangeEstimate.Estimate; + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + case OID_SKGE_RLMT_CHANGE_THRES: + Val64 = pAC->Pnmi.RlmtChangeThreshold; + SK_PNMI_STORE_U64(pBuf, Val64); + *pLen = sizeof(SK_U64); + break; + + default: + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("Rlmt: Unknown OID should be handled before")); + + pAC->Pnmi.RlmtUpdatedFlag --; + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + pAC->Pnmi.RlmtUpdatedFlag --; + } + else { + /* Perform a preset or set */ + switch (Id) { + + case OID_SKGE_RLMT_MODE: + /* Check if the buffer length is plausible */ + if (*pLen < sizeof(char)) { + + *pLen = sizeof(char); + return (SK_PNMI_ERR_TOO_SHORT); + } + /* Check if the value range is correct */ + if (*pLen != sizeof(char) || + (*pBuf & SK_PNMI_RLMT_MODE_CHK_LINK) == 0 || + *(SK_U8 *)pBuf > 15) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + *pLen = 0; + return (SK_PNMI_ERR_OK); + } + /* Send an event to RLMT to change the mode */ + SK_MEMSET((char *)&EventParam, 0, 
sizeof(EventParam)); + EventParam.Para32[0] |= (SK_U32)(*pBuf); + EventParam.Para32[1] = 0; + if (SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR037, + SK_PNMI_ERR037MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + break; + + case OID_SKGE_RLMT_PORT_PREFERRED: + /* Check if the buffer length is plausible */ + if (*pLen < sizeof(char)) { + + *pLen = sizeof(char); + return (SK_PNMI_ERR_TOO_SHORT); + } + /* Check if the value range is correct */ + if (*pLen != sizeof(char) || *(SK_U8 *)pBuf > + (SK_U8)pAC->GIni.GIMacsFound) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + *pLen = 0; + return (SK_PNMI_ERR_OK); + } + + /* + * Send an event to RLMT change the preferred port. + * A param of -1 means automatic mode. RLMT will + * make the decision which is the preferred port. + */ + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + EventParam.Para32[0] = (SK_U32)(*pBuf) - 1; + EventParam.Para32[1] = NetIndex; + if (SkRlmtEvent(pAC, IoC, SK_RLMT_PREFPORT_CHANGE, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR038, + SK_PNMI_ERR038MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + break; + + case OID_SKGE_RLMT_CHANGE_THRES: + /* Check if the buffer length is plausible */ + if (*pLen < sizeof(SK_U64)) { + + *pLen = sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + /* + * There are not many restrictions to the + * value range. + */ + if (*pLen != sizeof(SK_U64)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + /* A preset ends here */ + if (Action == SK_PNMI_PRESET) { + + *pLen = 0; + return (SK_PNMI_ERR_OK); + } + /* + * Store the new threshold, which will be taken + * on the next timer event. + */ + SK_PNMI_READ_U64(pBuf, Val64); + pAC->Pnmi.RlmtChangeThreshold = Val64; + break; + + default: + /* The other OIDs are not be able for set */ + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * RlmtStat - OID handler function of OID_SKGE_RLMT_XXX multiple instance. + * + * Description: + * Performs get requests on multiple instance variables. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int RlmtStat( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int PhysPortMax; + unsigned int PhysPortIndex; + unsigned int Limit; + unsigned int Offset; + int Ret; + SK_U32 Val32; + SK_U64 Val64; + + /* + * Calculate the port indexes from the instance. 
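 * Instance 1..n selects the single physical port Instance - 1, and
 * Instance -1 selects all ports. In dual-net mode the port index is
 * forced to NetIndex, so only the port belonging to the queried net
 * is reported in either case.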
+ */ + PhysPortMax = pAC->GIni.GIMacsFound; + + if ((Instance != (SK_U32)(-1))) { + /* Check instance range */ + if ((Instance < 1) || (Instance > PhysPortMax)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* Single net mode */ + PhysPortIndex = Instance - 1; + + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + PhysPortIndex = NetIndex; + } + + /* Both net modes */ + Limit = PhysPortIndex + 1; + } + else { + /* Single net mode */ + PhysPortIndex = 0; + Limit = PhysPortMax; + + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + PhysPortIndex = NetIndex; + Limit = PhysPortIndex + 1; + } + } + + /* + * Currently only get requests are allowed. + */ + if (Action != SK_PNMI_GET) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* + * Check if the buffer length is large enough. + */ + switch (Id) { + + case OID_SKGE_RLMT_PORT_INDEX: + case OID_SKGE_RLMT_STATUS: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) { + + *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_RLMT_TX_HELLO_CTS: + case OID_SKGE_RLMT_RX_HELLO_CTS: + case OID_SKGE_RLMT_TX_SP_REQ_CTS: + case OID_SKGE_RLMT_RX_SP_CTS: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U64)) { + + *pLen = (Limit - PhysPortIndex) * sizeof(SK_U64); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR039, + SK_PNMI_ERR039MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + + } + + /* + * Update statistic and increment semaphores to indicate that + * an update was already done. + */ + if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) { + + *pLen = 0; + return (Ret); + } + pAC->Pnmi.RlmtUpdatedFlag ++; + + /* + * Get value + */ + Offset = 0; + for (; PhysPortIndex < Limit; PhysPortIndex ++) { + + switch (Id) { + + case OID_SKGE_RLMT_PORT_INDEX: + Val32 = PhysPortIndex; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_RLMT_STATUS: + if (pAC->Rlmt.Port[PhysPortIndex].PortState == + SK_RLMT_PS_INIT || + pAC->Rlmt.Port[PhysPortIndex].PortState == + SK_RLMT_PS_DOWN) { + + Val32 = SK_PNMI_RLMT_STATUS_ERROR; + } + else if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + Val32 = SK_PNMI_RLMT_STATUS_ACTIVE; + } + else { + Val32 = SK_PNMI_RLMT_STATUS_STANDBY; + } + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_RLMT_TX_HELLO_CTS: + Val64 = pAC->Rlmt.Port[PhysPortIndex].TxHelloCts; + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + case OID_SKGE_RLMT_RX_HELLO_CTS: + Val64 = pAC->Rlmt.Port[PhysPortIndex].RxHelloCts; + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + case OID_SKGE_RLMT_TX_SP_REQ_CTS: + Val64 = pAC->Rlmt.Port[PhysPortIndex].TxSpHelloReqCts; + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + case OID_SKGE_RLMT_RX_SP_CTS: + Val64 = pAC->Rlmt.Port[PhysPortIndex].RxSpHelloCts; + SK_PNMI_STORE_U64(pBuf + Offset, Val64); + Offset += sizeof(SK_U64); + break; + + default: + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("RlmtStat: Unknown OID should be errored before")); + + pAC->Pnmi.RlmtUpdatedFlag --; + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + *pLen = Offset; + + pAC->Pnmi.RlmtUpdatedFlag --; + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * MacPrivateConf - OID handler function 
of OIDs concerning the configuration + * + * Description: + * Get/Presets/Sets the OIDs concerning the configuration. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int MacPrivateConf( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int PhysPortMax; + unsigned int PhysPortIndex; + unsigned int LogPortMax; + unsigned int LogPortIndex; + unsigned int Limit; + unsigned int Offset; + char Val8; + char *pBufPtr; + int Ret; + SK_EVPARA EventParam; + SK_U32 Val32; + + /* + * Calculate instance if wished. MAC index 0 is the virtual MAC. + */ + PhysPortMax = pAC->GIni.GIMacsFound; + LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); + + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ + LogPortMax--; + } + + if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */ + /* Check instance range */ + if ((Instance < 1) || (Instance > LogPortMax)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance); + Limit = LogPortIndex + 1; + } + + else { /* Instance == (SK_U32)(-1), get all Instances of that OID */ + + LogPortIndex = 0; + Limit = LogPortMax; + } + + /* + * Perform action + */ + if (Action == SK_PNMI_GET) { + + /* Check length */ + switch (Id) { + + case OID_SKGE_PMD: + case OID_SKGE_CONNECTOR: + case OID_SKGE_LINK_CAP: + case OID_SKGE_LINK_MODE: + case OID_SKGE_LINK_MODE_STATUS: + case OID_SKGE_LINK_STATUS: + case OID_SKGE_FLOWCTRL_CAP: + case OID_SKGE_FLOWCTRL_MODE: + case OID_SKGE_FLOWCTRL_STATUS: + case OID_SKGE_PHY_OPERATION_CAP: + case OID_SKGE_PHY_OPERATION_MODE: + case OID_SKGE_PHY_OPERATION_STATUS: + case OID_SKGE_SPEED_CAP: + case OID_SKGE_SPEED_MODE: + case OID_SKGE_SPEED_STATUS: + if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { + + *pLen = (Limit - LogPortIndex) * sizeof(SK_U8); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_MTU: + case OID_SKGE_PHY_TYPE: + if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U32)) { + + *pLen = (Limit - LogPortIndex) * sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR041, + SK_PNMI_ERR041MSG); + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Update statistic and increment semaphore to indicate + * that an update was already done. 
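 * SirqUpdate() refreshes the port state before the values are
 * copied. In the loop below, logical port 0 is the virtual port and
 * is filled in via VirtualConf(), while the other logical ports are
 * mapped to physical ports with SK_PNMI_PORT_LOG2PHYS(); in dual-net
 * mode the values are taken directly from GIni.GP[NetIndex].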
+ */ + if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) { + + *pLen = 0; + return (Ret); + } + pAC->Pnmi.SirqUpdatedFlag ++; + + /* + * Get value + */ + Offset = 0; + for (; LogPortIndex < Limit; LogPortIndex ++) { + + pBufPtr = pBuf + Offset; + + switch (Id) { + + case OID_SKGE_PMD: + *pBufPtr = pAC->Pnmi.PMD; + Offset += sizeof(char); + break; + + case OID_SKGE_CONNECTOR: + *pBufPtr = pAC->Pnmi.Connector; + Offset += sizeof(char); + break; + + case OID_SKGE_PHY_TYPE: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + continue; + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + Val32 = pAC->GIni.GP[PhysPortIndex].PhyType; + SK_PNMI_STORE_U32(pBufPtr, Val32); + } + } + else { /* DualNetMode */ + + Val32 = pAC->GIni.GP[NetIndex].PhyType; + SK_PNMI_STORE_U32(pBufPtr, Val32); + } + Offset += sizeof(SK_U32); + break; + + case OID_SKGE_LINK_CAP: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkCap; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkCap; + } + Offset += sizeof(char); + break; + + case OID_SKGE_LINK_MODE: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkModeConf; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkModeConf; + } + Offset += sizeof(char); + break; + + case OID_SKGE_LINK_MODE_STATUS: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = + CalculateLinkModeStatus(pAC, IoC, PhysPortIndex); + } + } + else { /* DualNetMode */ + + *pBufPtr = CalculateLinkModeStatus(pAC, IoC, NetIndex); + } + Offset += sizeof(char); + break; + + case OID_SKGE_LINK_STATUS: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = CalculateLinkStatus(pAC, IoC, PhysPortIndex); + } + } + else { /* DualNetMode */ + + *pBufPtr = CalculateLinkStatus(pAC, IoC, NetIndex); + } + Offset += sizeof(char); + break; + + case OID_SKGE_FLOWCTRL_CAP: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlCap; + } + Offset += sizeof(char); + break; + + case OID_SKGE_FLOWCTRL_MODE: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, 
pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlMode; + } + Offset += sizeof(char); + break; + + case OID_SKGE_FLOWCTRL_STATUS: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlStatus; + } + Offset += sizeof(char); + break; + + case OID_SKGE_PHY_OPERATION_CAP: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSCap; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PMSCap; + } + Offset += sizeof(char); + break; + + case OID_SKGE_PHY_OPERATION_MODE: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSMode; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PMSMode; + } + Offset += sizeof(char); + break; + + case OID_SKGE_PHY_OPERATION_STATUS: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSStatus; + } + } + else { + + *pBufPtr = pAC->GIni.GP[NetIndex].PMSStatus; + } + Offset += sizeof(char); + break; + + case OID_SKGE_SPEED_CAP: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedCap; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedCap; + } + Offset += sizeof(char); + break; + + case OID_SKGE_SPEED_MODE: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeed; + } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeed; + } + Offset += sizeof(char); + break; + + case OID_SKGE_SPEED_STATUS: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBufPtr); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed; 
+ } + } + else { /* DualNetMode */ + + *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedUsed; + } + Offset += sizeof(char); + break; + + case OID_SKGE_MTU: + Val32 = SK_DRIVER_GET_MTU(pAC, IoC, NetIndex); + SK_PNMI_STORE_U32(pBufPtr, Val32); + Offset += sizeof(SK_U32); + break; + + default: + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("MacPrivateConf: Unknown OID should be handled before")); + + pAC->Pnmi.SirqUpdatedFlag --; + return (SK_PNMI_ERR_GENERAL); + } + } + *pLen = Offset; + pAC->Pnmi.SirqUpdatedFlag --; + + return (SK_PNMI_ERR_OK); + } + + /* + * From here SET or PRESET action. Check if the passed + * buffer length is plausible. + */ + switch (Id) { + + case OID_SKGE_LINK_MODE: + case OID_SKGE_FLOWCTRL_MODE: + case OID_SKGE_PHY_OPERATION_MODE: + case OID_SKGE_SPEED_MODE: + if (*pLen < Limit - LogPortIndex) { + + *pLen = Limit - LogPortIndex; + return (SK_PNMI_ERR_TOO_SHORT); + } + if (*pLen != Limit - LogPortIndex) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + break; + + case OID_SKGE_MTU: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + if (*pLen != sizeof(SK_U32)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* + * Perform preset or set + */ + Offset = 0; + for (; LogPortIndex < Limit; LogPortIndex ++) { + + switch (Id) { + + case OID_SKGE_LINK_MODE: + /* Check the value range */ + Val8 = *(pBuf + Offset); + if (Val8 == 0) { + + Offset += sizeof(char); + break; + } + if (Val8 < SK_LMODE_HALF || + (LogPortIndex != 0 && Val8 > SK_LMODE_AUTOSENSE) || + (LogPortIndex == 0 && Val8 > SK_LMODE_INDETERMINATED)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + if (LogPortIndex == 0) { + + /* + * The virtual port consists of all currently + * active ports. Find them and send an event + * with the new link mode to SIRQ. + */ + for (PhysPortIndex = 0; + PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (!pAC->Pnmi.Port[PhysPortIndex]. + ActiveFlag) { + + continue; + } + + EventParam.Para32[0] = PhysPortIndex; + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_LMODE, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR043, + SK_PNMI_ERR043MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + } + else { + /* + * Send an event with the new link mode to + * the SIRQ module. + */ + EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, SK_HWEV_SET_LMODE, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR043, + SK_PNMI_ERR043MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + Offset += sizeof(char); + break; + + case OID_SKGE_FLOWCTRL_MODE: + /* Check the value range */ + Val8 = *(pBuf + Offset); + if (Val8 == 0) { + + Offset += sizeof(char); + break; + } + if (Val8 < SK_FLOW_MODE_NONE || + (LogPortIndex != 0 && Val8 > SK_FLOW_MODE_SYM_OR_REM) || + (LogPortIndex == 0 && Val8 > SK_FLOW_MODE_INDETERMINATED)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + if (LogPortIndex == 0) { + + /* + * The virtual port consists of all currently + * active ports. Find them and send an event + * with the new flow control mode to SIRQ. 
+ */ + for (PhysPortIndex = 0; + PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (!pAC->Pnmi.Port[PhysPortIndex]. + ActiveFlag) { + + continue; + } + + EventParam.Para32[0] = PhysPortIndex; + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_FLOWMODE, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR044, + SK_PNMI_ERR044MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + } + else { + /* + * Send an event with the new flow control + * mode to the SIRQ module. + */ + EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_FLOWMODE, EventParam) + > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR044, + SK_PNMI_ERR044MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + Offset += sizeof(char); + break; + + case OID_SKGE_PHY_OPERATION_MODE : + /* Check the value range */ + Val8 = *(pBuf + Offset); + if (Val8 == 0) { + /* mode of this port remains unchanged */ + Offset += sizeof(char); + break; + } + if (Val8 < SK_MS_MODE_AUTO || + (LogPortIndex != 0 && Val8 > SK_MS_MODE_SLAVE) || + (LogPortIndex == 0 && Val8 > SK_MS_MODE_INDETERMINATED)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + if (LogPortIndex == 0) { + + /* + * The virtual port consists of all currently + * active ports. Find them and send an event + * with new master/slave (role) mode to SIRQ. + */ + for (PhysPortIndex = 0; + PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (!pAC->Pnmi.Port[PhysPortIndex]. + ActiveFlag) { + + continue; + } + + EventParam.Para32[0] = PhysPortIndex; + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_ROLE, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR042, + SK_PNMI_ERR042MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + } + else { + /* + * Send an event with the new master/slave + * (role) mode to the SIRQ module. + */ + EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_ROLE, EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR042, + SK_PNMI_ERR042MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + + Offset += sizeof(char); + break; + + case OID_SKGE_SPEED_MODE: + /* Check the value range */ + Val8 = *(pBuf + Offset); + if (Val8 == 0) { + + Offset += sizeof(char); + break; + } + if (Val8 < (SK_LSPEED_AUTO) || + (LogPortIndex != 0 && Val8 > (SK_LSPEED_1000MBPS)) || + (LogPortIndex == 0 && Val8 > (SK_LSPEED_INDETERMINATED))) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + if (LogPortIndex == 0) { + + /* + * The virtual port consists of all currently + * active ports. Find them and send an event + * with the new flow control mode to SIRQ. 
+ */ + for (PhysPortIndex = 0; + PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + continue; + } + + EventParam.Para32[0] = PhysPortIndex; + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_SPEED, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR045, + SK_PNMI_ERR045MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + } + else { + /* + * Send an event with the new flow control + * mode to the SIRQ module. + */ + EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_SPEED, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR045, + SK_PNMI_ERR045MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + Offset += sizeof(char); + break; + + case OID_SKGE_MTU : + /* Check the value range */ + Val32 = *(SK_U32*)(pBuf + Offset); + if (Val32 == 0) { + /* mtu of this port remains unchanged */ + Offset += sizeof(SK_U32); + break; + } + if (SK_DRIVER_PRESET_MTU(pAC, IoC, NetIndex, Val32) != 0) { + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + if (SK_DRIVER_SET_MTU(pAC, IoC, NetIndex, Val32) != 0) { + return (SK_PNMI_ERR_GENERAL); + } + + Offset += sizeof(SK_U32); + break; + + default: + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("MacPrivateConf: Unknown OID should be handled before set")); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * Monitor - OID handler function for RLMT_MONITOR_XXX + * + * Description: + * Because RLMT currently does not support the monitoring of + * remote adapter cards, we return always an empty table. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid + * value range. + * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set. + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +PNMI_STATIC int Monitor( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + unsigned int Index; + unsigned int Limit; + unsigned int Offset; + unsigned int Entries; + + + /* + * Calculate instance if wished. + */ + /* XXX Not yet implemented. Return always an empty table. 
*/ + Entries = 0; + + if ((Instance != (SK_U32)(-1))) { + + if ((Instance < 1) || (Instance > Entries)) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + Index = (unsigned int)Instance - 1; + Limit = (unsigned int)Instance; + } + else { + Index = 0; + Limit = Entries; + } + + /* + * Get/Set value + */ + if (Action == SK_PNMI_GET) { + + for (Offset=0; Index < Limit; Index ++) { + + switch (Id) { + + case OID_SKGE_RLMT_MONITOR_INDEX: + case OID_SKGE_RLMT_MONITOR_ADDR: + case OID_SKGE_RLMT_MONITOR_ERRS: + case OID_SKGE_RLMT_MONITOR_TIMESTAMP: + case OID_SKGE_RLMT_MONITOR_ADMIN: + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR046, + SK_PNMI_ERR046MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + *pLen = Offset; + } + else { + /* Only MONITOR_ADMIN can be set */ + if (Id != OID_SKGE_RLMT_MONITOR_ADMIN) { + + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* Check if the length is plausible */ + if (*pLen < (Limit - Index)) { + + return (SK_PNMI_ERR_TOO_SHORT); + } + /* Okay, we have a wide value range */ + if (*pLen != (Limit - Index)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } +/* + for (Offset=0; Index < Limit; Index ++) { + } +*/ +/* + * XXX Not yet implemented. Return always BAD_VALUE, because the table + * is empty. + */ + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * VirtualConf - Calculates the values of configuration OIDs for virtual port + * + * Description: + * We handle here the get of the configuration group OIDs, which are + * a little bit complicated. The virtual port consists of all currently + * active physical ports. If multiple ports are active and configured + * differently we get in some trouble to return a single value. So we + * get the value of the first active port and compare it with that of + * the other active ports. If they are not the same, we return a value + * that indicates that the state is indeterminated. + * + * Returns: + * Nothing + */ +PNMI_STATIC void VirtualConf( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf) /* Buffer used for the management data transfer */ +{ + unsigned int PhysPortMax; + unsigned int PhysPortIndex; + SK_U8 Val8; + SK_U32 Val32; + SK_BOOL PortActiveFlag; + SK_GEPORT *pPrt; + + *pBuf = 0; + PortActiveFlag = SK_FALSE; + PhysPortMax = pAC->GIni.GIMacsFound; + + for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + pPrt = &pAC->GIni.GP[PhysPortIndex]; + + /* Check if the physical port is active */ + if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + continue; + } + + PortActiveFlag = SK_TRUE; + + switch (Id) { + + case OID_SKGE_PHY_TYPE: + /* Check if it is the first active port */ + if (*pBuf == 0) { + Val32 = pPrt->PhyType; + SK_PNMI_STORE_U32(pBuf, Val32); + continue; + } + + case OID_SKGE_LINK_CAP: + + /* + * Different capabilities should not happen, but + * in the case of the cases OR them all together. + * From a curious point of view the virtual port + * is capable of all found capabilities. 
+ */ + *pBuf |= pPrt->PLinkCap; + break; + + case OID_SKGE_LINK_MODE: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PLinkModeConf; + continue; + } + + /* + * If we find an active port with a different link + * mode than the first one we return a value that + * indicates that the link mode is indeterminated. + */ + if (*pBuf != pPrt->PLinkModeConf) { + + *pBuf = SK_LMODE_INDETERMINATED; + } + break; + + case OID_SKGE_LINK_MODE_STATUS: + /* Get the link mode of the physical port */ + Val8 = CalculateLinkModeStatus(pAC, IoC, PhysPortIndex); + + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = Val8; + continue; + } + + /* + * If we find an active port with a different link + * mode status than the first one we return a value + * that indicates that the link mode status is + * indeterminated. + */ + if (*pBuf != Val8) { + + *pBuf = SK_LMODE_STAT_INDETERMINATED; + } + break; + + case OID_SKGE_LINK_STATUS: + /* Get the link status of the physical port */ + Val8 = CalculateLinkStatus(pAC, IoC, PhysPortIndex); + + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = Val8; + continue; + } + + /* + * If we find an active port with a different link + * status than the first one, we return a value + * that indicates that the link status is + * indeterminated. + */ + if (*pBuf != Val8) { + + *pBuf = SK_PNMI_RLMT_LSTAT_INDETERMINATED; + } + break; + + case OID_SKGE_FLOWCTRL_CAP: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PFlowCtrlCap; + continue; + } + + /* + * From a curious point of view the virtual port + * is capable of all found capabilities. + */ + *pBuf |= pPrt->PFlowCtrlCap; + break; + + case OID_SKGE_FLOWCTRL_MODE: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PFlowCtrlMode; + continue; + } + + /* + * If we find an active port with a different flow + * control mode than the first one, we return a value + * that indicates that the mode is indeterminated. + */ + if (*pBuf != pPrt->PFlowCtrlMode) { + + *pBuf = SK_FLOW_MODE_INDETERMINATED; + } + break; + + case OID_SKGE_FLOWCTRL_STATUS: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PFlowCtrlStatus; + continue; + } + + /* + * If we find an active port with a different flow + * control status than the first one, we return a + * value that indicates that the status is + * indeterminated. + */ + if (*pBuf != pPrt->PFlowCtrlStatus) { + + *pBuf = SK_FLOW_STAT_INDETERMINATED; + } + break; + + case OID_SKGE_PHY_OPERATION_CAP: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PMSCap; + continue; + } + + /* + * From a curious point of view the virtual port + * is capable of all found capabilities. + */ + *pBuf |= pPrt->PMSCap; + break; + + case OID_SKGE_PHY_OPERATION_MODE: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PMSMode; + continue; + } + + /* + * If we find an active port with a different master/ + * slave mode than the first one, we return a value + * that indicates that the mode is indeterminated. 
+ */ + if (*pBuf != pPrt->PMSMode) { + + *pBuf = SK_MS_MODE_INDETERMINATED; + } + break; + + case OID_SKGE_PHY_OPERATION_STATUS: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PMSStatus; + continue; + } + + /* + * If we find an active port with a different master/ + * slave status than the first one, we return a + * value that indicates that the status is + * indeterminated. + */ + if (*pBuf != pPrt->PMSStatus) { + + *pBuf = SK_MS_STAT_INDETERMINATED; + } + break; + + case OID_SKGE_SPEED_MODE: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PLinkSpeed; + continue; + } + + /* + * If we find an active port with a different flow + * control mode than the first one, we return a value + * that indicates that the mode is indeterminated. + */ + if (*pBuf != pPrt->PLinkSpeed) { + + *pBuf = SK_LSPEED_INDETERMINATED; + } + break; + + case OID_SKGE_SPEED_STATUS: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pPrt->PLinkSpeedUsed; + continue; + } + + /* + * If we find an active port with a different flow + * control status than the first one, we return a + * value that indicates that the status is + * indeterminated. + */ + if (*pBuf != pPrt->PLinkSpeedUsed) { + + *pBuf = SK_LSPEED_STAT_INDETERMINATED; + } + break; + } + } + + /* + * If no port is active return an indeterminated answer + */ + if (!PortActiveFlag) { + + switch (Id) { + + case OID_SKGE_LINK_CAP: + *pBuf = SK_LMODE_CAP_INDETERMINATED; + break; + + case OID_SKGE_LINK_MODE: + *pBuf = SK_LMODE_INDETERMINATED; + break; + + case OID_SKGE_LINK_MODE_STATUS: + *pBuf = SK_LMODE_STAT_INDETERMINATED; + break; + + case OID_SKGE_LINK_STATUS: + *pBuf = SK_PNMI_RLMT_LSTAT_INDETERMINATED; + break; + + case OID_SKGE_FLOWCTRL_CAP: + case OID_SKGE_FLOWCTRL_MODE: + *pBuf = SK_FLOW_MODE_INDETERMINATED; + break; + + case OID_SKGE_FLOWCTRL_STATUS: + *pBuf = SK_FLOW_STAT_INDETERMINATED; + break; + + case OID_SKGE_PHY_OPERATION_CAP: + *pBuf = SK_MS_CAP_INDETERMINATED; + break; + + case OID_SKGE_PHY_OPERATION_MODE: + *pBuf = SK_MS_MODE_INDETERMINATED; + break; + + case OID_SKGE_PHY_OPERATION_STATUS: + *pBuf = SK_MS_STAT_INDETERMINATED; + break; + case OID_SKGE_SPEED_CAP: + *pBuf = SK_LSPEED_CAP_INDETERMINATED; + break; + + case OID_SKGE_SPEED_MODE: + *pBuf = SK_LSPEED_INDETERMINATED; + break; + + case OID_SKGE_SPEED_STATUS: + *pBuf = SK_LSPEED_STAT_INDETERMINATED; + break; + } + } +} + +/***************************************************************************** + * + * CalculateLinkStatus - Determins the link status of a physical port + * + * Description: + * Determins the link status the following way: + * LSTAT_PHY_DOWN: Link is down + * LSTAT_AUTONEG: Auto-negotiation failed + * LSTAT_LOG_DOWN: Link is up but RLMT did not yet put the port + * logically up. 
+ * LSTAT_LOG_UP: RLMT marked the port as up + * + * Returns: + * Link status of physical port + */ +PNMI_STATIC SK_U8 CalculateLinkStatus( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +unsigned int PhysPortIndex) /* Physical port index */ +{ + SK_U8 Result; + + if (!pAC->GIni.GP[PhysPortIndex].PHWLinkUp) { + + Result = SK_PNMI_RLMT_LSTAT_PHY_DOWN; + } + else if (pAC->GIni.GP[PhysPortIndex].PAutoNegFail > 0) { + + Result = SK_PNMI_RLMT_LSTAT_AUTONEG; + } + else if (!pAC->Rlmt.Port[PhysPortIndex].PortDown) { + + Result = SK_PNMI_RLMT_LSTAT_LOG_UP; + } + else { + Result = SK_PNMI_RLMT_LSTAT_LOG_DOWN; + } + + return (Result); +} + +/***************************************************************************** + * + * CalculateLinkModeStatus - Determins the link mode status of a phys. port + * + * Description: + * The COMMON module only tells us if the mode is half or full duplex. + * But in the decade of auto sensing it is useful for the user to + * know if the mode was negotiated or forced. Therefore we have a + * look to the mode, which was last used by the negotiation process. + * + * Returns: + * The link mode status + */ +PNMI_STATIC SK_U8 CalculateLinkModeStatus( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +unsigned int PhysPortIndex) /* Physical port index */ +{ + SK_U8 Result; + + /* Get the current mode, which can be full or half duplex */ + Result = pAC->GIni.GP[PhysPortIndex].PLinkModeStatus; + + /* Check if no valid mode could be found (link is down) */ + if (Result < SK_LMODE_STAT_HALF) { + + Result = SK_LMODE_STAT_UNKNOWN; + } + else if (pAC->GIni.GP[PhysPortIndex].PLinkMode >= SK_LMODE_AUTOHALF) { + + /* + * Auto-negotiation was used to bring up the link. Change + * the already found duplex status that it indicates + * auto-negotiation was involved. + */ + if (Result == SK_LMODE_STAT_HALF) { + + Result = SK_LMODE_STAT_AUTOHALF; + } + else if (Result == SK_LMODE_STAT_FULL) { + + Result = SK_LMODE_STAT_AUTOFULL; + } + } + + return (Result); +} + +/***************************************************************************** + * + * GetVpdKeyArr - Obtain an array of VPD keys + * + * Description: + * Read the VPD keys and build an array of VPD keys, which are + * easy to access. + * + * Returns: + * SK_PNMI_ERR_OK Task successfully performed. + * SK_PNMI_ERR_GENERAL Something went wrong. + */ +PNMI_STATIC int GetVpdKeyArr( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +char *pKeyArr, /* Ptr KeyArray */ +unsigned int KeyArrLen, /* Length of array in bytes */ +unsigned int *pKeyNo) /* Number of keys */ +{ + unsigned int BufKeysLen = SK_PNMI_VPD_BUFSIZE; + char BufKeys[SK_PNMI_VPD_BUFSIZE]; + unsigned int StartOffset; + unsigned int Offset; + int Index; + int Ret; + + + SK_MEMSET(pKeyArr, 0, KeyArrLen); + + /* + * Get VPD key list + */ + Ret = VpdKeys(pAC, IoC, (char *)&BufKeys, (int *)&BufKeysLen, + (int *)pKeyNo); + if (Ret > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR014, + SK_PNMI_ERR014MSG); + + return (SK_PNMI_ERR_GENERAL); + } + /* If no keys are available return now */ + if (*pKeyNo == 0 || BufKeysLen == 0) { + + return (SK_PNMI_ERR_OK); + } + /* + * If the key list is too long for us trunc it and give a + * errorlog notification. 
This case should not happen because + * the maximum number of keys is limited due to RAM limitations + */ + if (*pKeyNo > SK_PNMI_VPD_ENTRIES) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR015, + SK_PNMI_ERR015MSG); + + *pKeyNo = SK_PNMI_VPD_ENTRIES; + } + + /* + * Now build an array of fixed string length size and copy + * the keys together. + */ + for (Index = 0, StartOffset = 0, Offset = 0; Offset < BufKeysLen; + Offset ++) { + + if (BufKeys[Offset] != 0) { + + continue; + } + + if (Offset - StartOffset > SK_PNMI_VPD_KEY_SIZE) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR016, + SK_PNMI_ERR016MSG); + return (SK_PNMI_ERR_GENERAL); + } + + SK_STRNCPY(pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE, + &BufKeys[StartOffset], SK_PNMI_VPD_KEY_SIZE); + + Index ++; + StartOffset = Offset + 1; + } + + /* Last key not zero terminated? Get it anyway */ + if (StartOffset < Offset) { + + SK_STRNCPY(pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE, + &BufKeys[StartOffset], SK_PNMI_VPD_KEY_SIZE); + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * SirqUpdate - Let the SIRQ update its internal values + * + * Description: + * Just to be sure that the SIRQ module holds its internal data + * structures up to date, we send an update event before we make + * any access. + * + * Returns: + * SK_PNMI_ERR_OK Task successfully performed. + * SK_PNMI_ERR_GENERAL Something went wrong. + */ +PNMI_STATIC int SirqUpdate( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC) /* IO context handle */ +{ + SK_EVPARA EventParam; + + + /* Was the module already updated during the current PNMI call? */ + if (pAC->Pnmi.SirqUpdatedFlag > 0) { + + return (SK_PNMI_ERR_OK); + } + + /* Send an synchronuous update event to the module */ + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + if (SkGeSirqEvent(pAC, IoC, SK_HWEV_UPDATE_STAT, EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR047, + SK_PNMI_ERR047MSG); + + return (SK_PNMI_ERR_GENERAL); + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * RlmtUpdate - Let the RLMT update its internal values + * + * Description: + * Just to be sure that the RLMT module holds its internal data + * structures up to date, we send an update event before we make + * any access. + * + * Returns: + * SK_PNMI_ERR_OK Task successfully performed. + * SK_PNMI_ERR_GENERAL Something went wrong. + */ +PNMI_STATIC int RlmtUpdate( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +{ + SK_EVPARA EventParam; + + + /* Was the module already updated during the current PNMI call? */ + if (pAC->Pnmi.RlmtUpdatedFlag > 0) { + + return (SK_PNMI_ERR_OK); + } + + /* Send an synchronuous update event to the module */ + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + EventParam.Para32[0] = NetIndex; + EventParam.Para32[1] = (SK_U32)-1; + if (SkRlmtEvent(pAC, IoC, SK_RLMT_STATS_UPDATE, EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR048, + SK_PNMI_ERR048MSG); + + return (SK_PNMI_ERR_GENERAL); + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * MacUpdate - Force the XMAC to output the current statistic + * + * Description: + * The XMAC holds its statistic internally. 
To obtain the current + * values we must send a command so that the statistic data will + * be written to a predefined memory area on the adapter. + * + * Returns: + * SK_PNMI_ERR_OK Task successfully performed. + * SK_PNMI_ERR_GENERAL Something went wrong. + */ +PNMI_STATIC int MacUpdate( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +unsigned int FirstMac, /* Index of the first Mac to be updated */ +unsigned int LastMac) /* Index of the last Mac to be updated */ +{ + unsigned int MacIndex; + + /* + * Were the statistics already updated during the + * current PNMI call? + */ + if (pAC->Pnmi.MacUpdatedFlag > 0) { + + return (SK_PNMI_ERR_OK); + } + + /* Send an update command to all MACs specified */ + for (MacIndex = FirstMac; MacIndex <= LastMac; MacIndex ++) { + + /* + * 2002-09-13 pweber: Freeze the current SW counters. + * (That should be done as close as + * possible to the update of the + * HW counters) + */ + if (pAC->GIni.GIMacType == SK_MAC_XMAC) { + pAC->Pnmi.BufPort[MacIndex] = pAC->Pnmi.Port[MacIndex]; + } + + /* 2002-09-13 pweber: Update the HW counter */ + if (pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, MacIndex) != 0) { + + return (SK_PNMI_ERR_GENERAL); + } + } + + return (SK_PNMI_ERR_OK); +} + +/***************************************************************************** + * + * GetStatVal - Retrieve an XMAC statistic counter + * + * Description: + * Retrieves the statistic counter of a virtual or physical port. The + * virtual port is identified by the index 0. It consists of all + * currently active ports. To obtain the counter value for this port + * we must add the statistic counter of all active ports. To grant + * continuous counter values for the virtual port even when port + * switches occur we must additionally add a delta value, which was + * calculated during a SK_PNMI_EVT_RLMT_ACTIVE_UP event. + * + * Returns: + * Requested statistic value + */ +PNMI_STATIC SK_U64 GetStatVal( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +unsigned int LogPortIndex, /* Index of the logical Port to be processed */ +unsigned int StatIndex, /* Index to statistic value */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +{ + unsigned int PhysPortIndex; + unsigned int PhysPortMax; + SK_U64 Val = 0; + + + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ + + PhysPortIndex = NetIndex; + + Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); + } + else { /* Single Net mode */ + + if (LogPortIndex == 0) { + + PhysPortMax = pAC->GIni.GIMacsFound; + + /* Add counter of all active ports */ + for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + Val += GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); + } + } + + /* Correct value because of port switches */ + Val += pAC->Pnmi.VirtualCounterOffset[StatIndex]; + } + else { + /* Get counter value of physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); + + Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); + } + } + return (Val); +} + +/***************************************************************************** + * + * GetPhysStatVal - Get counter value for physical port + * + * Description: + * Builds a 64bit counter value. Except for the octet counters + * the lower 32bit are counted in hardware and the upper 32bit + * in software by monitoring counter overflow interrupts in the + * event handler. 
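(Editor's sketch, not part of the patch: putting the pieces named here together, the value GetPhysStatVal finally returns is the hardware low word, the software-maintained high word, and the reset-compensation delta described in the next sentence. A hypothetical helper, using only the SK_U32/SK_U64 types the driver already defines, would look like this.)

static SK_U64 AssembleCounter(SK_U32 LowVal, SK_U32 HighVal, SK_U64 Delta)
{
	/* low 32 bits come from the MAC register, high 32 bits from the PNMI software counter */
	SK_U64 Val = ((SK_U64)HighVal << 32) | (SK_U64)LowVal;

	/* delta compensates counter loss caused by XMAC resets (XMAC Errata #2) */
	return (Val + Delta);
}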
To grant continous counter values during XMAC + * resets (caused by a workaround) we must add a delta value. + * The delta was calculated in the event handler when a + * SK_PNMI_EVT_XMAC_RESET was received. + * + * Returns: + * Counter value + */ +PNMI_STATIC SK_U64 GetPhysStatVal( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +unsigned int PhysPortIndex, /* Index of the logical Port to be processed */ +unsigned int StatIndex) /* Index to statistic value */ +{ + SK_U64 Val = 0; + SK_U32 LowVal = 0; + SK_U32 HighVal = 0; + SK_U16 Word; + int MacType; + unsigned int HelpIndex; + SK_GEPORT *pPrt; + + SK_PNMI_PORT *pPnmiPrt; + SK_GEMACFUNC *pFnMac; + + pPrt = &pAC->GIni.GP[PhysPortIndex]; + + MacType = pAC->GIni.GIMacType; + + /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + pPnmiPrt = &pAC->Pnmi.BufPort[PhysPortIndex]; + } + else { + pPnmiPrt = &pAC->Pnmi.Port[PhysPortIndex]; + } + + pFnMac = &pAC->GIni.GIFunc; + + switch (StatIndex) { + case SK_PNMI_HTX: + if (MacType == SK_MAC_GMAC) { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_BROADCAST][MacType].Reg, + &LowVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_MULTICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_UNICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + } + else { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + } + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HRX: + if (MacType == SK_MAC_GMAC) { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HRX_BROADCAST][MacType].Reg, + &LowVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HRX_MULTICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HRX_UNICAST][MacType].Reg, + &HighVal); + LowVal += HighVal; + } + else { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + } + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HTX_OCTET: + case SK_PNMI_HRX_OCTET: + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &HighVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex + 1][MacType].Reg, + &LowVal); + break; + + case SK_PNMI_HTX_BURST: + case SK_PNMI_HTX_EXCESS_DEF: + case SK_PNMI_HTX_CARRIER: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HTX_MACC: + /* GMAC only supports PAUSE MAC control frames */ + if (MacType == SK_MAC_GMAC) { + HelpIndex = SK_PNMI_HTX_PMACC; + } + else { + HelpIndex = StatIndex; + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[HelpIndex][MacType].Reg, + &LowVal); + + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HTX_COL: + case SK_PNMI_HRX_UNDERSIZE: + /* Not supported by XMAC */ + if (MacType == SK_MAC_XMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case 
SK_PNMI_HTX_DEFFERAL: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + return (Val); + } + + /* + * XMAC counts frames with deferred transmission + * even in full-duplex mode. + * + * In full-duplex mode the counter remains constant! + */ + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) || + (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL)) { + + LowVal = 0; + HighVal = 0; + } + else { + /* Otherwise get contents of hardware register */ + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + } + break; + + case SK_PNMI_HRX_BADOCTET: + /* Not supported by XMAC */ + if (MacType == SK_MAC_XMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &HighVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex + 1][MacType].Reg, + &LowVal); + break; + + case SK_PNMI_HTX_OCTETLOW: + case SK_PNMI_HRX_OCTETLOW: + case SK_PNMI_HRX_BADOCTETLOW: + return (Val); + + case SK_PNMI_HRX_LONGFRAMES: + /* For XMAC the SW counter is managed by PNMI */ + if (MacType == SK_MAC_XMAC) { + return (pPnmiPrt->StatRxLongFrameCts); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HRX_TOO_LONG: + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + + Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); + + if (MacType == SK_MAC_GMAC) { + /* For GMAC the SW counter is additionally managed by PNMI */ + Val += pPnmiPrt->StatRxFrameTooLongCts; + } + else { + /* + * Frames longer than IEEE 802.3 frame max size are counted + * by XMAC in frame_too_long counter even reception of long + * frames was enabled and the frame was correct. + * So correct the value by subtracting RxLongFrame counter. + */ + Val -= pPnmiPrt->StatRxLongFrameCts; + } + + LowVal = (SK_U32)Val; + HighVal = (SK_U32)(Val >> 32); + break; + + case SK_PNMI_HRX_SHORTS: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + /* GM_RXE_FRAG?? 
*/ + return (Val); + } + + /* + * XMAC counts short frame errors even if link down (#10620) + * + * If link-down the counter remains constant + */ + if (pPrt->PLinkModeStatus != SK_LMODE_STAT_UNKNOWN) { + + /* Otherwise get incremental difference */ + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + + Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); + Val -= pPnmiPrt->RxShortZeroMark; + + LowVal = (SK_U32)Val; + HighVal = (SK_U32)(Val >> 32); + } + break; + + case SK_PNMI_HRX_MACC: + case SK_PNMI_HRX_MACC_UNKWN: + case SK_PNMI_HRX_BURST: + case SK_PNMI_HRX_MISSED: + case SK_PNMI_HRX_FRAMING: + case SK_PNMI_HRX_CARRIER: + case SK_PNMI_HRX_IRLENGTH: + case SK_PNMI_HRX_SYMBOL: + case SK_PNMI_HRX_CEXT: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HRX_PMACC_ERR: + /* For GMAC the SW counter is managed by PNMI */ + if (MacType == SK_MAC_GMAC) { + return (pPnmiPrt->StatRxPMaccErr); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + /* SW counter managed by PNMI */ + case SK_PNMI_HTX_SYNC: + LowVal = (SK_U32)pPnmiPrt->StatSyncCts; + HighVal = (SK_U32)(pPnmiPrt->StatSyncCts >> 32); + break; + + /* SW counter managed by PNMI */ + case SK_PNMI_HTX_SYNC_OCTET: + LowVal = (SK_U32)pPnmiPrt->StatSyncOctetsCts; + HighVal = (SK_U32)(pPnmiPrt->StatSyncOctetsCts >> 32); + break; + + case SK_PNMI_HRX_FCS: + /* + * Broadcom filters FCS errors and counts it in + * Receive Error Counter register + */ + if (pPrt->PhyType == SK_PHY_BCOM) { + /* do not read while not initialized (PHY_READ hangs!)*/ + if (pPrt->PState != SK_PRT_RESET) { + SkXmPhyRead(pAC, IoC, PhysPortIndex, PHY_BCOM_RE_CTR, &Word); + + LowVal = Word; + } + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + } + else { + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + } + break; + + default: + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + } + + Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); + + /* Correct value because of possible XMAC reset. XMAC Errata #2 */ + Val += pPnmiPrt->CounterOffset[StatIndex]; + + return (Val); +} + +/***************************************************************************** + * + * ResetCounter - Set all counters and timestamps to zero + * + * Description: + * Notifies other common modules which store statistic data to + * reset their counters and finally reset our own counters. 
+ * + * Returns: + * Nothing + */ +PNMI_STATIC void ResetCounter( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +SK_U32 NetIndex) +{ + unsigned int PhysPortIndex; + SK_EVPARA EventParam; + + + SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + + /* Notify sensor module */ + SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_CLEAR, EventParam); + + /* Notify RLMT module */ + EventParam.Para32[0] = NetIndex; + EventParam.Para32[1] = (SK_U32)-1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STATS_CLEAR, EventParam); + EventParam.Para32[1] = 0; + + /* Notify SIRQ module */ + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_CLEAR_STAT, EventParam); + + /* Notify CSUM module */ +#ifdef SK_USE_CSUM + EventParam.Para32[0] = NetIndex; + EventParam.Para32[1] = (SK_U32)-1; + SkEventQueue(pAC, SKGE_CSUM, SK_CSUM_EVENT_CLEAR_PROTO_STATS, + EventParam); +#endif /* SK_USE_CSUM */ + + /* Clear XMAC statistic */ + for (PhysPortIndex = 0; PhysPortIndex < + (unsigned int)pAC->GIni.GIMacsFound; PhysPortIndex ++) { + + (void)pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PhysPortIndex); + + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].CounterHigh, + 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].CounterHigh)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + CounterOffset, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].CounterOffset)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].StatSyncCts, + 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].StatSyncCts)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + StatSyncOctetsCts, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].StatSyncOctetsCts)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + StatRxLongFrameCts, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].StatRxLongFrameCts)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + StatRxFrameTooLongCts, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].StatRxFrameTooLongCts)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + StatRxPMaccErr, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].StatRxPMaccErr)); + } + + /* + * Clear local statistics + */ + SK_MEMSET((char *)&pAC->Pnmi.VirtualCounterOffset, 0, + sizeof(pAC->Pnmi.VirtualCounterOffset)); + pAC->Pnmi.RlmtChangeCts = 0; + pAC->Pnmi.RlmtChangeTime = 0; + SK_MEMSET((char *)&pAC->Pnmi.RlmtChangeEstimate.EstValue[0], 0, + sizeof(pAC->Pnmi.RlmtChangeEstimate.EstValue)); + pAC->Pnmi.RlmtChangeEstimate.EstValueIndex = 0; + pAC->Pnmi.RlmtChangeEstimate.Estimate = 0; + pAC->Pnmi.Port[NetIndex].TxSwQueueMax = 0; + pAC->Pnmi.Port[NetIndex].TxRetryCts = 0; + pAC->Pnmi.Port[NetIndex].RxIntrCts = 0; + pAC->Pnmi.Port[NetIndex].TxIntrCts = 0; + pAC->Pnmi.Port[NetIndex].RxNoBufCts = 0; + pAC->Pnmi.Port[NetIndex].TxNoBufCts = 0; + pAC->Pnmi.Port[NetIndex].TxUsedDescrNo = 0; + pAC->Pnmi.Port[NetIndex].RxDeliveredCts = 0; + pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts = 0; + pAC->Pnmi.Port[NetIndex].ErrRecoveryCts = 0; +} + +/***************************************************************************** + * + * GetTrapEntry - Get an entry in the trap buffer + * + * Description: + * The trap buffer stores various events. A user application somehow + * gets notified that an event occured and retrieves the trap buffer + * contens (or simply polls the buffer). The buffer is organized as + * a ring which stores the newest traps at the beginning. The oldest + * traps are overwritten by the newest ones. Each trap entry has a + * unique number, so that applications may detect new trap entries. 
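(Editor's sketch, not part of the patch: the entry layout that GetTrapEntry below writes, read off the SK_PNMI_STORE_* calls in its body; offsets are relative to the start of the entry.)

	/*
	 * byte  0          entry length in bytes, including both length bytes
	 * bytes 1..4       unique trap number (SK_U32, taken from TrapUnique)
	 * bytes 5..8       trap id / OID (SK_U32)
	 * bytes 9..16      timestamp in hundredths of a second (SK_U64)
	 * bytes 17..len-2  trap specific data appended by the Queue*Trap helpers
	 * byte  len-1      copy of the entry length, used to free old entries
	 *                  from the end of the ring when space runs out
	 */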
+ * + * Returns: + * A pointer to the trap entry + */ +PNMI_STATIC char* GetTrapEntry( +SK_AC *pAC, /* Pointer to adapter context */ +SK_U32 TrapId, /* SNMP ID of the trap */ +unsigned int Size) /* Space needed for trap entry */ +{ + unsigned int BufPad = pAC->Pnmi.TrapBufPad; + unsigned int BufFree = pAC->Pnmi.TrapBufFree; + unsigned int Beg = pAC->Pnmi.TrapQueueBeg; + unsigned int End = pAC->Pnmi.TrapQueueEnd; + char *pBuf = &pAC->Pnmi.TrapBuf[0]; + int Wrap; + unsigned int NeededSpace; + unsigned int EntrySize; + SK_U32 Val32; + SK_U64 Val64; + + + /* Last byte of entry will get a copy of the entry length */ + Size ++; + + /* + * Calculate needed buffer space */ + if (Beg >= Size) { + + NeededSpace = Size; + Wrap = SK_FALSE; + } + else { + NeededSpace = Beg + Size; + Wrap = SK_TRUE; + } + + /* + * Check if enough buffer space is provided. Otherwise + * free some entries. Leave one byte space between begin + * and end of buffer to make it possible to detect whether + * the buffer is full or empty + */ + while (BufFree < NeededSpace + 1) { + + if (End == 0) { + + End = SK_PNMI_TRAP_QUEUE_LEN; + } + + EntrySize = (unsigned int)*((unsigned char *)pBuf + End - 1); + BufFree += EntrySize; + End -= EntrySize; +#ifdef DEBUG + SK_MEMSET(pBuf + End, (char)(-1), EntrySize); +#endif /* DEBUG */ + if (End == BufPad) { +#ifdef DEBUG + SK_MEMSET(pBuf, (char)(-1), End); +#endif /* DEBUG */ + BufFree += End; + End = 0; + BufPad = 0; + } + } + + /* + * Insert new entry as first entry. Newest entries are + * stored at the beginning of the queue. + */ + if (Wrap) { + + BufPad = Beg; + Beg = SK_PNMI_TRAP_QUEUE_LEN - Size; + } + else { + Beg = Beg - Size; + } + BufFree -= NeededSpace; + + /* Save the current offsets */ + pAC->Pnmi.TrapQueueBeg = Beg; + pAC->Pnmi.TrapQueueEnd = End; + pAC->Pnmi.TrapBufPad = BufPad; + pAC->Pnmi.TrapBufFree = BufFree; + + /* Initialize the trap entry */ + *(pBuf + Beg + Size - 1) = (char)Size; + *(pBuf + Beg) = (char)Size; + Val32 = (pAC->Pnmi.TrapUnique) ++; + SK_PNMI_STORE_U32(pBuf + Beg + 1, Val32); + SK_PNMI_STORE_U32(pBuf + Beg + 1 + sizeof(SK_U32), TrapId); + Val64 = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC)); + SK_PNMI_STORE_U64(pBuf + Beg + 1 + 2 * sizeof(SK_U32), Val64); + + return (pBuf + Beg); +} + +/***************************************************************************** + * + * CopyTrapQueue - Copies the trap buffer for the TRAP OID + * + * Description: + * On a query of the TRAP OID the trap buffer contents will be + * copied continuously to the request buffer, which must be large + * enough. No length check is performed. + * + * Returns: + * Nothing + */ +PNMI_STATIC void CopyTrapQueue( +SK_AC *pAC, /* Pointer to adapter context */ +char *pDstBuf) /* Buffer to which the queued traps will be copied */ +{ + unsigned int BufPad = pAC->Pnmi.TrapBufPad; + unsigned int Trap = pAC->Pnmi.TrapQueueBeg; + unsigned int End = pAC->Pnmi.TrapQueueEnd; + char *pBuf = &pAC->Pnmi.TrapBuf[0]; + unsigned int Len; + unsigned int DstOff = 0; + + + while (Trap != End) { + + Len = (unsigned int)*(pBuf + Trap); + + /* + * Last byte containing a copy of the length will + * not be copied. 
+ */ + *(pDstBuf + DstOff) = (char)(Len - 1); + SK_MEMCPY(pDstBuf + DstOff + 1, pBuf + Trap + 1, Len - 2); + DstOff += Len - 1; + + Trap += Len; + if (Trap == SK_PNMI_TRAP_QUEUE_LEN) { + + Trap = BufPad; + } + } +} + +/***************************************************************************** + * + * GetTrapQueueLen - Get the length of the trap buffer + * + * Description: + * Evaluates the number of currently stored traps and the needed + * buffer size to retrieve them. + * + * Returns: + * Nothing + */ +PNMI_STATIC void GetTrapQueueLen( +SK_AC *pAC, /* Pointer to adapter context */ +unsigned int *pLen, /* Length in Bytes of all queued traps */ +unsigned int *pEntries) /* Returns number of trapes stored in queue */ +{ + unsigned int BufPad = pAC->Pnmi.TrapBufPad; + unsigned int Trap = pAC->Pnmi.TrapQueueBeg; + unsigned int End = pAC->Pnmi.TrapQueueEnd; + char *pBuf = &pAC->Pnmi.TrapBuf[0]; + unsigned int Len; + unsigned int Entries = 0; + unsigned int TotalLen = 0; + + + while (Trap != End) { + + Len = (unsigned int)*(pBuf + Trap); + TotalLen += Len - 1; + Entries ++; + + Trap += Len; + if (Trap == SK_PNMI_TRAP_QUEUE_LEN) { + + Trap = BufPad; + } + } + + *pEntries = Entries; + *pLen = TotalLen; +} + +/***************************************************************************** + * + * QueueSimpleTrap - Store a simple trap to the trap buffer + * + * Description: + * A simple trap is a trap with now additional data. It consists + * simply of a trap code. + * + * Returns: + * Nothing + */ +PNMI_STATIC void QueueSimpleTrap( +SK_AC *pAC, /* Pointer to adapter context */ +SK_U32 TrapId) /* Type of sensor trap */ +{ + GetTrapEntry(pAC, TrapId, SK_PNMI_TRAP_SIMPLE_LEN); +} + +/***************************************************************************** + * + * QueueSensorTrap - Stores a sensor trap in the trap buffer + * + * Description: + * Gets an entry in the trap buffer and fills it with sensor related + * data. 
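(Editor's sketch, not part of the patch: the sensor specific part that QueueSensorTrap below appends after the common trap header is a plain OID / length / value sequence, read off the store calls in its body.)

	/*
	 * OID_SKGE_SENSOR_INDEX  (SK_U32)   length byte = 4         SensorIndex as SK_U32
	 * OID_SKGE_SENSOR_DESCR  (SK_U32)   length byte = DescrLen  sensor description string
	 * OID_SKGE_SENSOR_TYPE   (SK_U32)   length byte = 1         SenType as one byte
	 * OID_SKGE_SENSOR_VALUE  (SK_U32)   length byte = 4         SenValue as SK_U32
	 */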
+ * + * Returns: + * Nothing + */ +PNMI_STATIC void QueueSensorTrap( +SK_AC *pAC, /* Pointer to adapter context */ +SK_U32 TrapId, /* Type of sensor trap */ +unsigned int SensorIndex) /* Index of sensor which caused the trap */ +{ + char *pBuf; + unsigned int Offset; + unsigned int DescrLen; + SK_U32 Val32; + + + /* Get trap buffer entry */ + DescrLen = SK_STRLEN(pAC->I2c.SenTable[SensorIndex].SenDesc); + pBuf = GetTrapEntry(pAC, TrapId, + SK_PNMI_TRAP_SENSOR_LEN_BASE + DescrLen); + Offset = SK_PNMI_TRAP_SIMPLE_LEN; + + /* Store additionally sensor trap related data */ + Val32 = OID_SKGE_SENSOR_INDEX; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + *(pBuf + Offset + 4) = 4; + Val32 = (SK_U32)SensorIndex; + SK_PNMI_STORE_U32(pBuf + Offset + 5, Val32); + Offset += 9; + + Val32 = (SK_U32)OID_SKGE_SENSOR_DESCR; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + *(pBuf + Offset + 4) = (char)DescrLen; + SK_MEMCPY(pBuf + Offset + 5, pAC->I2c.SenTable[SensorIndex].SenDesc, + DescrLen); + Offset += DescrLen + 5; + + Val32 = OID_SKGE_SENSOR_TYPE; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + *(pBuf + Offset + 4) = 1; + *(pBuf + Offset + 5) = (char)pAC->I2c.SenTable[SensorIndex].SenType; + Offset += 6; + + Val32 = OID_SKGE_SENSOR_VALUE; + SK_PNMI_STORE_U32(pBuf + Offset, Val32); + *(pBuf + Offset + 4) = 4; + Val32 = (SK_U32)pAC->I2c.SenTable[SensorIndex].SenValue; + SK_PNMI_STORE_U32(pBuf + Offset + 5, Val32); +} + +/***************************************************************************** + * + * QueueRlmtNewMacTrap - Store a port switch trap in the trap buffer + * + * Description: + * Nothing further to explain. + * + * Returns: + * Nothing + */ +PNMI_STATIC void QueueRlmtNewMacTrap( +SK_AC *pAC, /* Pointer to adapter context */ +unsigned int ActiveMac) /* Index (0..n) of the currently active port */ +{ + char *pBuf; + SK_U32 Val32; + + + pBuf = GetTrapEntry(pAC, OID_SKGE_TRAP_RLMT_CHANGE_PORT, + SK_PNMI_TRAP_RLMT_CHANGE_LEN); + + Val32 = OID_SKGE_RLMT_PORT_ACTIVE; + SK_PNMI_STORE_U32(pBuf + SK_PNMI_TRAP_SIMPLE_LEN, Val32); + *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 4) = 1; + *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 5) = (char)ActiveMac; +} + +/***************************************************************************** + * + * QueueRlmtPortTrap - Store port related RLMT trap to trap buffer + * + * Description: + * Nothing further to explain. + * + * Returns: + * Nothing + */ +PNMI_STATIC void QueueRlmtPortTrap( +SK_AC *pAC, /* Pointer to adapter context */ +SK_U32 TrapId, /* Type of RLMT port trap */ +unsigned int PortIndex) /* Index of the port, which changed its state */ +{ + char *pBuf; + SK_U32 Val32; + + + pBuf = GetTrapEntry(pAC, TrapId, SK_PNMI_TRAP_RLMT_PORT_LEN); + + Val32 = OID_SKGE_RLMT_PORT_INDEX; + SK_PNMI_STORE_U32(pBuf + SK_PNMI_TRAP_SIMPLE_LEN, Val32); + *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 4) = 1; + *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 5) = (char)PortIndex; +} + +/***************************************************************************** + * + * CopyMac - Copies a MAC address + * + * Description: + * Nothing further to explain. + * + * Returns: + * Nothing + */ +PNMI_STATIC void CopyMac( +char *pDst, /* Pointer to destination buffer */ +SK_MAC_ADDR *pMac) /* Pointer of Source */ +{ + int i; + + + for (i = 0; i < sizeof(SK_MAC_ADDR); i ++) { + + *(pDst + i) = pMac->a[i]; + } +} + +#ifdef SK_POWER_MGMT +/***************************************************************************** + * + * PowerManagement - OID handler function of PowerManagement OIDs + * + * Description: + * The code is simple. 
No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ + +PNMI_STATIC int PowerManagement( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* Get/PreSet/Set action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer to which to mgmt data will be retrieved */ +unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +{ + + SK_U32 RetCode = SK_PNMI_ERR_GENERAL; + + /* + * Check instance. We only handle single instance variables + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + + /* Check length */ + switch (Id) { + + case OID_PNP_CAPABILITIES: + if (*pLen < sizeof(SK_PNP_CAPABILITIES)) { + + *pLen = sizeof(SK_PNP_CAPABILITIES); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_PNP_SET_POWER: + case OID_PNP_QUERY_POWER: + if (*pLen < sizeof(SK_DEVICE_POWER_STATE)) + { + *pLen = sizeof(SK_DEVICE_POWER_STATE); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_PNP_ADD_WAKE_UP_PATTERN: + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + if (*pLen < sizeof(SK_PM_PACKET_PATTERN)) { + + *pLen = sizeof(SK_PM_PACKET_PATTERN); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_PNP_ENABLE_WAKE_UP: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + } + + /* + * Perform action + */ + if (Action == SK_PNMI_GET) { + + /* + * Get value + */ + switch (Id) { + + case OID_PNP_CAPABILITIES: + RetCode = SkPowerQueryPnPCapabilities(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_QUERY_POWER: + /* The Windows DDK describes: An OID_PNP_QUERY_POWER requests + the miniport to indicate whether it can transition its NIC + to the low-power state. + A miniport driver must always return NDIS_STATUS_SUCCESS + to a query of OID_PNP_QUERY_POWER. */ + *pLen = sizeof(SK_DEVICE_POWER_STATE); + RetCode = SK_PNMI_ERR_OK; + break; + + /* NDIS handles these OIDs as write-only. 
+ * So in case of get action the buffer with written length = 0 + * is returned + */ + case OID_PNP_SET_POWER: + case OID_PNP_ADD_WAKE_UP_PATTERN: + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + *pLen = 0; + RetCode = SK_PNMI_ERR_NOT_SUPPORTED; + break; + + case OID_PNP_ENABLE_WAKE_UP: + RetCode = SkPowerGetEnableWakeUp(pAC, IoC, pBuf, pLen); + break; + + default: + RetCode = SK_PNMI_ERR_GENERAL; + break; + } + + return (RetCode); + } + + + /* + * Perform preset or set + */ + + /* POWER module does not support PRESET action */ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + switch (Id) { + case OID_PNP_SET_POWER: + RetCode = SkPowerSetPower(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_ADD_WAKE_UP_PATTERN: + RetCode = SkPowerAddWakeUpPattern(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + RetCode = SkPowerRemoveWakeUpPattern(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_ENABLE_WAKE_UP: + RetCode = SkPowerSetEnableWakeUp(pAC, IoC, pBuf, pLen); + break; + + default: + RetCode = SK_PNMI_ERR_READ_ONLY; + } + + return (RetCode); +} +#endif /* SK_POWER_MGMT */ + +#ifdef SK_DIAG_SUPPORT +/***************************************************************************** + * + * DiagActions - OID handler function of Diagnostic driver + * + * Description: + * The code is simple. No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ + +PNMI_STATIC int DiagActions( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + + SK_U32 DiagStatus; + SK_U32 RetCode = SK_PNMI_ERR_GENERAL; + + /* + * Check instance. We only handle single instance variables. + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* + * Check length. + */ + switch (Id) { + + case OID_SKGE_DIAG_MODE: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, SK_PNMI_ERR040MSG); + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* Perform action. */ + + /* GET value. */ + if (Action == SK_PNMI_GET) { + + switch (Id) { + + case OID_SKGE_DIAG_MODE: + DiagStatus = pAC->Pnmi.DiagAttached; + SK_PNMI_STORE_U32(pBuf, DiagStatus); + *pLen = sizeof(SK_U32); + RetCode = SK_PNMI_ERR_OK; + break; + + default: + *pLen = 0; + RetCode = SK_PNMI_ERR_GENERAL; + break; + } + return (RetCode); + } + + /* From here SET or PRESET value. */ + + /* PRESET value is not supported. */ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + /* SET value. */ + switch (Id) { + case OID_SKGE_DIAG_MODE: + + /* Handle the SET. */ + switch (*pBuf) { + + /* Attach the DIAG to this adapter. 
*/ + case SK_DIAG_ATTACHED: + /* Check if we come from running */ + if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) { + + RetCode = SkDrvLeaveDiagMode(pAC); + + } + else if (pAC->Pnmi.DiagAttached == SK_DIAG_IDLE) { + + RetCode = SK_PNMI_ERR_OK; + } + + else { + + RetCode = SK_PNMI_ERR_GENERAL; + + } + + if (RetCode == SK_PNMI_ERR_OK) { + + pAC->Pnmi.DiagAttached = SK_DIAG_ATTACHED; + } + break; + + /* Enter the DIAG mode in the driver. */ + case SK_DIAG_RUNNING: + RetCode = SK_PNMI_ERR_OK; + + /* + * If DiagAttached is set, we can tell the driver + * to enter the DIAG mode. + */ + if (pAC->Pnmi.DiagAttached == SK_DIAG_ATTACHED) { + /* If DiagMode is not active, we can enter it. */ + if (!pAC->DiagModeActive) { + + RetCode = SkDrvEnterDiagMode(pAC); + } + else { + + RetCode = SK_PNMI_ERR_GENERAL; + } + } + else { + + RetCode = SK_PNMI_ERR_GENERAL; + } + + if (RetCode == SK_PNMI_ERR_OK) { + + pAC->Pnmi.DiagAttached = SK_DIAG_RUNNING; + } + break; + + case SK_DIAG_IDLE: + /* Check if we come from running */ + if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) { + + RetCode = SkDrvLeaveDiagMode(pAC); + + } + else if (pAC->Pnmi.DiagAttached == SK_DIAG_ATTACHED) { + + RetCode = SK_PNMI_ERR_OK; + } + + else { + + RetCode = SK_PNMI_ERR_GENERAL; + + } + + if (RetCode == SK_PNMI_ERR_OK) { + + pAC->Pnmi.DiagAttached = SK_DIAG_IDLE; + } + break; + + default: + RetCode = SK_PNMI_ERR_BAD_VALUE; + break; + } + break; + + default: + RetCode = SK_PNMI_ERR_GENERAL; + } + + if (RetCode == SK_PNMI_ERR_OK) { + *pLen = sizeof(SK_U32); + } + else { + + *pLen = 0; + } + return (RetCode); +} +#endif /* SK_DIAG_SUPPORT */ + +/***************************************************************************** + * + * Vct - OID handler function of OIDs + * + * Description: + * The code is simple. No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was performed successfully. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter). + * SK_PNMI_ERR_READ_ONLY Only the Get action is allowed. + * + */ + +PNMI_STATIC int Vct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* GET/PRESET/SET action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (-1,2..n) that is to be queried */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctBackupData; + SK_U32 LogPortMax; + SK_U32 PhysPortMax; + SK_U32 PhysPortIndex; + SK_U32 Limit; + SK_U32 Offset; + SK_BOOL Link; + SK_U32 RetCode = SK_PNMI_ERR_GENERAL; + int i; + SK_EVPARA Para; + SK_U32 CableLength; + + /* + * Calculate the port indexes from the instance. + */ + PhysPortMax = pAC->GIni.GIMacsFound; + LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); + + /* Dual net mode? */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + LogPortMax--; + } + + if ((Instance != (SK_U32) (-1))) { + /* Check instance range. 
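+		 * For example (single net mode): instance 2 addresses the first
+		 * physical port and instance 3 the second one, because the code
+		 * below computes PhysPortIndex = Instance - 2. In dual net mode
+		 * the NetIndex selects the port instead.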
*/ + if ((Instance < 2) || (Instance > LogPortMax)) { + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + PhysPortIndex = NetIndex; + } + else { + PhysPortIndex = Instance - 2; + } + Limit = PhysPortIndex + 1; + } + else { + /* + * Instance == (SK_U32) (-1), get all Instances of that OID. + * + * Not implemented yet. May be used in future releases. + */ + PhysPortIndex = 0; + Limit = PhysPortMax; + } + + pPrt = &pAC->GIni.GP[PhysPortIndex]; + if (pPrt->PHWLinkUp) { + Link = SK_TRUE; + } + else { + Link = SK_FALSE; + } + + /* Check MAC type */ + if (pPrt->PhyType != SK_PHY_MARV_COPPER) { + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* Initialize backup data pointer. */ + pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; + + /* Check action type */ + if (Action == SK_PNMI_GET) { + /* Check length */ + switch (Id) { + + case OID_SKGE_VCT_GET: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT)) { + *pLen = (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_VCT_STATUS: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U8)) { + *pLen = (Limit - PhysPortIndex) * sizeof(SK_U8); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* Get value */ + Offset = 0; + for (; PhysPortIndex < Limit; PhysPortIndex++) { + switch (Id) { + + case OID_SKGE_VCT_GET: + if ((Link == SK_FALSE) && + (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING)) { + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 0) { + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] |= + (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE); + + /* Copy results for later use to PNMI struct. */ + for (i = 0; i < 4; i++) { + if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) { + if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] < 0xff)) { + pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH; + } + } + if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] != 0xff)) { + CableLength = 1000 * (((175 * pPrt->PMdiPairLen[i]) / 210) - 28); + } + else { + CableLength = 0; + } + pVctBackupData->PMdiPairLen[i] = CableLength; + pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i]; + } + + Para.Para32[0] = PhysPortIndex; + Para.Para32[1] = -1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para); + SkEventDispatcher(pAC, IoC); + } + else { + ; /* VCT test is running. */ + } + } + + /* Get all results. */ + CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex); + Offset += sizeof(SK_U8); + *(pBuf + Offset) = pPrt->PCableLen; + Offset += sizeof(SK_U8); + for (i = 0; i < 4; i++) { + SK_PNMI_STORE_U32((pBuf + Offset), pVctBackupData->PMdiPairLen[i]); + Offset += sizeof(SK_U32); + } + for (i = 0; i < 4; i++) { + *(pBuf + Offset) = pVctBackupData->PMdiPairSts[i]; + Offset += sizeof(SK_U8); + } + + RetCode = SK_PNMI_ERR_OK; + break; + + case OID_SKGE_VCT_STATUS: + CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex); + Offset += sizeof(SK_U8); + RetCode = SK_PNMI_ERR_OK; + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } /* for */ + *pLen = Offset; + return (RetCode); + + } /* if SK_PNMI_GET */ + + /* + * From here SET or PRESET action. Check if the passed + * buffer length is plausible. 
+ */ + + /* Check length */ + switch (Id) { + case OID_SKGE_VCT_SET: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) { + *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Perform preset or set. + */ + + /* VCT does not support PRESET action. */ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + Offset = 0; + for (; PhysPortIndex < Limit; PhysPortIndex++) { + switch (Id) { + case OID_SKGE_VCT_SET: /* Start VCT test. */ + if (Link == SK_FALSE) { + SkGeStopPort(pAC, IoC, PhysPortIndex, SK_STOP_ALL, SK_SOFT_RST); + + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_TRUE); + if (RetCode == 0) { /* RetCode: 0 => Start! */ + pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_NEW_VCT_DATA; + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_LINK; + + /* + * Start VCT timer counter. + */ + SK_MEMSET((char *) &Para, 0, sizeof(Para)); + Para.Para32[0] = PhysPortIndex; + Para.Para32[1] = -1; + SkTimerStart(pAC, IoC, &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer, + 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Para); + SK_PNMI_STORE_U32((pBuf + Offset), RetCode); + RetCode = SK_PNMI_ERR_OK; + } + else { /* RetCode: 2 => Running! */ + SK_PNMI_STORE_U32((pBuf + Offset), RetCode); + RetCode = SK_PNMI_ERR_OK; + } + } + else { /* RetCode: 4 => Link! */ + RetCode = 4; + SK_PNMI_STORE_U32((pBuf + Offset), RetCode); + RetCode = SK_PNMI_ERR_OK; + } + Offset += sizeof(SK_U32); + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } /* for */ + *pLen = Offset; + return (RetCode); + +} /* Vct */ + + +PNMI_STATIC void CheckVctStatus( +SK_AC *pAC, +SK_IOC IoC, +char *pBuf, +SK_U32 Offset, +SK_U32 PhysPortIndex) +{ + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctData; + SK_U32 RetCode; + + pPrt = &pAC->GIni.GP[PhysPortIndex]; + + pVctData = (SK_PNMI_VCT *) (pBuf + Offset); + pVctData->VctStatus = SK_PNMI_VCT_NONE; + + if (!pPrt->PHWLinkUp) { + + /* Was a VCT test ever made before? */ + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) { + if ((pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_LINK)) { + pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA; + } + else { + pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA; + } + } + + /* Check VCT test status. */ + RetCode = SkGmCableDiagStatus(pAC,IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 2) { /* VCT test is running. */ + pVctData->VctStatus |= SK_PNMI_VCT_RUNNING; + } + else { /* VCT data was copied to pAC here. Check PENDING state. */ + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) { + pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA; + } + } + + if (pPrt->PCableLen != 0xff) { /* Old DSP value. */ + pVctData->VctStatus |= SK_PNMI_VCT_OLD_DSP_DATA; + } + } + else { + + /* Was a VCT test ever made before? */ + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) { + pVctData->VctStatus &= ~SK_PNMI_VCT_NEW_VCT_DATA; + pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA; + } + + /* DSP only valid in 100/1000 modes. 
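+	 * (The check below simply treats every link speed other than
+	 *  SK_LSPEED_STAT_10MBPS as one of the 100/1000 modes.)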
*/ + if (pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed != + SK_LSPEED_STAT_10MBPS) { + pVctData->VctStatus |= SK_PNMI_VCT_NEW_DSP_DATA; + } + } +} /* CheckVctStatus */ + + +/***************************************************************************** + * + * SkPnmiGenIoctl - Handles new generic PNMI IOCTL, calls the needed + * PNMI function depending on the subcommand and + * returns all data belonging to the complete database + * or OID request. + * + * Description: + * Looks up the requested subcommand, calls the corresponding handler + * function and passes all required parameters to it. + * The function is called by the driver. It is needed to handle the new + * generic PNMI IOCTL. This IOCTL is given to the driver and contains both + * the OID and a subcommand to decide what kind of request has to be done. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed + * SK_PNMI_ERR_GENERAL A general severe internal error occured + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take + * the data. + * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ +int SkPnmiGenIoctl( +SK_AC *pAC, /* Pointer to adapter context struct */ +SK_IOC IoC, /* I/O context */ +void *pBuf, /* Buffer used for the management data transfer */ +unsigned int *pLen, /* Length of buffer */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ +SK_I32 Mode; /* Store value of subcommand. */ +SK_U32 Oid; /* Store value of OID. */ +int ReturnCode; /* Store return value to show status of PNMI action. */ +int HeaderLength; /* Length of desired action plus OID. */ + + ReturnCode = SK_PNMI_ERR_GENERAL; + + SK_MEMCPY(&Mode, pBuf, sizeof(SK_I32)); + SK_MEMCPY(&Oid, (char *) pBuf + sizeof(SK_I32), sizeof(SK_U32)); + HeaderLength = sizeof(SK_I32) + sizeof(SK_U32); + *pLen = *pLen - HeaderLength; + SK_MEMCPY((char *) pBuf + sizeof(SK_I32), (char *) pBuf + HeaderLength, *pLen); + + switch(Mode) { + case SK_GET_SINGLE_VAR: + ReturnCode = SkPnmiGetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; + case SK_PRESET_SINGLE_VAR: + ReturnCode = SkPnmiPreSetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; + case SK_SET_SINGLE_VAR: + ReturnCode = SkPnmiSetVar(pAC, IoC, Oid, + (char *) pBuf + sizeof(SK_I32), pLen, + ((SK_U32) (-1)), NetIndex); + SK_PNMI_STORE_U32(pBuf, ReturnCode); + *pLen = *pLen + sizeof(SK_I32); + break; + case SK_GET_FULL_MIB: + ReturnCode = SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex); + break; + case SK_PRESET_FULL_MIB: + ReturnCode = SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex); + break; + case SK_SET_FULL_MIB: + ReturnCode = SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex); + break; + default: + break; + } + + return (ReturnCode); + +} /* SkGeIocGen */ diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c new file mode 100644 index 000000000000..3e7aa49afd00 --- /dev/null +++ b/drivers/net/sk98lin/skgesirq.c @@ -0,0 +1,2229 @@ +/****************************************************************************** + * + * Name: skgesirq.c + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.92 $ + * Date: $Date: 2003/09/16 14:37:07 $ + * Purpose: 
Special IRQ module + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/* + * Special Interrupt handler + * + * The following abstract should show how this module is included + * in the driver path: + * + * In the ISR of the driver the bits for frame transmission complete and + * for receive complete are checked and handled by the driver itself. + * The bits of the slow path mask are checked after that and then the + * entry into the so-called "slow path" is prepared. It is an implementors + * decision whether this is executed directly or just scheduled by + * disabling the mask. In the interrupt service routine some events may be + * generated, so it would be a good idea to call the EventDispatcher + * right after this ISR. + * + * The Interrupt source register of the adapter is NOT read by this module. + * SO if the drivers implementor needs a while loop around the + * slow data paths interrupt bits, he needs to call the SkGeSirqIsr() for + * each loop entered. + * + * However, the MAC Interrupt status registers are read in a while loop. + * + */ + +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: skgesirq.c,v 1.92 2003/09/16 14:37:07 rschmidt Exp $ (C) Marvell."; +#endif + +#include "h/skdrv1st.h" /* Driver Specific Definitions */ +#ifndef SK_SLIM +#include "h/skgepnmi.h" /* PNMI Definitions */ +#include "h/skrlmt.h" /* RLMT Definitions */ +#endif +#include "h/skdrv2nd.h" /* Adapter Control and Driver specific Def. */ + +/* local function prototypes */ +#ifdef GENESIS +static int SkGePortCheckUpXmac(SK_AC*, SK_IOC, int, SK_BOOL); +static int SkGePortCheckUpBcom(SK_AC*, SK_IOC, int, SK_BOOL); +static void SkPhyIsrBcom(SK_AC*, SK_IOC, int, SK_U16); +#endif /* GENESIS */ +#ifdef YUKON +static int SkGePortCheckUpGmac(SK_AC*, SK_IOC, int, SK_BOOL); +static void SkPhyIsrGmac(SK_AC*, SK_IOC, int, SK_U16); +#endif /* YUKON */ +#ifdef OTHER_PHY +static int SkGePortCheckUpLone(SK_AC*, SK_IOC, int, SK_BOOL); +static int SkGePortCheckUpNat(SK_AC*, SK_IOC, int, SK_BOOL); +static void SkPhyIsrLone(SK_AC*, SK_IOC, int, SK_U16); +#endif /* OTHER_PHY */ + +#ifdef GENESIS +/* + * array of Rx counter from XMAC which are checked + * in AutoSense mode to check whether a link is not able to auto-negotiate. 
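+ *
+ * Minimal sketch of how these counters are consumed (the real loop lives
+ * in SkGePortCheckShorts() further down; variable names are taken from
+ * there):
+ *
+ *	RxCts = 0;
+ *	for (i = 0; i < sizeof(SkGeRxRegs)/sizeof(SkGeRxRegs[0]); i++) {
+ *		(void)SkXmMacStatistic(pAC, IoC, Port, SkGeRxRegs[i], &RxTmp);
+ *		RxCts += (SK_U64)RxTmp;
+ *	}
+ *
+ * A sum that does not increase between two checks ("nothing received")
+ * is what triggers a link restart in the autosensing fallback path.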
+ */ +static const SK_U16 SkGeRxRegs[]= { + XM_RXF_64B, + XM_RXF_127B, + XM_RXF_255B, + XM_RXF_511B, + XM_RXF_1023B, + XM_RXF_MAX_SZ +} ; +#endif /* GENESIS */ + +#ifdef __C2MAN__ +/* + * Special IRQ function + * + * General Description: + * + */ +intro() +{} +#endif + +/****************************************************************************** + * + * SkHWInitDefSense() - Default Autosensing mode initialization + * + * Description: sets the PLinkMode for HWInit + * + * Returns: N/A + */ +static void SkHWInitDefSense( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + + pPrt = &pAC->GIni.GP[Port]; + + pPrt->PAutoNegTimeOut = 0; + + if (pPrt->PLinkModeConf != SK_LMODE_AUTOSENSE) { + pPrt->PLinkMode = pPrt->PLinkModeConf; + return; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("AutoSensing: First mode %d on Port %d\n", + (int)SK_LMODE_AUTOFULL, Port)); + + pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL; + + return; +} /* SkHWInitDefSense */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkHWSenseGetNext() - Get Next Autosensing Mode + * + * Description: gets the appropriate next mode + * + * Note: + * + */ +static SK_U8 SkHWSenseGetNext( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + + pPrt = &pAC->GIni.GP[Port]; + + pPrt->PAutoNegTimeOut = 0; + + if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) { + /* Leave all as configured */ + return(pPrt->PLinkModeConf); + } + + if (pPrt->PLinkMode == (SK_U8)SK_LMODE_AUTOFULL) { + /* Return next mode AUTOBOTH */ + return ((SK_U8)SK_LMODE_AUTOBOTH); + } + + /* Return default autofull */ + return ((SK_U8)SK_LMODE_AUTOFULL); +} /* SkHWSenseGetNext */ + + +/****************************************************************************** + * + * SkHWSenseSetNext() - Autosensing Set next mode + * + * Description: sets the appropriate next mode + * + * Returns: N/A + */ +static void SkHWSenseSetNext( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U8 NewMode) /* New Mode to be written in sense mode */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + + pPrt = &pAC->GIni.GP[Port]; + + pPrt->PAutoNegTimeOut = 0; + + if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) { + return; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("AutoSensing: next mode %d on Port %d\n", + (int)NewMode, Port)); + + pPrt->PLinkMode = NewMode; + + return; +} /* SkHWSenseSetNext */ +#endif /* GENESIS */ + + +/****************************************************************************** + * + * SkHWLinkDown() - Link Down handling + * + * Description: handles the hardware link down signal + * + * Returns: N/A + */ +void SkHWLinkDown( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + + pPrt = &pAC->GIni.GP[Port]; + + /* Disable all MAC interrupts */ + SkMacIrqDisable(pAC, IoC, Port); + + /* Disable Receiver and Transmitter */ + SkMacRxTxDisable(pAC, IoC, Port); + + /* Init default sense mode */ + SkHWInitDefSense(pAC, IoC, Port); + + if (pPrt->PHWLinkUp == SK_FALSE) { + return; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link down Port %d\n", Port)); + + /* Set Link to DOWN */ + pPrt->PHWLinkUp = 
SK_FALSE; + + /* Reset Port stati */ + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; + pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE; + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_INDETERMINATED; + + /* Re-init Phy especially when the AutoSense default is set now */ + SkMacInitPhy(pAC, IoC, Port, SK_FALSE); + + /* GP0: used for workaround of Rev. C Errata 2 */ + + /* Do NOT signal to RLMT */ + + /* Do NOT start the timer here */ +} /* SkHWLinkDown */ + + +/****************************************************************************** + * + * SkHWLinkUp() - Link Up handling + * + * Description: handles the hardware link up signal + * + * Returns: N/A + */ +static void SkHWLinkUp( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PHWLinkUp) { + /* We do NOT need to proceed on active link */ + return; + } + + pPrt->PHWLinkUp = SK_TRUE; + pPrt->PAutoNegFail = SK_FALSE; + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; + + if (pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOHALF && + pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOFULL && + pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOBOTH) { + /* Link is up and no Auto-negotiation should be done */ + + /* Link speed should be the configured one */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + /* default is 1000 Mbps */ + case SK_LSPEED_1000MBPS: + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS; + break; + case SK_LSPEED_100MBPS: + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_100MBPS; + break; + case SK_LSPEED_10MBPS: + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_10MBPS; + break; + } + + /* Set Link Mode Status */ + if (pPrt->PLinkMode == SK_LMODE_FULL) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_FULL; + } + else { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_HALF; + } + + /* No flow control without auto-negotiation */ + pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE; + + /* enable Rx/Tx */ + (void)SkMacRxTxEnable(pAC, IoC, Port); + } +} /* SkHWLinkUp */ + + +/****************************************************************************** + * + * SkMacParity() - MAC parity workaround + * + * Description: handles MAC parity errors correctly + * + * Returns: N/A + */ +static void SkMacParity( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index of the port failed */ +{ + SK_EVPARA Para; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_U32 TxMax; /* Tx Max Size Counter */ + + pPrt = &pAC->GIni.GP[Port]; + + /* Clear IRQ Tx Parity Error */ +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_PERR); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), + (SK_U8)((pAC->GIni.GIChipId == CHIP_ID_YUKON && + pAC->GIni.GIChipRev == 0) ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE)); + } +#endif /* YUKON */ + + if (pPrt->PCheckPar) { + + if (Port == MAC_1) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E016, SKERR_SIRQ_E016MSG); + } + else { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E017, SKERR_SIRQ_E017MSG); + } + Para.Para64 = Port; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + + Para.Para32[0] = Port; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + + return; + } + + /* Check whether frames with a size of 1k were sent */ +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, Port); + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_MAX_SZ, &TxMax); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + (void)SkGmMacStatistic(pAC, IoC, Port, GM_TXF_1518B, &TxMax); + } +#endif /* YUKON */ + + if (TxMax > 0) { + /* From now on check the parity */ + pPrt->PCheckPar = SK_TRUE; + } +} /* SkMacParity */ + + +/****************************************************************************** + * + * SkGeHwErr() - Hardware Error service routine + * + * Description: handles all HW Error interrupts + * + * Returns: N/A + */ +static void SkGeHwErr( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +SK_U32 HwStatus) /* Interrupt status word */ +{ + SK_EVPARA Para; + SK_U16 Word; + + if ((HwStatus & (IS_IRQ_MST_ERR | IS_IRQ_STAT)) != 0) { + /* PCI Errors occured */ + if ((HwStatus & IS_IRQ_STAT) != 0) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E013, SKERR_SIRQ_E013MSG); + } + else { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E012, SKERR_SIRQ_E012MSG); + } + + /* Reset all bits in the PCI STATUS register */ + SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); + + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); + SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS)); + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + Para.Para64 = 0; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); + } + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + if ((HwStatus & IS_NO_STAT_M1) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INSTAT); + } + + if ((HwStatus & IS_NO_STAT_M2) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INSTAT); + } + + if ((HwStatus & IS_NO_TIST_M1) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INTIST); + } + + if ((HwStatus & IS_NO_TIST_M2) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INTIST); + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* This is necessary only for Rx timing measurements */ + if ((HwStatus & IS_IRQ_TIST_OV) != 0) { + /* increment Time Stamp Timer counter (high) */ + pAC->GIni.GITimeStampCnt++; + + /* Clear Time Stamp Timer IRQ */ + SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_CLR_IRQ); + } + + if ((HwStatus & IS_IRQ_SENSOR) != 0) { + /* no sensors on 32-bit Yukon */ + if (pAC->GIni.GIYukon32Bit) { + /* disable HW Error IRQ */ + pAC->GIni.GIValIrqMask &= ~IS_HW_ERR; + } + } + } +#endif /* YUKON */ + + if ((HwStatus & IS_RAM_RD_PAR) != 0) { + SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_RD_PERR); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E014, SKERR_SIRQ_E014MSG); + Para.Para64 = 0; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); + } + + 
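+	/* The RAM write parity case below mirrors the read parity case above:
+	 * clear the IRQ, log the error and queue SK_DRV_ADAP_FAIL, i.e. the
+	 * whole adapter is failed, unlike the IS_R1_PAR_ERR / IS_R2_PAR_ERR
+	 * cases further down, which fail only the affected port
+	 * (SK_DRV_PORT_FAIL).
+	 */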
if ((HwStatus & IS_RAM_WR_PAR) != 0) { + SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_WR_PERR); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E015, SKERR_SIRQ_E015MSG); + Para.Para64 = 0; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); + } + + if ((HwStatus & IS_M1_PAR_ERR) != 0) { + SkMacParity(pAC, IoC, MAC_1); + } + + if ((HwStatus & IS_M2_PAR_ERR) != 0) { + SkMacParity(pAC, IoC, MAC_2); + } + + if ((HwStatus & IS_R1_PAR_ERR) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_P); + + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E018, SKERR_SIRQ_E018MSG); + Para.Para64 = MAC_1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + + Para.Para32[0] = MAC_1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((HwStatus & IS_R2_PAR_ERR) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_P); + + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E019, SKERR_SIRQ_E019MSG); + Para.Para64 = MAC_2; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + + Para.Para32[0] = MAC_2; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } +} /* SkGeHwErr */ + + +/****************************************************************************** + * + * SkGeSirqIsr() - Special Interrupt Service Routine + * + * Description: handles all non data transfer specific interrupts (slow path) + * + * Returns: N/A + */ +void SkGeSirqIsr( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +SK_U32 Istatus) /* Interrupt status word */ +{ + SK_EVPARA Para; + SK_U32 RegVal32; /* Read register value */ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_U16 PhyInt; + int i; + + if (((Istatus & IS_HW_ERR) & pAC->GIni.GIValIrqMask) != 0) { + /* read the HW Error Interrupt source */ + SK_IN32(IoC, B0_HWE_ISRC, &RegVal32); + + SkGeHwErr(pAC, IoC, RegVal32); + } + + /* + * Packet Timeout interrupts + */ + /* Check whether MACs are correctly initialized */ + if (((Istatus & (IS_PA_TO_RX1 | IS_PA_TO_TX1)) != 0) && + pAC->GIni.GP[MAC_1].PState == SK_PRT_RESET) { + /* MAC 1 was not initialized but Packet timeout occured */ + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E004, + SKERR_SIRQ_E004MSG); + } + + if (((Istatus & (IS_PA_TO_RX2 | IS_PA_TO_TX2)) != 0) && + pAC->GIni.GP[MAC_2].PState == SK_PRT_RESET) { + /* MAC 2 was not initialized but Packet timeout occured */ + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E005, + SKERR_SIRQ_E005MSG); + } + + if ((Istatus & IS_PA_TO_RX1) != 0) { + /* Means network is filling us up */ + SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E002, + SKERR_SIRQ_E002MSG); + SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX1); + } + + if ((Istatus & IS_PA_TO_RX2) != 0) { + /* Means network is filling us up */ + SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E003, + SKERR_SIRQ_E003MSG); + SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX2); + } + + if ((Istatus & IS_PA_TO_TX1) != 0) { + + pPrt = &pAC->GIni.GP[0]; + + /* May be a normal situation in a server with a slow network */ + SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX1); + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* + * workaround: if in half duplex mode, check for Tx hangup. + * Read number of TX'ed bytes, wait for 10 ms, then compare + * the number with current value. If nothing changed, we assume + * that Tx is hanging and do a FIFO flush (see event routine). + */ + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && + !pPrt->HalfDupTimerActive) { + /* + * many more pack. arb. 
timeouts may come in between, + * we ignore those + */ + pPrt->HalfDupTimerActive = SK_TRUE; + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, 0); + + (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_HI, &RegVal32); + + pPrt->LastOctets = (SK_U64)RegVal32 << 32; + + (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_LO, &RegVal32); + + pPrt->LastOctets += RegVal32; + + Para.Para32[0] = 0; + SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, + SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); + } + } +#endif /* GENESIS */ + } + + if ((Istatus & IS_PA_TO_TX2) != 0) { + + pPrt = &pAC->GIni.GP[1]; + + /* May be a normal situation in a server with a slow network */ + SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX2); + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* workaround: see above */ + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && + !pPrt->HalfDupTimerActive) { + pPrt->HalfDupTimerActive = SK_TRUE; + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, 1); + + (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_HI, &RegVal32); + + pPrt->LastOctets = (SK_U64)RegVal32 << 32; + + (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_LO, &RegVal32); + + pPrt->LastOctets += RegVal32; + + Para.Para32[0] = 1; + SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, + SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); + } + } +#endif /* GENESIS */ + } + + /* Check interrupts of the particular queues */ + if ((Istatus & IS_R1_C) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_C); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E006, + SKERR_SIRQ_E006MSG); + Para.Para64 = MAC_1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((Istatus & IS_R2_C) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_C); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E007, + SKERR_SIRQ_E007MSG); + Para.Para64 = MAC_2; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_2; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((Istatus & IS_XS1_C) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_XS1_CSR, CSR_IRQ_CL_C); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E008, + SKERR_SIRQ_E008MSG); + Para.Para64 = MAC_1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((Istatus & IS_XA1_C) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_XA1_CSR, CSR_IRQ_CL_C); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E009, + SKERR_SIRQ_E009MSG); + Para.Para64 = MAC_1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_1; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((Istatus & IS_XS2_C) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_XS2_CSR, CSR_IRQ_CL_C); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E010, + SKERR_SIRQ_E010MSG); + Para.Para64 = MAC_2; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_2; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((Istatus & IS_XA2_C) != 0) { + /* Clear IRQ */ + SK_OUT32(IoC, B0_XA2_CSR, CSR_IRQ_CL_C); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E011, + SKERR_SIRQ_E011MSG); + Para.Para64 = MAC_2; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); + Para.Para32[0] = MAC_2; + SkEventQueue(pAC, SKGE_RLMT, 
SK_RLMT_LINK_DOWN, Para); + } + + /* External reg interrupt */ + if ((Istatus & IS_EXT_REG) != 0) { + /* Test IRQs from PHY */ + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + + pPrt = &pAC->GIni.GP[i]; + + if (pPrt->PState == SK_PRT_RESET) { + continue; + } + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + + case SK_PHY_XMAC: + break; + + case SK_PHY_BCOM: + SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_STAT, &PhyInt); + + if ((PhyInt & ~PHY_B_DEF_MSK) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Bcom Int: 0x%04X\n", + i, PhyInt)); + SkPhyIsrBcom(pAC, IoC, i, PhyInt); + } + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_STAT, &PhyInt); + + if ((PhyInt & PHY_L_DEF_MSK) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Lone Int: %x\n", + i, PhyInt)); + SkPhyIsrLone(pAC, IoC, i, PhyInt); + } + break; +#endif /* OTHER_PHY */ + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* Read PHY Interrupt Status */ + SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_STAT, &PhyInt); + + if ((PhyInt & PHY_M_DEF_MSK) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Marv Int: 0x%04X\n", + i, PhyInt)); + SkPhyIsrGmac(pAC, IoC, i, PhyInt); + } + } +#endif /* YUKON */ + } + } + + /* I2C Ready interrupt */ + if ((Istatus & IS_I2C_READY) != 0) { +#ifdef SK_SLIM + SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); +#else + SkI2cIsr(pAC, IoC); +#endif + } + + /* SW forced interrupt */ + if ((Istatus & IS_IRQ_SW) != 0) { + /* clear the software IRQ */ + SK_OUT8(IoC, B0_CTST, CS_CL_SW_IRQ); + } + + if ((Istatus & IS_LNK_SYNC_M1) != 0) { + /* + * We do NOT need the Link Sync interrupt, because it shows + * us only a link going down. + */ + /* clear interrupt */ + SK_OUT8(IoC, MR_ADDR(MAC_1, LNK_SYNC_CTRL), LED_CLR_IRQ); + } + + /* Check MAC after link sync counter */ + if ((Istatus & IS_MAC1) != 0) { + /* IRQ from MAC 1 */ + SkMacIrq(pAC, IoC, MAC_1); + } + + if ((Istatus & IS_LNK_SYNC_M2) != 0) { + /* + * We do NOT need the Link Sync interrupt, because it shows + * us only a link going down. + */ + /* clear interrupt */ + SK_OUT8(IoC, MR_ADDR(MAC_2, LNK_SYNC_CTRL), LED_CLR_IRQ); + } + + /* Check MAC after link sync counter */ + if ((Istatus & IS_MAC2) != 0) { + /* IRQ from MAC 2 */ + SkMacIrq(pAC, IoC, MAC_2); + } + + /* Timer interrupt (served last) */ + if ((Istatus & IS_TIMINT) != 0) { + /* check for HW Errors */ + if (((Istatus & IS_HW_ERR) & ~pAC->GIni.GIValIrqMask) != 0) { + /* read the HW Error Interrupt source */ + SK_IN32(IoC, B0_HWE_ISRC, &RegVal32); + + SkGeHwErr(pAC, IoC, RegVal32); + } + + SkHwtIsr(pAC, IoC); + } + +} /* SkGeSirqIsr */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkGePortCheckShorts() - Implementing XMAC Workaround Errata # 2 + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + */ +static int SkGePortCheckShorts( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port) /* Which port should be checked */ +{ + SK_U32 Shorts; /* Short Event Counter */ + SK_U32 CheckShorts; /* Check value for Short Event Counter */ + SK_U64 RxCts; /* Rx Counter (packets on network) */ + SK_U32 RxTmp; /* Rx temp. 
Counter */ + SK_U32 FcsErrCts; /* FCS Error Counter */ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + int Rtv; /* Return value */ + int i; + + pPrt = &pAC->GIni.GP[Port]; + + /* Default: no action */ + Rtv = SK_HW_PS_NONE; + + (void)SkXmUpdateStats(pAC, IoC, Port); + + /* Extra precaution: check for short Event counter */ + (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts); + + /* + * Read Rx counters (packets seen on the network and not necessarily + * really received. + */ + RxCts = 0; + + for (i = 0; i < sizeof(SkGeRxRegs)/sizeof(SkGeRxRegs[0]); i++) { + + (void)SkXmMacStatistic(pAC, IoC, Port, SkGeRxRegs[i], &RxTmp); + + RxCts += (SK_U64)RxTmp; + } + + /* On default: check shorts against zero */ + CheckShorts = 0; + + /* Extra precaution on active links */ + if (pPrt->PHWLinkUp) { + /* Reset Link Restart counter */ + pPrt->PLinkResCt = 0; + pPrt->PAutoNegTOCt = 0; + + /* If link is up check for 2 */ + CheckShorts = 2; + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXF_FCS_ERR, &FcsErrCts); + + if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && + pPrt->PLipaAutoNeg == SK_LIPA_UNKNOWN && + (pPrt->PLinkMode == SK_LMODE_HALF || + pPrt->PLinkMode == SK_LMODE_FULL)) { + /* + * This is autosensing and we are in the fallback + * manual full/half duplex mode. + */ + if (RxCts == pPrt->PPrevRx) { + /* Nothing received, restart link */ + pPrt->PPrevFcs = FcsErrCts; + pPrt->PPrevShorts = Shorts; + + return(SK_HW_PS_RESTART); + } + else { + pPrt->PLipaAutoNeg = SK_LIPA_MANUAL; + } + } + + if (((RxCts - pPrt->PPrevRx) > pPrt->PRxLim) || + (!(FcsErrCts - pPrt->PPrevFcs))) { + /* + * Note: The compare with zero above has to be done the way shown, + * otherwise the Linux driver will have a problem. + */ + /* + * We received a bunch of frames or no CRC error occured on the + * network -> ok. + */ + pPrt->PPrevRx = RxCts; + pPrt->PPrevFcs = FcsErrCts; + pPrt->PPrevShorts = Shorts; + + return(SK_HW_PS_NONE); + } + + pPrt->PPrevFcs = FcsErrCts; + } + + + if ((Shorts - pPrt->PPrevShorts) > CheckShorts) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Short Event Count Restart Port %d \n", Port)); + Rtv = SK_HW_PS_RESTART; + } + + pPrt->PPrevShorts = Shorts; + pPrt->PPrevRx = RxCts; + + return(Rtv); +} /* SkGePortCheckShorts */ +#endif /* GENESIS */ + + +/****************************************************************************** + * + * SkGePortCheckUp() - Check if the link is up + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUp( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port) /* Which port should be checked */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_BOOL AutoNeg; /* Is Auto-negotiation used ? 
*/ + int Rtv; /* Return value */ + + Rtv = SK_HW_PS_NONE; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + AutoNeg = SK_FALSE; + } + else { + AutoNeg = SK_TRUE; + } + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + + case SK_PHY_XMAC: + Rtv = SkGePortCheckUpXmac(pAC, IoC, Port, AutoNeg); + break; + case SK_PHY_BCOM: + Rtv = SkGePortCheckUpBcom(pAC, IoC, Port, AutoNeg); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + Rtv = SkGePortCheckUpLone(pAC, IoC, Port, AutoNeg); + break; + case SK_PHY_NAT: + Rtv = SkGePortCheckUpNat(pAC, IoC, Port, AutoNeg); + break; +#endif /* OTHER_PHY */ + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + Rtv = SkGePortCheckUpGmac(pAC, IoC, Port, AutoNeg); + } +#endif /* YUKON */ + + return(Rtv); +} /* SkGePortCheckUp */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkGePortCheckUpXmac() - Implementing of the Workaround Errata # 2 + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUpXmac( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ +{ + SK_U32 Shorts; /* Short Event Counter */ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + int Done; + SK_U32 GpReg; /* General Purpose register value */ + SK_U16 Isrc; /* Interrupt source register */ + SK_U16 IsrcSum; /* Interrupt source register sum */ + SK_U16 LpAb; /* Link Partner Ability */ + SK_U16 ResAb; /* Resolved Ability */ + SK_U16 ExtStat; /* Extended Status Register */ + SK_U8 NextMode; /* Next AutoSensing Mode */ + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PHWLinkUp) { + if (pPrt->PhyType != SK_PHY_XMAC) { + return(SK_HW_PS_NONE); + } + else { + return(SkGePortCheckShorts(pAC, IoC, Port)); + } + } + + IsrcSum = pPrt->PIsave; + pPrt->PIsave = 0; + + /* Now wait for each port's link */ + if (pPrt->PLinkBroken) { + /* Link was broken */ + XM_IN32(IoC, Port, XM_GP_PORT, &GpReg); + + if ((GpReg & XM_GP_INP_ASS) == 0) { + /* The Link is in sync */ + XM_IN16(IoC, Port, XM_ISRC, &Isrc); + IsrcSum |= Isrc; + SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum); + + if ((Isrc & XM_IS_INP_ASS) == 0) { + /* It has been in sync since last time */ + /* Restart the PORT */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link in sync Restart Port %d\n", Port)); + + (void)SkXmUpdateStats(pAC, IoC, Port); + + /* We now need to reinitialize the PrevShorts counter */ + (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts); + pPrt->PPrevShorts = Shorts; + + pPrt->PLinkBroken = SK_FALSE; + + /* + * Link Restart Workaround: + * it may be possible that the other Link side + * restarts its link as well an we detect + * another LinkBroken. To prevent this + * happening we check for a maximum number + * of consecutive restart. If those happens, + * we do NOT restart the active link and + * check whether the link is now o.k. 
+ */ + pPrt->PLinkResCt++; + + pPrt->PAutoNegTimeOut = 0; + + if (pPrt->PLinkResCt < SK_MAX_LRESTART) { + return(SK_HW_PS_RESTART); + } + + pPrt->PLinkResCt = 0; + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Do NOT restart on Port %d %x %x\n", Port, Isrc, IsrcSum)); + } + else { + pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Save Sync/nosync Port %d %x %x\n", Port, Isrc, IsrcSum)); + + /* Do nothing more if link is broken */ + return(SK_HW_PS_NONE); + } + } + else { + /* Do nothing more if link is broken */ + return(SK_HW_PS_NONE); + } + + } + else { + /* Link was not broken, check if it is */ + XM_IN16(IoC, Port, XM_ISRC, &Isrc); + IsrcSum |= Isrc; + if ((Isrc & XM_IS_INP_ASS) != 0) { + XM_IN16(IoC, Port, XM_ISRC, &Isrc); + IsrcSum |= Isrc; + if ((Isrc & XM_IS_INP_ASS) != 0) { + XM_IN16(IoC, Port, XM_ISRC, &Isrc); + IsrcSum |= Isrc; + if ((Isrc & XM_IS_INP_ASS) != 0) { + pPrt->PLinkBroken = SK_TRUE; + /* Re-Init Link partner Autoneg flag */ + pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN; + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link broken Port %d\n", Port)); + + /* Cable removed-> reinit sense mode */ + SkHWInitDefSense(pAC, IoC, Port); + + return(SK_HW_PS_RESTART); + } + } + } + else { + SkXmAutoNegLipaXmac(pAC, IoC, Port, Isrc); + + if (SkGePortCheckShorts(pAC, IoC, Port) == SK_HW_PS_RESTART) { + return(SK_HW_PS_RESTART); + } + } + } + + /* + * here we usually can check whether the link is in sync and + * auto-negotiation is done. + */ + XM_IN32(IoC, Port, XM_GP_PORT, &GpReg); + XM_IN16(IoC, Port, XM_ISRC, &Isrc); + IsrcSum |= Isrc; + + SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum); + + if ((GpReg & XM_GP_INP_ASS) != 0 || (IsrcSum & XM_IS_INP_ASS) != 0) { + if ((GpReg & XM_GP_INP_ASS) == 0) { + /* Save Auto-negotiation Done interrupt only if link is in sync */ + pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND); + } +#ifdef DEBUG + if ((pPrt->PIsave & XM_IS_AND) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg done rescheduled Port %d\n", Port)); + } +#endif /* DEBUG */ + return(SK_HW_PS_NONE); + } + + if (AutoNeg) { + if ((IsrcSum & XM_IS_AND) != 0) { + SkHWLinkUp(pAC, IoC, Port); + Done = SkMacAutoNegDone(pAC, IoC, Port); + if (Done != SK_AND_OK) { + /* Get PHY parameters, for debugging only */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LpAb); + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg FAIL Port %d (LpAb %x, ResAb %x)\n", + Port, LpAb, ResAb)); + + /* Try next possible mode */ + NextMode = SkHWSenseGetNext(pAC, IoC, Port); + SkHWLinkDown(pAC, IoC, Port); + if (Done == SK_AND_DUP_CAP) { + /* GoTo next mode */ + SkHWSenseSetNext(pAC, IoC, Port, NextMode); + } + + return(SK_HW_PS_RESTART); + } + /* + * Dummy Read extended status to prevent extra link down/ups + * (clear Page Received bit if set) + */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_EXP, &ExtStat); + + return(SK_HW_PS_LINK); + } + + /* AutoNeg not done, but HW link is up. 
Check for timeouts */ + pPrt->PAutoNegTimeOut++; + if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) { + /* Increase the Timeout counter */ + pPrt->PAutoNegTOCt++; + + /* Timeout occured */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("AutoNeg timeout Port %d\n", Port)); + if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && + pPrt->PLipaAutoNeg != SK_LIPA_AUTO) { + /* Set Link manually up */ + SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Set manual full duplex Port %d\n", Port)); + } + + if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && + pPrt->PLipaAutoNeg == SK_LIPA_AUTO && + pPrt->PAutoNegTOCt >= SK_MAX_ANEG_TO) { + /* + * This is rather complicated. + * we need to check here whether the LIPA_AUTO + * we saw before is false alert. We saw at one + * switch ( SR8800) that on boot time it sends + * just one auto-neg packet and does no further + * auto-negotiation. + * Solution: we restart the autosensing after + * a few timeouts. + */ + pPrt->PAutoNegTOCt = 0; + pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN; + SkHWInitDefSense(pAC, IoC, Port); + } + + /* Do the restart */ + return(SK_HW_PS_RESTART); + } + } + else { + /* Link is up and we don't need more */ +#ifdef DEBUG + if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("ERROR: Lipa auto detected on port %d\n", Port)); + } +#endif /* DEBUG */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link sync(GP), Port %d\n", Port)); + SkHWLinkUp(pAC, IoC, Port); + + /* + * Link sync (GP) and so assume a good connection. But if not received + * a bunch of frames received in a time slot (maybe broken tx cable) + * the port is restart. + */ + return(SK_HW_PS_LINK); + } + + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpXmac */ + + +/****************************************************************************** + * + * SkGePortCheckUpBcom() - Check if the link is up on Bcom PHY + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUpBcom( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? 
*/ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + int Done; + SK_U16 Isrc; /* Interrupt source register */ + SK_U16 PhyStat; /* Phy Status Register */ + SK_U16 ResAb; /* Master/Slave resolution */ + SK_U16 Ctrl; /* Broadcom control flags */ +#ifdef DEBUG + SK_U16 LpAb; + SK_U16 ExtStat; +#endif /* DEBUG */ + + pPrt = &pAC->GIni.GP[Port]; + + /* Check for No HCD Link events (#10523) */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &Isrc); + +#ifdef xDEBUG + if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) == + (PHY_B_IS_SCR_S_ER | PHY_B_IS_RRS_CHANGE | PHY_B_IS_LRS_CHANGE)) { + + SK_U32 Stat1, Stat2, Stat3; + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1); + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "CheckUp1 - Stat: %x, Mask: %x", + (void *)Isrc, + (void *)Stat1); + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1); + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &Stat2); + Stat1 = Stat1 << 16 | Stat2; + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2); + Stat3 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3); + Stat2 = Stat2 << 16 | Stat3; + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "Ctrl/Stat: %x, AN Adv/LP: %x", + (void *)Stat1, + (void *)Stat2); + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1); + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2); + Stat1 = Stat1 << 16 | Stat2; + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2); + Stat3 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &Stat3); + Stat2 = Stat2 << 16 | Stat3; + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "AN Exp/IEEE Ext: %x, 1000T Ctrl/Stat: %x", + (void *)Stat1, + (void *)Stat2); + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1); + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2); + Stat1 = Stat1 << 16 | Stat2; + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2); + Stat3 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3); + Stat2 = Stat2 << 16 | Stat3; + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "PHY Ext Ctrl/Stat: %x, Aux Ctrl/Stat: %x", + (void *)Stat1, + (void *)Stat2); + } +#endif /* DEBUG */ + + if ((Isrc & (PHY_B_IS_NO_HDCL /* | PHY_B_IS_NO_HDC */)) != 0) { + /* + * Workaround BCom Errata: + * enable and disable loopback mode if "NO HCD" occurs. + */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Ctrl); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, + (SK_U16)(Ctrl | PHY_CT_LOOP)); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, + (SK_U16)(Ctrl & ~PHY_CT_LOOP)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("No HCD Link event, Port %d\n", Port)); +#ifdef xDEBUG + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "No HCD link event, port %d.", + (void *)Port, + (void *)NULL); +#endif /* DEBUG */ + } + + /* Not obsolete: link status bit is latched to 0 and autoclearing! 
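+	 * (Presumably this is why PHY_BCOM_STAT is read again further below:
+	 *  the first read clears the latched link status bit, the second read
+	 *  then reflects the current state.)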
*/ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat); + + if (pPrt->PHWLinkUp) { + return(SK_HW_PS_NONE); + } + +#ifdef xDEBUG + { + SK_U32 Stat1, Stat2, Stat3; + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1); + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "CheckUp1a - Stat: %x, Mask: %x", + (void *)Isrc, + (void *)Stat1); + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1); + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat); + Stat1 = Stat1 << 16 | PhyStat; + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2); + Stat3 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3); + Stat2 = Stat2 << 16 | Stat3; + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "Ctrl/Stat: %x, AN Adv/LP: %x", + (void *)Stat1, + (void *)Stat2); + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1); + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2); + Stat1 = Stat1 << 16 | Stat2; + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2); + Stat3 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); + Stat2 = Stat2 << 16 | ResAb; + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "AN Exp/IEEE Ext: %x, 1000T Ctrl/Stat: %x", + (void *)Stat1, + (void *)Stat2); + + Stat1 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1); + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2); + Stat1 = Stat1 << 16 | Stat2; + Stat2 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2); + Stat3 = 0; + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3); + Stat2 = Stat2 << 16 | Stat3; + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "PHY Ext Ctrl/Stat: %x, Aux Ctrl/Stat: %x", + (void *)Stat1, + (void *)Stat2); + } +#endif /* DEBUG */ + + /* + * Here we usually can check whether the link is in sync and + * auto-negotiation is done. + */ + + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat); + + SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("CheckUp Port %d, PhyStat: 0x%04X\n", Port, PhyStat)); + + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); + + if ((ResAb & PHY_B_1000S_MSF) != 0) { + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault port %d\n", Port)); + + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + + return(SK_HW_PS_RESTART); + } + + if ((PhyStat & PHY_ST_LSYNC) == 0) { + return(SK_HW_PS_NONE); + } + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? 
+ SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Port %d, ResAb: 0x%04X\n", Port, ResAb)); + + if (AutoNeg) { + if ((PhyStat & PHY_ST_AN_OVER) != 0) { + + SkHWLinkUp(pAC, IoC, Port); + + Done = SkMacAutoNegDone(pAC, IoC, Port); + + if (Done != SK_AND_OK) { +#ifdef DEBUG + /* Get PHY parameters, for debugging only */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LpAb); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ExtStat); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n", + Port, LpAb, ExtStat)); +#endif /* DEBUG */ + return(SK_HW_PS_RESTART); + } + else { +#ifdef xDEBUG + /* Dummy read ISR to prevent extra link downs/ups */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat); + + if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) { + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "CheckUp2 - Stat: %x", + (void *)ExtStat, + (void *)NULL); + } +#endif /* DEBUG */ + return(SK_HW_PS_LINK); + } + } + } + else { /* !AutoNeg */ + /* Link is up and we don't need more. */ +#ifdef DEBUG + if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("ERROR: Lipa auto detected on port %d\n", Port)); + } +#endif /* DEBUG */ + +#ifdef xDEBUG + /* Dummy read ISR to prevent extra link downs/ups */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat); + + if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) { + CMSMPrintString( + pAC->pConfigTable, + MSG_TYPE_RUNTIME_INFO, + "CheckUp3 - Stat: %x", + (void *)ExtStat, + (void *)NULL); + } +#endif /* DEBUG */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link sync(GP), Port %d\n", Port)); + SkHWLinkUp(pAC, IoC, Port); + + return(SK_HW_PS_LINK); + } + + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpBcom */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGePortCheckUpGmac() - Check if the link is up on Marvell PHY + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUpGmac( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? 
*/ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + int Done; + SK_U16 PhyIsrc; /* PHY Interrupt source */ + SK_U16 PhyStat; /* PPY Status */ + SK_U16 PhySpecStat;/* PHY Specific Status */ + SK_U16 ResAb; /* Master/Slave resolution */ + SK_EVPARA Para; +#ifdef DEBUG + SK_U16 Word; /* I/O helper */ +#endif /* DEBUG */ + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PHWLinkUp) { + return(SK_HW_PS_NONE); + } + + /* Read PHY Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("CheckUp Port %d, PhyStat: 0x%04X\n", Port, PhyStat)); + + /* Read PHY Interrupt Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &PhyIsrc); + + if ((PhyIsrc & PHY_M_IS_AN_COMPL) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Negotiation Completed, PhyIsrc: 0x%04X\n", PhyIsrc)); + } + + if ((PhyIsrc & PHY_M_IS_LSP_CHANGE) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Link Speed Changed, PhyIsrc: 0x%04X\n", PhyIsrc)); + } + + SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); + + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb); + + if ((ResAb & PHY_B_1000S_MSF) != 0) { + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault port %d\n", Port)); + + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + + return(SK_HW_PS_RESTART); + } + + /* Read PHY Specific Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Phy1000BT: 0x%04X, PhySpecStat: 0x%04X\n", ResAb, PhySpecStat)); + +#ifdef DEBUG + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_EXP, &Word); + + if ((PhyIsrc & PHY_M_IS_AN_PR) != 0 || (Word & PHY_ANE_RX_PG) != 0 || + (PhySpecStat & PHY_M_PS_PAGE_REC) != 0) { + /* Read PHY Next Page Link Partner */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_NEPG_LP, &Word); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Page Received, NextPage: 0x%04X\n", Word)); + } +#endif /* DEBUG */ + + if ((PhySpecStat & PHY_M_PS_LINK_UP) == 0) { + return(SK_HW_PS_NONE); + } + + if ((PhySpecStat & PHY_M_PS_DOWNS_STAT) != 0 || + (PhyIsrc & PHY_M_IS_DOWNSH_DET) != 0) { + /* Downshift detected */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E025, SKERR_SIRQ_E025MSG); + + Para.Para64 = Port; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_DOWNSHIFT_DET, Para); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Downshift detected, PhyIsrc: 0x%04X\n", PhyIsrc)); + } + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; + + pPrt->PCableLen = (SK_U8)((PhySpecStat & PHY_M_PS_CABLE_MSK) >> 7); + + if (AutoNeg) { + /* Auto-Negotiation Over ? 
*/ + if ((PhyStat & PHY_ST_AN_OVER) != 0) { + + SkHWLinkUp(pAC, IoC, Port); + + Done = SkMacAutoNegDone(pAC, IoC, Port); + + if (Done != SK_AND_OK) { + return(SK_HW_PS_RESTART); + } + + return(SK_HW_PS_LINK); + } + } + else { /* !AutoNeg */ + /* Link is up and we don't need more */ +#ifdef DEBUG + if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("ERROR: Lipa auto detected on port %d\n", Port)); + } +#endif /* DEBUG */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link sync, Port %d\n", Port)); + SkHWLinkUp(pAC, IoC, Port); + + return(SK_HW_PS_LINK); + } + + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpGmac */ +#endif /* YUKON */ + + +#ifdef OTHER_PHY +/****************************************************************************** + * + * SkGePortCheckUpLone() - Check if the link is up on Level One PHY + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUpLone( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + int Done; + SK_U16 Isrc; /* Interrupt source register */ + SK_U16 LpAb; /* Link Partner Ability */ + SK_U16 ExtStat; /* Extended Status Register */ + SK_U16 PhyStat; /* Phy Status Register */ + SK_U16 StatSum; + SK_U8 NextMode; /* Next AutoSensing Mode */ + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PHWLinkUp) { + return(SK_HW_PS_NONE); + } + + StatSum = pPrt->PIsave; + pPrt->PIsave = 0; + + /* + * here we usually can check whether the link is in sync and + * auto-negotiation is done. + */ + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_STAT, &PhyStat); + StatSum |= PhyStat; + + SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); + + if ((PhyStat & PHY_ST_LSYNC) == 0) { + /* Save Auto-negotiation Done bit */ + pPrt->PIsave = (SK_U16)(StatSum & PHY_ST_AN_OVER); +#ifdef DEBUG + if ((pPrt->PIsave & PHY_ST_AN_OVER) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg done rescheduled Port %d\n", Port)); + } +#endif /* DEBUG */ + return(SK_HW_PS_NONE); + } + + if (AutoNeg) { + if ((StatSum & PHY_ST_AN_OVER) != 0) { + SkHWLinkUp(pAC, IoC, Port); + Done = SkMacAutoNegDone(pAC, IoC, Port); + if (Done != SK_AND_OK) { + /* Get PHY parameters, for debugging only */ + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LpAb); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ExtStat); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n", + Port, LpAb, ExtStat)); + + /* Try next possible mode */ + NextMode = SkHWSenseGetNext(pAC, IoC, Port); + SkHWLinkDown(pAC, IoC, Port); + if (Done == SK_AND_DUP_CAP) { + /* GoTo next mode */ + SkHWSenseSetNext(pAC, IoC, Port, NextMode); + } + + return(SK_HW_PS_RESTART); + + } + else { + /* + * Dummy Read interrupt status to prevent + * extra link down/ups + */ + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat); + return(SK_HW_PS_LINK); + } + } + + /* AutoNeg not done, but HW link is up. 
Check for timeouts */ + pPrt->PAutoNegTimeOut++; + if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) { + /* Timeout occured */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("AutoNeg timeout Port %d\n", Port)); + if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && + pPrt->PLipaAutoNeg != SK_LIPA_AUTO) { + /* Set Link manually up */ + SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Set manual full duplex Port %d\n", Port)); + } + + /* Do the restart */ + return(SK_HW_PS_RESTART); + } + } + else { + /* Link is up and we don't need more */ +#ifdef DEBUG + if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("ERROR: Lipa auto detected on port %d\n", Port)); + } +#endif /* DEBUG */ + + /* + * Dummy Read interrupt status to prevent + * extra link down/ups + */ + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link sync(GP), Port %d\n", Port)); + SkHWLinkUp(pAC, IoC, Port); + + return(SK_HW_PS_LINK); + } + + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpLone */ + + +/****************************************************************************** + * + * SkGePortCheckUpNat() - Check if the link is up on National PHY + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUpNat( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port, /* Which port should be checked */ +SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */ +{ + /* todo: National */ + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpNat */ +#endif /* OTHER_PHY */ + + +/****************************************************************************** + * + * SkGeSirqEvent() - Event Service Routine + * + * Description: + * + * Notes: + */ +int SkGeSirqEvent( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* Io Context */ +SK_U32 Event, /* Module specific Event */ +SK_EVPARA Para) /* Event specific Parameter */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_U32 Port; + SK_U32 Val32; + int PortStat; + SK_U8 Val8; +#ifdef GENESIS + SK_U64 Octets; +#endif /* GENESIS */ + + Port = Para.Para32[0]; + pPrt = &pAC->GIni.GP[Port]; + + switch (Event) { + case SK_HWEV_WATIM: + if (pPrt->PState == SK_PRT_RESET) { + + PortStat = SK_HW_PS_NONE; + } + else { + /* Check whether port came up */ + PortStat = SkGePortCheckUp(pAC, IoC, (int)Port); + } + + switch (PortStat) { + case SK_HW_PS_RESTART: + if (pPrt->PHWLinkUp) { + /* Set Link to down */ + SkHWLinkDown(pAC, IoC, (int)Port); + + /* + * Signal directly to RLMT to ensure correct + * sequence of SWITCH and RESET event. + */ + SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para); + } + + /* Restart needed */ + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para); + break; + + case SK_HW_PS_LINK: + /* Signal to RLMT */ + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_UP, Para); + break; + } + + /* Start again the check Timer */ + if (pPrt->PHWLinkUp) { + Val32 = SK_WA_ACT_TIME; + } + else { + Val32 = SK_WA_INA_TIME; + } + + /* Todo: still needed for non-XMAC PHYs??? */ + /* Start workaround Errata #2 timer */ + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, Val32, + SKGE_HWAC, SK_HWEV_WATIM, Para); + break; + + case SK_HWEV_PORT_START: + if (pPrt->PHWLinkUp) { + /* + * Signal directly to RLMT to ensure correct + * sequence of SWITCH and RESET event. 
+ */ + SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para); + } + + SkHWLinkDown(pAC, IoC, (int)Port); + + /* Schedule Port RESET */ + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para); + + /* Start workaround Errata #2 timer */ + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME, + SKGE_HWAC, SK_HWEV_WATIM, Para); + break; + + case SK_HWEV_PORT_STOP: + if (pPrt->PHWLinkUp) { + /* + * Signal directly to RLMT to ensure correct + * sequence of SWITCH and RESET event. + */ + SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para); + } + + /* Stop Workaround Timer */ + SkTimerStop(pAC, IoC, &pPrt->PWaTimer); + + SkHWLinkDown(pAC, IoC, (int)Port); + break; + + case SK_HWEV_UPDATE_STAT: + /* We do NOT need to update any statistics */ + break; + + case SK_HWEV_CLEAR_STAT: + /* We do NOT need to clear any statistics */ + for (Port = 0; Port < (SK_U32)pAC->GIni.GIMacsFound; Port++) { + pPrt->PPrevRx = 0; + pPrt->PPrevFcs = 0; + pPrt->PPrevShorts = 0; + } + break; + + case SK_HWEV_SET_LMODE: + Val8 = (SK_U8)Para.Para32[1]; + if (pPrt->PLinkModeConf != Val8) { + /* Set New link mode */ + pPrt->PLinkModeConf = Val8; + + /* Restart Port */ + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para); + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para); + } + break; + + case SK_HWEV_SET_FLOWMODE: + Val8 = (SK_U8)Para.Para32[1]; + if (pPrt->PFlowCtrlMode != Val8) { + /* Set New Flow Control mode */ + pPrt->PFlowCtrlMode = Val8; + + /* Restart Port */ + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para); + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para); + } + break; + + case SK_HWEV_SET_ROLE: + /* not possible for fiber */ + if (!pAC->GIni.GICopperType) { + break; + } + Val8 = (SK_U8)Para.Para32[1]; + if (pPrt->PMSMode != Val8) { + /* Set New Role (Master/Slave) mode */ + pPrt->PMSMode = Val8; + + /* Restart Port */ + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para); + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para); + } + break; + + case SK_HWEV_SET_SPEED: + if (pPrt->PhyType != SK_PHY_MARV_COPPER) { + break; + } + Val8 = (SK_U8)Para.Para32[1]; + if (pPrt->PLinkSpeed != Val8) { + /* Set New Speed parameter */ + pPrt->PLinkSpeed = Val8; + + /* Restart Port */ + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para); + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para); + } + break; + +#ifdef GENESIS + case SK_HWEV_HALFDUP_CHK: + if (pAC->GIni.GIGenesis) { + /* + * half duplex hangup workaround. 
+ * See packet arbiter timeout interrupt for description + */ + pPrt->HalfDupTimerActive = SK_FALSE; + if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, Port); + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_HI, &Val32); + + Octets = (SK_U64)Val32 << 32; + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_LO, &Val32); + + Octets += Val32; + + if (pPrt->LastOctets == Octets) { + /* Tx hanging, a FIFO flush restarts it */ + SkMacFlushTxFifo(pAC, IoC, Port); + } + } + } + break; +#endif /* GENESIS */ + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_SIRQ_E001, SKERR_SIRQ_E001MSG); + break; + } + + return(0); +} /* SkGeSirqEvent */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkPhyIsrBcom() - PHY interrupt service routine + * + * Description: handles all interrupts from BCom PHY + * + * Returns: N/A + */ +static void SkPhyIsrBcom( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* Io Context */ +int Port, /* Port Num = PHY Num */ +SK_U16 IStatus) /* Interrupt Status */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_EVPARA Para; + + pPrt = &pAC->GIni.GP[Port]; + + if ((IStatus & PHY_B_IS_PSE) != 0) { + /* Incorrectable pair swap error */ + SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E022, + SKERR_SIRQ_E022MSG); + } + + if ((IStatus & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) != 0) { + + SkHWLinkDown(pAC, IoC, Port); + + Para.Para32[0] = (SK_U32)Port; + /* Signal to RLMT */ + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + + /* Start workaround Errata #2 timer */ + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME, + SKGE_HWAC, SK_HWEV_WATIM, Para); + } + +} /* SkPhyIsrBcom */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkPhyIsrGmac() - PHY interrupt service routine + * + * Description: handles all interrupts from Marvell PHY + * + * Returns: N/A + */ +static void SkPhyIsrGmac( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* Io Context */ +int Port, /* Port Num = PHY Num */ +SK_U16 IStatus) /* Interrupt Status */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_EVPARA Para; + SK_U16 Word; + + pPrt = &pAC->GIni.GP[Port]; + + if ((IStatus & (PHY_M_IS_AN_PR | PHY_M_IS_LST_CHANGE)) != 0) { + + SkHWLinkDown(pAC, IoC, Port); + + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &Word); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg.Adv: 0x%04X\n", Word)); + + /* Set Auto-negotiation advertisement */ + if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) { + /* restore Asymmetric Pause bit */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV, + (SK_U16)(Word | PHY_M_AN_ASP)); + } + + Para.Para32[0] = (SK_U32)Port; + /* Signal to RLMT */ + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + + if ((IStatus & PHY_M_IS_AN_ERROR) != 0) { + /* Auto-Negotiation Error */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E023, SKERR_SIRQ_E023MSG); + } + + if ((IStatus & PHY_M_IS_FIFO_ERROR) != 0) { + /* FIFO Overflow/Underrun Error */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E024, SKERR_SIRQ_E024MSG); + } + +} /* SkPhyIsrGmac */ +#endif /* YUKON */ + + +#ifdef OTHER_PHY +/****************************************************************************** + * + * SkPhyIsrLone() - PHY interrupt service routine + * + * Description: handles all interrupts from LONE PHY + * + * Returns: N/A + 
*/ +static void SkPhyIsrLone( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* Io Context */ +int Port, /* Port Num = PHY Num */ +SK_U16 IStatus) /* Interrupt Status */ +{ + SK_EVPARA Para; + + if (IStatus & (PHY_L_IS_DUP | PHY_L_IS_ISOL)) { + + SkHWLinkDown(pAC, IoC, Port); + + Para.Para32[0] = (SK_U32)Port; + /* Signal to RLMT */ + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + } + +} /* SkPhyIsrLone */ +#endif /* OTHER_PHY */ + +/* End of File */ diff --git a/drivers/net/sk98lin/ski2c.c b/drivers/net/sk98lin/ski2c.c new file mode 100644 index 000000000000..79bf57cb5326 --- /dev/null +++ b/drivers/net/sk98lin/ski2c.c @@ -0,0 +1,1296 @@ +/****************************************************************************** + * + * Name: ski2c.c + * Project: Gigabit Ethernet Adapters, TWSI-Module + * Version: $Revision: 1.59 $ + * Date: $Date: 2003/10/20 09:07:25 $ + * Purpose: Functions to access Voltage and Temperature Sensor + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/* + * I2C Protocol + */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: ski2c.c,v 1.59 2003/10/20 09:07:25 rschmidt Exp $ (C) Marvell. "; +#endif + +#include "h/skdrv1st.h" /* Driver Specific Definitions */ +#include "h/lm80.h" +#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ + +#ifdef __C2MAN__ +/* + I2C protocol implementation. + + General Description: + + The I2C protocol is used for the temperature sensors and for + the serial EEPROM which hold the configuration. + + This file covers functions that allow to read write and do + some bulk requests a specified I2C address. + + The Genesis has 2 I2C buses. One for the EEPROM which holds + the VPD Data and one for temperature and voltage sensor. + The following picture shows the I2C buses, I2C devices and + their control registers. + + Note: The VPD functions are in skvpd.c +. +. PCI Config I2C Bus for VPD Data: +. +. +------------+ +. | VPD EEPROM | +. +------------+ +. | +. | <-- I2C +. | +. +-----------+-----------+ +. | | +. +-----------------+ +-----------------+ +. | PCI_VPD_ADR_REG | | PCI_VPD_DAT_REG | +. +-----------------+ +-----------------+ +. +. +. I2C Bus for LM80 sensor: +. +. +-----------------+ +. | Temperature and | +. | Voltage Sensor | +. | LM80 | +. +-----------------+ +. | +. | +. I2C --> | +. | +. +----+ +. +-------------->| OR |<--+ +. | +----+ | +. +------+------+ | +. | | | +. +--------+ +--------+ +----------+ +. | B2_I2C | | B2_I2C | | B2_I2C | +. | _CTRL | | _DATA | | _SW | +. +--------+ +--------+ +----------+ +. + The I2C bus may be driven by the B2_I2C_SW or by the B2_I2C_CTRL + and B2_I2C_DATA registers. + For driver software it is recommended to use the I2C control and + data register, because I2C bus timing is done by the ASIC and + an interrupt may be received when the I2C request is completed. 
+ + Clock Rate Timing: MIN MAX generated by + VPD EEPROM: 50 kHz 100 kHz HW + LM80 over I2C Ctrl/Data reg. 50 kHz 100 kHz HW + LM80 over B2_I2C_SW register 0 400 kHz SW + + Note: The clock generated by the hardware is dependend on the + PCI clock. If the PCI bus clock is 33 MHz, the I2C/VPD + clock is 50 kHz. + */ +intro() +{} +#endif + +#ifdef SK_DIAG +/* + * I2C Fast Mode timing values used by the LM80. + * If new devices are added to the I2C bus the timing values have to be checked. + */ +#ifndef I2C_SLOW_TIMING +#define T_CLK_LOW 1300L /* clock low time in ns */ +#define T_CLK_HIGH 600L /* clock high time in ns */ +#define T_DATA_IN_SETUP 100L /* data in Set-up Time */ +#define T_START_HOLD 600L /* start condition hold time */ +#define T_START_SETUP 600L /* start condition Set-up time */ +#define T_STOP_SETUP 600L /* stop condition Set-up time */ +#define T_BUS_IDLE 1300L /* time the bus must free after Tx */ +#define T_CLK_2_DATA_OUT 900L /* max. clock low to data output valid */ +#else /* I2C_SLOW_TIMING */ +/* I2C Standard Mode Timing */ +#define T_CLK_LOW 4700L /* clock low time in ns */ +#define T_CLK_HIGH 4000L /* clock high time in ns */ +#define T_DATA_IN_SETUP 250L /* data in Set-up Time */ +#define T_START_HOLD 4000L /* start condition hold time */ +#define T_START_SETUP 4700L /* start condition Set-up time */ +#define T_STOP_SETUP 4000L /* stop condition Set-up time */ +#define T_BUS_IDLE 4700L /* time the bus must free after Tx */ +#endif /* !I2C_SLOW_TIMING */ + +#define NS2BCLK(x) (((x)*125)/10000) + +/* + * I2C Wire Operations + * + * About I2C_CLK_LOW(): + * + * The Data Direction bit (I2C_DATA_DIR) has to be set to input when setting + * clock to low, to prevent the ASIC and the I2C data client from driving the + * serial data line simultaneously (ASIC: last bit of a byte = '1', I2C client + * send an 'ACK'). See also Concentrator Bugreport No. 10192. + */ +#define I2C_DATA_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA) +#define I2C_DATA_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA) +#define I2C_DATA_OUT(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA_DIR) +#define I2C_DATA_IN(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA) +#define I2C_CLK_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_CLK) +#define I2C_CLK_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK | I2C_DATA_DIR) +#define I2C_START_COND(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK) + +#define NS2CLKT(x) ((x*125L)/10000) + +/*--------------- I2C Interface Register Functions --------------- */ + +/* + * sending one bit + */ +void SkI2cSndBit( +SK_IOC IoC, /* I/O Context */ +SK_U8 Bit) /* Bit to send */ +{ + I2C_DATA_OUT(IoC); + if (Bit) { + I2C_DATA_HIGH(IoC); + } + else { + I2C_DATA_LOW(IoC); + } + SkDgWaitTime(IoC, NS2BCLK(T_DATA_IN_SETUP)); + I2C_CLK_HIGH(IoC); + SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH)); + I2C_CLK_LOW(IoC); +} /* SkI2cSndBit*/ + + +/* + * Signal a start to the I2C Bus. + * + * A start is signaled when data goes to low in a high clock cycle. + * + * Ends with Clock Low. 
+ * + * Status: not tested + */ +void SkI2cStart( +SK_IOC IoC) /* I/O Context */ +{ + /* Init data and Clock to output lines */ + /* Set Data high */ + I2C_DATA_OUT(IoC); + I2C_DATA_HIGH(IoC); + /* Set Clock high */ + I2C_CLK_HIGH(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_START_SETUP)); + + /* Set Data Low */ + I2C_DATA_LOW(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_START_HOLD)); + + /* Clock low without Data to Input */ + I2C_START_COND(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW)); +} /* SkI2cStart */ + + +void SkI2cStop( +SK_IOC IoC) /* I/O Context */ +{ + /* Init data and Clock to output lines */ + /* Set Data low */ + I2C_DATA_OUT(IoC); + I2C_DATA_LOW(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_CLK_2_DATA_OUT)); + + /* Set Clock high */ + I2C_CLK_HIGH(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_STOP_SETUP)); + + /* + * Set Data High: Do it by setting the Data Line to Input. + * Because of a pull up resistor the Data Line + * floods to high. + */ + I2C_DATA_IN(IoC); + + /* + * When I2C activity is stopped + * o DATA should be set to input and + * o CLOCK should be set to high! + */ + SkDgWaitTime(IoC, NS2BCLK(T_BUS_IDLE)); +} /* SkI2cStop */ + + +/* + * Receive just one bit via the I2C bus. + * + * Note: Clock must be set to LOW before calling this function. + * + * Returns The received bit. + */ +int SkI2cRcvBit( +SK_IOC IoC) /* I/O Context */ +{ + int Bit; + SK_U8 I2cSwCtrl; + + /* Init data as input line */ + I2C_DATA_IN(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_CLK_2_DATA_OUT)); + + I2C_CLK_HIGH(IoC); + + SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH)); + + SK_I2C_GET_SW(IoC, &I2cSwCtrl); + + Bit = (I2cSwCtrl & I2C_DATA) ? 1 : 0; + + I2C_CLK_LOW(IoC); + SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW-T_CLK_2_DATA_OUT)); + + return(Bit); +} /* SkI2cRcvBit */ + + +/* + * Receive an ACK. + * + * returns 0 If acknowledged + * 1 in case of an error + */ +int SkI2cRcvAck( +SK_IOC IoC) /* I/O Context */ +{ + /* + * Received bit must be zero. + */ + return(SkI2cRcvBit(IoC) != 0); +} /* SkI2cRcvAck */ + + +/* + * Send an NACK. + */ +void SkI2cSndNAck( +SK_IOC IoC) /* I/O Context */ +{ + /* + * Received bit must be zero. + */ + SkI2cSndBit(IoC, 1); +} /* SkI2cSndNAck */ + + +/* + * Send an ACK. + */ +void SkI2cSndAck( +SK_IOC IoC) /* I/O Context */ +{ + /* + * Received bit must be zero. + */ + SkI2cSndBit(IoC, 0); +} /* SkI2cSndAck */ + + +/* + * Send one byte to the I2C device and wait for ACK. + * + * Return acknowleged status. + */ +int SkI2cSndByte( +SK_IOC IoC, /* I/O Context */ +int Byte) /* byte to send */ +{ + int i; + + for (i = 0; i < 8; i++) { + if (Byte & (1<<(7-i))) { + SkI2cSndBit(IoC, 1); + } + else { + SkI2cSndBit(IoC, 0); + } + } + + return(SkI2cRcvAck(IoC)); +} /* SkI2cSndByte */ + + +/* + * Receive one byte and ack it. + * + * Return byte. 
+ */ +int SkI2cRcvByte( +SK_IOC IoC, /* I/O Context */ +int Last) /* Last Byte Flag */ +{ + int i; + int Byte = 0; + + for (i = 0; i < 8; i++) { + Byte <<= 1; + Byte |= SkI2cRcvBit(IoC); + } + + if (Last) { + SkI2cSndNAck(IoC); + } + else { + SkI2cSndAck(IoC); + } + + return(Byte); +} /* SkI2cRcvByte */ + + +/* + * Start dialog and send device address + * + * Return 0 if acknowleged, 1 in case of an error + */ +int SkI2cSndDev( +SK_IOC IoC, /* I/O Context */ +int Addr, /* Device Address */ +int Rw) /* Read / Write Flag */ +{ + SkI2cStart(IoC); + Rw = ~Rw; + Rw &= I2C_WRITE; + return(SkI2cSndByte(IoC, (Addr<<1) | Rw)); +} /* SkI2cSndDev */ + +#endif /* SK_DIAG */ + +/*----------------- I2C CTRL Register Functions ----------*/ + +/* + * waits for a completion of an I2C transfer + * + * returns 0: success, transfer completes + * 1: error, transfer does not complete, I2C transfer + * killed, wait loop terminated. + */ +static int SkI2cWait( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ +{ + SK_U64 StartTime; + SK_U64 CurrentTime; + SK_U32 I2cCtrl; + + StartTime = SkOsGetTime(pAC); + + do { + CurrentTime = SkOsGetTime(pAC); + + if (CurrentTime - StartTime > SK_TICKS_PER_SEC / 8) { + + SK_I2C_STOP(IoC); +#ifndef SK_DIAG + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E002, SKERR_I2C_E002MSG); +#endif /* !SK_DIAG */ + return(1); + } + + SK_I2C_GET_CTL(IoC, &I2cCtrl); + +#ifdef xYUKON_DBG + printf("StartTime=%lu, CurrentTime=%lu\n", + StartTime, CurrentTime); + if (kbhit()) { + return(1); + } +#endif /* YUKON_DBG */ + + } while ((I2cCtrl & I2C_FLAG) == (SK_U32)Event << 31); + + return(0); +} /* SkI2cWait */ + + +/* + * waits for a completion of an I2C transfer + * + * Returns + * Nothing + */ +void SkI2cWaitIrq( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC) /* I/O Context */ +{ + SK_SENSOR *pSen; + SK_U64 StartTime; + SK_U32 IrqSrc; + + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + + if (pSen->SenState == SK_SEN_IDLE) { + return; + } + + StartTime = SkOsGetTime(pAC); + + do { + if (SkOsGetTime(pAC) - StartTime > SK_TICKS_PER_SEC / 8) { + + SK_I2C_STOP(IoC); +#ifndef SK_DIAG + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E016, SKERR_I2C_E016MSG); +#endif /* !SK_DIAG */ + return; + } + + SK_IN32(IoC, B0_ISRC, &IrqSrc); + + } while ((IrqSrc & IS_I2C_READY) == 0); + + pSen->SenState = SK_SEN_IDLE; + return; +} /* SkI2cWaitIrq */ + +/* + * writes a single byte or 4 bytes into the I2C device + * + * returns 0: success + * 1: error + */ +static int SkI2cWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 I2cData, /* I2C Data to write */ +int I2cDev, /* I2C Device Address */ +int I2cDevSize, /* I2C Device Size (e.g. I2C_025K_DEV or I2C_2K_DEV) */ +int I2cReg, /* I2C Device Register Address */ +int I2cBurst) /* I2C Burst Flag */ +{ + SK_OUT32(IoC, B2_I2C_DATA, I2cData); + + SK_I2C_CTL(IoC, I2C_WRITE, I2cDev, I2cDevSize, I2cReg, I2cBurst); + + return(SkI2cWait(pAC, IoC, I2C_WRITE)); +} /* SkI2cWrite*/ + + +#ifdef SK_DIAG +/* + * reads a single byte or 4 bytes from the I2C device + * + * returns the word read + */ +SK_U32 SkI2cRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int I2cDev, /* I2C Device Address */ +int I2cDevSize, /* I2C Device Size (e.g. 
I2C_025K_DEV or I2C_2K_DEV) */ +int I2cReg, /* I2C Device Register Address */ +int I2cBurst) /* I2C Burst Flag */ +{ + SK_U32 Data; + + SK_OUT32(IoC, B2_I2C_DATA, 0); + SK_I2C_CTL(IoC, I2C_READ, I2cDev, I2cDevSize, I2cReg, I2cBurst); + + if (SkI2cWait(pAC, IoC, I2C_READ) != 0) { + w_print("%s\n", SKERR_I2C_E002MSG); + } + + SK_IN32(IoC, B2_I2C_DATA, &Data); + + return(Data); +} /* SkI2cRead */ +#endif /* SK_DIAG */ + + +/* + * read a sensor's value + * + * This function reads a sensor's value from the I2C sensor chip. The sensor + * is defined by its index into the sensors database in the struct pAC points + * to. + * Returns + * 1 if the read is completed + * 0 if the read must be continued (I2C Bus still allocated) + */ +static int SkI2cReadSensor( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_SENSOR *pSen) /* Sensor to be read */ +{ + if (pSen->SenRead != NULL) { + return((*pSen->SenRead)(pAC, IoC, pSen)); + } + else { + return(0); /* no success */ + } +} /* SkI2cReadSensor */ + +/* + * Do the Init state 0 initialization + */ +static int SkI2cInit0( +SK_AC *pAC) /* Adapter Context */ +{ + int i; + + /* Begin with first sensor */ + pAC->I2c.CurrSens = 0; + + /* Begin with timeout control for state machine */ + pAC->I2c.TimerMode = SK_TIMER_WATCH_SM; + + /* Set sensor number to zero */ + pAC->I2c.MaxSens = 0; + +#ifndef SK_DIAG + /* Initialize Number of Dummy Reads */ + pAC->I2c.DummyReads = SK_MAX_SENSORS; +#endif + + for (i = 0; i < SK_MAX_SENSORS; i++) { + pAC->I2c.SenTable[i].SenDesc = "unknown"; + pAC->I2c.SenTable[i].SenType = SK_SEN_UNKNOWN; + pAC->I2c.SenTable[i].SenThreErrHigh = 0; + pAC->I2c.SenTable[i].SenThreErrLow = 0; + pAC->I2c.SenTable[i].SenThreWarnHigh = 0; + pAC->I2c.SenTable[i].SenThreWarnLow = 0; + pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN; + pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_NONE; + pAC->I2c.SenTable[i].SenValue = 0; + pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_NOT_PRESENT; + pAC->I2c.SenTable[i].SenErrCts = 0; + pAC->I2c.SenTable[i].SenBegErrTS = 0; + pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE; + pAC->I2c.SenTable[i].SenRead = NULL; + pAC->I2c.SenTable[i].SenDev = 0; + } + + /* Now we are "INIT data"ed */ + pAC->I2c.InitLevel = SK_INIT_DATA; + return(0); +} /* SkI2cInit0*/ + + +/* + * Do the init state 1 initialization + * + * initialize the following register of the LM80: + * Configuration register: + * - START, noINT, activeLOW, noINT#Clear, noRESET, noCI, noGPO#, noINIT + * + * Interrupt Mask Register 1: + * - all interrupts are Disabled (0xff) + * + * Interrupt Mask Register 2: + * - all interrupts are Disabled (0xff) Interrupt modi doesn't matter. + * + * Fan Divisor/RST_OUT register: + * - Divisors set to 1 (bits 00), all others 0s. 
+ * + * OS# Configuration/Temperature resolution Register: + * - all 0s + * + */ +static int SkI2cInit1( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC) /* I/O Context */ +{ + int i; + SK_U8 I2cSwCtrl; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + + if (pAC->I2c.InitLevel != SK_INIT_DATA) { + /* ReInit not needed in I2C module */ + return(0); + } + + /* Set the Direction of I2C-Data Pin to IN */ + SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA); + /* Check for 32-Bit Yukon with Low at I2C-Data Pin */ + SK_I2C_GET_SW(IoC, &I2cSwCtrl); + + if ((I2cSwCtrl & I2C_DATA) == 0) { + /* this is a 32-Bit board */ + pAC->GIni.GIYukon32Bit = SK_TRUE; + return(0); + } + + /* Check for 64 Bit Yukon without sensors */ + if (SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_CFG, 0) != 0) { + return(0); + } + + (void)SkI2cWrite(pAC, IoC, 0xffUL, LM80_ADDR, I2C_025K_DEV, LM80_IMSK_1, 0); + + (void)SkI2cWrite(pAC, IoC, 0xffUL, LM80_ADDR, I2C_025K_DEV, LM80_IMSK_2, 0); + + (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_FAN_CTRL, 0); + + (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_TEMP_CTRL, 0); + + (void)SkI2cWrite(pAC, IoC, (SK_U32)LM80_CFG_START, LM80_ADDR, I2C_025K_DEV, + LM80_CFG, 0); + + /* + * MaxSens has to be updated here, because PhyType is not + * set when performing Init Level 0 + */ + pAC->I2c.MaxSens = 5; + + pPrt = &pAC->GIni.GP[0]; + + if (pAC->GIni.GIGenesis) { + if (pPrt->PhyType == SK_PHY_BCOM) { + if (pAC->GIni.GIMacsFound == 1) { + pAC->I2c.MaxSens += 1; + } + else { + pAC->I2c.MaxSens += 3; + } + } + } + else { + pAC->I2c.MaxSens += 3; + } + + for (i = 0; i < pAC->I2c.MaxSens; i++) { + switch (i) { + case 0: + pAC->I2c.SenTable[i].SenDesc = "Temperature"; + pAC->I2c.SenTable[i].SenType = SK_SEN_TEMP; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_TEMP_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_TEMP_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_TEMP_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_TEMP_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_TEMP_IN; + break; + case 1: + pAC->I2c.SenTable[i].SenDesc = "Voltage PCI"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_5V_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_5V_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_5V_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_5V_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT0_IN; + break; + case 2: + pAC->I2c.SenTable[i].SenDesc = "Voltage PCI-IO"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_IO_5V_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_IO_5V_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_IO_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_IO_3V3_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT1_IN; + pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_PCI_IO; + break; + case 3: + pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VDD_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VDD_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VDD_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VDD_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT2_IN; + break; + case 4: + if (pAC->GIni.GIGenesis) { + if (pPrt->PhyType == SK_PHY_BCOM) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY A PLL"; + pAC->I2c.SenTable[i].SenThreErrHigh = 
SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PMA"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + } + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage VAUX"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VAUX_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VAUX_3V3_HIGH_WARN; + if (pAC->GIni.GIVauxAvail) { + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR; + } + else { + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_0V_WARN_ERR; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_0V_WARN_ERR; + } + } + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenReg = LM80_VT3_IN; + break; + case 5: + if (pAC->GIni.GIGenesis) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage Core 1V5"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_CORE_1V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_CORE_1V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_CORE_1V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_CORE_1V5_LOW_ERR; + } + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenReg = LM80_VT4_IN; + break; + case 6: + if (pAC->GIni.GIGenesis) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL"; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 3V3"; + } + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT5_IN; + break; + case 7: + if (pAC->GIni.GIGenesis) { + pAC->I2c.SenTable[i].SenDesc = "Speed Fan"; + pAC->I2c.SenTable[i].SenType = SK_SEN_FAN; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_FAN_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_FAN_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_FAN_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_FAN_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT6_IN; + } + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_INIT | SK_ERRCL_SW, + SKERR_I2C_E001, SKERR_I2C_E001MSG); + break; + } + + pAC->I2c.SenTable[i].SenValue = 0; + pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK; + pAC->I2c.SenTable[i].SenErrCts = 0; + 
pAC->I2c.SenTable[i].SenBegErrTS = 0; + pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE; + pAC->I2c.SenTable[i].SenRead = SkLm80ReadSensor; + pAC->I2c.SenTable[i].SenDev = LM80_ADDR; + } + +#ifndef SK_DIAG + pAC->I2c.DummyReads = pAC->I2c.MaxSens; +#endif /* !SK_DIAG */ + + /* Clear I2C IRQ */ + SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); + + /* Now we are I/O initialized */ + pAC->I2c.InitLevel = SK_INIT_IO; + return(0); +} /* SkI2cInit1 */ + + +/* + * Init level 2: Start first sensor read. + */ +static int SkI2cInit2( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC) /* I/O Context */ +{ + int ReadComplete; + SK_SENSOR *pSen; + + if (pAC->I2c.InitLevel != SK_INIT_IO) { + /* ReInit not needed in I2C module */ + /* Init0 and Init2 not permitted */ + return(0); + } + + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + ReadComplete = SkI2cReadSensor(pAC, IoC, pSen); + + if (ReadComplete) { + SK_ERR_LOG(pAC, SK_ERRCL_INIT, SKERR_I2C_E008, SKERR_I2C_E008MSG); + } + + /* Now we are correctly initialized */ + pAC->I2c.InitLevel = SK_INIT_RUN; + + return(0); +} /* SkI2cInit2*/ + + +/* + * Initialize I2C devices + * + * Get the first voltage value and discard it. + * Go into temperature read mode. A default pointer is not set. + * + * The things to be done depend on the init level in the parameter list: + * Level 0: + * Initialize only the data structures. Do NOT access hardware. + * Level 1: + * Initialize hardware through SK_IN / SK_OUT commands. Do NOT use interrupts. + * Level 2: + * Everything is possible. Interrupts may be used from now on. + * + * return: + * 0 = success + * other = error. + */ +int SkI2cInit( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context needed in levels 1 and 2 */ +int Level) /* Init Level */ +{ + + switch (Level) { + case SK_INIT_DATA: + return(SkI2cInit0(pAC)); + case SK_INIT_IO: + return(SkI2cInit1(pAC, IoC)); + case SK_INIT_RUN: + return(SkI2cInit2(pAC, IoC)); + default: + break; + } + + return(0); +} /* SkI2cInit */ + + +#ifndef SK_DIAG + +/* + * Interrupt service function for the I2C Interface + * + * Clears the Interrupt source + * + * Reads the register and check it for sending a trap. + * + * Starts the timer if necessary. + */ +void SkI2cIsr( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC) /* I/O Context */ +{ + SK_EVPARA Para; + + /* Clear I2C IRQ */ + SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); + + Para.Para64 = 0; + SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_IRQ, Para); +} /* SkI2cIsr */ + + +/* + * Check this sensors Value against the threshold and send events. + */ +static void SkI2cCheckSensor( +SK_AC *pAC, /* Adapter Context */ +SK_SENSOR *pSen) +{ + SK_EVPARA ParaLocal; + SK_BOOL TooHigh; /* Is sensor too high? */ + SK_BOOL TooLow; /* Is sensor too low? */ + SK_U64 CurrTime; /* Current Time */ + SK_BOOL DoTrapSend; /* We need to send a trap */ + SK_BOOL DoErrLog; /* We need to log the error */ + SK_BOOL IsError; /* We need to log the error */ + + /* Check Dummy Reads first */ + if (pAC->I2c.DummyReads > 0) { + pAC->I2c.DummyReads--; + return; + } + + /* Get the current time */ + CurrTime = SkOsGetTime(pAC); + + /* Set para to the most useful setting: The current sensor. */ + ParaLocal.Para64 = (SK_U64)pAC->I2c.CurrSens; + + /* Check the Value against the thresholds. 
First: Error Thresholds */ + TooHigh = (pSen->SenValue > pSen->SenThreErrHigh); + TooLow = (pSen->SenValue < pSen->SenThreErrLow); + + IsError = SK_FALSE; + if (TooHigh || TooLow) { + /* Error condition is satisfied */ + DoTrapSend = SK_TRUE; + DoErrLog = SK_TRUE; + + /* Now error condition is satisfied */ + IsError = SK_TRUE; + + if (pSen->SenErrFlag == SK_SEN_ERR_ERR) { + /* This state is the former one */ + + /* So check first whether we have to send a trap */ + if (pSen->SenLastErrTrapTS + SK_SEN_ERR_TR_HOLD > + CurrTime) { + /* + * Do NOT send the Trap. The hold back time + * has to run out first. + */ + DoTrapSend = SK_FALSE; + } + + /* Check now whether we have to log an Error */ + if (pSen->SenLastErrLogTS + SK_SEN_ERR_LOG_HOLD > + CurrTime) { + /* + * Do NOT log the error. The hold back time + * has to run out first. + */ + DoErrLog = SK_FALSE; + } + } + else { + /* We came from a different state -> Set Begin Time Stamp */ + pSen->SenBegErrTS = CurrTime; + pSen->SenErrFlag = SK_SEN_ERR_ERR; + } + + if (DoTrapSend) { + /* Set current Time */ + pSen->SenLastErrTrapTS = CurrTime; + pSen->SenErrCts++; + + /* Queue PNMI Event */ + SkEventQueue(pAC, SKGE_PNMI, (TooHigh ? + SK_PNMI_EVT_SEN_ERR_UPP : + SK_PNMI_EVT_SEN_ERR_LOW), + ParaLocal); + } + + if (DoErrLog) { + /* Set current Time */ + pSen->SenLastErrLogTS = CurrTime; + + if (pSen->SenType == SK_SEN_TEMP) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E011, SKERR_I2C_E011MSG); + } + else if (pSen->SenType == SK_SEN_VOLT) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E012, SKERR_I2C_E012MSG); + } + else { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E015, SKERR_I2C_E015MSG); + } + } + } + + /* Check the Value against the thresholds */ + /* 2nd: Warning thresholds */ + TooHigh = (pSen->SenValue > pSen->SenThreWarnHigh); + TooLow = (pSen->SenValue < pSen->SenThreWarnLow); + + if (!IsError && (TooHigh || TooLow)) { + /* Error condition is satisfied */ + DoTrapSend = SK_TRUE; + DoErrLog = SK_TRUE; + + if (pSen->SenErrFlag == SK_SEN_ERR_WARN) { + /* This state is the former one */ + + /* So check first whether we have to send a trap */ + if (pSen->SenLastWarnTrapTS + SK_SEN_WARN_TR_HOLD > CurrTime) { + /* + * Do NOT send the Trap. The hold back time + * has to run out first. + */ + DoTrapSend = SK_FALSE; + } + + /* Check now whether we have to log an Error */ + if (pSen->SenLastWarnLogTS + SK_SEN_WARN_LOG_HOLD > CurrTime) { + /* + * Do NOT log the error. The hold back time + * has to run out first. + */ + DoErrLog = SK_FALSE; + } + } + else { + /* We came from a different state -> Set Begin Time Stamp */ + pSen->SenBegWarnTS = CurrTime; + pSen->SenErrFlag = SK_SEN_ERR_WARN; + } + + if (DoTrapSend) { + /* Set current Time */ + pSen->SenLastWarnTrapTS = CurrTime; + pSen->SenWarnCts++; + + /* Queue PNMI Event */ + SkEventQueue(pAC, SKGE_PNMI, (TooHigh ? + SK_PNMI_EVT_SEN_WAR_UPP : + SK_PNMI_EVT_SEN_WAR_LOW), + ParaLocal); + } + + if (DoErrLog) { + /* Set current Time */ + pSen->SenLastWarnLogTS = CurrTime; + + if (pSen->SenType == SK_SEN_TEMP) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E009, SKERR_I2C_E009MSG); + } + else if (pSen->SenType == SK_SEN_VOLT) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E010, SKERR_I2C_E010MSG); + } + else { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E014, SKERR_I2C_E014MSG); + } + } + } + + /* Check for NO error at all */ + if (!IsError && !TooHigh && !TooLow) { + /* Set o.k. 
Status if no error and no warning condition */ + pSen->SenErrFlag = SK_SEN_ERR_OK; + } + + /* End of check against the thresholds */ + + /* Bug fix AF: 16.Aug.2001: Correct the init base + * of LM80 sensor. + */ + if (pSen->SenInit == SK_SEN_DYN_INIT_PCI_IO) { + + pSen->SenInit = SK_SEN_DYN_INIT_NONE; + + if (pSen->SenValue > SK_SEN_PCI_IO_RANGE_LIMITER) { + /* 5V PCI-IO Voltage */ + pSen->SenThreWarnLow = SK_SEN_PCI_IO_5V_LOW_WARN; + pSen->SenThreErrLow = SK_SEN_PCI_IO_5V_LOW_ERR; + } + else { + /* 3.3V PCI-IO Voltage */ + pSen->SenThreWarnHigh = SK_SEN_PCI_IO_3V3_HIGH_WARN; + pSen->SenThreErrHigh = SK_SEN_PCI_IO_3V3_HIGH_ERR; + } + } + +#ifdef TEST_ONLY + /* Dynamic thresholds also for VAUX of LM80 sensor */ + if (pSen->SenInit == SK_SEN_DYN_INIT_VAUX) { + + pSen->SenInit = SK_SEN_DYN_INIT_NONE; + + /* 3.3V VAUX Voltage */ + if (pSen->SenValue > SK_SEN_VAUX_RANGE_LIMITER) { + pSen->SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN; + pSen->SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR; + } + /* 0V VAUX Voltage */ + else { + pSen->SenThreWarnHigh = SK_SEN_VAUX_0V_WARN_ERR; + pSen->SenThreErrHigh = SK_SEN_VAUX_0V_WARN_ERR; + } + } + + /* + * Check initialization state: + * The VIO Thresholds need adaption + */ + if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN && + pSen->SenValue > SK_SEN_WARNLOW2C && + pSen->SenValue < SK_SEN_WARNHIGH2) { + pSen->SenThreErrLow = SK_SEN_ERRLOW2C; + pSen->SenThreWarnLow = SK_SEN_WARNLOW2C; + pSen->SenInit = SK_TRUE; + } + + if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN && + pSen->SenValue > SK_SEN_WARNLOW2 && + pSen->SenValue < SK_SEN_WARNHIGH2C) { + pSen->SenThreErrHigh = SK_SEN_ERRHIGH2C; + pSen->SenThreWarnHigh = SK_SEN_WARNHIGH2C; + pSen->SenInit = SK_TRUE; + } +#endif + + if (pSen->SenInit != SK_SEN_DYN_INIT_NONE) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E013, SKERR_I2C_E013MSG); + } +} /* SkI2cCheckSensor */ + + +/* + * The only Event to be served is the timeout event + * + */ +int SkI2cEvent( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Event, /* Module specific Event */ +SK_EVPARA Para) /* Event specific Parameter */ +{ + int ReadComplete; + SK_SENSOR *pSen; + SK_U32 Time; + SK_EVPARA ParaLocal; + int i; + + /* New case: no sensors */ + if (pAC->I2c.MaxSens == 0) { + return(0); + } + + switch (Event) { + case SK_I2CEV_IRQ: + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + ReadComplete = SkI2cReadSensor(pAC, IoC, pSen); + + if (ReadComplete) { + /* Check sensor against defined thresholds */ + SkI2cCheckSensor(pAC, pSen); + + /* Increment Current sensor and set appropriate Timeout */ + pAC->I2c.CurrSens++; + if (pAC->I2c.CurrSens >= pAC->I2c.MaxSens) { + pAC->I2c.CurrSens = 0; + Time = SK_I2C_TIM_LONG; + } + else { + Time = SK_I2C_TIM_SHORT; + } + + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + else { + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_WATCH_SM; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, SK_I2C_TIM_WATCH, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + break; + case SK_I2CEV_TIM: + if (pAC->I2c.TimerMode == SK_TIMER_NEW_GAUGING) { + + ParaLocal.Para64 = (SK_U64)0; + SkTimerStop(pAC, IoC, &pAC->I2c.SenTimer); + + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + ReadComplete = SkI2cReadSensor(pAC, IoC, pSen); + + if (ReadComplete) { + /* Check sensor against defined thresholds */ + SkI2cCheckSensor(pAC, pSen); + + /* Increment Current 
sensor and set appropriate Timeout */ + pAC->I2c.CurrSens++; + if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) { + pAC->I2c.CurrSens = 0; + Time = SK_I2C_TIM_LONG; + } + else { + Time = SK_I2C_TIM_SHORT; + } + + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + } + else { + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + pSen->SenErrFlag = SK_SEN_ERR_FAULTY; + SK_I2C_STOP(IoC); + + /* Increment Current sensor and set appropriate Timeout */ + pAC->I2c.CurrSens++; + if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) { + pAC->I2c.CurrSens = 0; + Time = SK_I2C_TIM_LONG; + } + else { + Time = SK_I2C_TIM_SHORT; + } + + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + break; + case SK_I2CEV_CLEAR: + for (i = 0; i < SK_MAX_SENSORS; i++) { + pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK; + pAC->I2c.SenTable[i].SenErrCts = 0; + pAC->I2c.SenTable[i].SenWarnCts = 0; + pAC->I2c.SenTable[i].SenBegErrTS = 0; + pAC->I2c.SenTable[i].SenBegWarnTS = 0; + pAC->I2c.SenTable[i].SenLastErrTrapTS = (SK_U64)0; + pAC->I2c.SenTable[i].SenLastErrLogTS = (SK_U64)0; + pAC->I2c.SenTable[i].SenLastWarnTrapTS = (SK_U64)0; + pAC->I2c.SenTable[i].SenLastWarnLogTS = (SK_U64)0; + } + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E006, SKERR_I2C_E006MSG); + } + + return(0); +} /* SkI2cEvent*/ + +#endif /* !SK_DIAG */ diff --git a/drivers/net/sk98lin/sklm80.c b/drivers/net/sk98lin/sklm80.c new file mode 100644 index 000000000000..a204f5bb55d4 --- /dev/null +++ b/drivers/net/sk98lin/sklm80.c @@ -0,0 +1,141 @@ +/****************************************************************************** + * + * Name: sklm80.c + * Project: Gigabit Ethernet Adapters, TWSI-Module + * Version: $Revision: 1.22 $ + * Date: $Date: 2003/10/20 09:08:21 $ + * Purpose: Functions to access Voltage and Temperature Sensor (LM80) + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/* + LM80 functions +*/ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: sklm80.c,v 1.22 2003/10/20 09:08:21 rschmidt Exp $ (C) Marvell. "; +#endif + +#include "h/skdrv1st.h" /* Driver Specific Definitions */ +#include "h/lm80.h" +#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ + +#define BREAK_OR_WAIT(pAC,IoC,Event) break + +/* + * read a sensors value (LM80 specific) + * + * This function reads a sensors value from the I2C sensor chip LM80. + * The sensor is defined by its index into the sensors database in the struct + * pAC points to. 
+ * + * Returns 1 if the read is completed + * 0 if the read must be continued (I2C Bus still allocated) + */ +int SkLm80ReadSensor( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context needed in level 1 and 2 */ +SK_SENSOR *pSen) /* Sensor to be read */ +{ + SK_I32 Value; + + switch (pSen->SenState) { + case SK_SEN_IDLE: + /* Send address to ADDR register */ + SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, I2C_025K_DEV, pSen->SenReg, 0); + + pSen->SenState = SK_SEN_VALUE ; + BREAK_OR_WAIT(pAC, IoC, I2C_READ); + + case SK_SEN_VALUE: + /* Read value from data register */ + SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value)); + + Value &= 0xff; /* only least significant byte is valid */ + + /* Do NOT check the Value against the thresholds */ + /* Checking is done in the calling instance */ + + if (pSen->SenType == SK_SEN_VOLT) { + /* Voltage sensor */ + pSen->SenValue = Value * SK_LM80_VT_LSB; + pSen->SenState = SK_SEN_IDLE ; + return(1); + } + + if (pSen->SenType == SK_SEN_FAN) { + if (Value != 0 && Value != 0xff) { + /* Fan speed counter */ + pSen->SenValue = SK_LM80_FAN_FAKTOR/Value; + } + else { + /* Indicate Fan error */ + pSen->SenValue = 0; + } + pSen->SenState = SK_SEN_IDLE ; + return(1); + } + + /* First: correct the value: it might be negative */ + if ((Value & 0x80) != 0) { + /* Value is negative */ + Value = Value - 256; + } + + /* We have a temperature sensor and need to get the signed extension. + * For now we get the extension from the last reading, so in the normal + * case we won't see flickering temperatures. + */ + pSen->SenValue = (Value * SK_LM80_TEMP_LSB) + + (pSen->SenValue % SK_LM80_TEMP_LSB); + + /* Send address to ADDR register */ + SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, I2C_025K_DEV, LM80_TEMP_CTRL, 0); + + pSen->SenState = SK_SEN_VALEXT ; + BREAK_OR_WAIT(pAC, IoC, I2C_READ); + + case SK_SEN_VALEXT: + /* Read value from data register */ + SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value)); + Value &= LM80_TEMP_LSB_9; /* only bit 7 is valid */ + + /* cut the LSB bit */ + pSen->SenValue = ((pSen->SenValue / SK_LM80_TEMP_LSB) * + SK_LM80_TEMP_LSB); + + if (pSen->SenValue < 0) { + /* Value negative: The bit value must be subtracted */ + pSen->SenValue -= ((Value >> 7) * SK_LM80_TEMPEXT_LSB); + } + else { + /* Value positive: The bit value must be added */ + pSen->SenValue += ((Value >> 7) * SK_LM80_TEMPEXT_LSB); + } + + pSen->SenState = SK_SEN_IDLE ; + return(1); + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E007, SKERR_I2C_E007MSG); + return(1); + } + + /* Not completed */ + return(0); +} + diff --git a/drivers/net/sk98lin/skqueue.c b/drivers/net/sk98lin/skqueue.c new file mode 100644 index 000000000000..0275b4f71d9b --- /dev/null +++ b/drivers/net/sk98lin/skqueue.c @@ -0,0 +1,179 @@ +/****************************************************************************** + * + * Name: skqueue.c + * Project: Gigabit Ethernet Adapters, Event Scheduler Module + * Version: $Revision: 1.20 $ + * Date: $Date: 2003/09/16 13:44:00 $ + * Purpose: Management of an event queue. + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + + +/* + * Event queue and dispatcher + */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: skqueue.c,v 1.20 2003/09/16 13:44:00 rschmidt Exp $ (C) Marvell."; +#endif + +#include "h/skdrv1st.h" /* Driver Specific Definitions */ +#include "h/skqueue.h" /* Queue Definitions */ +#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ + +#ifdef __C2MAN__ +/* + Event queue management. + + General Description: + + */ +intro() +{} +#endif + +#define PRINTF(a,b,c) + +/* + * init event queue management + * + * Must be called during init level 0. + */ +void SkEventInit( +SK_AC *pAC, /* Adapter context */ +SK_IOC Ioc, /* IO context */ +int Level) /* Init level */ +{ + switch (Level) { + case SK_INIT_DATA: + pAC->Event.EvPut = pAC->Event.EvGet = pAC->Event.EvQueue; + break; + default: + break; + } +} + +/* + * add event to queue + */ +void SkEventQueue( +SK_AC *pAC, /* Adapters context */ +SK_U32 Class, /* Event Class */ +SK_U32 Event, /* Event to be queued */ +SK_EVPARA Para) /* Event parameter */ +{ + pAC->Event.EvPut->Class = Class; + pAC->Event.EvPut->Event = Event; + pAC->Event.EvPut->Para = Para; + + if (++pAC->Event.EvPut == &pAC->Event.EvQueue[SK_MAX_EVENT]) + pAC->Event.EvPut = pAC->Event.EvQueue; + + if (pAC->Event.EvPut == pAC->Event.EvGet) { + SK_ERR_LOG(pAC, SK_ERRCL_NORES, SKERR_Q_E001, SKERR_Q_E001MSG); + } +} + +/* + * event dispatcher + * while event queue is not empty + * get event from queue + * send command to state machine + * end + * return error reported by individual Event function + * 0 if no error occured. 
+ */ +int SkEventDispatcher( +SK_AC *pAC, /* Adapters Context */ +SK_IOC Ioc) /* Io context */ +{ + SK_EVENTELEM *pEv; /* pointer into queue */ + SK_U32 Class; + int Rtv; + + pEv = pAC->Event.EvGet; + + PRINTF("dispatch get %x put %x\n", pEv, pAC->Event.ev_put); + + while (pEv != pAC->Event.EvPut) { + PRINTF("dispatch Class %d Event %d\n", pEv->Class, pEv->Event); + + switch (Class = pEv->Class) { +#ifndef SK_USE_LAC_EV +#ifndef SK_SLIM + case SKGE_RLMT: /* RLMT Event */ + Rtv = SkRlmtEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; + case SKGE_I2C: /* I2C Event */ + Rtv = SkI2cEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; + case SKGE_PNMI: /* PNMI Event */ + Rtv = SkPnmiEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; +#endif /* not SK_SLIM */ +#endif /* not SK_USE_LAC_EV */ + case SKGE_DRV: /* Driver Event */ + Rtv = SkDrvEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; +#ifndef SK_USE_SW_TIMER + case SKGE_HWAC: + Rtv = SkGeSirqEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; +#else /* !SK_USE_SW_TIMER */ + case SKGE_SWT : + Rtv = SkSwtEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; +#endif /* !SK_USE_SW_TIMER */ +#ifdef SK_USE_LAC_EV + case SKGE_LACP : + Rtv = SkLacpEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; + case SKGE_RSF : + Rtv = SkRsfEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; + case SKGE_MARKER : + Rtv = SkMarkerEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; + case SKGE_FD : + Rtv = SkFdEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; +#endif /* SK_USE_LAC_EV */ +#ifdef SK_USE_CSUM + case SKGE_CSUM : + Rtv = SkCsEvent(pAC, Ioc, pEv->Event, pEv->Para); + break; +#endif /* SK_USE_CSUM */ + default : + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_Q_E002, SKERR_Q_E002MSG); + Rtv = 0; + } + + if (Rtv != 0) { + return(Rtv); + } + + if (++pEv == &pAC->Event.EvQueue[SK_MAX_EVENT]) + pEv = pAC->Event.EvQueue; + + /* Renew get: it is used in queue_events to detect overruns */ + pAC->Event.EvGet = pEv; + } + + return(0); +} + +/* End of file */ diff --git a/drivers/net/sk98lin/skrlmt.c b/drivers/net/sk98lin/skrlmt.c new file mode 100644 index 000000000000..be8d1ccddf6d --- /dev/null +++ b/drivers/net/sk98lin/skrlmt.c @@ -0,0 +1,3257 @@ +/****************************************************************************** + * + * Name: skrlmt.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.69 $ + * Date: $Date: 2003/04/15 09:39:22 $ + * Purpose: Manage links on SK-NET Adapters, esp. redundant ones. + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/****************************************************************************** + * + * Description: + * + * This module contains code for Link ManagemenT (LMT) of SK-NET Adapters. + * It is mainly intended for adapters with more than one link. + * For such adapters, this module realizes Redundant Link ManagemenT (RLMT). 
+ * + * Include File Hierarchy: + * + * "skdrv1st.h" + * "skdrv2nd.h" + * + ******************************************************************************/ + +#ifndef lint +static const char SysKonnectFileId[] = + "@(#) $Id: skrlmt.c,v 1.69 2003/04/15 09:39:22 tschilli Exp $ (C) Marvell."; +#endif /* !defined(lint) */ + +#define __SKRLMT_C + +#ifdef __cplusplus +extern "C" { +#endif /* cplusplus */ + +#include "h/skdrv1st.h" +#include "h/skdrv2nd.h" + +/* defines ********************************************************************/ + +#ifndef SK_HWAC_LINK_LED +#define SK_HWAC_LINK_LED(a,b,c,d) +#endif /* !defined(SK_HWAC_LINK_LED) */ + +#ifndef DEBUG +#define RLMT_STATIC static +#else /* DEBUG */ +#define RLMT_STATIC + +#ifndef SK_LITTLE_ENDIAN +/* First 32 bits */ +#define OFFS_LO32 1 + +/* Second 32 bits */ +#define OFFS_HI32 0 +#else /* SK_LITTLE_ENDIAN */ +/* First 32 bits */ +#define OFFS_LO32 0 + +/* Second 32 bits */ +#define OFFS_HI32 1 +#endif /* SK_LITTLE_ENDIAN */ + +#endif /* DEBUG */ + +/* ----- Private timeout values ----- */ + +#define SK_RLMT_MIN_TO_VAL 125000 /* 1/8 sec. */ +#define SK_RLMT_DEF_TO_VAL 1000000 /* 1 sec. */ +#define SK_RLMT_PORTDOWN_TIM_VAL 900000 /* another 0.9 sec. */ +#define SK_RLMT_PORTSTART_TIM_VAL 100000 /* 0.1 sec. */ +#define SK_RLMT_PORTUP_TIM_VAL 2500000 /* 2.5 sec. */ +#define SK_RLMT_SEG_TO_VAL 900000000 /* 15 min. */ + +/* Assume tick counter increment is 1 - may be set OS-dependent. */ +#ifndef SK_TICK_INCR +#define SK_TICK_INCR SK_CONSTU64(1) +#endif /* !defined(SK_TICK_INCR) */ + +/* + * Amount that a time stamp must be later to be recognized as "substantially + * later". This is about 1/128 sec, but above 1 tick counter increment. + */ +#define SK_RLMT_BC_DELTA (1 + ((SK_TICKS_PER_SEC >> 7) > SK_TICK_INCR ? \ + (SK_TICKS_PER_SEC >> 7) : SK_TICK_INCR)) + +/* ----- Private RLMT defaults ----- */ + +#define SK_RLMT_DEF_PREF_PORT 0 /* "Lower" port. */ +#define SK_RLMT_DEF_MODE SK_RLMT_CHECK_LINK /* Default RLMT Mode. */ + +/* ----- Private RLMT checking states ----- */ + +#define SK_RLMT_RCS_SEG 1 /* RLMT Check State: check seg. */ +#define SK_RLMT_RCS_START_SEG 2 /* RLMT Check State: start check seg. */ +#define SK_RLMT_RCS_SEND_SEG 4 /* RLMT Check State: send BPDU packet */ +#define SK_RLMT_RCS_REPORT_SEG 8 /* RLMT Check State: report seg. */ + +/* ----- Private PORT checking states ----- */ + +#define SK_RLMT_PCS_TX 1 /* Port Check State: check tx. */ +#define SK_RLMT_PCS_RX 2 /* Port Check State: check rx. */ + +/* ----- Private PORT events ----- */ + +/* Note: Update simulation when changing these. */ +#define SK_RLMT_PORTSTART_TIM 1100 /* Port start timeout. */ +#define SK_RLMT_PORTUP_TIM 1101 /* Port can now go up. */ +#define SK_RLMT_PORTDOWN_RX_TIM 1102 /* Port did not receive once ... */ +#define SK_RLMT_PORTDOWN 1103 /* Port went down. */ +#define SK_RLMT_PORTDOWN_TX_TIM 1104 /* Partner did not receive ... */ + +/* ----- Private RLMT events ----- */ + +/* Note: Update simulation when changing these. */ +#define SK_RLMT_TIM 2100 /* RLMT timeout. */ +#define SK_RLMT_SEG_TIM 2101 /* RLMT segmentation check timeout. */ + +#define TO_SHORTEN(tim) ((tim) / 2) + +/* Error numbers and messages. */ +#define SKERR_RLMT_E001 (SK_ERRBASE_RLMT + 0) +#define SKERR_RLMT_E001_MSG "No Packet." +#define SKERR_RLMT_E002 (SKERR_RLMT_E001 + 1) +#define SKERR_RLMT_E002_MSG "Short Packet." +#define SKERR_RLMT_E003 (SKERR_RLMT_E002 + 1) +#define SKERR_RLMT_E003_MSG "Unknown RLMT event." 
+#define SKERR_RLMT_E004 (SKERR_RLMT_E003 + 1) +#define SKERR_RLMT_E004_MSG "PortsUp incorrect." +#define SKERR_RLMT_E005 (SKERR_RLMT_E004 + 1) +#define SKERR_RLMT_E005_MSG \ + "Net seems to be segmented (different root bridges are reported on the ports)." +#define SKERR_RLMT_E006 (SKERR_RLMT_E005 + 1) +#define SKERR_RLMT_E006_MSG "Duplicate MAC Address detected." +#define SKERR_RLMT_E007 (SKERR_RLMT_E006 + 1) +#define SKERR_RLMT_E007_MSG "LinksUp incorrect." +#define SKERR_RLMT_E008 (SKERR_RLMT_E007 + 1) +#define SKERR_RLMT_E008_MSG "Port not started but link came up." +#define SKERR_RLMT_E009 (SKERR_RLMT_E008 + 1) +#define SKERR_RLMT_E009_MSG "Corrected illegal setting of Preferred Port." +#define SKERR_RLMT_E010 (SKERR_RLMT_E009 + 1) +#define SKERR_RLMT_E010_MSG "Ignored illegal Preferred Port." + +/* LLC field values. */ +#define LLC_COMMAND_RESPONSE_BIT 1 +#define LLC_TEST_COMMAND 0xE3 +#define LLC_UI 0x03 + +/* RLMT Packet fields. */ +#define SK_RLMT_DSAP 0 +#define SK_RLMT_SSAP 0 +#define SK_RLMT_CTRL (LLC_TEST_COMMAND) +#define SK_RLMT_INDICATOR0 0x53 /* S */ +#define SK_RLMT_INDICATOR1 0x4B /* K */ +#define SK_RLMT_INDICATOR2 0x2D /* - */ +#define SK_RLMT_INDICATOR3 0x52 /* R */ +#define SK_RLMT_INDICATOR4 0x4C /* L */ +#define SK_RLMT_INDICATOR5 0x4D /* M */ +#define SK_RLMT_INDICATOR6 0x54 /* T */ +#define SK_RLMT_PACKET_VERSION 0 + +/* RLMT SPT Flag values. */ +#define SK_RLMT_SPT_FLAG_CHANGE 0x01 +#define SK_RLMT_SPT_FLAG_CHANGE_ACK 0x80 + +/* RLMT SPT Packet fields. */ +#define SK_RLMT_SPT_DSAP 0x42 +#define SK_RLMT_SPT_SSAP 0x42 +#define SK_RLMT_SPT_CTRL (LLC_UI) +#define SK_RLMT_SPT_PROTOCOL_ID0 0x00 +#define SK_RLMT_SPT_PROTOCOL_ID1 0x00 +#define SK_RLMT_SPT_PROTOCOL_VERSION_ID 0x00 +#define SK_RLMT_SPT_BPDU_TYPE 0x00 +#define SK_RLMT_SPT_FLAGS 0x00 /* ?? */ +#define SK_RLMT_SPT_ROOT_ID0 0xFF /* Lowest possible priority. */ +#define SK_RLMT_SPT_ROOT_ID1 0xFF /* Lowest possible priority. */ + +/* Remaining 6 bytes will be the current port address. */ +#define SK_RLMT_SPT_ROOT_PATH_COST0 0x00 +#define SK_RLMT_SPT_ROOT_PATH_COST1 0x00 +#define SK_RLMT_SPT_ROOT_PATH_COST2 0x00 +#define SK_RLMT_SPT_ROOT_PATH_COST3 0x00 +#define SK_RLMT_SPT_BRIDGE_ID0 0xFF /* Lowest possible priority. */ +#define SK_RLMT_SPT_BRIDGE_ID1 0xFF /* Lowest possible priority. */ + +/* Remaining 6 bytes will be the current port address. */ +#define SK_RLMT_SPT_PORT_ID0 0xFF /* Lowest possible priority. */ +#define SK_RLMT_SPT_PORT_ID1 0xFF /* Lowest possible priority. */ +#define SK_RLMT_SPT_MSG_AGE0 0x00 +#define SK_RLMT_SPT_MSG_AGE1 0x00 +#define SK_RLMT_SPT_MAX_AGE0 0x00 +#define SK_RLMT_SPT_MAX_AGE1 0xFF +#define SK_RLMT_SPT_HELLO_TIME0 0x00 +#define SK_RLMT_SPT_HELLO_TIME1 0xFF +#define SK_RLMT_SPT_FWD_DELAY0 0x00 +#define SK_RLMT_SPT_FWD_DELAY1 0x40 + +/* Size defines. */ +#define SK_RLMT_MIN_PACKET_SIZE 34 +#define SK_RLMT_MAX_PACKET_SIZE (SK_RLMT_MAX_TX_BUF_SIZE) +#define SK_PACKET_DATA_LEN (SK_RLMT_MAX_PACKET_SIZE - \ + SK_RLMT_MIN_PACKET_SIZE) + +/* ----- RLMT packet types ----- */ +#define SK_PACKET_ANNOUNCE 1 /* Port announcement. */ +#define SK_PACKET_ALIVE 2 /* Alive packet to port. */ +#define SK_PACKET_ADDR_CHANGED 3 /* Port address changed. */ +#define SK_PACKET_CHECK_TX 4 /* Check your tx line. 
*/ + +#ifdef SK_LITTLE_ENDIAN +#define SK_U16_TO_NETWORK_ORDER(Val,Addr) { \ + SK_U8 *_Addr = (SK_U8*)(Addr); \ + SK_U16 _Val = (SK_U16)(Val); \ + *_Addr++ = (SK_U8)(_Val >> 8); \ + *_Addr = (SK_U8)(_Val & 0xFF); \ +} +#endif /* SK_LITTLE_ENDIAN */ + +#ifdef SK_BIG_ENDIAN +#define SK_U16_TO_NETWORK_ORDER(Val,Addr) (*(SK_U16*)(Addr) = (SK_U16)(Val)) +#endif /* SK_BIG_ENDIAN */ + +#define AUTONEG_FAILED SK_FALSE +#define AUTONEG_SUCCESS SK_TRUE + + +/* typedefs *******************************************************************/ + +/* RLMT packet. Length: SK_RLMT_MAX_PACKET_SIZE (60) bytes. */ +typedef struct s_RlmtPacket { + SK_U8 DstAddr[SK_MAC_ADDR_LEN]; + SK_U8 SrcAddr[SK_MAC_ADDR_LEN]; + SK_U8 TypeLen[2]; + SK_U8 DSap; + SK_U8 SSap; + SK_U8 Ctrl; + SK_U8 Indicator[7]; + SK_U8 RlmtPacketType[2]; + SK_U8 Align1[2]; + SK_U8 Random[4]; /* Random value of requesting(!) station. */ + SK_U8 RlmtPacketVersion[2]; /* RLMT Packet version. */ + SK_U8 Data[SK_PACKET_DATA_LEN]; +} SK_RLMT_PACKET; + +typedef struct s_SpTreeRlmtPacket { + SK_U8 DstAddr[SK_MAC_ADDR_LEN]; + SK_U8 SrcAddr[SK_MAC_ADDR_LEN]; + SK_U8 TypeLen[2]; + SK_U8 DSap; + SK_U8 SSap; + SK_U8 Ctrl; + SK_U8 ProtocolId[2]; + SK_U8 ProtocolVersionId; + SK_U8 BpduType; + SK_U8 Flags; + SK_U8 RootId[8]; + SK_U8 RootPathCost[4]; + SK_U8 BridgeId[8]; + SK_U8 PortId[2]; + SK_U8 MessageAge[2]; + SK_U8 MaxAge[2]; + SK_U8 HelloTime[2]; + SK_U8 ForwardDelay[2]; +} SK_SPTREE_PACKET; + +/* global variables ***********************************************************/ + +SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}}; +SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}}; + +/* local variables ************************************************************/ + +/* None. */ + +/* functions ******************************************************************/ + +RLMT_STATIC void SkRlmtCheckSwitch( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 NetIdx); +RLMT_STATIC void SkRlmtCheckSeg( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 NetIdx); +RLMT_STATIC void SkRlmtEvtSetNets( + SK_AC *pAC, + SK_IOC IoC, + SK_EVPARA Para); + +/****************************************************************************** + * + * SkRlmtInit - initialize data, set state to init + * + * Description: + * + * SK_INIT_DATA + * ============ + * + * This routine initializes all RLMT-related variables to a known state. + * The initial state is SK_RLMT_RS_INIT. + * All ports are initialized to SK_RLMT_PS_INIT. + * + * + * SK_INIT_IO + * ========== + * + * Nothing. + * + * + * SK_INIT_RUN + * =========== + * + * Determine the adapter's random value. + * Set the hw registers, the "logical MAC address", the + * RLMT multicast address, and eventually the BPDU multicast address. + * + * Context: + * init, pageable + * + * Returns: + * Nothing. + */ +void SkRlmtInit( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Level) /* Initialization Level */ +{ + SK_U32 i, j; + SK_U64 Random; + SK_EVPARA Para; + SK_MAC_ADDR VirtualMacAddress; + SK_MAC_ADDR PhysicalAMacAddress; + SK_BOOL VirtualMacAddressSet; + SK_BOOL PhysicalAMacAddressSet; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT, + ("RLMT Init level %d.\n", Level)) + + switch (Level) { + case SK_INIT_DATA: /* Initialize data structures. 
*/ + SK_MEMSET((char *)&pAC->Rlmt, 0, sizeof(SK_RLMT)); + + for (i = 0; i < SK_MAX_MACS; i++) { + pAC->Rlmt.Port[i].PortState = SK_RLMT_PS_INIT; + pAC->Rlmt.Port[i].LinkDown = SK_TRUE; + pAC->Rlmt.Port[i].PortDown = SK_TRUE; + pAC->Rlmt.Port[i].PortStarted = SK_FALSE; + pAC->Rlmt.Port[i].PortNoRx = SK_FALSE; + pAC->Rlmt.Port[i].RootIdSet = SK_FALSE; + pAC->Rlmt.Port[i].PortNumber = i; + pAC->Rlmt.Port[i].Net = &pAC->Rlmt.Net[0]; + pAC->Rlmt.Port[i].AddrPort = &pAC->Addr.Port[i]; + } + + pAC->Rlmt.NumNets = 1; + for (i = 0; i < SK_MAX_NETS; i++) { + pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT; + pAC->Rlmt.Net[i].RootIdSet = SK_FALSE; + pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT; + pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* Automatic. */ + /* Just assuming. */ + pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort; + pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE; + pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL; + pAC->Rlmt.Net[i].NetNumber = i; + } + + pAC->Rlmt.Net[0].Port[0] = &pAC->Rlmt.Port[0]; + pAC->Rlmt.Net[0].Port[1] = &pAC->Rlmt.Port[1]; +#if SK_MAX_NETS > 1 + pAC->Rlmt.Net[1].Port[0] = &pAC->Rlmt.Port[1]; +#endif /* SK_MAX_NETS > 1 */ + break; + + case SK_INIT_IO: /* GIMacsFound first available here. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT, + ("RLMT: %d MACs were detected.\n", pAC->GIni.GIMacsFound)) + + pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound; + + /* Initialize HW registers? */ + if (pAC->GIni.GIMacsFound == 1) { + Para.Para32[0] = SK_RLMT_MODE_CLS; + Para.Para32[1] = 0; + (void)SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE, Para); + } + break; + + case SK_INIT_RUN: + /* Ensure RLMT is set to one net. */ + if (pAC->Rlmt.NumNets > 1) { + Para.Para32[0] = 1; + Para.Para32[1] = -1; + SkRlmtEvtSetNets(pAC, IoC, Para); + } + + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + Random = SkOsGetTime(pAC); + *(SK_U32*)&pAC->Rlmt.Port[i].Random = *(SK_U32*)&Random; + + for (j = 0; j < 4; j++) { + pAC->Rlmt.Port[i].Random[j] ^= pAC->Rlmt.Port[i].AddrPort-> + CurrentMacAddress.a[SK_MAC_ADDR_LEN - 1 - j]; + } + + (void)SkAddrMcClear(pAC, IoC, i, SK_ADDR_PERMANENT | SK_MC_SW_ONLY); + + /* Add RLMT MC address. */ + (void)SkAddrMcAdd(pAC, IoC, i, &SkRlmtMcAddr, SK_ADDR_PERMANENT); + + if (pAC->Rlmt.Net[0].RlmtMode & SK_RLMT_CHECK_SEG) { + /* Add BPDU MC address. */ + (void)SkAddrMcAdd(pAC, IoC, i, &BridgeMcAddr, SK_ADDR_PERMANENT); + } + + (void)SkAddrMcUpdate(pAC, IoC, i); + } + + VirtualMacAddressSet = SK_FALSE; + /* Read virtual MAC address from Control Register File. */ + for (j = 0; j < SK_MAC_ADDR_LEN; j++) { + + SK_IN8(IoC, B2_MAC_1 + j, &VirtualMacAddress.a[j]); + VirtualMacAddressSet |= VirtualMacAddress.a[j]; + } + + PhysicalAMacAddressSet = SK_FALSE; + /* Read physical MAC address for MAC A from Control Register File. 
*/
+		for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
+
+			SK_IN8(IoC, B2_MAC_2 + j, &PhysicalAMacAddress.a[j]);
+			PhysicalAMacAddressSet |= PhysicalAMacAddress.a[j];
+		}
+
+		/* check if the two mac addresses contain reasonable values */
+		if (!VirtualMacAddressSet || !PhysicalAMacAddressSet) {
+
+			pAC->Rlmt.RlmtOff = SK_TRUE;
+		}
+
+		/* if the two mac addresses are equal switch off the RLMT_PRE_LOOKAHEAD
+		   and the RLMT_LOOKAHEAD macros */
+		else if (SK_ADDR_EQUAL(PhysicalAMacAddress.a, VirtualMacAddress.a)) {
+
+			pAC->Rlmt.RlmtOff = SK_TRUE;
+		}
+		else {
+			pAC->Rlmt.RlmtOff = SK_FALSE;
+		}
+		break;
+
+	default:	/* error */
+		break;
+	}
+	return;
+}	/* SkRlmtInit */
+
+
+/******************************************************************************
+ *
+ *	SkRlmtBuildCheckChain - build the check chain
+ *
+ * Description:
+ *	This routine builds the local check chain:
+ *	- Each port that is up checks the next port.
+ *	- The last port that is up checks the first port that is up.
+ *
+ * Notes:
+ *	- Currently only local ports are considered when building the chain.
+ *	- Currently the SuspectState is just reset;
+ *	  it would be better to save it ...
+ *
+ * Context:
+ *	runtime, pageable?
+ *
+ * Returns:
+ *	Nothing
+ */
+RLMT_STATIC void SkRlmtBuildCheckChain(
+SK_AC	*pAC,	/* Adapter Context */
+SK_U32	NetIdx)	/* Net Number */
+{
+	SK_U32		i;
+	SK_U32		NumMacsUp;
+	SK_RLMT_PORT *	FirstMacUp;
+	SK_RLMT_PORT *	PrevMacUp;
+
+	FirstMacUp = NULL;
+	PrevMacUp = NULL;
+
+	if (!(pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_CHECK_LOC_LINK)) {
+		for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+			pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked = 0;
+		}
+		return;	/* Done. */
+	}
+
+	SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+		("SkRlmtBuildCheckChain.\n"))
+
+	NumMacsUp = 0;
+
+	for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+		pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked = 0;
+		pAC->Rlmt.Net[NetIdx].Port[i]->PortsSuspect = 0;
+		pAC->Rlmt.Net[NetIdx].Port[i]->CheckingState &=
+			~(SK_RLMT_PCS_RX | SK_RLMT_PCS_TX);
+
+		/*
+		 * If more than two links are detected we should consider
+		 * checking at least two other ports:
+		 * 1. the next port that is not LinkDown and
+		 * 2. the next port that is not PortDown.
+		 */
+		if (!pAC->Rlmt.Net[NetIdx].Port[i]->LinkDown) {
+			if (NumMacsUp == 0) {
+				FirstMacUp = pAC->Rlmt.Net[NetIdx].Port[i];
+			}
+			else {
+				PrevMacUp->PortCheck[
+					pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked].CheckAddr =
+					pAC->Rlmt.Net[NetIdx].Port[i]->AddrPort->CurrentMacAddress;
+				PrevMacUp->PortCheck[
+					PrevMacUp->PortsChecked].SuspectTx = SK_FALSE;
+				PrevMacUp->PortsChecked++;
+			}
+			PrevMacUp = pAC->Rlmt.Net[NetIdx].Port[i];
+			NumMacsUp++;
+		}
+	}
+
+	if (NumMacsUp > 1) {
+		PrevMacUp->PortCheck[PrevMacUp->PortsChecked].CheckAddr =
+			FirstMacUp->AddrPort->CurrentMacAddress;
+		PrevMacUp->PortCheck[PrevMacUp->PortsChecked].SuspectTx =
+			SK_FALSE;
+		PrevMacUp->PortsChecked++;
+	}
+
+#ifdef DEBUG
+	for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
+		SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+			("Port %d checks %d other ports: %2X.\n", i,
+			pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked,
+			pAC->Rlmt.Net[NetIdx].Port[i]->PortCheck[0].CheckAddr.a[5]))
+	}
+#endif	/* DEBUG */
+
+	return;
+}	/* SkRlmtBuildCheckChain */
+
+
+/******************************************************************************
+ *
+ *	SkRlmtBuildPacket - build an RLMT packet
+ *
+ * Description:
+ *	This routine sets up an RLMT packet.
+ *
+ * Context:
+ *	runtime, pageable?
+ * + * Returns: + * NULL or pointer to RLMT mbuf + */ +RLMT_STATIC SK_MBUF *SkRlmtBuildPacket( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 PortNumber, /* Sending port */ +SK_U16 PacketType, /* RLMT packet type */ +SK_MAC_ADDR *SrcAddr, /* Source address */ +SK_MAC_ADDR *DestAddr) /* Destination address */ +{ + int i; + SK_U16 Length; + SK_MBUF *pMb; + SK_RLMT_PACKET *pPacket; + +#ifdef DEBUG + SK_U8 CheckSrc = 0; + SK_U8 CheckDest = 0; + + for (i = 0; i < SK_MAC_ADDR_LEN; ++i) { + CheckSrc |= SrcAddr->a[i]; + CheckDest |= DestAddr->a[i]; + } + + if ((CheckSrc == 0) || (CheckDest == 0)) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_ERR, + ("SkRlmtBuildPacket: Invalid %s%saddr.\n", + (CheckSrc == 0 ? "Src" : ""), (CheckDest == 0 ? "Dest" : ""))) + } +#endif + + if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) != NULL) { + pPacket = (SK_RLMT_PACKET*)pMb->pData; + for (i = 0; i < SK_MAC_ADDR_LEN; i++) { + pPacket->DstAddr[i] = DestAddr->a[i]; + pPacket->SrcAddr[i] = SrcAddr->a[i]; + } + pPacket->DSap = SK_RLMT_DSAP; + pPacket->SSap = SK_RLMT_SSAP; + pPacket->Ctrl = SK_RLMT_CTRL; + pPacket->Indicator[0] = SK_RLMT_INDICATOR0; + pPacket->Indicator[1] = SK_RLMT_INDICATOR1; + pPacket->Indicator[2] = SK_RLMT_INDICATOR2; + pPacket->Indicator[3] = SK_RLMT_INDICATOR3; + pPacket->Indicator[4] = SK_RLMT_INDICATOR4; + pPacket->Indicator[5] = SK_RLMT_INDICATOR5; + pPacket->Indicator[6] = SK_RLMT_INDICATOR6; + + SK_U16_TO_NETWORK_ORDER(PacketType, &pPacket->RlmtPacketType[0]); + + for (i = 0; i < 4; i++) { + pPacket->Random[i] = pAC->Rlmt.Port[PortNumber].Random[i]; + } + + SK_U16_TO_NETWORK_ORDER( + SK_RLMT_PACKET_VERSION, &pPacket->RlmtPacketVersion[0]); + + for (i = 0; i < SK_PACKET_DATA_LEN; i++) { + pPacket->Data[i] = 0x00; + } + + Length = SK_RLMT_MAX_PACKET_SIZE; /* Or smaller. */ + pMb->Length = Length; + pMb->PortIdx = PortNumber; + Length -= 14; + SK_U16_TO_NETWORK_ORDER(Length, &pPacket->TypeLen[0]); + + if (PacketType == SK_PACKET_ALIVE) { + pAC->Rlmt.Port[PortNumber].TxHelloCts++; + } + } + + return (pMb); +} /* SkRlmtBuildPacket */ + + +/****************************************************************************** + * + * SkRlmtBuildSpanningTreePacket - build spanning tree check packet + * + * Description: + * This routine sets up a BPDU packet for spanning tree check. + * + * Context: + * runtime, pageable? 
+ * + * Returns: + * NULL or pointer to RLMT mbuf + */ +RLMT_STATIC SK_MBUF *SkRlmtBuildSpanningTreePacket( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 PortNumber) /* Sending port */ +{ + unsigned i; + SK_U16 Length; + SK_MBUF *pMb; + SK_SPTREE_PACKET *pSPacket; + + if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) != + NULL) { + pSPacket = (SK_SPTREE_PACKET*)pMb->pData; + for (i = 0; i < SK_MAC_ADDR_LEN; i++) { + pSPacket->DstAddr[i] = BridgeMcAddr.a[i]; + pSPacket->SrcAddr[i] = + pAC->Addr.Port[PortNumber].CurrentMacAddress.a[i]; + } + pSPacket->DSap = SK_RLMT_SPT_DSAP; + pSPacket->SSap = SK_RLMT_SPT_SSAP; + pSPacket->Ctrl = SK_RLMT_SPT_CTRL; + + pSPacket->ProtocolId[0] = SK_RLMT_SPT_PROTOCOL_ID0; + pSPacket->ProtocolId[1] = SK_RLMT_SPT_PROTOCOL_ID1; + pSPacket->ProtocolVersionId = SK_RLMT_SPT_PROTOCOL_VERSION_ID; + pSPacket->BpduType = SK_RLMT_SPT_BPDU_TYPE; + pSPacket->Flags = SK_RLMT_SPT_FLAGS; + pSPacket->RootId[0] = SK_RLMT_SPT_ROOT_ID0; + pSPacket->RootId[1] = SK_RLMT_SPT_ROOT_ID1; + pSPacket->RootPathCost[0] = SK_RLMT_SPT_ROOT_PATH_COST0; + pSPacket->RootPathCost[1] = SK_RLMT_SPT_ROOT_PATH_COST1; + pSPacket->RootPathCost[2] = SK_RLMT_SPT_ROOT_PATH_COST2; + pSPacket->RootPathCost[3] = SK_RLMT_SPT_ROOT_PATH_COST3; + pSPacket->BridgeId[0] = SK_RLMT_SPT_BRIDGE_ID0; + pSPacket->BridgeId[1] = SK_RLMT_SPT_BRIDGE_ID1; + + /* + * Use logical MAC address as bridge ID and filter these packets + * on receive. + */ + for (i = 0; i < SK_MAC_ADDR_LEN; i++) { + pSPacket->BridgeId[i + 2] = pSPacket->RootId[i + 2] = + pAC->Addr.Net[pAC->Rlmt.Port[PortNumber].Net->NetNumber]. + CurrentMacAddress.a[i]; + } + pSPacket->PortId[0] = SK_RLMT_SPT_PORT_ID0; + pSPacket->PortId[1] = SK_RLMT_SPT_PORT_ID1; + pSPacket->MessageAge[0] = SK_RLMT_SPT_MSG_AGE0; + pSPacket->MessageAge[1] = SK_RLMT_SPT_MSG_AGE1; + pSPacket->MaxAge[0] = SK_RLMT_SPT_MAX_AGE0; + pSPacket->MaxAge[1] = SK_RLMT_SPT_MAX_AGE1; + pSPacket->HelloTime[0] = SK_RLMT_SPT_HELLO_TIME0; + pSPacket->HelloTime[1] = SK_RLMT_SPT_HELLO_TIME1; + pSPacket->ForwardDelay[0] = SK_RLMT_SPT_FWD_DELAY0; + pSPacket->ForwardDelay[1] = SK_RLMT_SPT_FWD_DELAY1; + + Length = SK_RLMT_MAX_PACKET_SIZE; /* Or smaller. */ + pMb->Length = Length; + pMb->PortIdx = PortNumber; + Length -= 14; + SK_U16_TO_NETWORK_ORDER(Length, &pSPacket->TypeLen[0]); + + pAC->Rlmt.Port[PortNumber].TxSpHelloReqCts++; + } + + return (pMb); +} /* SkRlmtBuildSpanningTreePacket */ + + +/****************************************************************************** + * + * SkRlmtSend - build and send check packets + * + * Description: + * Depending on the RLMT state and the checking state, several packets + * are sent through the indicated port. + * + * Context: + * runtime, pageable? + * + * Returns: + * Nothing. + */ +RLMT_STATIC void SkRlmtSend( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 PortNumber) /* Sending port */ +{ + unsigned j; + SK_EVPARA Para; + SK_RLMT_PORT *pRPort; + + pRPort = &pAC->Rlmt.Port[PortNumber]; + if (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) { + if (pRPort->CheckingState & (SK_RLMT_PCS_TX | SK_RLMT_PCS_RX)) { + /* Port is suspicious. Send the RLMT packet to the RLMT mc addr. 
*/ + if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber, + SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress, + &SkRlmtMcAddr)) != NULL) { + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } + } + else { + /* + * Send a directed RLMT packet to all ports that are + * checked by the indicated port. + */ + for (j = 0; j < pRPort->PortsChecked; j++) { + if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber, + SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress, + &pRPort->PortCheck[j].CheckAddr)) != NULL) { + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } + } + } + } + + if ((pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) && + (pAC->Rlmt.Port[PortNumber].Net->CheckingState & SK_RLMT_RCS_SEND_SEG)) { + /* + * Send a BPDU packet to make a connected switch tell us + * the correct root bridge. + */ + if ((Para.pParaPtr = + SkRlmtBuildSpanningTreePacket(pAC, IoC, PortNumber)) != NULL) { + pAC->Rlmt.Port[PortNumber].Net->CheckingState &= ~SK_RLMT_RCS_SEND_SEG; + pRPort->RootIdSet = SK_FALSE; + + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_TX, + ("SkRlmtSend: BPDU Packet on Port %u.\n", PortNumber)) + } + } + return; +} /* SkRlmtSend */ + + +/****************************************************************************** + * + * SkRlmtPortReceives - check if port is (going) down and bring it up + * + * Description: + * This routine checks if a port who received a non-BPDU packet + * needs to go up or needs to be stopped going down. + * + * Context: + * runtime, pageable? + * + * Returns: + * Nothing. + */ +RLMT_STATIC void SkRlmtPortReceives( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 PortNumber) /* Port to check */ +{ + SK_RLMT_PORT *pRPort; + SK_EVPARA Para; + + pRPort = &pAC->Rlmt.Port[PortNumber]; + pRPort->PortNoRx = SK_FALSE; + + if ((pRPort->PortState == SK_RLMT_PS_DOWN) && + !(pRPort->CheckingState & SK_RLMT_PCS_TX)) { + /* + * Port is marked down (rx), but received a non-BPDU packet. + * Bring it up. + */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Received on PortDown.\n")) + + pRPort->PortState = SK_RLMT_PS_GOING_UP; + pRPort->GuTimeStamp = SkOsGetTime(pAC); + Para.Para32[0] = PortNumber; + Para.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->UpTimer, SK_RLMT_PORTUP_TIM_VAL, + SKGE_RLMT, SK_RLMT_PORTUP_TIM, Para); + pRPort->CheckingState &= ~SK_RLMT_PCS_RX; + /* pAC->Rlmt.CheckSwitch = SK_TRUE; */ + SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); + } /* PortDown && !SuspectTx */ + else if (pRPort->CheckingState & SK_RLMT_PCS_RX) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Stop bringing port down.\n")) + SkTimerStop(pAC, IoC, &pRPort->DownRxTimer); + pRPort->CheckingState &= ~SK_RLMT_PCS_RX; + /* pAC->Rlmt.CheckSwitch = SK_TRUE; */ + SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); + } /* PortGoingDown */ + + return; +} /* SkRlmtPortReceives */ + + +/****************************************************************************** + * + * SkRlmtPacketReceive - receive a packet for closer examination + * + * Description: + * This routine examines a packet more closely than SK_RLMT_LOOKAHEAD. + * + * Context: + * runtime, pageable? + * + * Returns: + * Nothing. 
+ */ +RLMT_STATIC void SkRlmtPacketReceive( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_MBUF *pMb) /* Received packet */ +{ +#ifdef xDEBUG + extern void DumpData(char *p, int size); +#endif /* DEBUG */ + int i; + unsigned j; + SK_U16 PacketType; + SK_U32 PortNumber; + SK_ADDR_PORT *pAPort; + SK_RLMT_PORT *pRPort; + SK_RLMT_PACKET *pRPacket; + SK_SPTREE_PACKET *pSPacket; + SK_EVPARA Para; + + PortNumber = pMb->PortIdx; + pAPort = &pAC->Addr.Port[PortNumber]; + pRPort = &pAC->Rlmt.Port[PortNumber]; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: PortNumber == %d.\n", PortNumber)) + + pRPacket = (SK_RLMT_PACKET*)pMb->pData; + pSPacket = (SK_SPTREE_PACKET*)pRPacket; + +#ifdef xDEBUG + DumpData((char *)pRPacket, 32); +#endif /* DEBUG */ + + if ((pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot) != 0) { + SkRlmtPortReceives(pAC, IoC, PortNumber); + } + + /* Check destination address. */ + + if (!SK_ADDR_EQUAL(pAPort->CurrentMacAddress.a, pRPacket->DstAddr) && + !SK_ADDR_EQUAL(SkRlmtMcAddr.a, pRPacket->DstAddr) && + !SK_ADDR_EQUAL(BridgeMcAddr.a, pRPacket->DstAddr)) { + + /* Not sent to current MAC or registered MC address => Trash it. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Not for me.\n")) + + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + return; + } + else if (SK_ADDR_EQUAL(pAPort->CurrentMacAddress.a, pRPacket->SrcAddr)) { + + /* + * Was sent by same port (may happen during port switching + * or in case of duplicate MAC addresses). + */ + + /* + * Check for duplicate address here: + * If Packet.Random != My.Random => DupAddr. + */ + for (i = 3; i >= 0; i--) { + if (pRPort->Random[i] != pRPacket->Random[i]) { + break; + } + } + + /* + * CAUTION: Do not check for duplicate MAC address in RLMT Alive Reply + * packets (they have the LLC_COMMAND_RESPONSE_BIT set in + * pRPacket->SSap). + */ + if (i >= 0 && pRPacket->DSap == SK_RLMT_DSAP && + pRPacket->Ctrl == SK_RLMT_CTRL && + pRPacket->SSap == SK_RLMT_SSAP && + pRPacket->Indicator[0] == SK_RLMT_INDICATOR0 && + pRPacket->Indicator[1] == SK_RLMT_INDICATOR1 && + pRPacket->Indicator[2] == SK_RLMT_INDICATOR2 && + pRPacket->Indicator[3] == SK_RLMT_INDICATOR3 && + pRPacket->Indicator[4] == SK_RLMT_INDICATOR4 && + pRPacket->Indicator[5] == SK_RLMT_INDICATOR5 && + pRPacket->Indicator[6] == SK_RLMT_INDICATOR6) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Duplicate MAC Address.\n")) + + /* Error Log entry. */ + SK_ERR_LOG(pAC, SK_ERRCL_COMM, SKERR_RLMT_E006, SKERR_RLMT_E006_MSG); + } + else { + /* Simply trash it. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Sent by me.\n")) + } + + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + return; + } + + /* Check SuspectTx entries. */ + if (pRPort->PortsSuspect > 0) { + for (j = 0; j < pRPort->PortsChecked; j++) { + if (pRPort->PortCheck[j].SuspectTx && + SK_ADDR_EQUAL( + pRPacket->SrcAddr, pRPort->PortCheck[j].CheckAddr.a)) { + pRPort->PortCheck[j].SuspectTx = SK_FALSE; + pRPort->PortsSuspect--; + break; + } + } + } + + /* Determine type of packet. 
*/ + if (pRPacket->DSap == SK_RLMT_DSAP && + pRPacket->Ctrl == SK_RLMT_CTRL && + (pRPacket->SSap & ~LLC_COMMAND_RESPONSE_BIT) == SK_RLMT_SSAP && + pRPacket->Indicator[0] == SK_RLMT_INDICATOR0 && + pRPacket->Indicator[1] == SK_RLMT_INDICATOR1 && + pRPacket->Indicator[2] == SK_RLMT_INDICATOR2 && + pRPacket->Indicator[3] == SK_RLMT_INDICATOR3 && + pRPacket->Indicator[4] == SK_RLMT_INDICATOR4 && + pRPacket->Indicator[5] == SK_RLMT_INDICATOR5 && + pRPacket->Indicator[6] == SK_RLMT_INDICATOR6) { + + /* It's an RLMT packet. */ + PacketType = (SK_U16)((pRPacket->RlmtPacketType[0] << 8) | + pRPacket->RlmtPacketType[1]); + + switch (PacketType) { + case SK_PACKET_ANNOUNCE: /* Not yet used. */ +#if 0 + /* Build the check chain. */ + SkRlmtBuildCheckChain(pAC); +#endif /* 0 */ + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Announce.\n")) + + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + break; + + case SK_PACKET_ALIVE: + if (pRPacket->SSap & LLC_COMMAND_RESPONSE_BIT) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Alive Reply.\n")) + + if (!(pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_LLC) || + SK_ADDR_EQUAL( + pRPacket->DstAddr, pAPort->CurrentMacAddress.a)) { + /* Obviously we could send something. */ + if (pRPort->CheckingState & SK_RLMT_PCS_TX) { + pRPort->CheckingState &= ~SK_RLMT_PCS_TX; + SkTimerStop(pAC, IoC, &pRPort->DownTxTimer); + } + + if ((pRPort->PortState == SK_RLMT_PS_DOWN) && + !(pRPort->CheckingState & SK_RLMT_PCS_RX)) { + pRPort->PortState = SK_RLMT_PS_GOING_UP; + pRPort->GuTimeStamp = SkOsGetTime(pAC); + + SkTimerStop(pAC, IoC, &pRPort->DownTxTimer); + + Para.Para32[0] = PortNumber; + Para.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->UpTimer, + SK_RLMT_PORTUP_TIM_VAL, SKGE_RLMT, + SK_RLMT_PORTUP_TIM, Para); + } + } + + /* Mark sending port as alive? */ + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + } + else { /* Alive Request Packet. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Alive Request.\n")) + + pRPort->RxHelloCts++; + + /* Answer. */ + for (i = 0; i < SK_MAC_ADDR_LEN; i++) { + pRPacket->DstAddr[i] = pRPacket->SrcAddr[i]; + pRPacket->SrcAddr[i] = + pAC->Addr.Port[PortNumber].CurrentMacAddress.a[i]; + } + pRPacket->SSap |= LLC_COMMAND_RESPONSE_BIT; + + Para.pParaPtr = pMb; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } + break; + + case SK_PACKET_CHECK_TX: + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Check your tx line.\n")) + + /* A port checking us requests us to check our tx line. */ + pRPort->CheckingState |= SK_RLMT_PCS_TX; + + /* Start PortDownTx timer. */ + Para.Para32[0] = PortNumber; + Para.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->DownTxTimer, + SK_RLMT_PORTDOWN_TIM_VAL, SKGE_RLMT, + SK_RLMT_PORTDOWN_TX_TIM, Para); + + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + + if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber, + SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress, + &SkRlmtMcAddr)) != NULL) { + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } + break; + + case SK_PACKET_ADDR_CHANGED: + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Address Change.\n")) + + /* Build the check chain. */ + SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber); + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + break; + + default: + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Unknown RLMT packet.\n")) + + /* RA;:;: ??? 
*/ + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + } + } + else if (pSPacket->DSap == SK_RLMT_SPT_DSAP && + pSPacket->Ctrl == SK_RLMT_SPT_CTRL && + (pSPacket->SSap & ~LLC_COMMAND_RESPONSE_BIT) == SK_RLMT_SPT_SSAP) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: BPDU Packet.\n")) + + /* Spanning Tree packet. */ + pRPort->RxSpHelloCts++; + + if (!SK_ADDR_EQUAL(&pSPacket->RootId[2], &pAC->Addr.Net[pAC->Rlmt. + Port[PortNumber].Net->NetNumber].CurrentMacAddress.a[0])) { + /* + * Check segmentation if a new root bridge is set and + * the segmentation check is not currently running. + */ + if (!SK_ADDR_EQUAL(&pSPacket->RootId[2], &pRPort->Root.Id[2]) && + (pAC->Rlmt.Port[PortNumber].Net->LinksUp > 1) && + (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) + != 0 && (pAC->Rlmt.Port[PortNumber].Net->CheckingState & + SK_RLMT_RCS_SEG) == 0) { + pAC->Rlmt.Port[PortNumber].Net->CheckingState |= + SK_RLMT_RCS_START_SEG | SK_RLMT_RCS_SEND_SEG; + } + + /* Store tree view of this port. */ + for (i = 0; i < 8; i++) { + pRPort->Root.Id[i] = pSPacket->RootId[i]; + } + pRPort->RootIdSet = SK_TRUE; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_DUMP, + ("Root ID %d: %02x %02x %02x %02x %02x %02x %02x %02x.\n", + PortNumber, + pRPort->Root.Id[0], pRPort->Root.Id[1], + pRPort->Root.Id[2], pRPort->Root.Id[3], + pRPort->Root.Id[4], pRPort->Root.Id[5], + pRPort->Root.Id[6], pRPort->Root.Id[7])) + } + + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + if ((pAC->Rlmt.Port[PortNumber].Net->CheckingState & + SK_RLMT_RCS_REPORT_SEG) != 0) { + SkRlmtCheckSeg(pAC, IoC, pAC->Rlmt.Port[PortNumber].Net->NetNumber); + } + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX, + ("SkRlmtPacketReceive: Unknown Packet Type.\n")) + + /* Unknown packet. */ + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + } + return; +} /* SkRlmtPacketReceive */ + + +/****************************************************************************** + * + * SkRlmtCheckPort - check if a port works + * + * Description: + * This routine checks if a port whose link is up received something + * and if it seems to transmit successfully. + * + * # PortState: PsInit, PsLinkDown, PsDown, PsGoingUp, PsUp + * # PortCheckingState (Bitfield): ChkTx, ChkRx, ChkSeg + * # RlmtCheckingState (Bitfield): ChkSeg, StartChkSeg, ReportSeg + * + * if (Rx - RxBpdu == 0) { # No rx. + * if (state == PsUp) { + * PortCheckingState |= ChkRx + * } + * if (ModeCheckSeg && (Timeout == + * TO_SHORTEN(RLMT_DEFAULT_TIMEOUT))) { + * RlmtCheckingState |= ChkSeg) + * PortCheckingState |= ChkSeg + * } + * NewTimeout = TO_SHORTEN(Timeout) + * if (NewTimeout < RLMT_MIN_TIMEOUT) { + * NewTimeout = RLMT_MIN_TIMEOUT + * PortState = PsDown + * ... + * } + * } + * else { # something was received + * # Set counter to 0 at LinkDown? + * # No - rx may be reported after LinkDown ??? + * PortCheckingState &= ~ChkRx + * NewTimeout = RLMT_DEFAULT_TIMEOUT + * if (RxAck == 0) { + * possible reasons: + * is my tx line bad? -- + * send RLMT multicast and report + * back internally? (only possible + * between ports on same adapter) + * } + * if (RxChk == 0) { + * possible reasons: + * - tx line of port set to check me + * maybe bad + * - no other port/adapter available or set + * to check me + * - adapter checking me has a longer + * timeout + * ??? anything that can be done here? + * } + * } + * + * Context: + * runtime, pageable? + * + * Returns: + * New timeout value. 
+ */ +RLMT_STATIC SK_U32 SkRlmtCheckPort( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 PortNumber) /* Port to check */ +{ + unsigned i; + SK_U32 NewTimeout; + SK_RLMT_PORT *pRPort; + SK_EVPARA Para; + + pRPort = &pAC->Rlmt.Port[PortNumber]; + + if ((pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot) == 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SkRlmtCheckPort %d: No (%d) receives in last time slot.\n", + PortNumber, pRPort->PacketsPerTimeSlot)) + + /* + * Check segmentation if there was no receive at least twice + * in a row (PortNoRx is already set) and the segmentation + * check is not currently running. + */ + + if (pRPort->PortNoRx && (pAC->Rlmt.Port[PortNumber].Net->LinksUp > 1) && + (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) && + !(pAC->Rlmt.Port[PortNumber].Net->CheckingState & SK_RLMT_RCS_SEG)) { + pAC->Rlmt.Port[PortNumber].Net->CheckingState |= + SK_RLMT_RCS_START_SEG | SK_RLMT_RCS_SEND_SEG; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SkRlmtCheckPort: PortsSuspect %d, PcsRx %d.\n", + pRPort->PortsSuspect, pRPort->CheckingState & SK_RLMT_PCS_RX)) + + if (pRPort->PortState != SK_RLMT_PS_DOWN) { + NewTimeout = TO_SHORTEN(pAC->Rlmt.Port[PortNumber].Net->TimeoutValue); + if (NewTimeout < SK_RLMT_MIN_TO_VAL) { + NewTimeout = SK_RLMT_MIN_TO_VAL; + } + + if (!(pRPort->CheckingState & SK_RLMT_PCS_RX)) { + Para.Para32[0] = PortNumber; + pRPort->CheckingState |= SK_RLMT_PCS_RX; + + /* + * What shall we do if the port checked by this one receives + * our request frames? What's bad - our rx line or his tx line? + */ + Para.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->DownRxTimer, + SK_RLMT_PORTDOWN_TIM_VAL, SKGE_RLMT, + SK_RLMT_PORTDOWN_RX_TIM, Para); + + for (i = 0; i < pRPort->PortsChecked; i++) { + if (pRPort->PortCheck[i].SuspectTx) { + continue; + } + pRPort->PortCheck[i].SuspectTx = SK_TRUE; + pRPort->PortsSuspect++; + if ((Para.pParaPtr = + SkRlmtBuildPacket(pAC, IoC, PortNumber, SK_PACKET_CHECK_TX, + &pAC->Addr.Port[PortNumber].CurrentMacAddress, + &pRPort->PortCheck[i].CheckAddr)) != NULL) { + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } + } + } + } + else { /* PortDown -- or all partners suspect. */ + NewTimeout = SK_RLMT_DEF_TO_VAL; + } + pRPort->PortNoRx = SK_TRUE; + } + else { /* A non-BPDU packet was received. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SkRlmtCheckPort %d: %d (%d) receives in last time slot.\n", + PortNumber, + pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot, + pRPort->PacketsPerTimeSlot)) + + SkRlmtPortReceives(pAC, IoC, PortNumber); + if (pAC->Rlmt.CheckSwitch) { + SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); + } + + NewTimeout = SK_RLMT_DEF_TO_VAL; + } + + return (NewTimeout); +} /* SkRlmtCheckPort */ + + +/****************************************************************************** + * + * SkRlmtSelectBcRx - select new active port, criteria 1 (CLP) + * + * Description: + * This routine selects the port that received a broadcast frame + * substantially later than all other ports. + * + * Context: + * runtime, pageable? + * + * Returns: + * SK_BOOL + */ +RLMT_STATIC SK_BOOL SkRlmtSelectBcRx( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Active, /* Active port */ +SK_U32 PrefPort, /* Preferred port */ +SK_U32 *pSelect) /* New active port */ +{ + SK_U64 BcTimeStamp; + SK_U32 i; + SK_BOOL PortFound; + + BcTimeStamp = 0; /* Not totally necessary, but feeling better. 
*/ + PortFound = SK_FALSE; + + /* Select port with the latest TimeStamp. */ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("TimeStamp Port %d (Down: %d, NoRx: %d): %08x %08x.\n", + i, + pAC->Rlmt.Port[i].PortDown, pAC->Rlmt.Port[i].PortNoRx, + *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_HI32), + *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_LO32))) + + if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx) { + if (!PortFound || pAC->Rlmt.Port[i].BcTimeStamp > BcTimeStamp) { + BcTimeStamp = pAC->Rlmt.Port[i].BcTimeStamp; + *pSelect = i; + PortFound = SK_TRUE; + } + } + } + + if (PortFound) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Port %d received the last broadcast.\n", *pSelect)) + + /* Look if another port's time stamp is similar. */ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + if (i == *pSelect) { + continue; + } + if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx && + (pAC->Rlmt.Port[i].BcTimeStamp > + BcTimeStamp - SK_RLMT_BC_DELTA || + pAC->Rlmt.Port[i].BcTimeStamp + + SK_RLMT_BC_DELTA > BcTimeStamp)) { + PortFound = SK_FALSE; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Port %d received a broadcast at a similar time.\n", i)) + break; + } + } + } + +#ifdef DEBUG + if (PortFound) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SELECT_BCRX found Port %d receiving the substantially " + "latest broadcast (%u).\n", + *pSelect, + BcTimeStamp - pAC->Rlmt.Port[1 - *pSelect].BcTimeStamp)) + } +#endif /* DEBUG */ + + return (PortFound); +} /* SkRlmtSelectBcRx */ + + +/****************************************************************************** + * + * SkRlmtSelectNotSuspect - select new active port, criteria 2 (CLP) + * + * Description: + * This routine selects a good port (it is PortUp && !SuspectRx). + * + * Context: + * runtime, pageable? + * + * Returns: + * SK_BOOL + */ +RLMT_STATIC SK_BOOL SkRlmtSelectNotSuspect( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Active, /* Active port */ +SK_U32 PrefPort, /* Preferred port */ +SK_U32 *pSelect) /* New active port */ +{ + SK_U32 i; + SK_BOOL PortFound; + + PortFound = SK_FALSE; + + /* Select first port that is PortUp && !SuspectRx. */ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + if (!pAC->Rlmt.Port[i].PortDown && + !(pAC->Rlmt.Port[i].CheckingState & SK_RLMT_PCS_RX)) { + *pSelect = i; + if (!pAC->Rlmt.Port[Active].PortDown && + !(pAC->Rlmt.Port[Active].CheckingState & SK_RLMT_PCS_RX)) { + *pSelect = Active; + } + if (!pAC->Rlmt.Port[PrefPort].PortDown && + !(pAC->Rlmt.Port[PrefPort].CheckingState & SK_RLMT_PCS_RX)) { + *pSelect = PrefPort; + } + PortFound = SK_TRUE; + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SELECT_NOTSUSPECT found Port %d up and not check RX.\n", + *pSelect)) + break; + } + } + return (PortFound); +} /* SkRlmtSelectNotSuspect */ + + +/****************************************************************************** + * + * SkRlmtSelectUp - select new active port, criteria 3, 4 (CLP) + * + * Description: + * This routine selects a port that is up. + * + * Context: + * runtime, pageable? + * + * Returns: + * SK_BOOL + */ +RLMT_STATIC SK_BOOL SkRlmtSelectUp( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Active, /* Active port */ +SK_U32 PrefPort, /* Preferred port */ +SK_U32 *pSelect, /* New active port */ +SK_BOOL AutoNegDone) /* Successfully auto-negotiated? 
*/ +{ + SK_U32 i; + SK_BOOL PortFound; + + PortFound = SK_FALSE; + + /* Select first port that is PortUp. */ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_UP && + pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) { + *pSelect = i; + if (pAC->Rlmt.Port[Active].PortState == SK_RLMT_PS_UP && + pAC->GIni.GP[Active].PAutoNegFail != AutoNegDone) { + *pSelect = Active; + } + if (pAC->Rlmt.Port[PrefPort].PortState == SK_RLMT_PS_UP && + pAC->GIni.GP[PrefPort].PAutoNegFail != AutoNegDone) { + *pSelect = PrefPort; + } + PortFound = SK_TRUE; + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SELECT_UP found Port %d up.\n", *pSelect)) + break; + } + } + return (PortFound); +} /* SkRlmtSelectUp */ + + +/****************************************************************************** + * + * SkRlmtSelectGoingUp - select new active port, criteria 5, 6 (CLP) + * + * Description: + * This routine selects the port that is going up for the longest time. + * + * Context: + * runtime, pageable? + * + * Returns: + * SK_BOOL + */ +RLMT_STATIC SK_BOOL SkRlmtSelectGoingUp( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Active, /* Active port */ +SK_U32 PrefPort, /* Preferred port */ +SK_U32 *pSelect, /* New active port */ +SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */ +{ + SK_U64 GuTimeStamp; + SK_U32 i; + SK_BOOL PortFound; + + GuTimeStamp = 0; + PortFound = SK_FALSE; + + /* Select port that is PortGoingUp for the longest time. */ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_GOING_UP && + pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) { + GuTimeStamp = pAC->Rlmt.Port[i].GuTimeStamp; + *pSelect = i; + PortFound = SK_TRUE; + break; + } + } + + if (!PortFound) { + return (SK_FALSE); + } + + for (i = *pSelect + 1; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_GOING_UP && + pAC->Rlmt.Port[i].GuTimeStamp < GuTimeStamp && + pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) { + GuTimeStamp = pAC->Rlmt.Port[i].GuTimeStamp; + *pSelect = i; + } + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SELECT_GOINGUP found Port %d going up.\n", *pSelect)) + return (SK_TRUE); +} /* SkRlmtSelectGoingUp */ + + +/****************************************************************************** + * + * SkRlmtSelectDown - select new active port, criteria 7, 8 (CLP) + * + * Description: + * This routine selects a port that is down. + * + * Context: + * runtime, pageable? + * + * Returns: + * SK_BOOL + */ +RLMT_STATIC SK_BOOL SkRlmtSelectDown( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Active, /* Active port */ +SK_U32 PrefPort, /* Preferred port */ +SK_U32 *pSelect, /* New active port */ +SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */ +{ + SK_U32 i; + SK_BOOL PortFound; + + PortFound = SK_FALSE; + + /* Select first port that is PortDown. 
*/ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_DOWN && + pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) { + *pSelect = i; + if (pAC->Rlmt.Port[Active].PortState == SK_RLMT_PS_DOWN && + pAC->GIni.GP[Active].PAutoNegFail != AutoNegDone) { + *pSelect = Active; + } + if (pAC->Rlmt.Port[PrefPort].PortState == SK_RLMT_PS_DOWN && + pAC->GIni.GP[PrefPort].PAutoNegFail != AutoNegDone) { + *pSelect = PrefPort; + } + PortFound = SK_TRUE; + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SELECT_DOWN found Port %d down.\n", *pSelect)) + break; + } + } + return (PortFound); +} /* SkRlmtSelectDown */ + + +/****************************************************************************** + * + * SkRlmtCheckSwitch - select new active port and switch to it + * + * Description: + * This routine decides which port should be the active one and queues + * port switching if necessary. + * + * Context: + * runtime, pageable? + * + * Returns: + * Nothing. + */ +RLMT_STATIC void SkRlmtCheckSwitch( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 NetIdx) /* Net index */ +{ + SK_EVPARA Para; + SK_U32 Active; + SK_U32 PrefPort; + SK_U32 i; + SK_BOOL PortFound; + + Active = pAC->Rlmt.Net[NetIdx].ActivePort; /* Index of active port. */ + PrefPort = pAC->Rlmt.Net[NetIdx].PrefPort; /* Index of preferred port. */ + PortFound = SK_FALSE; + pAC->Rlmt.CheckSwitch = SK_FALSE; + +#if 0 /* RW 2001/10/18 - active port becomes always prefered one */ + if (pAC->Rlmt.Net[NetIdx].Preference == 0xFFFFFFFF) { /* Automatic */ + /* disable auto-fail back */ + PrefPort = Active; + } +#endif + + if (pAC->Rlmt.Net[NetIdx].LinksUp == 0) { + /* Last link went down - shut down the net. */ + pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_DOWN; + Para.Para32[0] = SK_RLMT_NET_DOWN_TEMP; + Para.Para32[1] = NetIdx; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_DOWN, Para); + + Para.Para32[0] = pAC->Rlmt.Net[NetIdx]. + Port[pAC->Rlmt.Net[NetIdx].ActivePort]->PortNumber; + Para.Para32[1] = NetIdx; + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_DOWN, Para); + return; + } /* pAC->Rlmt.LinksUp == 0 */ + else if (pAC->Rlmt.Net[NetIdx].LinksUp == 1 && + pAC->Rlmt.Net[NetIdx].RlmtState == SK_RLMT_RS_NET_DOWN) { + /* First link came up - get the net up. */ + pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_UP; + + /* + * If pAC->Rlmt.ActivePort != Para.Para32[0], + * the DRV switches to the port that came up. + */ + for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) { + if (!pAC->Rlmt.Net[NetIdx].Port[i]->LinkDown) { + if (!pAC->Rlmt.Net[NetIdx].Port[Active]->LinkDown) { + i = Active; + } + if (!pAC->Rlmt.Net[NetIdx].Port[PrefPort]->LinkDown) { + i = PrefPort; + } + PortFound = SK_TRUE; + break; + } + } + + if (PortFound) { + Para.Para32[0] = pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber; + Para.Para32[1] = NetIdx; + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_UP, Para); + + pAC->Rlmt.Net[NetIdx].ActivePort = i; + Para.Para32[0] = pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber; + Para.Para32[1] = NetIdx; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_UP, Para); + + if ((pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_TRANSPARENT) == 0 && + (Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, + pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber, + SK_PACKET_ANNOUNCE, &pAC->Addr.Net[NetIdx]. + CurrentMacAddress, &SkRlmtMcAddr)) != NULL) { + /* + * Send announce packet to RLMT multicast address to force + * switches to learn the new location of the logical MAC address. 
+ */ + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } + } + else { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E007, SKERR_RLMT_E007_MSG); + } + + return; + } /* LinksUp == 1 && RlmtState == SK_RLMT_RS_NET_DOWN */ + else { /* Cannot be reached in dual-net mode. */ + Para.Para32[0] = Active; + + /* + * Preselection: + * If RLMT Mode != CheckLinkState + * select port that received a broadcast frame substantially later + * than all other ports + * else select first port that is not SuspectRx + * else select first port that is PortUp + * else select port that is PortGoingUp for the longest time + * else select first port that is PortDown + * else stop. + * + * For the preselected port: + * If ActivePort is equal in quality, select ActivePort. + * + * If PrefPort is equal in quality, select PrefPort. + * + * If ActivePort != SelectedPort, + * If old ActivePort is LinkDown, + * SwitchHard + * else + * SwitchSoft + */ + /* check of ChgBcPrio flag added */ + if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) && + (!pAC->Rlmt.Net[0].ChgBcPrio)) { + + if (!PortFound) { + PortFound = SkRlmtSelectBcRx( + pAC, IoC, Active, PrefPort, &Para.Para32[1]); + } + + if (!PortFound) { + PortFound = SkRlmtSelectNotSuspect( + pAC, IoC, Active, PrefPort, &Para.Para32[1]); + } + } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */ + + /* with changed priority for last broadcast received */ + if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) && + (pAC->Rlmt.Net[0].ChgBcPrio)) { + if (!PortFound) { + PortFound = SkRlmtSelectNotSuspect( + pAC, IoC, Active, PrefPort, &Para.Para32[1]); + } + + if (!PortFound) { + PortFound = SkRlmtSelectBcRx( + pAC, IoC, Active, PrefPort, &Para.Para32[1]); + } + } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */ + + if (!PortFound) { + PortFound = SkRlmtSelectUp( + pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS); + } + + if (!PortFound) { + PortFound = SkRlmtSelectUp( + pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED); + } + + if (!PortFound) { + PortFound = SkRlmtSelectGoingUp( + pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS); + } + + if (!PortFound) { + PortFound = SkRlmtSelectGoingUp( + pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED); + } + + if (pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) { + if (!PortFound) { + PortFound = SkRlmtSelectDown(pAC, IoC, + Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS); + } + + if (!PortFound) { + PortFound = SkRlmtSelectDown(pAC, IoC, + Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED); + } + } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */ + + if (PortFound) { + + if (Para.Para32[1] != Active) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Active: %d, Para1: %d.\n", Active, Para.Para32[1])) + pAC->Rlmt.Net[NetIdx].ActivePort = Para.Para32[1]; + Para.Para32[0] = pAC->Rlmt.Net[NetIdx]. + Port[Para.Para32[0]]->PortNumber; + Para.Para32[1] = pAC->Rlmt.Net[NetIdx]. + Port[Para.Para32[1]]->PortNumber; + SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[1], SK_LED_ACTIVE); + if (pAC->Rlmt.Port[Active].LinkDown) { + SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_HARD, Para); + } + else { + SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_STANDBY); + SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_SOFT, Para); + } + Para.Para32[1] = NetIdx; + Para.Para32[0] = + pAC->Rlmt.Net[NetIdx].Port[Para.Para32[0]]->PortNumber; + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_DOWN, Para); + Para.Para32[0] = pAC->Rlmt.Net[NetIdx]. 
+ Port[pAC->Rlmt.Net[NetIdx].ActivePort]->PortNumber; + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_UP, Para); + if ((pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_TRANSPARENT) == 0 && + (Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, Para.Para32[0], + SK_PACKET_ANNOUNCE, &pAC->Addr.Net[NetIdx].CurrentMacAddress, + &SkRlmtMcAddr)) != NULL) { + /* + * Send announce packet to RLMT multicast address to force + * switches to learn the new location of the logical + * MAC address. + */ + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para); + } /* (Para.pParaPtr = SkRlmtBuildPacket(...)) != NULL */ + } /* Para.Para32[1] != Active */ + } /* PortFound */ + else { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E004, SKERR_RLMT_E004_MSG); + } + } /* LinksUp > 1 || LinksUp == 1 && RlmtState != SK_RLMT_RS_NET_DOWN */ + return; +} /* SkRlmtCheckSwitch */ + + +/****************************************************************************** + * + * SkRlmtCheckSeg - Report if segmentation is detected + * + * Description: + * This routine checks if the ports see different root bridges and reports + * segmentation in such a case. + * + * Context: + * runtime, pageable? + * + * Returns: + * Nothing. + */ +RLMT_STATIC void SkRlmtCheckSeg( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 NetIdx) /* Net number */ +{ + SK_EVPARA Para; + SK_RLMT_NET *pNet; + SK_U32 i, j; + SK_BOOL Equal; + + pNet = &pAC->Rlmt.Net[NetIdx]; + pNet->RootIdSet = SK_FALSE; + Equal = SK_TRUE; + + for (i = 0; i < pNet->NumPorts; i++) { + if (pNet->Port[i]->LinkDown || !pNet->Port[i]->RootIdSet) { + continue; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_DUMP, + ("Root ID %d: %02x %02x %02x %02x %02x %02x %02x %02x.\n", i, + pNet->Port[i]->Root.Id[0], pNet->Port[i]->Root.Id[1], + pNet->Port[i]->Root.Id[2], pNet->Port[i]->Root.Id[3], + pNet->Port[i]->Root.Id[4], pNet->Port[i]->Root.Id[5], + pNet->Port[i]->Root.Id[6], pNet->Port[i]->Root.Id[7])) + + if (!pNet->RootIdSet) { + pNet->Root = pNet->Port[i]->Root; + pNet->RootIdSet = SK_TRUE; + continue; + } + + for (j = 0; j < 8; j ++) { + Equal &= pNet->Port[i]->Root.Id[j] == pNet->Root.Id[j]; + if (!Equal) { + break; + } + } + + if (!Equal) { + SK_ERR_LOG(pAC, SK_ERRCL_COMM, SKERR_RLMT_E005, SKERR_RLMT_E005_MSG); + Para.Para32[0] = NetIdx; + Para.Para32[1] = (SK_U32)-1; + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SEGMENTATION, Para); + + pNet->CheckingState &= ~SK_RLMT_RCS_REPORT_SEG; + + /* 2000-03-06 RA: New. */ + Para.Para32[0] = NetIdx; + Para.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pNet->SegTimer, SK_RLMT_SEG_TO_VAL, + SKGE_RLMT, SK_RLMT_SEG_TIM, Para); + break; + } + } /* for (i = 0; i < pNet->NumPorts; i++) */ + + /* 2000-03-06 RA: Moved here. */ + /* Segmentation check not running anymore. */ + pNet->CheckingState &= ~SK_RLMT_RCS_SEG; + +} /* SkRlmtCheckSeg */ + + +/****************************************************************************** + * + * SkRlmtPortStart - initialize port variables and start port + * + * Description: + * This routine initializes a port's variables and issues a PORT_START + * to the HWAC module. This handles retries if the start fails or the + * link eventually goes down. + * + * Context: + * runtime, pageable? 
+ *
+ * Returns:
+ *	Nothing
+ */
+RLMT_STATIC void SkRlmtPortStart(
+SK_AC	*pAC,	/* Adapter Context */
+SK_IOC	IoC,	/* I/O Context */
+SK_U32	PortNumber)	/* Port number */
+{
+	SK_EVPARA	Para;
+
+	pAC->Rlmt.Port[PortNumber].PortState = SK_RLMT_PS_LINK_DOWN;
+	pAC->Rlmt.Port[PortNumber].PortStarted = SK_TRUE;
+	pAC->Rlmt.Port[PortNumber].LinkDown = SK_TRUE;
+	pAC->Rlmt.Port[PortNumber].PortDown = SK_TRUE;
+	pAC->Rlmt.Port[PortNumber].CheckingState = 0;
+	pAC->Rlmt.Port[PortNumber].RootIdSet = SK_FALSE;
+	Para.Para32[0] = PortNumber;
+	Para.Para32[1] = (SK_U32)-1;
+	SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
+}	/* SkRlmtPortStart */
+
+
+/******************************************************************************
+ *
+ *	SkRlmtEvtPortStartTim - PORT_START_TIM
+ *
+ * Description:
+ *	This routine handles PORT_START_TIM events.
+ *
+ * Context:
+ *	runtime, pageable?
+ *	may be called after SK_INIT_IO
+ *
+ * Returns:
+ *	Nothing
+ */
+RLMT_STATIC void SkRlmtEvtPortStartTim(
+SK_AC	*pAC,	/* Adapter Context */
+SK_IOC	IoC,	/* I/O Context */
+SK_EVPARA	Para)	/* SK_U32 PortNumber; SK_U32 -1 */
+{
+	SK_U32	i;
+
+	SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+		("SK_RLMT_PORTSTART_TIMEOUT Port %d Event BEGIN.\n", Para.Para32[0]))
+
+	if (Para.Para32[1] != (SK_U32)-1) {
+		SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+			("Bad Parameter.\n"))
+		SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+			("SK_RLMT_PORTSTART_TIMEOUT Event EMPTY.\n"))
+		return;
+	}
+
+	/*
+	 * Used to start non-preferred ports if the preferred one
+	 * does not come up.
+	 * This timeout only needs to be set when starting the first
+	 * (preferred) port.
+	 */
+	if (pAC->Rlmt.Port[Para.Para32[0]].LinkDown) {
+		/* PORT_START failed. */
+		for (i = 0; i < pAC->Rlmt.Port[Para.Para32[0]].Net->NumPorts; i++) {
+			if (!pAC->Rlmt.Port[Para.Para32[0]].Net->Port[i]->PortStarted) {
+				SkRlmtPortStart(pAC, IoC,
+					pAC->Rlmt.Port[Para.Para32[0]].Net->Port[i]->PortNumber);
+			}
+		}
+	}
+
+	SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+		("SK_RLMT_PORTSTART_TIMEOUT Event END.\n"))
+}	/* SkRlmtEvtPortStartTim */
+
+
+/******************************************************************************
+ *
+ *	SkRlmtEvtLinkUp - LINK_UP
+ *
+ * Description:
+ *	This routine handles LINK_UP events.
+ *
+ * Context:
+ *	runtime, pageable?
+ *	may be called after SK_INIT_IO
+ *
+ * Returns:
+ *	Nothing
+ */
+RLMT_STATIC void SkRlmtEvtLinkUp(
+SK_AC	*pAC,	/* Adapter Context */
+SK_IOC	IoC,	/* I/O Context */
+SK_EVPARA	Para)	/* SK_U32 PortNumber; SK_U32 Undefined */
+{
+	SK_U32		i;
+	SK_RLMT_PORT	*pRPort;
+	SK_EVPARA	Para2;
+
+	SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+		("SK_RLMT_LINK_UP Port %d Event BEGIN.\n", Para.Para32[0]))
+
+	pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
+	if (!pRPort->PortStarted) {
+		SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E008, SKERR_RLMT_E008_MSG);
+
+		SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+			("SK_RLMT_LINK_UP Event EMPTY.\n"))
+		return;
+	}
+
+	if (!pRPort->LinkDown) {
+		/* RA;:;: Any better solution? */
+		SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
+			("SK_RLMT_LINK_UP Event EMPTY.\n"))
+		return;
+	}
+
+	SkTimerStop(pAC, IoC, &pRPort->UpTimer);
+	SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
+	SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
+
+	/* Do something if timer already fired?
*/ + + pRPort->LinkDown = SK_FALSE; + pRPort->PortState = SK_RLMT_PS_GOING_UP; + pRPort->GuTimeStamp = SkOsGetTime(pAC); + pRPort->BcTimeStamp = 0; + pRPort->Net->LinksUp++; + if (pRPort->Net->LinksUp == 1) { + SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_ACTIVE); + } + else { + SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_STANDBY); + } + + for (i = 0; i < pRPort->Net->NumPorts; i++) { + if (!pRPort->Net->Port[i]->PortStarted) { + SkRlmtPortStart(pAC, IoC, pRPort->Net->Port[i]->PortNumber); + } + } + + SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); + + if (pRPort->Net->LinksUp >= 2) { + if (pRPort->Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) { + /* Build the check chain. */ + SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber); + } + } + + /* If the first link comes up, start the periodical RLMT timeout. */ + if (pRPort->Net->NumPorts > 1 && pRPort->Net->LinksUp == 1 && + (pRPort->Net->RlmtMode & SK_RLMT_CHECK_OTHERS) != 0) { + Para2.Para32[0] = pRPort->Net->NetNumber; + Para2.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->Net->LocTimer, + pRPort->Net->TimeoutValue, SKGE_RLMT, SK_RLMT_TIM, Para2); + } + + Para2 = Para; + Para2.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->UpTimer, SK_RLMT_PORTUP_TIM_VAL, + SKGE_RLMT, SK_RLMT_PORTUP_TIM, Para2); + + /* Later: if (pAC->Rlmt.RlmtMode & SK_RLMT_CHECK_LOC_LINK) && */ + if ((pRPort->Net->RlmtMode & SK_RLMT_TRANSPARENT) == 0 && + (pRPort->Net->RlmtMode & SK_RLMT_CHECK_LINK) != 0 && + (Para2.pParaPtr = + SkRlmtBuildPacket(pAC, IoC, Para.Para32[0], SK_PACKET_ANNOUNCE, + &pAC->Addr.Port[Para.Para32[0]].CurrentMacAddress, &SkRlmtMcAddr) + ) != NULL) { + /* Send "new" packet to RLMT multicast address. */ + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2); + } + + if (pRPort->Net->RlmtMode & SK_RLMT_CHECK_SEG) { + if ((Para2.pParaPtr = + SkRlmtBuildSpanningTreePacket(pAC, IoC, Para.Para32[0])) != NULL) { + pAC->Rlmt.Port[Para.Para32[0]].RootIdSet = SK_FALSE; + pRPort->Net->CheckingState |= + SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG; + + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2); + + Para.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pRPort->Net->SegTimer, + SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para); + } + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_LINK_UP Event END.\n")) +} /* SkRlmtEvtLinkUp */ + + +/****************************************************************************** + * + * SkRlmtEvtPortUpTim - PORT_UP_TIM + * + * Description: + * This routine handles PORT_UP_TIM events. + * + * Context: + * runtime, pageable? 
+ * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtPortUpTim( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */ +{ + SK_RLMT_PORT *pRPort; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTUP_TIM Port %d Event BEGIN.\n", Para.Para32[0])) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTUP_TIM Event EMPTY.\n")) + return; + } + + pRPort = &pAC->Rlmt.Port[Para.Para32[0]]; + if (pRPort->LinkDown || (pRPort->PortState == SK_RLMT_PS_UP)) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTUP_TIM Port %d Event EMPTY.\n", Para.Para32[0])) + return; + } + + pRPort->PortDown = SK_FALSE; + pRPort->PortState = SK_RLMT_PS_UP; + pRPort->Net->PortsUp++; + if (pRPort->Net->RlmtState != SK_RLMT_RS_INIT) { + if (pAC->Rlmt.NumNets <= 1) { + SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); + } + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_PORT_UP, Para); + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTUP_TIM Event END.\n")) +} /* SkRlmtEvtPortUpTim */ + + +/****************************************************************************** + * + * SkRlmtEvtPortDownTim - PORT_DOWN_* + * + * Description: + * This routine handles PORT_DOWN_* events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtPortDownX( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Event, /* Event code */ +SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */ +{ + SK_RLMT_PORT *pRPort; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTDOWN* Port %d Event (%d) BEGIN.\n", + Para.Para32[0], Event)) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTDOWN* Event EMPTY.\n")) + return; + } + + pRPort = &pAC->Rlmt.Port[Para.Para32[0]]; + if (!pRPort->PortStarted || (Event == SK_RLMT_PORTDOWN_TX_TIM && + !(pRPort->CheckingState & SK_RLMT_PCS_TX))) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTDOWN* Event (%d) EMPTY.\n", Event)) + return; + } + + /* Stop port's timers. */ + SkTimerStop(pAC, IoC, &pRPort->UpTimer); + SkTimerStop(pAC, IoC, &pRPort->DownRxTimer); + SkTimerStop(pAC, IoC, &pRPort->DownTxTimer); + + if (pRPort->PortState != SK_RLMT_PS_LINK_DOWN) { + pRPort->PortState = SK_RLMT_PS_DOWN; + } + + if (!pRPort->PortDown) { + pRPort->Net->PortsUp--; + pRPort->PortDown = SK_TRUE; + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_PORT_DOWN, Para); + } + + pRPort->PacketsPerTimeSlot = 0; + /* pRPort->DataPacketsPerTimeSlot = 0; */ + pRPort->BpduPacketsPerTimeSlot = 0; + pRPort->BcTimeStamp = 0; + + /* + * RA;:;: To be checked: + * - actions at RLMT_STOP: We should not switch anymore. + */ + if (pRPort->Net->RlmtState != SK_RLMT_RS_INIT) { + if (Para.Para32[0] == + pRPort->Net->Port[pRPort->Net->ActivePort]->PortNumber) { + /* Active Port went down. 
*/ + SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber); + } + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORTDOWN* Event (%d) END.\n", Event)) +} /* SkRlmtEvtPortDownX */ + + +/****************************************************************************** + * + * SkRlmtEvtLinkDown - LINK_DOWN + * + * Description: + * This routine handles LINK_DOWN events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtLinkDown( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 Undefined */ +{ + SK_RLMT_PORT *pRPort; + + pRPort = &pAC->Rlmt.Port[Para.Para32[0]]; + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_LINK_DOWN Port %d Event BEGIN.\n", Para.Para32[0])) + + if (!pAC->Rlmt.Port[Para.Para32[0]].LinkDown) { + pRPort->Net->LinksUp--; + pRPort->LinkDown = SK_TRUE; + pRPort->PortState = SK_RLMT_PS_LINK_DOWN; + SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_OFF); + + if ((pRPort->Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) != 0) { + /* Build the check chain. */ + SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber); + } + + /* Ensure that port is marked down. */ + Para.Para32[1] = -1; + (void)SkRlmtEvent(pAC, IoC, SK_RLMT_PORTDOWN, Para); + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_LINK_DOWN Event END.\n")) +} /* SkRlmtEvtLinkDown */ + + +/****************************************************************************** + * + * SkRlmtEvtPortAddr - PORT_ADDR + * + * Description: + * This routine handles PORT_ADDR events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtPortAddr( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */ +{ + SK_U32 i, j; + SK_RLMT_PORT *pRPort; + SK_MAC_ADDR *pOldMacAddr; + SK_MAC_ADDR *pNewMacAddr; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORT_ADDR Port %d Event BEGIN.\n", Para.Para32[0])) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORT_ADDR Event EMPTY.\n")) + return; + } + + /* Port's physical MAC address changed. */ + pOldMacAddr = &pAC->Addr.Port[Para.Para32[0]].PreviousMacAddress; + pNewMacAddr = &pAC->Addr.Port[Para.Para32[0]].CurrentMacAddress; + + /* + * NOTE: This is not scalable for solutions where ports are + * checked remotely. There, we need to send an RLMT + * address change packet - and how do we ensure delivery? + */ + for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + pRPort = &pAC->Rlmt.Port[i]; + for (j = 0; j < pRPort->PortsChecked; j++) { + if (SK_ADDR_EQUAL( + pRPort->PortCheck[j].CheckAddr.a, pOldMacAddr->a)) { + pRPort->PortCheck[j].CheckAddr = *pNewMacAddr; + } + } + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PORT_ADDR Event END.\n")) +} /* SkRlmtEvtPortAddr */ + + +/****************************************************************************** + * + * SkRlmtEvtStart - START + * + * Description: + * This routine handles START events. + * + * Context: + * runtime, pageable? 
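SkRlmtEvtPortAddr() above reacts to a changed physical MAC address by rewriting every check-list entry that still carries the old address. A rough self-contained sketch of that replace step over a flat array of 6-byte addresses; SK_ADDR_EQUAL is modelled with memcmp here, and the helper name is made up for the sketch:

#include <string.h>

#define MAC_LEN 6

/* Replace every occurrence of 'oldmac' in the check list with 'newmac'. */
static void rlmt_replace_check_addr(unsigned char (*check)[MAC_LEN], int count,
                                    const unsigned char oldmac[MAC_LEN],
                                    const unsigned char newmac[MAC_LEN])
{
    int i;

    for (i = 0; i < count; i++) {
        if (memcmp(check[i], oldmac, MAC_LEN) == 0)
            memcpy(check[i], newmac, MAC_LEN);
    }
}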
+ * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtStart( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ +{ + SK_EVPARA Para2; + SK_U32 PortIdx; + SK_U32 PortNumber; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_START Net %d Event BEGIN.\n", Para.Para32[0])) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_START Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad NetNumber %d.\n", Para.Para32[0])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_START Event EMPTY.\n")) + return; + } + + if (pAC->Rlmt.Net[Para.Para32[0]].RlmtState != SK_RLMT_RS_INIT) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_START Event EMPTY.\n")) + return; + } + + if (pAC->Rlmt.NetsStarted >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("All nets should have been started.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_START Event EMPTY.\n")) + return; + } + + if (pAC->Rlmt.Net[Para.Para32[0]].PrefPort >= + pAC->Rlmt.Net[Para.Para32[0]].NumPorts) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E009, SKERR_RLMT_E009_MSG); + + /* Change PrefPort to internal default. */ + Para2.Para32[0] = 0xFFFFFFFF; + Para2.Para32[1] = Para.Para32[0]; + (void)SkRlmtEvent(pAC, IoC, SK_RLMT_PREFPORT_CHANGE, Para2); + } + + PortIdx = pAC->Rlmt.Net[Para.Para32[0]].PrefPort; + PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[PortIdx]->PortNumber; + + pAC->Rlmt.Net[Para.Para32[0]].LinksUp = 0; + pAC->Rlmt.Net[Para.Para32[0]].PortsUp = 0; + pAC->Rlmt.Net[Para.Para32[0]].CheckingState = 0; + pAC->Rlmt.Net[Para.Para32[0]].RlmtState = SK_RLMT_RS_NET_DOWN; + + /* Start preferred port. */ + SkRlmtPortStart(pAC, IoC, PortNumber); + + /* Start Timer (for first port only). */ + Para2.Para32[0] = PortNumber; + Para2.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pAC->Rlmt.Port[PortNumber].UpTimer, + SK_RLMT_PORTSTART_TIM_VAL, SKGE_RLMT, SK_RLMT_PORTSTART_TIM, Para2); + + pAC->Rlmt.NetsStarted++; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_START Event END.\n")) +} /* SkRlmtEvtStart */ + + +/****************************************************************************** + * + * SkRlmtEvtStop - STOP + * + * Description: + * This routine handles STOP events. + * + * Context: + * runtime, pageable? 
+ * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtStop( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ +{ + SK_EVPARA Para2; + SK_U32 PortNumber; + SK_U32 i; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STOP Net %d Event BEGIN.\n", Para.Para32[0])) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STOP Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad NetNumber %d.\n", Para.Para32[0])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STOP Event EMPTY.\n")) + return; + } + + if (pAC->Rlmt.Net[Para.Para32[0]].RlmtState == SK_RLMT_RS_INIT) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STOP Event EMPTY.\n")) + return; + } + + if (pAC->Rlmt.NetsStarted == 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("All nets are stopped.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STOP Event EMPTY.\n")) + return; + } + + /* Stop RLMT timers. */ + SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer); + SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer); + + /* Stop net. */ + pAC->Rlmt.Net[Para.Para32[0]].RlmtState = SK_RLMT_RS_INIT; + pAC->Rlmt.Net[Para.Para32[0]].RootIdSet = SK_FALSE; + Para2.Para32[0] = SK_RLMT_NET_DOWN_FINAL; + Para2.Para32[1] = Para.Para32[0]; /* Net# */ + SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_DOWN, Para2); + + /* Stop ports. */ + for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) { + PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber; + if (pAC->Rlmt.Port[PortNumber].PortState != SK_RLMT_PS_INIT) { + SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].UpTimer); + SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].DownRxTimer); + SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].DownTxTimer); + + pAC->Rlmt.Port[PortNumber].PortState = SK_RLMT_PS_INIT; + pAC->Rlmt.Port[PortNumber].RootIdSet = SK_FALSE; + pAC->Rlmt.Port[PortNumber].PortStarted = SK_FALSE; + Para2.Para32[0] = PortNumber; + Para2.Para32[1] = (SK_U32)-1; + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para2); + } + } + + pAC->Rlmt.NetsStarted--; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STOP Event END.\n")) +} /* SkRlmtEvtStop */ + + +/****************************************************************************** + * + * SkRlmtEvtTim - TIM + * + * Description: + * This routine handles TIM events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtTim( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ +{ + SK_RLMT_PORT *pRPort; + SK_U32 Timeout; + SK_U32 NewTimeout; + SK_U32 PortNumber; + SK_U32 i; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_TIM Event BEGIN.\n")) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_TIM Event EMPTY.\n")) + return; + } + + if ((pAC->Rlmt.Net[Para.Para32[0]].RlmtMode & SK_RLMT_CHECK_OTHERS) == 0 || + pAC->Rlmt.Net[Para.Para32[0]].LinksUp == 0) { + /* Mode changed or all links down: No more link checking. 
*/ + return; + } + +#if 0 + pAC->Rlmt.SwitchCheckCounter--; + if (pAC->Rlmt.SwitchCheckCounter == 0) { + pAC->Rlmt.SwitchCheckCounter; + } +#endif /* 0 */ + + NewTimeout = SK_RLMT_DEF_TO_VAL; + for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) { + PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber; + pRPort = &pAC->Rlmt.Port[PortNumber]; + if (!pRPort->LinkDown) { + Timeout = SkRlmtCheckPort(pAC, IoC, PortNumber); + if (Timeout < NewTimeout) { + NewTimeout = Timeout; + } + + /* + * These counters should be set to 0 for all ports before the + * first frame is sent in the next loop. + */ + pRPort->PacketsPerTimeSlot = 0; + /* pRPort->DataPacketsPerTimeSlot = 0; */ + pRPort->BpduPacketsPerTimeSlot = 0; + } + } + pAC->Rlmt.Net[Para.Para32[0]].TimeoutValue = NewTimeout; + + if (pAC->Rlmt.Net[Para.Para32[0]].LinksUp > 1) { + /* + * If checking remote ports, also send packets if + * (LinksUp == 1) && + * this port checks at least one (remote) port. + */ + + /* + * Must be new loop, as SkRlmtCheckPort can request to + * check segmentation when e.g. checking the last port. + */ + for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) { + if (!pAC->Rlmt.Net[Para.Para32[0]].Port[i]->LinkDown) { + SkRlmtSend(pAC, IoC, + pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber); + } + } + } + + SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer, + pAC->Rlmt.Net[Para.Para32[0]].TimeoutValue, SKGE_RLMT, SK_RLMT_TIM, + Para); + + if (pAC->Rlmt.Net[Para.Para32[0]].LinksUp > 1 && + (pAC->Rlmt.Net[Para.Para32[0]].RlmtMode & SK_RLMT_CHECK_SEG) && + (pAC->Rlmt.Net[Para.Para32[0]].CheckingState & SK_RLMT_RCS_START_SEG)) { + SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer, + SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para); + pAC->Rlmt.Net[Para.Para32[0]].CheckingState &= ~SK_RLMT_RCS_START_SEG; + pAC->Rlmt.Net[Para.Para32[0]].CheckingState |= + SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_TIM Event END.\n")) +} /* SkRlmtEvtTim */ + + +/****************************************************************************** + * + * SkRlmtEvtSegTim - SEG_TIM + * + * Description: + * This routine handles SEG_TIM events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtSegTim( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ +{ +#ifdef xDEBUG + int j; +#endif /* DEBUG */ + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SEG_TIM Event BEGIN.\n")) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SEG_TIM Event EMPTY.\n")) + return; + } + +#ifdef xDEBUG + for (j = 0; j < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; j++) { + SK_ADDR_PORT *pAPort; + SK_U32 k; + SK_U16 *InAddr; + SK_U8 InAddr8[6]; + + InAddr = (SK_U16 *)&InAddr8[0]; + pAPort = pAC->Rlmt.Net[Para.Para32[0]].Port[j]->AddrPort; + for (k = 0; k < pAPort->NextExactMatchRlmt; k++) { + /* Get exact match address k from port j. 
*/ + XM_INADDR(IoC, pAC->Rlmt.Net[Para.Para32[0]].Port[j]->PortNumber, + XM_EXM(k), InAddr); + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("MC address %d on Port %u: %02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x.\n", + k, pAC->Rlmt.Net[Para.Para32[0]].Port[j]->PortNumber, + InAddr8[0], InAddr8[1], InAddr8[2], + InAddr8[3], InAddr8[4], InAddr8[5], + pAPort->Exact[k].a[0], pAPort->Exact[k].a[1], + pAPort->Exact[k].a[2], pAPort->Exact[k].a[3], + pAPort->Exact[k].a[4], pAPort->Exact[k].a[5])) + } + } +#endif /* xDEBUG */ + + SkRlmtCheckSeg(pAC, IoC, Para.Para32[0]); + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SEG_TIM Event END.\n")) +} /* SkRlmtEvtSegTim */ + + +/****************************************************************************** + * + * SkRlmtEvtPacketRx - PACKET_RECEIVED + * + * Description: + * This routine handles PACKET_RECEIVED events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtPacketRx( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_MBUF *pMb */ +{ + SK_MBUF *pMb; + SK_MBUF *pNextMb; + SK_U32 NetNumber; + + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PACKET_RECEIVED Event BEGIN.\n")) + + /* Should we ignore frames during port switching? */ + +#ifdef DEBUG + pMb = Para.pParaPtr; + if (pMb == NULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("No mbuf.\n")) + } + else if (pMb->pNext != NULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("More than one mbuf or pMb->pNext not set.\n")) + } +#endif /* DEBUG */ + + for (pMb = Para.pParaPtr; pMb != NULL; pMb = pNextMb) { + pNextMb = pMb->pNext; + pMb->pNext = NULL; + + NetNumber = pAC->Rlmt.Port[pMb->PortIdx].Net->NetNumber; + if (pAC->Rlmt.Net[NetNumber].RlmtState == SK_RLMT_RS_INIT) { + SkDrvFreeRlmtMbuf(pAC, IoC, pMb); + } + else { + SkRlmtPacketReceive(pAC, IoC, pMb); + } + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PACKET_RECEIVED Event END.\n")) +} /* SkRlmtEvtPacketRx */ + + +/****************************************************************************** + * + * SkRlmtEvtStatsClear - STATS_CLEAR + * + * Description: + * This routine handles STATS_CLEAR events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtStatsClear( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ +{ + SK_U32 i; + SK_RLMT_PORT *pRPort; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_CLEAR Event BEGIN.\n")) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_CLEAR Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad NetNumber %d.\n", Para.Para32[0])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_CLEAR Event EMPTY.\n")) + return; + } + + /* Clear statistics for logical and physical ports. 
*/ + for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) { + pRPort = + &pAC->Rlmt.Port[pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber]; + pRPort->TxHelloCts = 0; + pRPort->RxHelloCts = 0; + pRPort->TxSpHelloReqCts = 0; + pRPort->RxSpHelloCts = 0; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_CLEAR Event END.\n")) +} /* SkRlmtEvtStatsClear */ + + +/****************************************************************************** + * + * SkRlmtEvtStatsUpdate - STATS_UPDATE + * + * Description: + * This routine handles STATS_UPDATE events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtStatsUpdate( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_UPDATE Event BEGIN.\n")) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_UPDATE Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad NetNumber %d.\n", Para.Para32[0])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_UPDATE Event EMPTY.\n")) + return; + } + + /* Update statistics - currently always up-to-date. */ + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_UPDATE Event END.\n")) +} /* SkRlmtEvtStatsUpdate */ + + +/****************************************************************************** + * + * SkRlmtEvtPrefportChange - PREFPORT_CHANGE + * + * Description: + * This routine handles PREFPORT_CHANGE events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtPrefportChange( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 PortIndex; SK_U32 NetNumber */ +{ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PREFPORT_CHANGE to Port %d Event BEGIN.\n", Para.Para32[0])) + + if (Para.Para32[1] >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad NetNumber %d.\n", Para.Para32[1])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PREFPORT_CHANGE Event EMPTY.\n")) + return; + } + + /* 0xFFFFFFFF == auto-mode. */ + if (Para.Para32[0] == 0xFFFFFFFF) { + pAC->Rlmt.Net[Para.Para32[1]].PrefPort = SK_RLMT_DEF_PREF_PORT; + } + else { + if (Para.Para32[0] >= pAC->Rlmt.Net[Para.Para32[1]].NumPorts) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E010, SKERR_RLMT_E010_MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PREFPORT_CHANGE Event EMPTY.\n")) + return; + } + + pAC->Rlmt.Net[Para.Para32[1]].PrefPort = Para.Para32[0]; + } + + pAC->Rlmt.Net[Para.Para32[1]].Preference = Para.Para32[0]; + + if (pAC->Rlmt.Net[Para.Para32[1]].RlmtState != SK_RLMT_RS_INIT) { + SkRlmtCheckSwitch(pAC, IoC, Para.Para32[1]); + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_PREFPORT_CHANGE Event END.\n")) +} /* SkRlmtEvtPrefportChange */ + + +/****************************************************************************** + * + * SkRlmtEvtSetNets - SET_NETS + * + * Description: + * This routine handles SET_NETS events. + * + * Context: + * runtime, pageable? 
+ * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtSetNets( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NumNets; SK_U32 -1 */ +{ + int i; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event BEGIN.\n")) + + if (Para.Para32[1] != (SK_U32)-1) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad Parameter.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] == 0 || Para.Para32[0] > SK_MAX_NETS || + Para.Para32[0] > (SK_U32)pAC->GIni.GIMacsFound) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad number of nets: %d.\n", Para.Para32[0])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] == pAC->Rlmt.NumNets) { /* No change. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event EMPTY.\n")) + return; + } + + /* Entering and leaving dual mode only allowed while nets are stopped. */ + if (pAC->Rlmt.NetsStarted > 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Changing dual mode only allowed while all nets are stopped.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event EMPTY.\n")) + return; + } + + if (Para.Para32[0] == 1) { + if (pAC->Rlmt.NumNets > 1) { + /* Clear logical MAC addr from second net's active port. */ + (void)SkAddrOverride(pAC, IoC, pAC->Rlmt.Net[1].Port[pAC->Addr. + Net[1].ActivePort]->PortNumber, NULL, SK_ADDR_CLEAR_LOGICAL); + pAC->Rlmt.Net[1].NumPorts = 0; + } + + pAC->Rlmt.NumNets = Para.Para32[0]; + for (i = 0; (SK_U32)i < pAC->Rlmt.NumNets; i++) { + pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT; + pAC->Rlmt.Net[i].RootIdSet = SK_FALSE; + pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* "Automatic" */ + pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT; + /* Just assuming. */ + pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort; + pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE; + pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL; + pAC->Rlmt.Net[i].NetNumber = i; + } + + pAC->Rlmt.Port[1].Net= &pAC->Rlmt.Net[0]; + pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound; + + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SET_NETS, Para); + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("RLMT: Changed to one net with two ports.\n")) + } + else if (Para.Para32[0] == 2) { + pAC->Rlmt.Port[1].Net= &pAC->Rlmt.Net[1]; + pAC->Rlmt.Net[1].NumPorts = pAC->GIni.GIMacsFound - 1; + pAC->Rlmt.Net[0].NumPorts = + pAC->GIni.GIMacsFound - pAC->Rlmt.Net[1].NumPorts; + + pAC->Rlmt.NumNets = Para.Para32[0]; + for (i = 0; (SK_U32)i < pAC->Rlmt.NumNets; i++) { + pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT; + pAC->Rlmt.Net[i].RootIdSet = SK_FALSE; + pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* "Automatic" */ + pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT; + /* Just assuming. */ + pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort; + pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE; + pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL; + + pAC->Rlmt.Net[i].NetNumber = i; + } + + /* Set logical MAC addr on second net's active port. */ + (void)SkAddrOverride(pAC, IoC, pAC->Rlmt.Net[1].Port[pAC->Addr. 
+ Net[1].ActivePort]->PortNumber, NULL, SK_ADDR_SET_LOGICAL); + + SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SET_NETS, Para); + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("RLMT: Changed to two nets with one port each.\n")) + } + else { + /* Not implemented for more than two nets. */ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SetNets not implemented for more than two nets.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event EMPTY.\n")) + return; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_SET_NETS Event END.\n")) +} /* SkRlmtSetNets */ + + +/****************************************************************************** + * + * SkRlmtEvtModeChange - MODE_CHANGE + * + * Description: + * This routine handles MODE_CHANGE events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * Nothing + */ +RLMT_STATIC void SkRlmtEvtModeChange( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_EVPARA Para) /* SK_U32 NewMode; SK_U32 NetNumber */ +{ + SK_EVPARA Para2; + SK_U32 i; + SK_U32 PrevRlmtMode; + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_MODE_CHANGE Event BEGIN.\n")) + + if (Para.Para32[1] >= pAC->Rlmt.NumNets) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Bad NetNumber %d.\n", Para.Para32[1])) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_MODE_CHANGE Event EMPTY.\n")) + return; + } + + Para.Para32[0] |= SK_RLMT_CHECK_LINK; + + if ((pAC->Rlmt.Net[Para.Para32[1]].NumPorts == 1) && + Para.Para32[0] != SK_RLMT_MODE_CLS) { + pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = SK_RLMT_MODE_CLS; + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Forced RLMT mode to CLS on single port net.\n")) + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_MODE_CHANGE Event EMPTY.\n")) + return; + } + + /* Update RLMT mode. */ + PrevRlmtMode = pAC->Rlmt.Net[Para.Para32[1]].RlmtMode; + pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = Para.Para32[0]; + + if ((PrevRlmtMode & SK_RLMT_CHECK_LOC_LINK) != + (pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_LOC_LINK)) { + /* SK_RLMT_CHECK_LOC_LINK bit changed. */ + if ((PrevRlmtMode & SK_RLMT_CHECK_OTHERS) == 0 && + pAC->Rlmt.Net[Para.Para32[1]].NumPorts > 1 && + pAC->Rlmt.Net[Para.Para32[1]].PortsUp >= 1) { + /* 20001207 RA: Was "PortsUp == 1". */ + Para2.Para32[0] = Para.Para32[1]; + Para2.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[1]].LocTimer, + pAC->Rlmt.Net[Para.Para32[1]].TimeoutValue, + SKGE_RLMT, SK_RLMT_TIM, Para2); + } + } + + if ((PrevRlmtMode & SK_RLMT_CHECK_SEG) != + (pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_SEG)) { + /* SK_RLMT_CHECK_SEG bit changed. */ + for (i = 0; i < pAC->Rlmt.Net[Para.Para32[1]].NumPorts; i++) { + (void)SkAddrMcClear(pAC, IoC, + pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber, + SK_ADDR_PERMANENT | SK_MC_SW_ONLY); + + /* Add RLMT MC address. */ + (void)SkAddrMcAdd(pAC, IoC, + pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber, + &SkRlmtMcAddr, SK_ADDR_PERMANENT); + + if ((pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & + SK_RLMT_CHECK_SEG) != 0) { + /* Add BPDU MC address. 
*/ + (void)SkAddrMcAdd(pAC, IoC, + pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber, + &BridgeMcAddr, SK_ADDR_PERMANENT); + + if (pAC->Rlmt.Net[Para.Para32[1]].RlmtState != SK_RLMT_RS_INIT) { + if (!pAC->Rlmt.Net[Para.Para32[1]].Port[i]->LinkDown && + (Para2.pParaPtr = SkRlmtBuildSpanningTreePacket( + pAC, IoC, i)) != NULL) { + pAC->Rlmt.Net[Para.Para32[1]].Port[i]->RootIdSet = + SK_FALSE; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2); + } + } + } + (void)SkAddrMcUpdate(pAC, IoC, + pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber); + } /* for ... */ + + if ((pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_SEG) != 0) { + Para2.Para32[0] = Para.Para32[1]; + Para2.Para32[1] = (SK_U32)-1; + SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[1]].SegTimer, + SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para2); + } + } /* SK_RLMT_CHECK_SEG bit changed. */ + + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_MODE_CHANGE Event END.\n")) +} /* SkRlmtEvtModeChange */ + + +/****************************************************************************** + * + * SkRlmtEvent - a PORT- or an RLMT-specific event happened + * + * Description: + * This routine calls subroutines to handle PORT- and RLMT-specific events. + * + * Context: + * runtime, pageable? + * may be called after SK_INIT_IO + * + * Returns: + * 0 + */ +int SkRlmtEvent( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +SK_U32 Event, /* Event code */ +SK_EVPARA Para) /* Event-specific parameter */ +{ + switch (Event) { + + /* ----- PORT events ----- */ + + case SK_RLMT_PORTSTART_TIM: /* From RLMT via TIME. */ + SkRlmtEvtPortStartTim(pAC, IoC, Para); + break; + case SK_RLMT_LINK_UP: /* From SIRQ. */ + SkRlmtEvtLinkUp(pAC, IoC, Para); + break; + case SK_RLMT_PORTUP_TIM: /* From RLMT via TIME. */ + SkRlmtEvtPortUpTim(pAC, IoC, Para); + break; + case SK_RLMT_PORTDOWN: /* From RLMT. */ + case SK_RLMT_PORTDOWN_RX_TIM: /* From RLMT via TIME. */ + case SK_RLMT_PORTDOWN_TX_TIM: /* From RLMT via TIME. */ + SkRlmtEvtPortDownX(pAC, IoC, Event, Para); + break; + case SK_RLMT_LINK_DOWN: /* From SIRQ. */ + SkRlmtEvtLinkDown(pAC, IoC, Para); + break; + case SK_RLMT_PORT_ADDR: /* From ADDR. */ + SkRlmtEvtPortAddr(pAC, IoC, Para); + break; + + /* ----- RLMT events ----- */ + + case SK_RLMT_START: /* From DRV. */ + SkRlmtEvtStart(pAC, IoC, Para); + break; + case SK_RLMT_STOP: /* From DRV. */ + SkRlmtEvtStop(pAC, IoC, Para); + break; + case SK_RLMT_TIM: /* From RLMT via TIME. */ + SkRlmtEvtTim(pAC, IoC, Para); + break; + case SK_RLMT_SEG_TIM: + SkRlmtEvtSegTim(pAC, IoC, Para); + break; + case SK_RLMT_PACKET_RECEIVED: /* From DRV. */ + SkRlmtEvtPacketRx(pAC, IoC, Para); + break; + case SK_RLMT_STATS_CLEAR: /* From PNMI. */ + SkRlmtEvtStatsClear(pAC, IoC, Para); + break; + case SK_RLMT_STATS_UPDATE: /* From PNMI. */ + SkRlmtEvtStatsUpdate(pAC, IoC, Para); + break; + case SK_RLMT_PREFPORT_CHANGE: /* From PNMI. */ + SkRlmtEvtPrefportChange(pAC, IoC, Para); + break; + case SK_RLMT_MODE_CHANGE: /* From PNMI. */ + SkRlmtEvtModeChange(pAC, IoC, Para); + break; + case SK_RLMT_SET_NETS: /* From DRV. */ + SkRlmtEvtSetNets(pAC, IoC, Para); + break; + + /* ----- Unknown events ----- */ + + default: /* Create error log entry. 
*/ + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("Unknown RLMT Event %d.\n", Event)) + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E003, SKERR_RLMT_E003_MSG); + break; + } /* switch() */ + + return (0); +} /* SkRlmtEvent */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ diff --git a/drivers/net/sk98lin/sktimer.c b/drivers/net/sk98lin/sktimer.c new file mode 100644 index 000000000000..4e462955ecd8 --- /dev/null +++ b/drivers/net/sk98lin/sktimer.c @@ -0,0 +1,250 @@ +/****************************************************************************** + * + * Name: sktimer.c + * Project: Gigabit Ethernet Adapters, Event Scheduler Module + * Version: $Revision: 1.14 $ + * Date: $Date: 2003/09/16 13:46:51 $ + * Purpose: High level timer functions. + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect GmbH. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + + +/* + * Event queue and dispatcher + */ +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: sktimer.c,v 1.14 2003/09/16 13:46:51 rschmidt Exp $ (C) Marvell."; +#endif + +#include "h/skdrv1st.h" /* Driver Specific Definitions */ +#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ + +#ifdef __C2MAN__ +/* + Event queue management. + + General Description: + + */ +intro() +{} +#endif + + +/* Forward declaration */ +static void timer_done(SK_AC *pAC,SK_IOC Ioc,int Restart); + + +/* + * Inits the software timer + * + * needs to be called during Init level 1. + */ +void SkTimerInit( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc, /* IoContext */ +int Level) /* Init Level */ +{ + switch (Level) { + case SK_INIT_DATA: + pAC->Tim.StQueue = NULL; + break; + case SK_INIT_IO: + SkHwtInit(pAC, Ioc); + SkTimerDone(pAC, Ioc); + break; + default: + break; + } +} + +/* + * Stops a high level timer + * - If a timer is not in the queue the function returns normally, too. 
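SkTimerStop() and SkTimerStart() below keep all pending software timers in one list ordered by expiry, where each entry stores only the delta to its predecessor and only the head's delta is programmed into the hardware timer. A simplified, self-contained sketch of inserting into such a delta list (it assumes the existing deltas are already current, which the driver ensures by calling timer_done() first; the types and names are invented for the sketch):

struct delta_timer {
    struct delta_timer *next;
    unsigned int delta;              /* ticks relative to the previous entry */
};

/* Insert 'tm' so that it expires 'ticks' from now. */
static void delta_list_insert(struct delta_timer **head,
                              struct delta_timer *tm, unsigned int ticks)
{
    struct delta_timer **pp = head;
    struct delta_timer *cur;
    unsigned int sum = 0;

    while ((cur = *pp) != NULL && sum + cur->delta <= ticks) {
        sum += cur->delta;           /* walk past timers that expire earlier */
        pp = &cur->next;
    }

    tm->delta = ticks - sum;         /* delta relative to the predecessor */
    tm->next = cur;
    if (cur != NULL)
        cur->delta -= tm->delta;     /* keep the successor's delta consistent */
    *pp = tm;
}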
+ */ +void SkTimerStop( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc, /* IoContext */ +SK_TIMER *pTimer) /* Timer Pointer to be started */ +{ + SK_TIMER **ppTimPrev; + SK_TIMER *pTm; + + /* + * remove timer from queue + */ + pTimer->TmActive = SK_FALSE; + + if (pAC->Tim.StQueue == pTimer && !pTimer->TmNext) { + SkHwtStop(pAC, Ioc); + } + + for (ppTimPrev = &pAC->Tim.StQueue; (pTm = *ppTimPrev); + ppTimPrev = &pTm->TmNext ) { + + if (pTm == pTimer) { + /* + * Timer found in queue + * - dequeue it and + * - correct delta of the next timer + */ + *ppTimPrev = pTm->TmNext; + + if (pTm->TmNext) { + /* correct delta of next timer in queue */ + pTm->TmNext->TmDelta += pTm->TmDelta; + } + return; + } + } +} + +/* + * Start a high level software timer + */ +void SkTimerStart( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc, /* IoContext */ +SK_TIMER *pTimer, /* Timer Pointer to be started */ +SK_U32 Time, /* Time value */ +SK_U32 Class, /* Event Class for this timer */ +SK_U32 Event, /* Event Value for this timer */ +SK_EVPARA Para) /* Event Parameter for this timer */ +{ + SK_TIMER **ppTimPrev; + SK_TIMER *pTm; + SK_U32 Delta; + + Time /= 16; /* input is uS, clock ticks are 16uS */ + + if (!Time) + Time = 1; + + SkTimerStop(pAC, Ioc, pTimer); + + pTimer->TmClass = Class; + pTimer->TmEvent = Event; + pTimer->TmPara = Para; + pTimer->TmActive = SK_TRUE; + + if (!pAC->Tim.StQueue) { + /* First Timer to be started */ + pAC->Tim.StQueue = pTimer; + pTimer->TmNext = NULL; + pTimer->TmDelta = Time; + + SkHwtStart(pAC, Ioc, Time); + + return; + } + + /* + * timer correction + */ + timer_done(pAC, Ioc, 0); + + /* + * find position in queue + */ + Delta = 0; + for (ppTimPrev = &pAC->Tim.StQueue; (pTm = *ppTimPrev); + ppTimPrev = &pTm->TmNext ) { + + if (Delta + pTm->TmDelta > Time) { + /* Position found */ + /* Here the timer needs to be inserted. */ + break; + } + Delta += pTm->TmDelta; + } + + /* insert in queue */ + *ppTimPrev = pTimer; + pTimer->TmNext = pTm; + pTimer->TmDelta = Time - Delta; + + if (pTm) { + /* There is a next timer + * -> correct its Delta value. + */ + pTm->TmDelta -= pTimer->TmDelta; + } + + /* restart with first */ + SkHwtStart(pAC, Ioc, pAC->Tim.StQueue->TmDelta); +} + + +void SkTimerDone( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc) /* IoContext */ +{ + timer_done(pAC, Ioc, 1); +} + + +static void timer_done( +SK_AC *pAC, /* Adapters context */ +SK_IOC Ioc, /* IoContext */ +int Restart) /* Do we need to restart the Hardware timer ? */ +{ + SK_U32 Delta; + SK_TIMER *pTm; + SK_TIMER *pTComp; /* Timer completed now now */ + SK_TIMER **ppLast; /* Next field of Last timer to be deq */ + int Done = 0; + + Delta = SkHwtRead(pAC, Ioc); + + ppLast = &pAC->Tim.StQueue; + pTm = pAC->Tim.StQueue; + while (pTm && !Done) { + if (Delta >= pTm->TmDelta) { + /* Timer ran out */ + pTm->TmActive = SK_FALSE; + Delta -= pTm->TmDelta; + ppLast = &pTm->TmNext; + pTm = pTm->TmNext; + } + else { + /* We found the first timer that did not run out */ + pTm->TmDelta -= Delta; + Delta = 0; + Done = 1; + } + } + *ppLast = NULL; + /* + * pTm points to the first Timer that did not run out. + * StQueue points to the first Timer that run out. 
+ */ + + for ( pTComp = pAC->Tim.StQueue; pTComp; pTComp = pTComp->TmNext) { + SkEventQueue(pAC,pTComp->TmClass, pTComp->TmEvent, pTComp->TmPara); + } + + /* Set head of timer queue to the first timer that did not run out */ + pAC->Tim.StQueue = pTm; + + if (Restart && pAC->Tim.StQueue) { + /* Restart HW timer */ + SkHwtStart(pAC, Ioc, pAC->Tim.StQueue->TmDelta); + } +} + +/* End of file */ diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c new file mode 100644 index 000000000000..1e662aaebf84 --- /dev/null +++ b/drivers/net/sk98lin/skvpd.c @@ -0,0 +1,1091 @@ +/****************************************************************************** + * + * Name: skvpd.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.37 $ + * Date: $Date: 2003/01/13 10:42:45 $ + * Purpose: Shared software to read and write VPD data + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2003 SysKonnect GmbH. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. + * + ******************************************************************************/ + +/* + Please refer skvpd.txt for information how to include this module + */ +static const char SysKonnectFileId[] = + "@(#)$Id: skvpd.c,v 1.37 2003/01/13 10:42:45 rschmidt Exp $ (C) SK"; + +#include "h/skdrv1st.h" +#include "h/sktypes.h" +#include "h/skdebug.h" +#include "h/skdrv2nd.h" + +/* + * Static functions + */ +#ifndef SK_KR_PROTO +static SK_VPD_PARA *vpd_find_para( + SK_AC *pAC, + const char *key, + SK_VPD_PARA *p); +#else /* SK_KR_PROTO */ +static SK_VPD_PARA *vpd_find_para(); +#endif /* SK_KR_PROTO */ + +/* + * waits for a completion of a VPD transfer + * The VPD transfer must complete within SK_TICKS_PER_SEC/16 + * + * returns 0: success, transfer completes + * error exit(9) with a error message + */ +static int VpdWait( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +int event) /* event to wait for (VPD_READ / VPD_write) completion*/ +{ + SK_U64 start_time; + SK_U16 state; + + SK_DBG_MSG(pAC,SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD wait for %s\n", event?"Write":"Read")); + start_time = SkOsGetTime(pAC); + do { + if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC) { + + /* Bug fix AF: Thu Mar 28 2002 + * Do not call: VPD_STOP(pAC, IoC); + * A pending VPD read cycle can not be aborted by writing + * VPD_WRITE to the PCI_VPD_ADR_REG (VPD address register). + * Although the write threshold in the OUR-register protects + * VPD read only space from being overwritten this does not + * protect a VPD read from being `converted` into a VPD write + * operation (on the fly). As a consequence the VPD_STOP would + * delete VPD read only data. In case of any problems with the + * I2C bus we exit the loop here. The I2C read operation can + * not be aborted except by a reset (->LR). 
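VpdWait() here is essentially a bounded busy-wait: it polls the VPD address register until the completion flag changes, and gives up after roughly one second of SkOsGetTime() ticks because the EEPROM cycle cannot be aborted. The general shape, sketched with the hardware test and the tick source passed in as callbacks (both names invented for the sketch):

/*
 * Poll 'ready(ctx)' until it returns nonzero or more than 'max_ticks'
 * have elapsed on the monotonic tick source 'now' (SkOsGetTime in the
 * driver). Returns 0 on completion, 1 on timeout.
 */
static int poll_with_timeout(int (*ready)(void *), void *ctx,
                             unsigned long (*now)(void),
                             unsigned long max_ticks)
{
    unsigned long start = now();

    while (!ready(ctx)) {
        if (now() - start > max_ticks)
            return 1;
    }
    return 0;
}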
+ */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_FATAL | SK_DBGCAT_ERR, + ("ERROR:VPD wait timeout\n")); + return(1); + } + + VPD_IN16(pAC, IoC, PCI_VPD_ADR_REG, &state); + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("state = %x, event %x\n",state,event)); + } while((int)(state & PCI_VPD_FLAG) == event); + + return(0); +} + +#ifdef SKDIAG + +/* + * Read the dword at address 'addr' from the VPD EEPROM. + * + * Needed Time: MIN 1,3 ms MAX 2,6 ms + * + * Note: The DWord is returned in the endianess of the machine the routine + * is running on. + * + * Returns the data read. + */ +SK_U32 VpdReadDWord( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +int addr) /* VPD address */ +{ + SK_U32 Rtv; + + /* start VPD read */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD read dword at 0x%x\n",addr)); + addr &= ~VPD_WRITE; /* ensure the R/W bit is set to read */ + + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)addr); + + /* ignore return code here */ + (void)VpdWait(pAC, IoC, VPD_READ); + + /* Don't swap here, it's a data stream of bytes */ + Rtv = 0; + + VPD_IN32(pAC, IoC, PCI_VPD_DAT_REG, &Rtv); + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD read dword data = 0x%x\n",Rtv)); + return(Rtv); +} + +#endif /* SKDIAG */ + +/* + * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from + * or to the I2C EEPROM. + * + * Returns number of bytes read / written. + */ +static int VpdWriteStream( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +char *buf, /* data buffer */ +int Addr, /* VPD start address */ +int Len) /* number of bytes to read / to write */ +{ + int i; + int j; + SK_U16 AdrReg; + int Rtv; + SK_U8 * pComp; /* Compare pointer */ + SK_U8 Data; /* Input Data for Compare */ + + /* Init Compare Pointer */ + pComp = (SK_U8 *) buf; + + for (i = 0; i < Len; i++, buf++) { + if ((i%sizeof(SK_U32)) == 0) { + /* + * At the begin of each cycle read the Data Reg + * So it is initialized even if only a few bytes + * are written. 
+ */ + AdrReg = (SK_U16) Addr; + AdrReg &= ~VPD_WRITE; /* READ operation */ + + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); + + /* Wait for termination */ + Rtv = VpdWait(pAC, IoC, VPD_READ); + if (Rtv != 0) { + return(i); + } + } + + /* Write current Byte */ + VPD_OUT8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)), + *(SK_U8*)buf); + + if (((i%sizeof(SK_U32)) == 3) || (i == (Len - 1))) { + /* New Address needs to be written to VPD_ADDR reg */ + AdrReg = (SK_U16) Addr; + Addr += sizeof(SK_U32); + AdrReg |= VPD_WRITE; /* WRITE operation */ + + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); + + /* Wait for termination */ + Rtv = VpdWait(pAC, IoC, VPD_WRITE); + if (Rtv != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Write Timed Out\n")); + return(i - (i%sizeof(SK_U32))); + } + + /* + * Now re-read to verify + */ + AdrReg &= ~VPD_WRITE; /* READ operation */ + + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); + + /* Wait for termination */ + Rtv = VpdWait(pAC, IoC, VPD_READ); + if (Rtv != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Verify Timed Out\n")); + return(i - (i%sizeof(SK_U32))); + } + + for (j = 0; j <= (int)(i%sizeof(SK_U32)); j++, pComp++) { + + VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + j, &Data); + + if (Data != *pComp) { + /* Verify Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("WriteStream Verify Error\n")); + return(i - (i%sizeof(SK_U32)) + j); + } + } + } + } + + return(Len); +} + + +/* + * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from + * or to the I2C EEPROM. + * + * Returns number of bytes read / written. + */ +static int VpdReadStream( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +char *buf, /* data buffer */ +int Addr, /* VPD start address */ +int Len) /* number of bytes to read / to write */ +{ + int i; + SK_U16 AdrReg; + int Rtv; + + for (i = 0; i < Len; i++, buf++) { + if ((i%sizeof(SK_U32)) == 0) { + /* New Address needs to be written to VPD_ADDR reg */ + AdrReg = (SK_U16) Addr; + Addr += sizeof(SK_U32); + AdrReg &= ~VPD_WRITE; /* READ operation */ + + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); + + /* Wait for termination */ + Rtv = VpdWait(pAC, IoC, VPD_READ); + if (Rtv != 0) { + return(i); + } + } + VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)), + (SK_U8 *)buf); + } + + return(Len); +} + +/* + * Read ore writes 'len' bytes of VPD data, starting at 'addr' from + * or to the I2C EEPROM. + * + * Returns number of bytes read / written. + */ +static int VpdTransferBlock( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +char *buf, /* data buffer */ +int addr, /* VPD start address */ +int len, /* number of bytes to read / to write */ +int dir) /* transfer direction may be VPD_READ or VPD_WRITE */ +{ + int Rtv; /* Return value */ + int vpd_rom_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD %s block, addr = 0x%x, len = %d\n", + dir ? "write" : "read", addr, len)); + + if (len == 0) + return(0); + + vpd_rom_size = pAC->vpd.rom_size; + + if (addr > vpd_rom_size - 4) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Address error: 0x%x, exp. 
< 0x%x\n", + addr, vpd_rom_size - 4)); + return(0); + } + + if (addr + len > vpd_rom_size) { + len = vpd_rom_size - addr; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Warning: len was cut to %d\n", len)); + } + + if (dir == VPD_READ) { + Rtv = VpdReadStream(pAC, IoC, buf, addr, len); + } + else { + Rtv = VpdWriteStream(pAC, IoC, buf, addr, len); + } + + return(Rtv); +} + +#ifdef SKDIAG + +/* + * Read 'len' bytes of VPD data, starting at 'addr'. + * + * Returns number of bytes read. + */ +int VpdReadBlock( +SK_AC *pAC, /* pAC pointer */ +SK_IOC IoC, /* IO Context */ +char *buf, /* buffer were the data should be stored */ +int addr, /* start reading at the VPD address */ +int len) /* number of bytes to read */ +{ + return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_READ)); +} + +/* + * Write 'len' bytes of *but to the VPD EEPROM, starting at 'addr'. + * + * Returns number of bytes writes. + */ +int VpdWriteBlock( +SK_AC *pAC, /* pAC pointer */ +SK_IOC IoC, /* IO Context */ +char *buf, /* buffer, holds the data to write */ +int addr, /* start writing at the VPD address */ +int len) /* number of bytes to write */ +{ + return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_WRITE)); +} +#endif /* SKDIAG */ + +/* + * (re)initialize the VPD buffer + * + * Reads the VPD data from the EEPROM into the VPD buffer. + * Get the remaining read only and read / write space. + * + * return 0: success + * 1: fatal VPD error + */ +static int VpdInit( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC) /* IO Context */ +{ + SK_VPD_PARA *r, rp; /* RW or RV */ + int i; + unsigned char x; + int vpd_size; + SK_U16 dev_id; + SK_U32 our_reg2; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, ("VpdInit .. ")); + + VPD_IN16(pAC, IoC, PCI_DEVICE_ID, &dev_id); + + VPD_IN32(pAC, IoC, PCI_OUR_REG_2, &our_reg2); + + pAC->vpd.rom_size = 256 << ((our_reg2 & PCI_VPD_ROM_SZ) >> 14); + + /* + * this function might get used before the hardware is initialized + * therefore we cannot always trust in GIChipId + */ + if (((pAC->vpd.v.vpd_status & VPD_VALID) == 0 && + dev_id != VPD_DEV_ID_GENESIS) || + ((pAC->vpd.v.vpd_status & VPD_VALID) != 0 && + !pAC->GIni.GIGenesis)) { + + /* for Yukon the VPD size is always 256 */ + vpd_size = VPD_SIZE_YUKON; + } + else { + /* Genesis uses the maximum ROM size up to 512 for VPD */ + if (pAC->vpd.rom_size > VPD_SIZE_GENESIS) { + vpd_size = VPD_SIZE_GENESIS; + } + else { + vpd_size = pAC->vpd.rom_size; + } + } + + /* read the VPD data into the VPD buffer */ + if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf, 0, vpd_size, VPD_READ) + != vpd_size) { + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Block Read Error\n")); + return(1); + } + + pAC->vpd.vpd_size = vpd_size; + + /* Asus K8V Se Deluxe bugfix. Correct VPD content */ + /* MBo April 2004 */ + if (((unsigned char)pAC->vpd.vpd_buf[0x3f] == 0x38) && + ((unsigned char)pAC->vpd.vpd_buf[0x40] == 0x3c) && + ((unsigned char)pAC->vpd.vpd_buf[0x41] == 0x45)) { + printk("sk98lin: Asus mainboard with buggy VPD? 
" + "Correcting data.\n"); + pAC->vpd.vpd_buf[0x40] = 0x38; + } + + + /* find the end tag of the RO area */ + if (!(r = vpd_find_para(pAC, VPD_RV, &rp))) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: RV Tag not found\n")); + return(1); + } + + if (r->p_val + r->p_len > pAC->vpd.vpd_buf + vpd_size/2) { + SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: Invalid VPD struct size\n")); + return(1); + } + pAC->vpd.v.vpd_free_ro = r->p_len - 1; + + /* test the checksum */ + for (i = 0, x = 0; (unsigned)i <= (unsigned)vpd_size/2 - r->p_len; i++) { + x += pAC->vpd.vpd_buf[i]; + } + + if (x != 0) { + /* checksum error */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("VPD Checksum Error\n")); + return(1); + } + + /* find and check the end tag of the RW area */ + if (!(r = vpd_find_para(pAC, VPD_RW, &rp))) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: RV Tag not found\n")); + return(1); + } + + if (r->p_val < pAC->vpd.vpd_buf + vpd_size/2) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: Invalid VPD struct size\n")); + return(1); + } + pAC->vpd.v.vpd_free_rw = r->p_len; + + /* everything seems to be ok */ + if (pAC->GIni.GIChipId != 0) { + pAC->vpd.v.vpd_status |= VPD_VALID; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, + ("done. Free RO = %d, Free RW = %d\n", + pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw)); + + return(0); +} + +/* + * find the Keyword 'key' in the VPD buffer and fills the + * parameter struct 'p' with it's values + * + * returns *p success + * 0: parameter was not found or VPD encoding error + */ +static SK_VPD_PARA *vpd_find_para( +SK_AC *pAC, /* common data base */ +const char *key, /* keyword to find (e.g. "MN") */ +SK_VPD_PARA *p) /* parameter description struct */ +{ + char *v ; /* points to VPD buffer */ + int max; /* Maximum Number of Iterations */ + + v = pAC->vpd.vpd_buf; + max = 128; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD find para %s .. ",key)); + + /* check mandatory resource type ID string (Product Name) */ + if (*v != (char)RES_ID) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Error: 0x%x missing\n", RES_ID)); + return NULL; + } + + if (strcmp(key, VPD_NAME) == 0) { + p->p_len = VPD_GET_RES_LEN(v); + p->p_val = VPD_GET_VAL(v); + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("found, len = %d\n", p->p_len)); + return(p); + } + + v += 3 + VPD_GET_RES_LEN(v) + 3; + for (;; ) { + if (SK_MEMCMP(key,v,2) == 0) { + p->p_len = VPD_GET_VPD_LEN(v); + p->p_val = VPD_GET_VAL(v); + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("found, len = %d\n",p->p_len)); + return(p); + } + + /* exit when reaching the "RW" Tag or the maximum of itera. */ + max--; + if (SK_MEMCMP(VPD_RW,v,2) == 0 || max == 0) { + break; + } + + if (SK_MEMCMP(VPD_RV,v,2) == 0) { + v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */ + } + else { + v += 3 + VPD_GET_VPD_LEN(v); + } + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("scanning '%c%c' len = %d\n",v[0],v[1],v[2])); + } + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, ("not found\n")); + if (max == 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Key/Len Encoding error\n")); + } +#endif /* DEBUG */ + return NULL; +} + +/* + * Move 'n' bytes. Begin with the last byte if 'n' is > 0, + * Start with the last byte if n is < 0. 
+ * + * returns nothing + */ +static void vpd_move_para( +char *start, /* start of memory block */ +char *end, /* end of memory block to move */ +int n) /* number of bytes the memory block has to be moved */ +{ + char *p; + int i; /* number of byte copied */ + + if (n == 0) + return; + + i = (int) (end - start + 1); + if (n < 0) { + p = start + n; + while (i != 0) { + *p++ = *start++; + i--; + } + } + else { + p = end + n; + while (i != 0) { + *p-- = *end--; + i--; + } + } +} + +/* + * setup the VPD keyword 'key' at 'ip'. + * + * returns nothing + */ +static void vpd_insert_key( +const char *key, /* keyword to insert */ +const char *buf, /* buffer with the keyword value */ +int len, /* length of the value string */ +char *ip) /* inseration point */ +{ + SK_VPD_KEY *p; + + p = (SK_VPD_KEY *) ip; + p->p_key[0] = key[0]; + p->p_key[1] = key[1]; + p->p_len = (unsigned char) len; + SK_MEMCPY(&p->p_val,buf,len); +} + +/* + * Setup the VPD end tag "RV" / "RW". + * Also correct the remaining space variables vpd_free_ro / vpd_free_rw. + * + * returns 0: success + * 1: encoding error + */ +static int vpd_mod_endtag( +SK_AC *pAC, /* common data base */ +char *etp) /* end pointer input position */ +{ + SK_VPD_KEY *p; + unsigned char x; + int i; + int vpd_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD modify endtag at 0x%x = '%c%c'\n",etp,etp[0],etp[1])); + + vpd_size = pAC->vpd.vpd_size; + + p = (SK_VPD_KEY *) etp; + + if (p->p_key[0] != 'R' || (p->p_key[1] != 'V' && p->p_key[1] != 'W')) { + /* something wrong here, encoding error */ + SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: invalid end tag\n")); + return(1); + } + if (etp > pAC->vpd.vpd_buf + vpd_size/2) { + /* create "RW" tag */ + p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size-etp-3-1); + pAC->vpd.v.vpd_free_rw = (int) p->p_len; + i = pAC->vpd.v.vpd_free_rw; + etp += 3; + } + else { + /* create "RV" tag */ + p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size/2-etp-3); + pAC->vpd.v.vpd_free_ro = (int) p->p_len - 1; + + /* setup checksum */ + for (i = 0, x = 0; i < vpd_size/2 - p->p_len; i++) { + x += pAC->vpd.vpd_buf[i]; + } + p->p_val = (char) 0 - x; + i = pAC->vpd.v.vpd_free_ro; + etp += 4; + } + while (i) { + *etp++ = 0x00; + i--; + } + + return(0); +} + +/* + * Insert a VPD keyword into the VPD buffer. + * + * The keyword 'key' is inserted at the position 'ip' in the + * VPD buffer. + * The keywords behind the input position will + * be moved. The VPD end tag "RV" or "RW" is generated again. 
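vpd_mod_endtag() above recreates the "RV" end tag, whose value byte is a checksum chosen so that all bytes of the read-only area, including the checksum itself, sum to zero modulo 256; VpdInit() verifies exactly that. A small self-contained sketch of both directions (helper names are invented):

/* Checksum byte for a read-only area of 'len' bytes (checksum excluded). */
static unsigned char vpd_ro_checksum(const unsigned char *ro, unsigned int len)
{
    unsigned char sum = 0;
    unsigned int i;

    for (i = 0; i < len; i++)
        sum += ro[i];
    return (unsigned char)(0u - sum);    /* total then wraps to zero */
}

/* Verify: all bytes including the stored checksum must sum to zero. */
static int vpd_ro_checksum_ok(const unsigned char *ro, unsigned int len_with_sum)
{
    unsigned char sum = 0;
    unsigned int i;

    for (i = 0; i < len_with_sum; i++)
        sum += ro[i];
    return sum == 0;
}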
+ * + * returns 0: success + * 2: value string was cut + * 4: VPD full, keyword was not written + * 6: fatal VPD error + * + */ +static int VpdSetupPara( +SK_AC *pAC, /* common data base */ +const char *key, /* keyword to insert */ +const char *buf, /* buffer with the keyword value */ +int len, /* length of the keyword value */ +int type, /* VPD_RO_KEY or VPD_RW_KEY */ +int op) /* operation to do: ADD_KEY or OWR_KEY */ +{ + SK_VPD_PARA vp; + char *etp; /* end tag position */ + int free; /* remaining space in selected area */ + char *ip; /* input position inside the VPD buffer */ + int rtv; /* return code */ + int head; /* additional haeder bytes to move */ + int found; /* additinoal bytes if the keyword was found */ + int vpd_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD setup para key = %s, val = %s\n",key,buf)); + + vpd_size = pAC->vpd.vpd_size; + + rtv = 0; + ip = NULL; + if (type == VPD_RW_KEY) { + /* end tag is "RW" */ + free = pAC->vpd.v.vpd_free_rw; + etp = pAC->vpd.vpd_buf + (vpd_size - free - 1 - 3); + } + else { + /* end tag is "RV" */ + free = pAC->vpd.v.vpd_free_ro; + etp = pAC->vpd.vpd_buf + (vpd_size/2 - free - 4); + } + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("Free RO = %d, Free RW = %d\n", + pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw)); + + head = 0; + found = 0; + if (op == OWR_KEY) { + if (vpd_find_para(pAC, key, &vp)) { + found = 3; + ip = vp.p_val - 3; + free += vp.p_len + 3; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("Overwrite Key\n")); + } + else { + op = ADD_KEY; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("Add Key\n")); + } + } + if (op == ADD_KEY) { + ip = etp; + vp.p_len = 0; + head = 3; + } + + if (len + 3 > free) { + if (free < 7) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Buffer Overflow, keyword not written\n")); + return(4); + } + /* cut it again */ + len = free - 3; + rtv = 2; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Buffer Full, Keyword was cut\n")); + } + + vpd_move_para(ip + vp.p_len + found, etp+2, len-vp.p_len+head); + vpd_insert_key(key, buf, len, ip); + if (vpd_mod_endtag(pAC, etp + len - vp.p_len + head)) { + pAC->vpd.v.vpd_status &= ~VPD_VALID; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Encoding Error\n")); + return(6); + } + + return(rtv); +} + + +/* + * Read the contents of the VPD EEPROM and copy it to the + * VPD buffer if not already done. + * + * return: A pointer to the vpd_status structure. The structure contains + * this fields. + */ +SK_VPD_STATUS *VpdStat( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC) /* IO Context */ +{ + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + (void)VpdInit(pAC, IoC); + } + return(&pAC->vpd.v); +} + + +/* + * Read the contents of the VPD EEPROM and copy it to the VPD + * buffer if not already done. + * Scan the VPD buffer for VPD keywords and create the VPD + * keyword list by copying the keywords to 'buf', all after + * each other and terminated with a '\0'. + * + * Exceptions: o The Resource Type ID String (product name) is called "Name" + * o The VPD end tags 'RV' and 'RW' are not listed + * + * The number of copied keywords is counted in 'elements'. 
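Editorial aside, not part of the patch: the keyword list is returned as back-to-back NUL-terminated strings with the count reported separately, so a caller walks it by stepping over each string plus its terminator. A minimal sketch with a hard-coded example buffer:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* example layout as produced in 'buf', with 'elements' entries */
    const char list[] = "Name\0PN\0EC\0MN\0SN\0";
    int elements = 5;
    const char *p = list;
    int i;

    for (i = 0; i < elements; i++) {
        printf("keyword %d: %s\n", i, p);
        p += strlen(p) + 1;         /* step over the keyword and its NUL */
    }
    return 0;
}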
+ * + * returns 0: success + * 2: buffer overfull, one or more keywords are missing + * 6: fatal VPD error + * + * example values after returning: + * + * buf = "Name\0PN\0EC\0MN\0SN\0CP\0VF\0VL\0YA\0" + * *len = 30 + * *elements = 9 + */ +int VpdKeys( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +char *buf, /* buffer where to copy the keywords */ +int *len, /* buffer length */ +int *elements) /* number of keywords returned */ +{ + char *v; + int n; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("list VPD keys .. ")); + *elements = 0; + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { + *len = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Init Error, terminated\n")); + return(6); + } + } + + if ((signed)strlen(VPD_NAME) + 1 <= *len) { + v = pAC->vpd.vpd_buf; + strcpy(buf,VPD_NAME); + n = strlen(VPD_NAME) + 1; + buf += n; + *elements = 1; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, + ("'%c%c' ",v[0],v[1])); + } + else { + *len = 0; + SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, + ("buffer overflow\n")); + return(2); + } + + v += 3 + VPD_GET_RES_LEN(v) + 3; + for (;; ) { + /* exit when reaching the "RW" Tag */ + if (SK_MEMCMP(VPD_RW,v,2) == 0) { + break; + } + + if (SK_MEMCMP(VPD_RV,v,2) == 0) { + v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */ + continue; + } + + if (n+3 <= *len) { + SK_MEMCPY(buf,v,2); + buf += 2; + *buf++ = '\0'; + n += 3; + v += 3 + VPD_GET_VPD_LEN(v); + *elements += 1; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, + ("'%c%c' ",v[0],v[1])); + } + else { + *len = n; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("buffer overflow\n")); + return(2); + } + } + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("\n")); + *len = n; + return(0); +} + + +/* + * Read the contents of the VPD EEPROM and copy it to the + * VPD buffer if not already done. Search for the VPD keyword + * 'key' and copy its value to 'buf'. Add a terminating '\0'. + * If the value does not fit into the buffer cut it after + * 'len' - 1 bytes. + * + * returns 0: success + * 1: keyword not found + * 2: value string was cut + * 3: VPD transfer timeout + * 6: fatal VPD error + */ +int VpdRead( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +const char *key, /* keyword to read (e.g. "MN") */ +char *buf, /* buffer where to copy the keyword value */ +int *len) /* buffer length */ +{ + SK_VPD_PARA *p, vp; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("VPD read %s .. 
", key)); + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { + *len = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return(6); + } + } + + if ((p = vpd_find_para(pAC, key, &vp)) != NULL) { + if (p->p_len > (*(unsigned *)len)-1) { + p->p_len = *len - 1; + } + SK_MEMCPY(buf, p->p_val, p->p_len); + buf[p->p_len] = '\0'; + *len = p->p_len; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, + ("%c%c%c%c.., len = %d\n", + buf[0],buf[1],buf[2],buf[3],*len)); + } + else { + *len = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("not found\n")); + return(1); + } + return(0); +} + + +/* + * Check whether a given key may be written + * + * returns + * SK_TRUE Yes it may be written + * SK_FALSE No it may be written + */ +SK_BOOL VpdMayWrite( +char *key) /* keyword to write (allowed values "Yx", "Vx") */ +{ + if ((*key != 'Y' && *key != 'V') || + key[1] < '0' || key[1] > 'Z' || + (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) { + + return(SK_FALSE); + } + return(SK_TRUE); +} + +/* + * Read the contents of the VPD EEPROM and copy it to the VPD + * buffer if not already done. Insert/overwrite the keyword 'key' + * in the VPD buffer. Cut the keyword value if it does not fit + * into the VPD read / write area. + * + * returns 0: success + * 2: value string was cut + * 3: VPD transfer timeout + * 4: VPD full, keyword was not written + * 5: keyword cannot be written + * 6: fatal VPD error + */ +int VpdWrite( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +const char *key, /* keyword to write (allowed values "Yx", "Vx") */ +const char *buf) /* buffer where the keyword value can be read from */ +{ + int len; /* length of the keyword to write */ + int rtv; /* return code */ + int rtv2; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, + ("VPD write %s = %s\n",key,buf)); + + if ((*key != 'Y' && *key != 'V') || + key[1] < '0' || key[1] > 'Z' || + (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) { + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("illegal key tag, keyword not written\n")); + return(5); + } + + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return(6); + } + } + + rtv = 0; + len = strlen(buf); + if (len > VPD_MAX_LEN) { + /* cut it */ + len = VPD_MAX_LEN; + rtv = 2; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("keyword too long, cut after %d bytes\n",VPD_MAX_LEN)); + } + if ((rtv2 = VpdSetupPara(pAC, key, buf, len, VPD_RW_KEY, OWR_KEY)) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD write error\n")); + return(rtv2); + } + + return(rtv); +} + +/* + * Read the contents of the VPD EEPROM and copy it to the + * VPD buffer if not already done. Remove the VPD keyword + * 'key' from the VPD buffer. + * Only the keywords in the read/write area can be deleted. + * Keywords in the read only area cannot be deleted. + * + * returns 0: success, keyword was removed + * 1: keyword not found + * 5: keyword cannot be deleted + * 6: fatal VPD error + */ +int VpdDelete( +SK_AC *pAC, /* common data base */ +SK_IOC IoC, /* IO Context */ +char *key) /* keyword to read (e.g. 
"MN") */ +{ + SK_VPD_PARA *p, vp; + char *etp; + int vpd_size; + + vpd_size = pAC->vpd.vpd_size; + + SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("VPD delete key %s\n",key)); + if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) { + if (VpdInit(pAC, IoC) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return(6); + } + } + + if ((p = vpd_find_para(pAC, key, &vp)) != NULL) { + if (p->p_val < pAC->vpd.vpd_buf + vpd_size/2) { + /* try to delete read only keyword */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("cannot delete RO keyword\n")); + return(5); + } + + etp = pAC->vpd.vpd_buf + (vpd_size-pAC->vpd.v.vpd_free_rw-1-3); + + vpd_move_para(vp.p_val+vp.p_len, etp+2, + - ((int)(vp.p_len + 3))); + if (vpd_mod_endtag(pAC, etp - vp.p_len - 3)) { + pAC->vpd.v.vpd_status &= ~VPD_VALID; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD encoding error\n")); + return(6); + } + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("keyword not found\n")); + return(1); + } + + return(0); +} + +/* + * If the VPD buffer contains valid data write the VPD + * read/write area back to the VPD EEPROM. + * + * returns 0: success + * 3: VPD transfer timeout + */ +int VpdUpdate( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC) /* IO Context */ +{ + int vpd_size; + + vpd_size = pAC->vpd.vpd_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("VPD update .. ")); + if ((pAC->vpd.v.vpd_status & VPD_VALID) != 0) { + if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf + vpd_size/2, + vpd_size/2, vpd_size/2, VPD_WRITE) != vpd_size/2) { + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("transfer timed out\n")); + return(3); + } + } + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("done\n")); + return(0); +} + diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c new file mode 100644 index 000000000000..b4e75022a657 --- /dev/null +++ b/drivers/net/sk98lin/skxmac2.c @@ -0,0 +1,4160 @@ +/****************************************************************************** + * + * Name: skxmac2.c + * Project: Gigabit Ethernet Adapters, Common Modules + * Version: $Revision: 1.102 $ + * Date: $Date: 2003/10/02 16:53:58 $ + * Purpose: Contains functions to initialize the MACs and PHYs + * + ******************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 1998-2002 SysKonnect. + * (C)Copyright 2002-2003 Marvell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
+ * + ******************************************************************************/ + +#include "h/skdrv1st.h" +#include "h/skdrv2nd.h" + +/* typedefs *******************************************************************/ + +/* BCOM PHY magic pattern list */ +typedef struct s_PhyHack { + int PhyReg; /* Phy register */ + SK_U16 PhyVal; /* Value to write */ +} BCOM_HACK; + +/* local variables ************************************************************/ + +#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM)))) +static const char SysKonnectFileId[] = + "@(#) $Id: skxmac2.c,v 1.102 2003/10/02 16:53:58 rschmidt Exp $ (C) Marvell."; +#endif + +#ifdef GENESIS +static BCOM_HACK BcomRegA1Hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, + { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, + { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, + { 0, 0 } +}; +static BCOM_HACK BcomRegC0Hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, + { 0x15, 0x0A04 }, { 0x18, 0x0420 }, + { 0, 0 } +}; +#endif + +/* function prototypes ********************************************************/ +#ifdef GENESIS +static void SkXmInitPhyXmac(SK_AC*, SK_IOC, int, SK_BOOL); +static void SkXmInitPhyBcom(SK_AC*, SK_IOC, int, SK_BOOL); +static int SkXmAutoNegDoneXmac(SK_AC*, SK_IOC, int); +static int SkXmAutoNegDoneBcom(SK_AC*, SK_IOC, int); +#endif /* GENESIS */ +#ifdef YUKON +static void SkGmInitPhyMarv(SK_AC*, SK_IOC, int, SK_BOOL); +static int SkGmAutoNegDoneMarv(SK_AC*, SK_IOC, int); +#endif /* YUKON */ +#ifdef OTHER_PHY +static void SkXmInitPhyLone(SK_AC*, SK_IOC, int, SK_BOOL); +static void SkXmInitPhyNat (SK_AC*, SK_IOC, int, SK_BOOL); +static int SkXmAutoNegDoneLone(SK_AC*, SK_IOC, int); +static int SkXmAutoNegDoneNat (SK_AC*, SK_IOC, int); +#endif /* OTHER_PHY */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmPhyRead() - Read from XMAC PHY register + * + * Description: reads a 16-bit word from XMAC PHY or ext. PHY + * + * Returns: + * nothing + */ +void SkXmPhyRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 SK_FAR *pVal) /* Pointer to Value */ +{ + SK_U16 Mmu; + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + /* write the PHY register's address */ + XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr); + + /* get the PHY register's value */ + XM_IN16(IoC, Port, XM_PHY_DATA, pVal); + + if (pPrt->PhyType != SK_PHY_XMAC) { + do { + XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu); + /* wait until 'Ready' is set */ + } while ((Mmu & XM_MMU_PHY_RDY) == 0); + + /* get the PHY register's value */ + XM_IN16(IoC, Port, XM_PHY_DATA, pVal); + } +} /* SkXmPhyRead */ + + +/****************************************************************************** + * + * SkXmPhyWrite() - Write to XMAC PHY register + * + * Description: writes a 16-bit word to XMAC PHY or ext. 
PHY + * + * Returns: + * nothing + */ +void SkXmPhyWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 Val) /* Value */ +{ + SK_U16 Mmu; + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PhyType != SK_PHY_XMAC) { + do { + XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu); + /* wait until 'Busy' is cleared */ + } while ((Mmu & XM_MMU_PHY_BUSY) != 0); + } + + /* write the PHY register's address */ + XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr); + + /* write the PHY register's value */ + XM_OUT16(IoC, Port, XM_PHY_DATA, Val); + + if (pPrt->PhyType != SK_PHY_XMAC) { + do { + XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu); + /* wait until 'Busy' is cleared */ + } while ((Mmu & XM_MMU_PHY_BUSY) != 0); + } +} /* SkXmPhyWrite */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGmPhyRead() - Read from GPHY register + * + * Description: reads a 16-bit word from GPHY through MDIO + * + * Returns: + * nothing + */ +void SkGmPhyRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 SK_FAR *pVal) /* Pointer to Value */ +{ + SK_U16 Ctrl; + SK_GEPORT *pPrt; +#ifdef VCPU + u_long SimCyle; + u_long SimLowTime; + + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "SkGmPhyRead(%u), SimCyle=%u, SimLowTime=%u\n", + PhyReg, SimCyle, SimLowTime); +#endif /* VCPU */ + + pPrt = &pAC->GIni.GP[Port]; + + /* set PHY-Register offset and 'Read' OpCode (= 1) */ + *pVal = (SK_U16)(GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | + GM_SMI_CT_REG_AD(PhyReg) | GM_SMI_CT_OP_RD); + + GM_OUT16(IoC, Port, GM_SMI_CTRL, *pVal); + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* additional check for MDC/MDIO activity */ + if ((Ctrl & GM_SMI_CT_BUSY) == 0) { + *pVal = 0; + return; + } + + *pVal |= GM_SMI_CT_BUSY; + + do { +#ifdef VCPU + VCPUwaitTime(1000); +#endif /* VCPU */ + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* wait until 'ReadValid' is set */ + } while (Ctrl == *pVal); + + /* get the PHY register's value */ + GM_IN16(IoC, Port, GM_SMI_DATA, pVal); + +#ifdef VCPU + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n", + SimCyle, SimLowTime); +#endif /* VCPU */ + +} /* SkGmPhyRead */ + + +/****************************************************************************** + * + * SkGmPhyWrite() - Write to GPHY register + * + * Description: writes a 16-bit word to GPHY through MDIO + * + * Returns: + * nothing + */ +void SkGmPhyWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 Val) /* Value */ +{ + SK_U16 Ctrl; + SK_GEPORT *pPrt; +#ifdef VCPU + SK_U32 DWord; + u_long SimCyle; + u_long SimLowTime; + + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "SkGmPhyWrite(Reg=%u, Val=0x%04x), SimCyle=%u, SimLowTime=%u\n", + PhyReg, Val, SimCyle, SimLowTime); +#endif /* VCPU */ + + pPrt = &pAC->GIni.GP[Port]; + + /* write the PHY register's value */ + GM_OUT16(IoC, Port, GM_SMI_DATA, Val); + + /* set PHY-Register offset and 'Write' OpCode (= 0) */ + Val = GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | GM_SMI_CT_REG_AD(PhyReg); + + GM_OUT16(IoC, Port, GM_SMI_CTRL, Val); + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* additional check for MDC/MDIO activity */ + if ((Ctrl & GM_SMI_CT_BUSY) == 0) { 
+ return; + } + + Val |= GM_SMI_CT_BUSY; + + do { +#ifdef VCPU + /* read Timer value */ + SK_IN32(IoC, B2_TI_VAL, &DWord); + + VCPUwaitTime(1000); +#endif /* VCPU */ + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* wait until 'Busy' is cleared */ + } while (Ctrl == Val); + +#ifdef VCPU + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n", + SimCyle, SimLowTime); +#endif /* VCPU */ + +} /* SkGmPhyWrite */ +#endif /* YUKON */ + + +#ifdef SK_DIAG +/****************************************************************************** + * + * SkGePhyRead() - Read from PHY register + * + * Description: calls a read PHY routine dep. on board type + * + * Returns: + * nothing + */ +void SkGePhyRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 *pVal) /* Pointer to Value */ +{ + void (*r_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 *pVal); + + if (pAC->GIni.GIGenesis) { + r_func = SkXmPhyRead; + } + else { + r_func = SkGmPhyRead; + } + + r_func(pAC, IoC, Port, PhyReg, pVal); +} /* SkGePhyRead */ + + +/****************************************************************************** + * + * SkGePhyWrite() - Write to PHY register + * + * Description: calls a write PHY routine dep. on board type + * + * Returns: + * nothing + */ +void SkGePhyWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 Val) /* Value */ +{ + void (*w_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 Val); + + if (pAC->GIni.GIGenesis) { + w_func = SkXmPhyWrite; + } + else { + w_func = SkGmPhyWrite; + } + + w_func(pAC, IoC, Port, PhyReg, Val); +} /* SkGePhyWrite */ +#endif /* SK_DIAG */ + + +/****************************************************************************** + * + * SkMacPromiscMode() - Enable / Disable Promiscuous Mode + * + * Description: + * enables / disables promiscuous mode by setting Mode Register (XMAC) or + * Receive Control Register (GMAC) dep. on board type + * + * Returns: + * nothing + */ +void SkMacPromiscMode( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ +#ifdef YUKON + SK_U16 RcReg; +#endif +#ifdef GENESIS + SK_U32 MdReg; +#endif + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + /* enable or disable promiscuous mode */ + if (Enable) { + MdReg |= XM_MD_ENA_PROM; + } + else { + MdReg &= ~XM_MD_ENA_PROM; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg); + + /* enable or disable unicast and multicast filtering */ + if (Enable) { + RcReg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + } + else { + RcReg |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + } + /* setup Receive Control Register */ + GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg); + } +#endif /* YUKON */ + +} /* SkMacPromiscMode*/ + + +/****************************************************************************** + * + * SkMacHashing() - Enable / Disable Hashing + * + * Description: + * enables / disables hashing by setting Mode Register (XMAC) or + * Receive Control Register (GMAC) dep. 
on board type + * + * Returns: + * nothing + */ +void SkMacHashing( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ +#ifdef YUKON + SK_U16 RcReg; +#endif +#ifdef GENESIS + SK_U32 MdReg; +#endif + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + /* enable or disable hashing */ + if (Enable) { + MdReg |= XM_MD_ENA_HASH; + } + else { + MdReg &= ~XM_MD_ENA_HASH; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg); + + /* enable or disable multicast filtering */ + if (Enable) { + RcReg |= GM_RXCR_MCF_ENA; + } + else { + RcReg &= ~GM_RXCR_MCF_ENA; + } + /* setup Receive Control Register */ + GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg); + } +#endif /* YUKON */ + +} /* SkMacHashing*/ + + +#ifdef SK_DIAG +/****************************************************************************** + * + * SkXmSetRxCmd() - Modify the value of the XMAC's Rx Command Register + * + * Description: + * The features + * - FCS stripping, SK_STRIP_FCS_ON/OFF + * - pad byte stripping, SK_STRIP_PAD_ON/OFF + * - don't set XMR_FS_ERR in status SK_LENERR_OK_ON/OFF + * for inrange length error frames + * - don't set XMR_FS_ERR in status SK_BIG_PK_OK_ON/OFF + * for frames > 1514 bytes + * - enable Rx of own packets SK_SELF_RX_ON/OFF + * + * for incoming packets may be enabled/disabled by this function. + * Additional modes may be added later. + * Multiple modes can be enabled/disabled at the same time. + * The new configuration is written to the Rx Command register immediately. + * + * Returns: + * nothing + */ +static void SkXmSetRxCmd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF, + SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */ +{ + SK_U16 OldRxCmd; + SK_U16 RxCmd; + + XM_IN16(IoC, Port, XM_RX_CMD, &OldRxCmd); + + RxCmd = OldRxCmd; + + switch (Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) { + case SK_STRIP_FCS_ON: + RxCmd |= XM_RX_STRIP_FCS; + break; + case SK_STRIP_FCS_OFF: + RxCmd &= ~XM_RX_STRIP_FCS; + break; + } + + switch (Mode & (SK_STRIP_PAD_ON | SK_STRIP_PAD_OFF)) { + case SK_STRIP_PAD_ON: + RxCmd |= XM_RX_STRIP_PAD; + break; + case SK_STRIP_PAD_OFF: + RxCmd &= ~XM_RX_STRIP_PAD; + break; + } + + switch (Mode & (SK_LENERR_OK_ON | SK_LENERR_OK_OFF)) { + case SK_LENERR_OK_ON: + RxCmd |= XM_RX_LENERR_OK; + break; + case SK_LENERR_OK_OFF: + RxCmd &= ~XM_RX_LENERR_OK; + break; + } + + switch (Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) { + case SK_BIG_PK_OK_ON: + RxCmd |= XM_RX_BIG_PK_OK; + break; + case SK_BIG_PK_OK_OFF: + RxCmd &= ~XM_RX_BIG_PK_OK; + break; + } + + switch (Mode & (SK_SELF_RX_ON | SK_SELF_RX_OFF)) { + case SK_SELF_RX_ON: + RxCmd |= XM_RX_SELF_RX; + break; + case SK_SELF_RX_OFF: + RxCmd &= ~XM_RX_SELF_RX; + break; + } + + /* Write the new mode to the Rx command register if required */ + if (OldRxCmd != RxCmd) { + XM_OUT16(IoC, Port, XM_RX_CMD, RxCmd); + } +} /* SkXmSetRxCmd */ + + +/****************************************************************************** + * + * SkGmSetRxCmd() - Modify the value of the GMAC's Rx Control Register + * + * Description: + * The features + * - FCS (CRC) stripping, SK_STRIP_FCS_ON/OFF + * - don't set GMR_FS_LONG_ERR SK_BIG_PK_OK_ON/OFF + * for frames > 1514 bytes + * - enable Rx of own 
packets SK_SELF_RX_ON/OFF + * + * for incoming packets may be enabled/disabled by this function. + * Additional modes may be added later. + * Multiple modes can be enabled/disabled at the same time. + * The new configuration is written to the Rx Command register immediately. + * + * Returns: + * nothing + */ +static void SkGmSetRxCmd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF, + SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */ +{ + SK_U16 OldRxCmd; + SK_U16 RxCmd; + + if ((Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) != 0) { + + GM_IN16(IoC, Port, GM_RX_CTRL, &OldRxCmd); + + RxCmd = OldRxCmd; + + if ((Mode & SK_STRIP_FCS_ON) != 0) { + RxCmd |= GM_RXCR_CRC_DIS; + } + else { + RxCmd &= ~GM_RXCR_CRC_DIS; + } + /* Write the new mode to the Rx control register if required */ + if (OldRxCmd != RxCmd) { + GM_OUT16(IoC, Port, GM_RX_CTRL, RxCmd); + } + } + + if ((Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) != 0) { + + GM_IN16(IoC, Port, GM_SERIAL_MODE, &OldRxCmd); + + RxCmd = OldRxCmd; + + if ((Mode & SK_BIG_PK_OK_ON) != 0) { + RxCmd |= GM_SMOD_JUMBO_ENA; + } + else { + RxCmd &= ~GM_SMOD_JUMBO_ENA; + } + /* Write the new mode to the Rx control register if required */ + if (OldRxCmd != RxCmd) { + GM_OUT16(IoC, Port, GM_SERIAL_MODE, RxCmd); + } + } +} /* SkGmSetRxCmd */ + + +/****************************************************************************** + * + * SkMacSetRxCmd() - Modify the value of the MAC's Rx Control Register + * + * Description: modifies the MAC's Rx Control reg. dep. on board type + * + * Returns: + * nothing + */ +void SkMacSetRxCmd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Mode) /* Rx Mode */ +{ + if (pAC->GIni.GIGenesis) { + + SkXmSetRxCmd(pAC, IoC, Port, Mode); + } + else { + + SkGmSetRxCmd(pAC, IoC, Port, Mode); + } + +} /* SkMacSetRxCmd */ + + +/****************************************************************************** + * + * SkMacCrcGener() - Enable / Disable CRC Generation + * + * Description: enables / disables CRC generation dep. on board type + * + * Returns: + * nothing + */ +void SkMacCrcGener( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ + SK_U16 Word; + + if (pAC->GIni.GIGenesis) { + + XM_IN16(IoC, Port, XM_TX_CMD, &Word); + + if (Enable) { + Word &= ~XM_TX_NO_CRC; + } + else { + Word |= XM_TX_NO_CRC; + } + /* setup Tx Command Register */ + XM_OUT16(IoC, Port, XM_TX_CMD, Word); + } + else { + + GM_IN16(IoC, Port, GM_TX_CTRL, &Word); + + if (Enable) { + Word &= ~GM_TXCR_CRC_DIS; + } + else { + Word |= GM_TXCR_CRC_DIS; + } + /* setup Tx Control Register */ + GM_OUT16(IoC, Port, GM_TX_CTRL, Word); + } + +} /* SkMacCrcGener*/ + +#endif /* SK_DIAG */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmClrExactAddr() - Clear Exact Match Address Registers + * + * Description: + * All Exact Match Address registers of the XMAC 'Port' will be + * cleared starting with 'StartNum' up to (and including) the + * Exact Match address number of 'StopNum'. 
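Editorial aside, not part of the patch: the ON/OFF flag pairs used by SkXmSetRxCmd()/SkGmSetRxCmd() let a single 'Mode' word request changes for some features while leaving the others untouched, because a feature whose ON and OFF bits are both clear falls through its switch without modifying the register. A standalone demo of the same pattern; all names and bit values are invented for illustration:

#include <stdio.h>

#define STRIP_FCS_ON   0x01
#define STRIP_FCS_OFF  0x02
#define BIG_PK_OK_ON   0x04
#define BIG_PK_OK_OFF  0x08

#define REG_STRIP_FCS  0x0100      /* pretend hardware register bits */
#define REG_BIG_PK_OK  0x0200

static unsigned apply_mode(unsigned reg, int mode)
{
    switch (mode & (STRIP_FCS_ON | STRIP_FCS_OFF)) {
    case STRIP_FCS_ON:   reg |= REG_STRIP_FCS;   break;
    case STRIP_FCS_OFF:  reg &= ~REG_STRIP_FCS;  break;
    }                               /* neither bit set: feature untouched */

    switch (mode & (BIG_PK_OK_ON | BIG_PK_OK_OFF)) {
    case BIG_PK_OK_ON:   reg |= REG_BIG_PK_OK;   break;
    case BIG_PK_OK_OFF:  reg &= ~REG_BIG_PK_OK;  break;
    }
    return reg;
}

int main(void)
{
    unsigned reg = REG_BIG_PK_OK;

    /* enable FCS stripping, leave the jumbo setting alone */
    reg = apply_mode(reg, STRIP_FCS_ON);
    printf("reg = 0x%04x\n", reg);  /* prints 0x0300 */
    return 0;
}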
+ * + * Returns: + * nothing + */ +void SkXmClrExactAddr( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int StartNum, /* Begin with this Address Register Index (0..15) */ +int StopNum) /* Stop after finished with this Register Idx (0..15) */ +{ + int i; + SK_U16 ZeroAddr[3] = {0x0000, 0x0000, 0x0000}; + + if ((unsigned)StartNum > 15 || (unsigned)StopNum > 15 || + StartNum > StopNum) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E001, SKERR_HWI_E001MSG); + return; + } + + for (i = StartNum; i <= StopNum; i++) { + XM_OUTADDR(IoC, Port, XM_EXM(i), &ZeroAddr[0]); + } +} /* SkXmClrExactAddr */ +#endif /* GENESIS */ + + +/****************************************************************************** + * + * SkMacFlushTxFifo() - Flush the MAC's transmit FIFO + * + * Description: + * Flush the transmit FIFO of the MAC specified by the index 'Port' + * + * Returns: + * nothing + */ +void SkMacFlushTxFifo( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ +#ifdef GENESIS + SK_U32 MdReg; + + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FTF); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* no way to flush the FIFO we have to issue a reset */ + /* TBD */ + } +#endif /* YUKON */ + +} /* SkMacFlushTxFifo */ + + +/****************************************************************************** + * + * SkMacFlushRxFifo() - Flush the MAC's receive FIFO + * + * Description: + * Flush the receive FIFO of the MAC specified by the index 'Port' + * + * Returns: + * nothing + */ +static void SkMacFlushRxFifo( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ +#ifdef GENESIS + SK_U32 MdReg; + + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FRF); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* no way to flush the FIFO we have to issue a reset */ + /* TBD */ + } +#endif /* YUKON */ + +} /* SkMacFlushRxFifo */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmSoftRst() - Do a XMAC software reset + * + * Description: + * The PHY registers should not be destroyed during this + * kind of software reset. Therefore the XMAC Software Reset + * (XM_GP_RES_MAC bit in XM_GP_PORT) must not be used! + * + * The software reset is done by + * - disabling the Rx and Tx state machine, + * - resetting the statistics module, + * - clear all other significant XMAC Mode, + * Command, and Control Registers + * - clearing the Hash Register and the + * Exact Match Address registers, and + * - flushing the XMAC's Rx and Tx FIFOs. + * + * Note: + * Another requirement when stopping the XMAC is to + * avoid sending corrupted frames on the network. + * Disabling the Tx state machine will NOT interrupt + * the currently transmitted frame. But we must take care + * that the Tx FIFO is cleared AFTER the current frame + * is complete sent to the network. + * + * It takes about 12ns to send a frame with 1538 bytes. + * One PCI clock goes at least 15ns (66MHz). Therefore + * after reading XM_GP_PORT back, we are sure that the + * transmitter is disabled AND idle. And this means + * we may flush the transmit FIFO now. 
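Editorial aside, not part of the patch: the (unsigned) casts in SkXmClrExactAddr() reject both negative and too-large indices with one comparison each, since a negative int converted to unsigned becomes a very large value. A tiny standalone illustration of that idiom:

#include <stdio.h>

static int index_ok(int start, int stop)
{
    return !((unsigned)start > 15 || (unsigned)stop > 15 || start > stop);
}

int main(void)
{
    printf("%d %d %d\n",
           index_ok(0, 15),    /* 1: full range is fine     */
           index_ok(-1, 4),    /* 0: negative start caught  */
           index_ok(3, 16));   /* 0: stop out of range      */
    return 0;
}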
+ * + * Returns: + * nothing + */ +static void SkXmSoftRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 ZeroAddr[4] = {0x0000, 0x0000, 0x0000, 0x0000}; + + /* reset the statistics module */ + XM_OUT32(IoC, Port, XM_GP_PORT, XM_GP_RES_STAT); + + /* disable all XMAC IRQs */ + XM_OUT16(IoC, Port, XM_IMSK, 0xffff); + + XM_OUT32(IoC, Port, XM_MODE, 0); /* clear Mode Reg */ + + XM_OUT16(IoC, Port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + XM_OUT16(IoC, Port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + + /* disable all PHY IRQs */ + switch (pAC->GIni.GP[Port].PhyType) { + case SK_PHY_BCOM: + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0); + break; + case SK_PHY_NAT: + /* todo: National + SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */ + break; +#endif /* OTHER_PHY */ + } + + /* clear the Hash Register */ + XM_OUTHASH(IoC, Port, XM_HSM, &ZeroAddr); + + /* clear the Exact Match Address registers */ + SkXmClrExactAddr(pAC, IoC, Port, 0, 15); + + /* clear the Source Check Address registers */ + XM_OUTHASH(IoC, Port, XM_SRC_CHK, &ZeroAddr); + +} /* SkXmSoftRst */ + + +/****************************************************************************** + * + * SkXmHardRst() - Do a XMAC hardware reset + * + * Description: + * The XMAC of the specified 'Port' and all connected devices + * (PHY and SERDES) will receive a reset signal on its *Reset pins. + * External PHYs must be reset by clearing a bit in the GPIO register + * (Timing requirements: Broadcom: 400ns, Level One: none, National: 80ns). + * + * ATTENTION: + * It is absolutely necessary to reset the SW_RST Bit first + * before calling this function. + * + * Returns: + * nothing + */ +static void SkXmHardRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U32 Reg; + int i; + int TOut; + SK_U16 Word; + + for (i = 0; i < 4; i++) { + /* TX_MFF_CTRL1 has 32 bits, but only the lowest 16 bits are used */ + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + TOut = 0; + do { + if (TOut++ > 10000) { + /* + * Adapter seems to be in RESET state. + * Registers cannot be written. 
+ */ + return; + } + + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + + SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &Word); + + } while ((Word & MFF_SET_MAC_RST) == 0); + } + + /* For external PHYs there must be special handling */ + if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) { + + SK_IN32(IoC, B2_GP_IO, &Reg); + + if (Port == 0) { + Reg |= GP_DIR_0; /* set to output */ + Reg &= ~GP_IO_0; /* set PHY reset (active low) */ + } + else { + Reg |= GP_DIR_2; /* set to output */ + Reg &= ~GP_IO_2; /* set PHY reset (active low) */ + } + /* reset external PHY */ + SK_OUT32(IoC, B2_GP_IO, Reg); + + /* short delay */ + SK_IN32(IoC, B2_GP_IO, &Reg); + } +} /* SkXmHardRst */ + + +/****************************************************************************** + * + * SkXmClearRst() - Release the PHY & XMAC reset + * + * Description: + * + * Returns: + * nothing + */ +static void SkXmClearRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U32 DWord; + + /* clear HW reset */ + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) { + + SK_IN32(IoC, B2_GP_IO, &DWord); + + if (Port == 0) { + DWord |= (GP_DIR_0 | GP_IO_0); /* set to output */ + } + else { + DWord |= (GP_DIR_2 | GP_IO_2); /* set to output */ + } + /* Clear PHY reset */ + SK_OUT32(IoC, B2_GP_IO, DWord); + + /* Enable GMII interface */ + XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_GMII_MD); + } +} /* SkXmClearRst */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGmSoftRst() - Do a GMAC software reset + * + * Description: + * The GPHY registers should not be destroyed during this + * kind of software reset. 
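Editorial aside, not part of the patch: the B2_GP_IO handling in SkXmHardRst()/SkXmClearRst() is a read-modify-write of a GPIO register: drive the pin as an output and pull it low to assert the active-low PHY reset, then drive it high again to release it. A standalone sketch of that sequence; the register variable and bit values below are invented for the demo:

#include <stdio.h>

#define DEMO_GP_DIR_0  0x00010000u  /* pin 0 direction: 1 = output */
#define DEMO_GP_IO_0   0x00000001u  /* pin 0 level */

static unsigned gpio;               /* stands in for the B2_GP_IO register */

static void phy_reset_assert(void)
{
    unsigned reg = gpio;            /* read  (SK_IN32)  */
    reg |= DEMO_GP_DIR_0;           /* drive the pin */
    reg &= ~DEMO_GP_IO_0;           /* low = reset asserted (active low) */
    gpio = reg;                     /* write (SK_OUT32) */
}

static void phy_reset_release(void)
{
    unsigned reg = gpio;
    reg |= DEMO_GP_DIR_0 | DEMO_GP_IO_0;  /* keep driving, level high */
    gpio = reg;
}

int main(void)
{
    phy_reset_assert();
    printf("asserted: 0x%08x\n", gpio);
    phy_reset_release();
    printf("released: 0x%08x\n", gpio);
    return 0;
}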
+ * + * Returns: + * nothing + */ +static void SkGmSoftRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 EmptyHash[4] = {0x0000, 0x0000, 0x0000, 0x0000}; + SK_U16 RxCtrl; + + /* reset the statistics module */ + + /* disable all GMAC IRQs */ + SK_OUT8(IoC, GMAC_IRQ_MSK, 0); + + /* disable all PHY IRQs */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0); + + /* clear the Hash Register */ + GM_OUTHASH(IoC, Port, GM_MC_ADDR_H1, EmptyHash); + + /* Enable Unicast and Multicast filtering */ + GM_IN16(IoC, Port, GM_RX_CTRL, &RxCtrl); + + GM_OUT16(IoC, Port, GM_RX_CTRL, + (SK_U16)(RxCtrl | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)); + +} /* SkGmSoftRst */ + + +/****************************************************************************** + * + * SkGmHardRst() - Do a GMAC hardware reset + * + * Description: + * + * Returns: + * nothing + */ +static void SkGmHardRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U32 DWord; + + /* WA code for COMA mode */ + if (pAC->GIni.GIYukonLite && + pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { + + SK_IN32(IoC, B2_GP_IO, &DWord); + + DWord |= (GP_DIR_9 | GP_IO_9); + + /* set PHY reset */ + SK_OUT32(IoC, B2_GP_IO, DWord); + } + + /* set GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), GPC_RST_SET); + + /* set GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); + +} /* SkGmHardRst */ + + +/****************************************************************************** + * + * SkGmClearRst() - Release the GPHY & GMAC reset + * + * Description: + * + * Returns: + * nothing + */ +static void SkGmClearRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U32 DWord; + +#ifdef XXX + /* clear GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_CLR); + + /* set GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); +#endif /* XXX */ + + /* WA code for COMA mode */ + if (pAC->GIni.GIYukonLite && + pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) { + + SK_IN32(IoC, B2_GP_IO, &DWord); + + DWord |= GP_DIR_9; /* set to output */ + DWord &= ~GP_IO_9; /* clear PHY reset (active high) */ + + /* clear PHY reset */ + SK_OUT32(IoC, B2_GP_IO, DWord); + } + + /* set HWCFG_MODE */ + DWord = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE | + (pAC->GIni.GICopperType ? GPC_HWCFG_GMII_COP : + GPC_HWCFG_GMII_FIB); + + /* set GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_SET); + + /* release GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_CLR); + +#ifdef VCPU + VCpuWait(9000); +#endif /* VCPU */ + + /* clear GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + +#ifdef VCPU + VCpuWait(2000); + + SK_IN32(IoC, MR_ADDR(Port, GPHY_CTRL), &DWord); + + SK_IN32(IoC, B0_ISRC, &DWord); +#endif /* VCPU */ + +} /* SkGmClearRst */ +#endif /* YUKON */ + + +/****************************************************************************** + * + * SkMacSoftRst() - Do a MAC software reset + * + * Description: calls a MAC software reset routine dep. 
on board type + * + * Returns: + * nothing + */ +void SkMacSoftRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + /* disable receiver and transmitter */ + SkMacRxTxDisable(pAC, IoC, Port); + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + SkXmSoftRst(pAC, IoC, Port); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + SkGmSoftRst(pAC, IoC, Port); + } +#endif /* YUKON */ + + /* flush the MAC's Rx and Tx FIFOs */ + SkMacFlushTxFifo(pAC, IoC, Port); + + SkMacFlushRxFifo(pAC, IoC, Port); + + pPrt->PState = SK_PRT_STOP; + +} /* SkMacSoftRst */ + + +/****************************************************************************** + * + * SkMacHardRst() - Do a MAC hardware reset + * + * Description: calls a MAC hardware reset routine dep. on board type + * + * Returns: + * nothing + */ +void SkMacHardRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + SkXmHardRst(pAC, IoC, Port); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + SkGmHardRst(pAC, IoC, Port); + } +#endif /* YUKON */ + + pAC->GIni.GP[Port].PState = SK_PRT_RESET; + +} /* SkMacHardRst */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmInitMac() - Initialize the XMAC II + * + * Description: + * Initialize the XMAC of the specified port. + * The XMAC must be reset or stopped before calling this function. + * + * Note: + * The XMAC's Rx and Tx state machine is still disabled when returning. + * + * Returns: + * nothing + */ +void SkXmInitMac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + int i; + SK_U16 SWord; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PState == SK_PRT_STOP) { + /* Port State: SK_PRT_STOP */ + /* Verify that the reset bit is cleared */ + SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &SWord); + + if ((SWord & MFF_SET_MAC_RST) != 0) { + /* PState does not match HW state */ + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG); + /* Correct it */ + pPrt->PState = SK_PRT_RESET; + } + } + + if (pPrt->PState == SK_PRT_RESET) { + + SkXmClearRst(pAC, IoC, Port); + + if (pPrt->PhyType != SK_PHY_XMAC) { + /* read Id from external PHY (all have the same address) */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_ID1, &pPrt->PhyId1); + + /* + * Optimize MDIO transfer by suppressing preamble. + * Must be done AFTER first access to BCOM chip. + */ + XM_IN16(IoC, Port, XM_MMU_CMD, &SWord); + + XM_OUT16(IoC, Port, XM_MMU_CMD, SWord | XM_MMU_NO_PRE); + + if (pPrt->PhyId1 == PHY_BCOM_ID1_C0) { + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + i = 0; + while (BcomRegC0Hack[i].PhyReg != 0) { + SkXmPhyWrite(pAC, IoC, Port, BcomRegC0Hack[i].PhyReg, + BcomRegC0Hack[i].PhyVal); + i++; + } + } + else if (pPrt->PhyId1 == PHY_BCOM_ID1_A1) { + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + i = 0; + while (BcomRegA1Hack[i].PhyReg != 0) { + SkXmPhyWrite(pAC, IoC, Port, BcomRegA1Hack[i].PhyReg, + BcomRegA1Hack[i].PhyVal); + i++; + } + } + + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. 
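/*
 * Editorial sketch, not part of the patch: the BcomRegA1Hack/BcomRegC0Hack
 * loops above use a table of {register, value} pairs terminated by {0, 0}.
 * The same table-driven pattern reduced to a standalone demo; the write
 * routine here just prints instead of touching a PHY.
 */
#include <stdio.h>

struct reg_fixup {
    int reg;
    unsigned short val;
};

static const struct reg_fixup c0_fixups[] = {
    { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
    { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
    { 0, 0 }                            /* sentinel ends the table */
};

static void phy_write(int reg, unsigned short val)
{
    printf("write 0x%02x <- 0x%04x\n", reg, val);
}

int main(void)
{
    int i = 0;

    while (c0_fixups[i].reg != 0) {     /* same loop shape as the driver */
        phy_write(c0_fixups[i].reg, c0_fixups[i].val);
        i++;
    }
    return 0;
}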
+ */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord); + + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, + (SK_U16)(SWord | PHY_B_AC_DIS_PM)); + + /* PHY LED initialization is done in SkGeXmitLED() */ + } + + /* Dummy read the Interrupt source register */ + XM_IN16(IoC, Port, XM_ISRC, &SWord); + + /* + * The auto-negotiation process starts immediately after + * clearing the reset. The auto-negotiation process should be + * started by the SIRQ, therefore stop it here immediately. + */ + SkMacInitPhy(pAC, IoC, Port, SK_FALSE); + +#ifdef TEST_ONLY + /* temp. code: enable signal detect */ + /* WARNING: do not override GMII setting above */ + XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_COM4SIG); +#endif + } + + /* + * configure the XMACs Station Address + * B2_MAC_2 = xx xx xx xx xx x1 is programmed to XMAC A + * B2_MAC_3 = xx xx xx xx xx x2 is programmed to XMAC B + */ + for (i = 0; i < 3; i++) { + /* + * The following 2 statements are together endianess + * independent. Remember this when changing. + */ + SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord); + + XM_OUT16(IoC, Port, (XM_SA + i * 2), SWord); + } + + /* Tx Inter Packet Gap (XM_TX_IPG): use default */ + /* Tx High Water Mark (XM_TX_HI_WM): use default */ + /* Tx Low Water Mark (XM_TX_LO_WM): use default */ + /* Host Request Threshold (XM_HT_THR): use default */ + /* Rx Request Threshold (XM_RX_THR): use default */ + /* Rx Low Water Mark (XM_RX_LO_WM): use default */ + + /* configure Rx High Water Mark (XM_RX_HI_WM) */ + XM_OUT16(IoC, Port, XM_RX_HI_WM, SK_XM_RX_HI_WM); + + /* Configure Tx Request Threshold */ + SWord = SK_XM_THR_SL; /* for single port */ + + if (pAC->GIni.GIMacsFound > 1) { + switch (pAC->GIni.GIPortUsage) { + case SK_RED_LINK: + SWord = SK_XM_THR_REDL; /* redundant link */ + break; + case SK_MUL_LINK: + SWord = SK_XM_THR_MULL; /* load balancing */ + break; + case SK_JUMBO_LINK: + SWord = SK_XM_THR_JUMBO; /* jumbo frames */ + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E014, SKERR_HWI_E014MSG); + break; + } + } + XM_OUT16(IoC, Port, XM_TX_THR, SWord); + + /* setup register defaults for the Tx Command Register */ + XM_OUT16(IoC, Port, XM_TX_CMD, XM_TX_AUTO_PAD); + + /* setup register defaults for the Rx Command Register */ + SWord = XM_RX_STRIP_FCS | XM_RX_LENERR_OK; + + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + SWord |= XM_RX_BIG_PK_OK; + } + + if (pPrt->PLinkMode == SK_LMODE_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + SWord |= XM_RX_DIS_CEXT; + } + + XM_OUT16(IoC, Port, XM_RX_CMD, SWord); + + /* + * setup register defaults for the Mode Register + * - Don't strip error frames to avoid Store & Forward + * on the Rx side. + * - Enable 'Check Station Address' bit + * - Enable 'Check Address Array' bit + */ + XM_OUT32(IoC, Port, XM_MODE, XM_DEF_MODE); + + /* + * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) + * - Enable all bits excepting 'Octets Rx OK Low CntOv' + * and 'Octets Rx OK Hi Cnt Ov'. + */ + XM_OUT32(IoC, Port, XM_RX_EV_MSK, XMR_DEF_MSK); + + /* + * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) + * - Enable all bits excepting 'Octets Tx OK Low CntOv' + * and 'Octets Tx OK Hi Cnt Ov'. + */ + XM_OUT32(IoC, Port, XM_TX_EV_MSK, XMT_DEF_MSK); + + /* + * Do NOT init XMAC interrupt mask here. + * All interrupts remain disable until link comes up! + */ + + /* + * Any additional configuration changes may be done now. 
+ * The last action is to enable the Rx and Tx state machine. + * This should be done after the auto-negotiation process + * has been completed successfully. + */ +} /* SkXmInitMac */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGmInitMac() - Initialize the GMAC + * + * Description: + * Initialize the GMAC of the specified port. + * The GMAC must be reset or stopped before calling this function. + * + * Note: + * The GMAC's Rx and Tx state machine is still disabled when returning. + * + * Returns: + * nothing + */ +void SkGmInitMac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + int i; + SK_U16 SWord; + SK_U32 DWord; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PState == SK_PRT_STOP) { + /* Port State: SK_PRT_STOP */ + /* Verify that the reset bit is cleared */ + SK_IN32(IoC, MR_ADDR(Port, GMAC_CTRL), &DWord); + + if ((DWord & GMC_RST_SET) != 0) { + /* PState does not match HW state */ + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG); + /* Correct it */ + pPrt->PState = SK_PRT_RESET; + } + } + + if (pPrt->PState == SK_PRT_RESET) { + + SkGmHardRst(pAC, IoC, Port); + + SkGmClearRst(pAC, IoC, Port); + + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + /* Auto-negotiation disabled */ + + /* get General Purpose Control */ + GM_IN16(IoC, Port, GM_GP_CTRL, &SWord); + + /* disable auto-update for speed, duplex and flow-control */ + SWord |= GM_GPCR_AU_ALL_DIS; + + /* setup General Purpose Control Register */ + GM_OUT16(IoC, Port, GM_GP_CTRL, SWord); + + SWord = GM_GPCR_AU_ALL_DIS; + } + else { + SWord = 0; + } + + /* speed settings */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + case SK_LSPEED_1000MBPS: + SWord |= GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100; + break; + case SK_LSPEED_100MBPS: + SWord |= GM_GPCR_SPEED_100; + break; + case SK_LSPEED_10MBPS: + break; + } + + /* duplex settings */ + if (pPrt->PLinkMode != SK_LMODE_HALF) { + /* set full duplex */ + SWord |= GM_GPCR_DUP_FULL; + } + + /* flow-control settings */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + /* set Pause Off */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_OFF); + /* disable Tx & Rx flow-control */ + SWord |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case SK_FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + SWord |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case SK_FLOW_MODE_SYMMETRIC: + case SK_FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + /* setup General Purpose Control Register */ + GM_OUT16(IoC, Port, GM_GP_CTRL, SWord); + + /* dummy read the Interrupt Source Register */ + SK_IN16(IoC, GMAC_IRQ_SRC, &SWord); + +#ifndef VCPU + /* read Id from PHY */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_ID1, &pPrt->PhyId1); + + SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE); +#endif /* VCPU */ + } + + (void)SkGmResetCounter(pAC, IoC, Port); + + /* setup Transmit Control Register */ + GM_OUT16(IoC, Port, GM_TX_CTRL, TX_COL_THR(pPrt->PMacColThres)); + + /* setup Receive Control Register */ + GM_OUT16(IoC, Port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA | + GM_RXCR_CRC_DIS); + + /* setup Transmit Flow Control Register */ + GM_OUT16(IoC, Port, GM_TX_FLOW_CTRL, 0xffff); + + /* setup Transmit Parameter Register */ +#ifdef VCPU + GM_IN16(IoC, Port, GM_TX_PARAM, &SWord); +#endif /* VCPU */ + + SWord = 
TX_JAM_LEN_VAL(pPrt->PMacJamLen) | + TX_JAM_IPG_VAL(pPrt->PMacJamIpgVal) | + TX_IPG_JAM_DATA(pPrt->PMacJamIpgData); + + GM_OUT16(IoC, Port, GM_TX_PARAM, SWord); + + /* configure the Serial Mode Register */ +#ifdef VCPU + GM_IN16(IoC, Port, GM_SERIAL_MODE, &SWord); +#endif /* VCPU */ + + SWord = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(pPrt->PMacIpgData); + + if (pPrt->PMacLimit4) { + /* reset of collision counter after 4 consecutive collisions */ + SWord |= GM_SMOD_LIMIT_4; + } + + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + /* enable jumbo mode (Max. Frame Length = 9018) */ + SWord |= GM_SMOD_JUMBO_ENA; + } + + GM_OUT16(IoC, Port, GM_SERIAL_MODE, SWord); + + /* + * configure the GMACs Station Addresses + * in PROM you can find our addresses at: + * B2_MAC_1 = xx xx xx xx xx x0 virtual address + * B2_MAC_2 = xx xx xx xx xx x1 is programmed to GMAC A + * B2_MAC_3 = xx xx xx xx xx x2 is reserved for DualPort + */ + + for (i = 0; i < 3; i++) { + /* + * The following 2 statements are together endianess + * independent. Remember this when changing. + */ + /* physical address: will be used for pause frames */ + SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord); + +#ifdef WA_DEV_16 + /* WA for deviation #16 */ + if (pAC->GIni.GIChipId == CHIP_ID_YUKON && pAC->GIni.GIChipRev == 0) { + /* swap the address bytes */ + SWord = ((SWord & 0xff00) >> 8) | ((SWord & 0x00ff) << 8); + + /* write to register in reversed order */ + GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + (2 - i) * 4), SWord); + } + else { + GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord); + } +#else + GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord); +#endif /* WA_DEV_16 */ + + /* virtual address: will be used for data */ + SK_IN16(IoC, (B2_MAC_1 + Port * 8 + i * 2), &SWord); + + GM_OUT16(IoC, Port, (GM_SRC_ADDR_2L + i * 4), SWord); + + /* reset Multicast filtering Hash registers 1-3 */ + GM_OUT16(IoC, Port, GM_MC_ADDR_H1 + 4*i, 0); + } + + /* reset Multicast filtering Hash register 4 */ + GM_OUT16(IoC, Port, GM_MC_ADDR_H4, 0); + + /* enable interrupt mask for counter overflows */ + GM_OUT16(IoC, Port, GM_TX_IRQ_MSK, 0); + GM_OUT16(IoC, Port, GM_RX_IRQ_MSK, 0); + GM_OUT16(IoC, Port, GM_TR_IRQ_MSK, 0); + +#if defined(SK_DIAG) || defined(DEBUG) + /* read General Purpose Status */ + GM_IN16(IoC, Port, GM_GP_STAT, &SWord); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("MAC Stat Reg.=0x%04X\n", SWord)); +#endif /* SK_DIAG || DEBUG */ + +#ifdef SK_DIAG + c_print("MAC Stat Reg=0x%04X\n", SWord); +#endif /* SK_DIAG */ + +} /* SkGmInitMac */ +#endif /* YUKON */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmInitDupMd() - Initialize the XMACs Duplex Mode + * + * Description: + * This function initializes the XMACs Duplex Mode. + * It should be called after successfully finishing + * the Auto-negotiation Process + * + * Returns: + * nothing + */ +static void SkXmInitDupMd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + switch (pAC->GIni.GP[Port].PLinkModeStatus) { + case SK_LMODE_STAT_AUTOHALF: + case SK_LMODE_STAT_HALF: + /* Configuration Actions for Half Duplex Mode */ + /* + * XM_BURST = default value. We are probable not quick + * enough at the 'XMAC' bus to burst 8kB. + * The XMAC stops bursting if no transmit frames + * are available or the burst limit is exceeded. 
+ */ + /* XM_TX_RT_LIM = default value (15) */ + /* XM_TX_STIME = default value (0xff = 4096 bit times) */ + break; + case SK_LMODE_STAT_AUTOFULL: + case SK_LMODE_STAT_FULL: + /* Configuration Actions for Full Duplex Mode */ + /* + * The duplex mode is configured by the PHY, + * therefore it seems to be that there is nothing + * to do here. + */ + break; + case SK_LMODE_STAT_UNKNOWN: + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E007, SKERR_HWI_E007MSG); + break; + } +} /* SkXmInitDupMd */ + + +/****************************************************************************** + * + * SkXmInitPauseMd() - initialize the Pause Mode to be used for this port + * + * Description: + * This function initializes the Pause Mode which should + * be used for this port. + * It should be called after successfully finishing + * the Auto-negotiation Process + * + * Returns: + * nothing + */ +static void SkXmInitPauseMd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U32 DWord; + SK_U16 Word; + + pPrt = &pAC->GIni.GP[Port]; + + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + + if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_NONE || + pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) { + + /* Disable Pause Frame Reception */ + Word |= XM_MMU_IGN_PF; + } + else { + /* + * enabling pause frame reception is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Enable Pause Frame Reception */ + Word &= ~XM_MMU_IGN_PF; + } + + XM_OUT16(IoC, Port, XM_MMU_CMD, Word); + + XM_IN32(IoC, Port, XM_MODE, &DWord); + + if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_SYMMETRIC || + pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) { + + /* + * Configure Pause Frame Generation + * Use internal and external Pause Frame Generation. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * internal oder external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. + */ + + /* XM_PAUSE_DA = '010000C28001' (default) */ + + /* XM_MAC_PTIME = 0xffff (maximum) */ + /* remember this value is defined in big endian (!) */ + XM_OUT16(IoC, Port, XM_MAC_PTIME, 0xffff); + + /* Set Pause Mode in Mode Register */ + DWord |= XM_PAUSE_MODE; + + /* Set Pause Mode in MAC Rx FIFO */ + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + } + else { + /* + * disable pause frame generation is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Disable Pause Mode in Mode Register */ + DWord &= ~XM_PAUSE_MODE; + + /* Disable Pause Mode in MAC Rx FIFO */ + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + } + + XM_OUT32(IoC, Port, XM_MODE, DWord); +} /* SkXmInitPauseMd*/ + + +/****************************************************************************** + * + * SkXmInitPhyXmac() - Initialize the XMAC Phy registers + * + * Description: initializes all the XMACs Phy registers + * + * Note: + * + * Returns: + * nothing + */ +static void SkXmInitPhyXmac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ +{ + SK_GEPORT *pPrt; + SK_U16 Ctrl; + + pPrt = &pAC->GIni.GP[Port]; + Ctrl = 0; + + /* Auto-negotiation ? 
*/ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyXmac: no auto-negotiation Port %d\n", Port)); + /* Set DuplexMode in Config register */ + if (pPrt->PLinkMode == SK_LMODE_FULL) { + Ctrl |= PHY_CT_DUP_MD; + } + + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLEs are transmitted + */ + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyXmac: with auto-negotiation Port %d\n", Port)); + /* Set Auto-negotiation advertisement */ + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + Ctrl |= PHY_X_AN_HD; + break; + case SK_LMODE_AUTOFULL: + Ctrl |= PHY_X_AN_FD; + break; + case SK_LMODE_AUTOBOTH: + Ctrl |= PHY_X_AN_FD | PHY_X_AN_HD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Flow-control capabilities */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + Ctrl |= PHY_X_P_NO_PAUSE; + break; + case SK_FLOW_MODE_LOC_SEND: + Ctrl |= PHY_X_P_ASYM_MD; + break; + case SK_FLOW_MODE_SYMMETRIC: + Ctrl |= PHY_X_P_SYM_MD; + break; + case SK_FLOW_MODE_SYM_OR_REM: + Ctrl |= PHY_X_P_BOTH_MD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + + /* Write AutoNeg Advertisement Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_AUNE_ADV, Ctrl); + + /* Restart Auto-negotiation */ + Ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; + } + + if (DoLoop) { + /* Set the Phy Loopback bit, too */ + Ctrl |= PHY_CT_LOOP; + } + + /* Write to the Phy control register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_CTRL, Ctrl); +} /* SkXmInitPhyXmac */ + + +/****************************************************************************** + * + * SkXmInitPhyBcom() - Initialize the Broadcom Phy registers + * + * Description: initializes all the Broadcom Phy registers + * + * Note: + * + * Returns: + * nothing + */ +static void SkXmInitPhyBcom( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ +{ + SK_GEPORT *pPrt; + SK_U16 Ctrl1; + SK_U16 Ctrl2; + SK_U16 Ctrl3; + SK_U16 Ctrl4; + SK_U16 Ctrl5; + + Ctrl1 = PHY_CT_SP1000; + Ctrl2 = 0; + Ctrl3 = PHY_SEL_TYPE; + Ctrl4 = PHY_B_PEC_EN_LTR; + Ctrl5 = PHY_B_AC_TX_TST; + + pPrt = &pAC->GIni.GP[Port]; + + /* manually Master/Slave ? */ + if (pPrt->PMSMode != SK_MS_MODE_AUTO) { + Ctrl2 |= PHY_B_1000C_MSE; + + if (pPrt->PMSMode == SK_MS_MODE_MASTER) { + Ctrl2 |= PHY_B_1000C_MSC; + } + } + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyBcom: no auto-negotiation Port %d\n", Port)); + /* Set DuplexMode in Config register */ + if (pPrt->PLinkMode == SK_LMODE_FULL) { + Ctrl1 |= PHY_CT_DUP_MD; + } + + /* Determine Master/Slave manually if not already done */ + if (pPrt->PMSMode == SK_MS_MODE_AUTO) { + Ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */ + } + + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLES are transmitted + */ + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyBcom: with auto-negotiation Port %d\n", Port)); + /* Set Auto-negotiation advertisement */ + + /* + * Workaround BCOM Errata #1 for the C5 type. 
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + Ctrl2 |= PHY_B_1000C_RD; + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + Ctrl2 |= PHY_B_1000C_AHD; + break; + case SK_LMODE_AUTOFULL: + Ctrl2 |= PHY_B_1000C_AFD; + break; + case SK_LMODE_AUTOBOTH: + Ctrl2 |= PHY_B_1000C_AFD | PHY_B_1000C_AHD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Flow-control capabilities */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + Ctrl3 |= PHY_B_P_NO_PAUSE; + break; + case SK_FLOW_MODE_LOC_SEND: + Ctrl3 |= PHY_B_P_ASYM_MD; + break; + case SK_FLOW_MODE_SYMMETRIC: + Ctrl3 |= PHY_B_P_SYM_MD; + break; + case SK_FLOW_MODE_SYM_OR_REM: + Ctrl3 |= PHY_B_P_BOTH_MD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + + /* Restart Auto-negotiation */ + Ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG; + } + + /* Initialize LED register here? */ + /* No. Please do it in SkDgXmitLed() (if required) and swap + init order of LEDs and XMAC. (MAl) */ + + /* Write 1000Base-T Control Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, Ctrl2); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Set 1000B-T Ctrl Reg=0x%04X\n", Ctrl2)); + + /* Write AutoNeg Advertisement Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, Ctrl3); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Set Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3)); + + if (DoLoop) { + /* Set the Phy Loopback bit, too */ + Ctrl1 |= PHY_CT_LOOP; + } + + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + /* configure FIFO to high latency for transmission of ext. packets */ + Ctrl4 |= PHY_B_PEC_HIGH_LA; + + /* configure reception of extended packets */ + Ctrl5 |= PHY_B_AC_LONG_PACK; + + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, Ctrl5); + } + + /* Configure LED Traffic Mode and Jumbo Frame usage if specified */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, Ctrl4); + + /* Write to the Phy control register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, Ctrl1); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Control Reg=0x%04X\n", Ctrl1)); +} /* SkXmInitPhyBcom */ +#endif /* GENESIS */ + +#ifdef YUKON +/****************************************************************************** + * + * SkGmInitPhyMarv() - Initialize the Marvell Phy registers + * + * Description: initializes all the Marvell Phy registers + * + * Note: + * + * Returns: + * nothing + */ +static void SkGmInitPhyMarv( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ +{ + SK_GEPORT *pPrt; + SK_U16 PhyCtrl; + SK_U16 C1000BaseT; + SK_U16 AutoNegAdv; + SK_U16 ExtPhyCtrl; + SK_U16 LedCtrl; + SK_BOOL AutoNeg; +#if defined(SK_DIAG) || defined(DEBUG) + SK_U16 PhyStat; + SK_U16 PhyStat1; + SK_U16 PhySpecStat; +#endif /* SK_DIAG || DEBUG */ + + pPrt = &pAC->GIni.GP[Port]; + + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + AutoNeg = SK_FALSE; + } + else { + AutoNeg = SK_TRUE; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyMarv: Port %d, auto-negotiation %s\n", + Port, AutoNeg ? 
"ON" : "OFF")); + +#ifdef VCPU + VCPUprintf(0, "SkGmInitPhyMarv(), Port=%u, DoLoop=%u\n", + Port, DoLoop); +#else /* VCPU */ + if (DoLoop) { + /* Set 'MAC Power up'-bit, set Manual MDI configuration */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, + PHY_M_PC_MAC_POW_UP); + } + else if (AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_AUTO) { + /* Read Ext. PHY Specific Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); + + ExtPhyCtrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + + ExtPhyCtrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) | + PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, ExtPhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Set Ext. PHY Ctrl=0x%04X\n", ExtPhyCtrl)); + } + + /* Read PHY Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); + + if (!AutoNeg) { + /* Disable Auto-negotiation */ + PhyCtrl &= ~PHY_CT_ANE; + } + + PhyCtrl |= PHY_CT_RESET; + /* Assert software reset */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl); +#endif /* VCPU */ + + PhyCtrl = 0 /* PHY_CT_COL_TST */; + C1000BaseT = 0; + AutoNegAdv = PHY_SEL_TYPE; + + /* manually Master/Slave ? */ + if (pPrt->PMSMode != SK_MS_MODE_AUTO) { + /* enable Manual Master/Slave */ + C1000BaseT |= PHY_M_1000C_MSE; + + if (pPrt->PMSMode == SK_MS_MODE_MASTER) { + C1000BaseT |= PHY_M_1000C_MSC; /* set it to Master */ + } + } + + /* Auto-negotiation ? */ + if (!AutoNeg) { + + if (pPrt->PLinkMode == SK_LMODE_FULL) { + /* Set Full Duplex Mode */ + PhyCtrl |= PHY_CT_DUP_MD; + } + + /* Set Master/Slave manually if not already done */ + if (pPrt->PMSMode == SK_MS_MODE_AUTO) { + C1000BaseT |= PHY_M_1000C_MSE; /* set it to Slave */ + } + + /* Set Speed */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + case SK_LSPEED_1000MBPS: + PhyCtrl |= PHY_CT_SP1000; + break; + case SK_LSPEED_100MBPS: + PhyCtrl |= PHY_CT_SP100; + break; + case SK_LSPEED_10MBPS: + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019, + SKERR_HWI_E019MSG); + } + + if (!DoLoop) { + PhyCtrl |= PHY_CT_RESET; + } + } + else { + /* Set Auto-negotiation advertisement */ + + if (pAC->GIni.GICopperType) { + /* Set Speed capabilities */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD; + AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD | + PHY_M_AN_10_FD | PHY_M_AN_10_HD; + break; + case SK_LSPEED_1000MBPS: + C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD; + break; + case SK_LSPEED_100MBPS: + AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD | + /* advertise 10Base-T also */ + PHY_M_AN_10_FD | PHY_M_AN_10_HD; + break; + case SK_LSPEED_10MBPS: + AutoNegAdv |= PHY_M_AN_10_FD | PHY_M_AN_10_HD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019, + SKERR_HWI_E019MSG); + } + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + C1000BaseT &= ~PHY_M_1000C_AFD; + AutoNegAdv &= ~(PHY_M_AN_100_FD | PHY_M_AN_10_FD); + break; + case SK_LMODE_AUTOFULL: + C1000BaseT &= ~PHY_M_1000C_AHD; + AutoNegAdv &= ~(PHY_M_AN_100_HD | PHY_M_AN_10_HD); + break; + case SK_LMODE_AUTOBOTH: + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Flow-control capabilities */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + AutoNegAdv |= PHY_B_P_NO_PAUSE; + break; + case SK_FLOW_MODE_LOC_SEND: + AutoNegAdv |= PHY_B_P_ASYM_MD; + break; + case 
SK_FLOW_MODE_SYMMETRIC: + AutoNegAdv |= PHY_B_P_SYM_MD; + break; + case SK_FLOW_MODE_SYM_OR_REM: + AutoNegAdv |= PHY_B_P_BOTH_MD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + } + else { /* special defines for FIBER (88E1011S only) */ + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + AutoNegAdv |= PHY_M_AN_1000X_AHD; + break; + case SK_LMODE_AUTOFULL: + AutoNegAdv |= PHY_M_AN_1000X_AFD; + break; + case SK_LMODE_AUTOBOTH: + AutoNegAdv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Flow-control capabilities */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + AutoNegAdv |= PHY_M_P_NO_PAUSE_X; + break; + case SK_FLOW_MODE_LOC_SEND: + AutoNegAdv |= PHY_M_P_ASYM_MD_X; + break; + case SK_FLOW_MODE_SYMMETRIC: + AutoNegAdv |= PHY_M_P_SYM_MD_X; + break; + case SK_FLOW_MODE_SYM_OR_REM: + AutoNegAdv |= PHY_M_P_BOTH_MD_X; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + } + + if (!DoLoop) { + /* Restart Auto-negotiation */ + PhyCtrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } + } + +#ifdef VCPU + /* + * E-mail from Gu Lin (08-03-2002): + */ + + /* Program PHY register 30 as 16'h0708 for simulation speed up */ + SkGmPhyWrite(pAC, IoC, Port, 30, 0x0700 /* 0x0708 */); + + VCpuWait(2000); + +#else /* VCPU */ + + /* Write 1000Base-T Control Register */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_1000T_CTRL, C1000BaseT); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Set 1000B-T Ctrl =0x%04X\n", C1000BaseT)); + + /* Write AutoNeg Advertisement Register */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV, AutoNegAdv); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Set Auto-Neg.Adv.=0x%04X\n", AutoNegAdv)); +#endif /* VCPU */ + + if (DoLoop) { + /* Set the PHY Loopback bit */ + PhyCtrl |= PHY_CT_LOOP; + +#ifdef XXX + /* Program PHY register 16 as 16'h0400 to force link good */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_FL_GOOD); +#endif /* XXX */ + +#ifndef VCPU + if (pPrt->PLinkSpeed != SK_LSPEED_AUTO) { + /* Write Ext. 
PHY Specific Control */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, + (SK_U16)((pPrt->PLinkSpeed + 2) << 4)); + } +#endif /* VCPU */ + } +#ifdef TEST_ONLY + else if (pPrt->PLinkSpeed == SK_LSPEED_10MBPS) { + /* Write PHY Specific Control */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, + PHY_M_PC_EN_DET_MSK); + } +#endif + + /* Write to the PHY Control register */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Set PHY Ctrl Reg.=0x%04X\n", PhyCtrl)); + +#ifdef VCPU + VCpuWait(2000); +#else + + LedCtrl = PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS); + + if ((pAC->GIni.GILedBlinkCtrl & SK_ACT_LED_BLINK) != 0) { + LedCtrl |= PHY_M_LEDC_RX_CTRL | PHY_M_LEDC_TX_CTRL; + } + + if ((pAC->GIni.GILedBlinkCtrl & SK_DUP_LED_NORMAL) != 0) { + LedCtrl |= PHY_M_LEDC_DP_CTRL; + } + + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_CTRL, LedCtrl); + + if ((pAC->GIni.GILedBlinkCtrl & SK_LED_LINK100_ON) != 0) { + /* only in forced 100 Mbps mode */ + if (!AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_100MBPS) { + + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_100(MO_LED_ON)); + } + } + +#ifdef SK_DIAG + c_print("Set PHY Ctrl=0x%04X\n", PhyCtrl); + c_print("Set 1000 B-T=0x%04X\n", C1000BaseT); + c_print("Set Auto-Neg=0x%04X\n", AutoNegAdv); + c_print("Set Ext Ctrl=0x%04X\n", ExtPhyCtrl); +#endif /* SK_DIAG */ + +#if defined(SK_DIAG) || defined(DEBUG) + /* Read PHY Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Ctrl Reg.=0x%04X\n", PhyCtrl)); + + /* Read 1000Base-T Control Register */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_CTRL, &C1000BaseT); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("1000B-T Ctrl =0x%04X\n", C1000BaseT)); + + /* Read AutoNeg Advertisement Register */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &AutoNegAdv); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Neg.Adv.=0x%04X\n", AutoNegAdv)); + + /* Read Ext. PHY Specific Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Ext. 
PHY Ctrl=0x%04X\n", ExtPhyCtrl)); + + /* Read PHY Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Stat Reg.=0x%04X\n", PhyStat)); + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat1); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Stat Reg.=0x%04X\n", PhyStat1)); + + /* Read PHY Specific Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Spec Stat=0x%04X\n", PhySpecStat)); +#endif /* SK_DIAG || DEBUG */ + +#ifdef SK_DIAG + c_print("PHY Ctrl Reg=0x%04X\n", PhyCtrl); + c_print("PHY 1000 Reg=0x%04X\n", C1000BaseT); + c_print("PHY AnAd Reg=0x%04X\n", AutoNegAdv); + c_print("Ext Ctrl Reg=0x%04X\n", ExtPhyCtrl); + c_print("PHY Stat Reg=0x%04X\n", PhyStat); + c_print("PHY Stat Reg=0x%04X\n", PhyStat1); + c_print("PHY Spec Reg=0x%04X\n", PhySpecStat); +#endif /* SK_DIAG */ + +#endif /* VCPU */ + +} /* SkGmInitPhyMarv */ +#endif /* YUKON */ + + +#ifdef OTHER_PHY +/****************************************************************************** + * + * SkXmInitPhyLone() - Initialize the Level One Phy registers + * + * Description: initializes all the Level One Phy registers + * + * Note: + * + * Returns: + * nothing + */ +static void SkXmInitPhyLone( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ +{ + SK_GEPORT *pPrt; + SK_U16 Ctrl1; + SK_U16 Ctrl2; + SK_U16 Ctrl3; + + Ctrl1 = PHY_CT_SP1000; + Ctrl2 = 0; + Ctrl3 = PHY_SEL_TYPE; + + pPrt = &pAC->GIni.GP[Port]; + + /* manually Master/Slave ? */ + if (pPrt->PMSMode != SK_MS_MODE_AUTO) { + Ctrl2 |= PHY_L_1000C_MSE; + + if (pPrt->PMSMode == SK_MS_MODE_MASTER) { + Ctrl2 |= PHY_L_1000C_MSC; + } + } + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + /* + * level one spec say: "1000 Mbps: manual mode not allowed" + * but lets see what happens... + */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyLone: no auto-negotiation Port %d\n", Port)); + /* Set DuplexMode in Config register */ + if (pPrt->PLinkMode == SK_LMODE_FULL) { + Ctrl1 |= PHY_CT_DUP_MD; + } + + /* Determine Master/Slave manually if not already done */ + if (pPrt->PMSMode == SK_MS_MODE_AUTO) { + Ctrl2 |= PHY_L_1000C_MSE; /* set it to Slave */ + } + + /* + * Do NOT enable Auto-negotiation here. 
This would hold + * the link down because no IDLES are transmitted + */ + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyLone: with auto-negotiation Port %d\n", Port)); + /* Set Auto-negotiation advertisement */ + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + Ctrl2 |= PHY_L_1000C_AHD; + break; + case SK_LMODE_AUTOFULL: + Ctrl2 |= PHY_L_1000C_AFD; + break; + case SK_LMODE_AUTOBOTH: + Ctrl2 |= PHY_L_1000C_AFD | PHY_L_1000C_AHD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Flow-control capabilities */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + Ctrl3 |= PHY_L_P_NO_PAUSE; + break; + case SK_FLOW_MODE_LOC_SEND: + Ctrl3 |= PHY_L_P_ASYM_MD; + break; + case SK_FLOW_MODE_SYMMETRIC: + Ctrl3 |= PHY_L_P_SYM_MD; + break; + case SK_FLOW_MODE_SYM_OR_REM: + Ctrl3 |= PHY_L_P_BOTH_MD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + + /* Restart Auto-negotiation */ + Ctrl1 = PHY_CT_ANE | PHY_CT_RE_CFG; + } + + /* Write 1000Base-T Control Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_1000T_CTRL, Ctrl2); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("1000B-T Ctrl Reg=0x%04X\n", Ctrl2)); + + /* Write AutoNeg Advertisement Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_AUNE_ADV, Ctrl3); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3)); + + if (DoLoop) { + /* Set the Phy Loopback bit, too */ + Ctrl1 |= PHY_CT_LOOP; + } + + /* Write to the Phy control register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_CTRL, Ctrl1); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Control Reg=0x%04X\n", Ctrl1)); +} /* SkXmInitPhyLone */ + + +/****************************************************************************** + * + * SkXmInitPhyNat() - Initialize the National Phy registers + * + * Description: initializes all the National Phy registers + * + * Note: + * + * Returns: + * nothing + */ +static void SkXmInitPhyNat( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ +{ +/* todo: National */ +} /* SkXmInitPhyNat */ +#endif /* OTHER_PHY */ + + +/****************************************************************************** + * + * SkMacInitPhy() - Initialize the PHY registers + * + * Description: calls the Init PHY routines dep. on board type + * + * Note: + * + * Returns: + * nothing + */ +void SkMacInitPhy( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? 
*/ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + case SK_PHY_XMAC: + SkXmInitPhyXmac(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_BCOM: + SkXmInitPhyBcom(pAC, IoC, Port, DoLoop); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmInitPhyLone(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_NAT: + SkXmInitPhyNat(pAC, IoC, Port, DoLoop); + break; +#endif /* OTHER_PHY */ + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + SkGmInitPhyMarv(pAC, IoC, Port, DoLoop); + } +#endif /* YUKON */ + +} /* SkMacInitPhy */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmAutoNegDoneXmac() - Auto-negotiation handling + * + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. + * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +static int SkXmAutoNegDoneXmac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 ResAb; /* Resolved Ability */ + SK_U16 LPAb; /* Link Partner Ability */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneXmac, Port %d\n", Port)); + + pPrt = &pAC->GIni.GP[Port]; + + /* Get PHY parameters */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LPAb); + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb); + + if ((LPAb & PHY_X_AN_RFB) != 0) { + /* At least one of the remote fault bit is set */ + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Remote fault bit set Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_OTHER); + } + + /* Check Duplex mismatch */ + if ((ResAb & (PHY_X_RS_HD | PHY_X_RS_FD)) == PHY_X_RS_FD) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL; + } + else if ((ResAb & (PHY_X_RS_HD | PHY_X_RS_FD)) == PHY_X_RS_HD) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF; + } + else { + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Duplex mode mismatch Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_DUP_CAP); + } + + /* Check PAUSE mismatch */ + /* We are NOT using chapter 4.23 of the Xaqti manual */ + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC || + pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) && + (LPAb & PHY_X_P_SYM_MD) != 0) { + /* Symmetric PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; + } + else if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM && + (LPAb & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) { + /* Enable PAUSE receive, disable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; + } + else if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_LOC_SEND && + (LPAb & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) { + /* Disable PAUSE receive, enable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND; + } + else { + /* PAUSE mismatch -> no PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + } + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS; + + return(SK_AND_OK); +} /* SkXmAutoNegDoneXmac */ + + +/****************************************************************************** + * + * SkXmAutoNegDoneBcom() - Auto-negotiation handling + * + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. 
+ * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +static int SkXmAutoNegDoneBcom( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 LPAb; /* Link Partner Ability */ + SK_U16 AuxStat; /* Auxiliary Status */ + +#ifdef TEST_ONLY +01-Sep-2000 RA;:;: + SK_U16 ResAb; /* Resolved Ability */ +#endif /* 0 */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneBcom, Port %d\n", Port)); + pPrt = &pAC->GIni.GP[Port]; + + /* Get PHY parameters */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LPAb); +#ifdef TEST_ONLY +01-Sep-2000 RA;:;: + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); +#endif /* 0 */ + + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &AuxStat); + + if ((LPAb & PHY_B_AN_RF) != 0) { + /* Remote fault bit is set: Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Remote fault bit set Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_OTHER); + } + + /* Check Duplex mismatch */ + if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000FD) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL; + } + else if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000HD) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF; + } + else { + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Duplex mode mismatch Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_DUP_CAP); + } + +#ifdef TEST_ONLY +01-Sep-2000 RA;:;: + /* Check Master/Slave resolution */ + if ((ResAb & PHY_B_1000S_MSF) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + return(SK_AND_OTHER); + } + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; +#endif /* 0 */ + + /* Check PAUSE mismatch ??? */ + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PAUSE_MSK) { + /* Symmetric PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; + } + else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRR) { + /* Enable PAUSE receive, disable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; + } + else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRT) { + /* Disable PAUSE receive, enable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND; + } + else { + /* PAUSE mismatch -> no PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + } + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS; + + return(SK_AND_OK); +} /* SkXmAutoNegDoneBcom */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGmAutoNegDoneMarv() - Auto-negotiation handling + * + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. 
+ * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +static int SkGmAutoNegDoneMarv( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 LPAb; /* Link Partner Ability */ + SK_U16 ResAb; /* Resolved Ability */ + SK_U16 AuxStat; /* Auxiliary Status */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneMarv, Port %d\n", Port)); + pPrt = &pAC->GIni.GP[Port]; + + /* Get PHY parameters */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_LP, &LPAb); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Link P.Abil.=0x%04X\n", LPAb)); + + if ((LPAb & PHY_M_AN_RF) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Remote fault bit set Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_OTHER); + } + + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb); + + /* Check Master/Slave resolution */ + if ((ResAb & PHY_B_1000S_MSF) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + return(SK_AND_OTHER); + } + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + (SK_U8)SK_MS_STAT_MASTER : (SK_U8)SK_MS_STAT_SLAVE; + + /* Read PHY Specific Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &AuxStat); + + /* Check Speed & Duplex resolved */ + if ((AuxStat & PHY_M_PS_SPDUP_RES) == 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Speed & Duplex not resolved, Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN; + return(SK_AND_DUP_CAP); + } + + if ((AuxStat & PHY_M_PS_FULL_DUP) != 0) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL; + } + else { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF; + } + + /* Check PAUSE mismatch ??? */ + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_PAUSE_MSK) { + /* Symmetric PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; + } + else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_RX_P_EN) { + /* Enable PAUSE receive, disable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; + } + else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_TX_P_EN) { + /* Disable PAUSE receive, enable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND; + } + else { + /* PAUSE mismatch -> no PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + } + + /* set used link speed */ + switch ((unsigned)(AuxStat & PHY_M_PS_SPEED_MSK)) { + case (unsigned)PHY_M_PS_SPEED_1000: + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS; + break; + case PHY_M_PS_SPEED_100: + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_100MBPS; + break; + default: + pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_10MBPS; + } + + return(SK_AND_OK); +} /* SkGmAutoNegDoneMarv */ +#endif /* YUKON */ + + +#ifdef OTHER_PHY +/****************************************************************************** + * + * SkXmAutoNegDoneLone() - Auto-negotiation handling + * + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. 
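The PAUSE resolution used above (in SkXmAutoNegDoneXmac, and in SkGmAutoNegDoneMarv via the PHY's resolved status bits) follows IEEE 802.3z Table 37-4: the outcome depends only on the PAUSE and ASM_DIR bits advertised by both link partners. A compact stand-alone restatement (hypothetical names, sketch only):

/* Sketch: IEEE 802.3z Table 37-4 flow-control resolution. */
enum pause_result { PAUSE_OFF, PAUSE_SYMMETRIC, PAUSE_REM_SEND, PAUSE_LOC_SEND };

static enum pause_result resolve_802_3z(int loc_pause, int loc_asm,
					int lp_pause,  int lp_asm)
{
	if (loc_pause && lp_pause)
		return PAUSE_SYMMETRIC;		/* both send and honour PAUSE */

	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return PAUSE_REM_SEND;		/* partner sends, we honour   */

	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return PAUSE_LOC_SEND;		/* we send, partner honours   */

	return PAUSE_OFF;			/* any other combination      */
}

This maps onto the advertisement switches in the init routines above: SK_FLOW_MODE_SYMMETRIC advertises PAUSE, SK_FLOW_MODE_LOC_SEND advertises ASM_DIR only, and SK_FLOW_MODE_SYM_OR_REM advertises both.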
+ * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +static int SkXmAutoNegDoneLone( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 ResAb; /* Resolved Ability */ + SK_U16 LPAb; /* Link Partner Ability */ + SK_U16 QuickStat; /* Auxiliary Status */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneLone, Port %d\n", Port)); + pPrt = &pAC->GIni.GP[Port]; + + /* Get PHY parameters */ + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LPAb); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ResAb); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_Q_STAT, &QuickStat); + + if ((LPAb & PHY_L_AN_RF) != 0) { + /* Remote fault bit is set */ + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Remote fault bit set Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_OTHER); + } + + /* Check Duplex mismatch */ + if ((QuickStat & PHY_L_QS_DUP_MOD) != 0) { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL; + } + else { + pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF; + } + + /* Check Master/Slave resolution */ + if ((ResAb & PHY_L_1000S_MSF) != 0) { + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + return(SK_AND_OTHER); + } + else if (ResAb & PHY_L_1000S_MSR) { + pPrt->PMSStatus = SK_MS_STAT_MASTER; + } + else { + pPrt->PMSStatus = SK_MS_STAT_SLAVE; + } + + /* Check PAUSE mismatch */ + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + /* we must manually resolve the abilities here */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + /* default */ + break; + case SK_FLOW_MODE_LOC_SEND: + if ((QuickStat & (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) == + (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) { + /* Disable PAUSE receive, enable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND; + } + break; + case SK_FLOW_MODE_SYMMETRIC: + if ((QuickStat & PHY_L_QS_PAUSE) != 0) { + /* Symmetric PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; + } + break; + case SK_FLOW_MODE_SYM_OR_REM: + if ((QuickStat & (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) == + PHY_L_QS_AS_PAUSE) { + /* Enable PAUSE receive, disable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; + } + else if ((QuickStat & PHY_L_QS_PAUSE) != 0) { + /* Symmetric PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; + } + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + + return(SK_AND_OK); +} /* SkXmAutoNegDoneLone */ + + +/****************************************************************************** + * + * SkXmAutoNegDoneNat() - Auto-negotiation handling + * + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. 
+ * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +static int SkXmAutoNegDoneNat( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ +/* todo: National */ + return(SK_AND_OK); +} /* SkXmAutoNegDoneNat */ +#endif /* OTHER_PHY */ + + +/****************************************************************************** + * + * SkMacAutoNegDone() - Auto-negotiation handling + * + * Description: calls the auto-negotiation done routines dep. on board type + * + * Returns: + * SK_AND_OK o.k. + * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +int SkMacAutoNegDone( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + int Rtv; + + Rtv = SK_AND_OK; + + pPrt = &pAC->GIni.GP[Port]; + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + switch (pPrt->PhyType) { + + case SK_PHY_XMAC: + Rtv = SkXmAutoNegDoneXmac(pAC, IoC, Port); + break; + case SK_PHY_BCOM: + Rtv = SkXmAutoNegDoneBcom(pAC, IoC, Port); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + Rtv = SkXmAutoNegDoneLone(pAC, IoC, Port); + break; + case SK_PHY_NAT: + Rtv = SkXmAutoNegDoneNat(pAC, IoC, Port); + break; +#endif /* OTHER_PHY */ + default: + return(SK_AND_OTHER); + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + Rtv = SkGmAutoNegDoneMarv(pAC, IoC, Port); + } +#endif /* YUKON */ + + if (Rtv != SK_AND_OK) { + return(Rtv); + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg done Port %d\n", Port)); + + /* We checked everything and may now enable the link */ + pPrt->PAutoNegFail = SK_FALSE; + + SkMacRxTxEnable(pAC, IoC, Port); + + return(SK_AND_OK); +} /* SkMacAutoNegDone */ + + +/****************************************************************************** + * + * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up + * + * Description: enables Rx/Tx dep. on board type + * + * Returns: + * 0 o.k. + * != 0 Error happened + */ +int SkMacRxTxEnable( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 Reg; /* 16-bit register value */ + SK_U16 IntMask; /* MAC interrupt mask */ +#ifdef GENESIS + SK_U16 SWord; +#endif + + pPrt = &pAC->GIni.GP[Port]; + + if (!pPrt->PHWLinkUp) { + /* The Hardware link is NOT up */ + return(0); + } + + if ((pPrt->PLinkMode == SK_LMODE_AUTOHALF || + pPrt->PLinkMode == SK_LMODE_AUTOFULL || + pPrt->PLinkMode == SK_LMODE_AUTOBOTH) && + pPrt->PAutoNegFail) { + /* Auto-negotiation is not done or failed */ + return(0); + } + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* set Duplex Mode and Pause Mode */ + SkXmInitDupMd(pAC, IoC, Port); + + SkXmInitPauseMd(pAC, IoC, Port); + + /* + * Initialize the Interrupt Mask Register. Default IRQs are... + * - Link Asynchronous Event + * - Link Partner requests config + * - Auto Negotiation Done + * - Rx Counter Event Overflow + * - Tx Counter Event Overflow + * - Transmit FIFO Underrun + */ + IntMask = XM_DEF_MSK; + +#ifdef DEBUG + /* add IRQ for Receive FIFO Overflow */ + IntMask &= ~XM_IS_RXF_OV; +#endif /* DEBUG */ + + if (pPrt->PhyType != SK_PHY_XMAC) { + /* disable GP0 interrupt bit */ + IntMask |= XM_IS_INP_ASS; + } + XM_OUT16(IoC, Port, XM_IMSK, IntMask); + + /* get MMU Command Reg. 
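The guard at the top of SkMacRxTxEnable above can be read as a single predicate: traffic is enabled only once the hardware link is up and, if any auto-negotiation mode is configured, auto-negotiation has completed without failure. A sketch of that predicate (hypothetical function name, using the driver's port structure):

/* Sketch: restates the early-return conditions of SkMacRxTxEnable. */
static int link_ready_for_traffic(const SK_GEPORT *pPrt)
{
	int auto_mode = (pPrt->PLinkMode == SK_LMODE_AUTOHALF ||
			 pPrt->PLinkMode == SK_LMODE_AUTOFULL ||
			 pPrt->PLinkMode == SK_LMODE_AUTOBOTH);

	if (!pPrt->PHWLinkUp)
		return 0;		/* no physical link yet */

	if (auto_mode && pPrt->PAutoNegFail)
		return 0;		/* auto-negotiation not done or failed */

	return 1;
}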
*/ + XM_IN16(IoC, Port, XM_MMU_CMD, &Reg); + + if (pPrt->PhyType != SK_PHY_XMAC && + (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL)) { + /* set to Full Duplex */ + Reg |= XM_MMU_GMII_FD; + } + + switch (pPrt->PhyType) { + case SK_PHY_BCOM: + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, + (SK_U16)(SWord & ~PHY_B_AC_DIS_PM)); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, + (SK_U16)PHY_B_DEF_MSK); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, PHY_L_DEF_MSK); + break; + case SK_PHY_NAT: + /* todo National: + SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, PHY_N_DEF_MSK); */ + /* no interrupts possible from National ??? */ + break; +#endif /* OTHER_PHY */ + } + + /* enable Rx/Tx */ + XM_OUT16(IoC, Port, XM_MMU_CMD, Reg | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* + * Initialize the Interrupt Mask Register. Default IRQs are... + * - Rx Counter Event Overflow + * - Tx Counter Event Overflow + * - Transmit FIFO Underrun + */ + IntMask = GMAC_DEF_MSK; + +#ifdef DEBUG + /* add IRQ for Receive FIFO Overrun */ + IntMask |= GM_IS_RX_FF_OR; +#endif /* DEBUG */ + + SK_OUT8(IoC, GMAC_IRQ_MSK, (SK_U8)IntMask); + + /* get General Purpose Control */ + GM_IN16(IoC, Port, GM_GP_CTRL, &Reg); + + if (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) { + /* set to Full Duplex */ + Reg |= GM_GPCR_DUP_FULL; + } + + /* enable Rx/Tx */ + GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Reg | GM_GPCR_RX_ENA | + GM_GPCR_TX_ENA)); + +#ifndef VCPU + /* Enable all PHY interrupts */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, + (SK_U16)PHY_M_DEF_MSK); +#endif /* VCPU */ + } +#endif /* YUKON */ + + return(0); + +} /* SkMacRxTxEnable */ + + +/****************************************************************************** + * + * SkMacRxTxDisable() - Disable Receiver and Transmitter + * + * Description: disables Rx/Tx dep. on board type + * + * Returns: N/A + */ +void SkMacRxTxDisable( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 Word; + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + + XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + /* dummy read to ensure writing */ + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + + GM_IN16(IoC, Port, GM_GP_CTRL, &Word); + + GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Word & ~(GM_GPCR_RX_ENA | + GM_GPCR_TX_ENA))); + + /* dummy read to ensure writing */ + GM_IN16(IoC, Port, GM_GP_CTRL, &Word); + } +#endif /* YUKON */ + +} /* SkMacRxTxDisable */ + + +/****************************************************************************** + * + * SkMacIrqDisable() - Disable IRQ from MAC + * + * Description: sets the IRQ-mask to disable IRQ dep. 
on board type + * + * Returns: N/A + */ +void SkMacIrqDisable( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; +#ifdef GENESIS + SK_U16 Word; +#endif + + pPrt = &pAC->GIni.GP[Port]; + +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + + /* disable all XMAC IRQs */ + XM_OUT16(IoC, Port, XM_IMSK, 0xffff); + + /* Disable all PHY interrupts */ + switch (pPrt->PhyType) { + case SK_PHY_BCOM: + /* Make sure that PHY is initialized */ + if (pPrt->PState != SK_PRT_RESET) { + /* NOT allowed if BCOM is in RESET state */ + /* Workaround BCOM Errata (#10523) all BCom */ + /* Disable Power Management if link is down */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Word); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, + (SK_U16)(Word | PHY_B_AC_DIS_PM)); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff); + } + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0); + break; + case SK_PHY_NAT: + /* todo: National + SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */ + break; +#endif /* OTHER_PHY */ + } + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* disable all GMAC IRQs */ + SK_OUT8(IoC, GMAC_IRQ_MSK, 0); + +#ifndef VCPU + /* Disable all PHY interrupts */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0); +#endif /* VCPU */ + } +#endif /* YUKON */ + +} /* SkMacIrqDisable */ + + +#ifdef SK_DIAG +/****************************************************************************** + * + * SkXmSendCont() - Enable / Disable Send Continuous Mode + * + * Description: enable / disable Send Continuous Mode on XMAC + * + * Returns: + * nothing + */ +void SkXmSendCont( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ + SK_U32 MdReg; + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + if (Enable) { + MdReg |= XM_MD_TX_CONT; + } + else { + MdReg &= ~XM_MD_TX_CONT; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + +} /* SkXmSendCont */ + + +/****************************************************************************** + * + * SkMacTimeStamp() - Enable / Disable Time Stamp + * + * Description: enable / disable Time Stamp generation for Rx packets + * + * Returns: + * nothing + */ +void SkMacTimeStamp( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ + SK_U32 MdReg; + SK_U8 TimeCtrl; + + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + if (Enable) { + MdReg |= XM_MD_ATS; + } + else { + MdReg &= ~XM_MD_ATS; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + } + else { + if (Enable) { + TimeCtrl = GMT_ST_START | GMT_ST_CLR_IRQ; + } + else { + TimeCtrl = GMT_ST_STOP | GMT_ST_CLR_IRQ; + } + /* Start/Stop Time Stamp Timer */ + SK_OUT8(IoC, GMAC_TI_ST_CTRL, TimeCtrl); + } + +} /* SkMacTimeStamp*/ + +#else /* !SK_DIAG */ + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do auto-neg + * + * This function analyses the Interrupt status word. If any of the + * Auto-negotiating interrupt bits are set, the PLipaAutoNeg variable + * is set true. 
+ */ +void SkXmAutoNegLipaXmac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus) /* Interrupt Status word to analyse */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && + (IStatus & (XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND)) != 0) { + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegLipa: AutoNeg detected on Port %d, IStatus=0x%04X\n", + Port, IStatus)); + pPrt->PLipaAutoNeg = SK_LIPA_AUTO; + } +} /* SkXmAutoNegLipaXmac */ +#endif /* GENESIS */ + + +/****************************************************************************** + * + * SkMacAutoNegLipaPhy() - Decides whether Link Partner could do auto-neg + * + * This function analyses the PHY status word. + * If any of the Auto-negotiating bits are set, the PLipaAutoNeg variable + * is set true. + */ +void SkMacAutoNegLipaPhy( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U16 PhyStat) /* PHY Status word to analyse */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && + (PhyStat & PHY_ST_AN_OVER) != 0) { + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegLipa: AutoNeg detected on Port %d, PhyStat=0x%04X\n", + Port, PhyStat)); + pPrt->PLipaAutoNeg = SK_LIPA_AUTO; + } +} /* SkMacAutoNegLipaPhy */ + + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmIrq() - Interrupt Service Routine + * + * Description: services an Interrupt Request of the XMAC + * + * Note: + * With an external PHY, some interrupt bits are not meaningfull any more: + * - LinkAsyncEvent (bit #14) XM_IS_LNK_AE + * - LinkPartnerReqConfig (bit #10) XM_IS_LIPA_RC + * - Page Received (bit #9) XM_IS_RX_PAGE + * - NextPageLoadedForXmt (bit #8) XM_IS_TX_PAGE + * - AutoNegDone (bit #7) XM_IS_AND + * Also probably not valid any more is the GP0 input bit: + * - GPRegisterBit0set XM_IS_INP_ASS + * + * Returns: + * nothing + */ +static void SkXmIrq( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_EVPARA Para; + SK_U16 IStatus; /* Interrupt status read from the XMAC */ + SK_U16 IStatus2; +#ifdef SK_SLIM + SK_U64 OverflowStatus; +#endif + + pPrt = &pAC->GIni.GP[Port]; + + XM_IN16(IoC, Port, XM_ISRC, &IStatus); + + /* LinkPartner Auto-negable? */ + if (pPrt->PhyType == SK_PHY_XMAC) { + SkXmAutoNegLipaXmac(pAC, IoC, Port, IStatus); + } + else { + /* mask bits that are not used with ext. PHY */ + IStatus &= ~(XM_IS_LNK_AE | XM_IS_LIPA_RC | + XM_IS_RX_PAGE | XM_IS_TX_PAGE | + XM_IS_AND | XM_IS_INP_ASS); + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("XmacIrq Port %d Isr 0x%04X\n", Port, IStatus)); + + if (!pPrt->PHWLinkUp) { + /* Spurious XMAC interrupt */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("SkXmIrq: spurious interrupt on Port %d\n", Port)); + return; + } + + if ((IStatus & XM_IS_INP_ASS) != 0) { + /* Reread ISR Register if link is not in sync */ + XM_IN16(IoC, Port, XM_ISRC, &IStatus2); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("SkXmIrq: Link async. 
Double check Port %d 0x%04X 0x%04X\n", + Port, IStatus, IStatus2)); + IStatus &= ~XM_IS_INP_ASS; + IStatus |= IStatus2; + } + + if ((IStatus & XM_IS_LNK_AE) != 0) { + /* not used, GP0 is used instead */ + } + + if ((IStatus & XM_IS_TX_ABORT) != 0) { + /* not used */ + } + + if ((IStatus & XM_IS_FRC_INT) != 0) { + /* not used, use ASIC IRQ instead if needed */ + } + + if ((IStatus & (XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE)) != 0) { + SkHWLinkDown(pAC, IoC, Port); + + /* Signal to RLMT */ + Para.Para32[0] = (SK_U32)Port; + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); + + /* Start workaround Errata #2 timer */ + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME, + SKGE_HWAC, SK_HWEV_WATIM, Para); + } + + if ((IStatus & XM_IS_RX_PAGE) != 0) { + /* not used */ + } + + if ((IStatus & XM_IS_TX_PAGE) != 0) { + /* not used */ + } + + if ((IStatus & XM_IS_AND) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("SkXmIrq: AND on link that is up Port %d\n", Port)); + } + + if ((IStatus & XM_IS_TSC_OV) != 0) { + /* not used */ + } + + /* Combined Tx & Rx Counter Overflow SIRQ Event */ + if ((IStatus & (XM_IS_RXC_OV | XM_IS_TXC_OV)) != 0) { +#ifdef SK_SLIM + SkXmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus); +#else + Para.Para32[0] = (SK_U32)Port; + Para.Para32[1] = (SK_U32)IStatus; + SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para); +#endif /* SK_SLIM */ + } + + if ((IStatus & XM_IS_RXF_OV) != 0) { + /* normal situation -> no effect */ +#ifdef DEBUG + pPrt->PRxOverCnt++; +#endif /* DEBUG */ + } + + if ((IStatus & XM_IS_TXF_UR) != 0) { + /* may NOT happen -> error log */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG); + } + + if ((IStatus & XM_IS_TX_COMP) != 0) { + /* not served here */ + } + + if ((IStatus & XM_IS_RX_COMP) != 0) { + /* not served here */ + } +} /* SkXmIrq */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGmIrq() - Interrupt Service Routine + * + * Description: services an Interrupt Request of the GMAC + * + * Note: + * + * Returns: + * nothing + */ +static void SkGmIrq( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U8 IStatus; /* Interrupt status */ +#ifdef SK_SLIM + SK_U64 OverflowStatus; +#else + SK_EVPARA Para; +#endif + + pPrt = &pAC->GIni.GP[Port]; + + SK_IN8(IoC, GMAC_IRQ_SRC, &IStatus); + +#ifdef XXX + /* LinkPartner Auto-negable? 
*/ + SkMacAutoNegLipaPhy(pAC, IoC, Port, IStatus); +#endif /* XXX */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("GmacIrq Port %d Isr 0x%04X\n", Port, IStatus)); + + /* Combined Tx & Rx Counter Overflow SIRQ Event */ + if (IStatus & (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV)) { + /* these IRQs will be cleared by reading GMACs register */ +#ifdef SK_SLIM + SkGmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus); +#else + Para.Para32[0] = (SK_U32)Port; + Para.Para32[1] = (SK_U32)IStatus; + SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para); +#endif + } + + if (IStatus & GM_IS_RX_FF_OR) { + /* clear GMAC Rx FIFO Overrun IRQ */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_CLI_RX_FO); +#ifdef DEBUG + pPrt->PRxOverCnt++; +#endif /* DEBUG */ + } + + if (IStatus & GM_IS_TX_FF_UR) { + /* clear GMAC Tx FIFO Underrun IRQ */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_CLI_TX_FU); + /* may NOT happen -> error log */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG); + } + + if (IStatus & GM_IS_TX_COMPL) { + /* not served here */ + } + + if (IStatus & GM_IS_RX_COMPL) { + /* not served here */ + } +} /* SkGmIrq */ +#endif /* YUKON */ + + +/****************************************************************************** + * + * SkMacIrq() - Interrupt Service Routine for MAC + * + * Description: calls the Interrupt Service Routine dep. on board type + * + * Returns: + * nothing + */ +void SkMacIrq( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ +#ifdef GENESIS + if (pAC->GIni.GIGenesis) { + /* IRQ from XMAC */ + SkXmIrq(pAC, IoC, Port); + } +#endif /* GENESIS */ + +#ifdef YUKON + if (pAC->GIni.GIYukon) { + /* IRQ from GMAC */ + SkGmIrq(pAC, IoC, Port); + } +#endif /* YUKON */ + +} /* SkMacIrq */ + +#endif /* !SK_DIAG */ + +#ifdef GENESIS +/****************************************************************************** + * + * SkXmUpdateStats() - Force the XMAC to output the current statistic + * + * Description: + * The XMAC holds its statistic internally. To obtain the current + * values a command must be sent so that the statistic data will + * be written to a predefined memory area on the adapter. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmUpdateStats( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 StatReg; + int WaitIndex; + + pPrt = &pAC->GIni.GP[Port]; + WaitIndex = 0; + + /* Send an update command to XMAC specified */ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* + * It is an auto-clearing register. If the command bits + * went to zero again, the statistics are transferred. + * Normally the command should be executed immediately. + * But just to be sure we execute a loop. + */ + do { + + XM_IN16(IoC, Port, XM_STAT_CMD, &StatReg); + + if (++WaitIndex > 10) { + + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E021, SKERR_HWI_E021MSG); + + return(1); + } + } while ((StatReg & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) != 0); + + return(0); +} /* SkXmUpdateStats */ + + +/****************************************************************************** + * + * SkXmMacStatistic() - Get XMAC counter value + * + * Description: + * Gets the 32bit counter value. Except for the octet counters + * the lower 32bit are counted in hardware and the upper 32bit + * must be counted in software by monitoring counter overflow interrupts. 
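The comment above spells out the counter model used throughout this file: the MAC keeps only 32 bits per MIB counter, and the upper half is maintained in software by counting the overflow interrupts signalled via XM_IS_RXC_OV / XM_IS_TXC_OV (or the GMAC equivalents). A minimal sketch of that bookkeeping (hypothetical names, not driver code):

/* Sketch: widening a 32-bit hardware counter to 64 bits in software. */
struct soft_counter {
	unsigned long long high;	/* accumulated overflow contribution */
};

/* called from the counter-overflow interrupt for this statistic */
static void soft_counter_overflow(struct soft_counter *c)
{
	c->high += 1ULL << 32;
}

/* combine the software part with the current 32-bit hardware value */
static unsigned long long soft_counter_read(const struct soft_counter *c,
					    unsigned int hw_low32)
{
	return c->high + hw_low32;
}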
+ * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmMacStatistic( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 StatAddr, /* MIB counter base address */ +SK_U32 SK_FAR *pVal) /* ptr to return statistic value */ +{ + if ((StatAddr < XM_TXF_OK) || (StatAddr > XM_RXF_MAX_SZ)) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG); + + return(1); + } + + XM_IN32(IoC, Port, StatAddr, pVal); + + return(0); +} /* SkXmMacStatistic */ + + +/****************************************************************************** + * + * SkXmResetCounter() - Clear MAC statistic counter + * + * Description: + * Force the XMAC to clear its statistic counter. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmResetCounter( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + return(0); +} /* SkXmResetCounter */ + + +/****************************************************************************** + * + * SkXmOverflowStatus() - Gets the status of counter overflow interrupt + * + * Description: + * Checks the source causing an counter overflow interrupt. On success the + * resulting counter overflow status is written to <pStatus>, whereas the + * upper dword stores the XMAC ReceiveCounterEvent register and the lower + * dword the XMAC TransmitCounterEvent register. + * + * Note: + * For XMAC the interrupt source is a self-clearing register, so the source + * must be checked only once. SIRQ module does another check to be sure + * that no interrupt get lost during process time. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmOverflowStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */ +{ + SK_U64 Status; /* Overflow status */ + SK_U32 RegVal; + + Status = 0; + + if ((IStatus & XM_IS_RXC_OV) != 0) { + + XM_IN32(IoC, Port, XM_RX_CNT_EV, &RegVal); + Status |= (SK_U64)RegVal << 32; + } + + if ((IStatus & XM_IS_TXC_OV) != 0) { + + XM_IN32(IoC, Port, XM_TX_CNT_EV, &RegVal); + Status |= (SK_U64)RegVal; + } + + *pStatus = Status; + + return(0); +} /* SkXmOverflowStatus */ +#endif /* GENESIS */ + + +#ifdef YUKON +/****************************************************************************** + * + * SkGmUpdateStats() - Force the GMAC to output the current statistic + * + * Description: + * Empty function for GMAC. Statistic data is accessible in direct way. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmUpdateStats( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + return(0); +} + + +/****************************************************************************** + * + * SkGmMacStatistic() - Get GMAC counter value + * + * Description: + * Gets the 32bit counter value. Except for the octet counters + * the lower 32bit are counted in hardware and the upper 32bit + * must be counted in software by monitoring counter overflow interrupts. 
+ * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmMacStatistic( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 StatAddr, /* MIB counter base address */ +SK_U32 SK_FAR *pVal) /* ptr to return statistic value */ +{ + + if ((StatAddr < GM_RXF_UC_OK) || (StatAddr > GM_TXE_FIFO_UR)) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("SkGmMacStat: wrong MIB counter 0x%04X\n", StatAddr)); + return(1); + } + + GM_IN32(IoC, Port, StatAddr, pVal); + + return(0); +} /* SkGmMacStatistic */ + + +/****************************************************************************** + * + * SkGmResetCounter() - Clear MAC statistic counter + * + * Description: + * Force GMAC to clear its statistic counter. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmResetCounter( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 Reg; /* Phy Address Register */ + SK_U16 Word; + int i; + + GM_IN16(IoC, Port, GM_PHY_ADDR, &Reg); + + /* set MIB Clear Counter Mode */ + GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg | GM_PAR_MIB_CLR); + + /* read all MIB Counters with Clear Mode set */ + for (i = 0; i < GM_MIB_CNT_SIZE; i++) { + /* the reset is performed only when the lower 16 bits are read */ + GM_IN16(IoC, Port, GM_MIB_CNT_BASE + 8*i, &Word); + } + + /* clear MIB Clear Counter Mode */ + GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg); + + return(0); +} /* SkGmResetCounter */ + + +/****************************************************************************** + * + * SkGmOverflowStatus() - Gets the status of counter overflow interrupt + * + * Description: + * Checks the source causing an counter overflow interrupt. 
On success the + * resulting counter overflow status is written to <pStatus>, whereas the + * the following bit coding is used: + * 63:56 - unused + * 55:48 - TxRx interrupt register bit7:0 + * 32:47 - Rx interrupt register + * 31:24 - unused + * 23:16 - TxRx interrupt register bit15:8 + * 15:0 - Tx interrupt register + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmOverflowStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */ +{ + SK_U64 Status; /* Overflow status */ + SK_U16 RegVal; + + Status = 0; + + if ((IStatus & GM_IS_RX_CO_OV) != 0) { + /* this register is self-clearing after read */ + GM_IN16(IoC, Port, GM_RX_IRQ_SRC, &RegVal); + Status |= (SK_U64)RegVal << 32; + } + + if ((IStatus & GM_IS_TX_CO_OV) != 0) { + /* this register is self-clearing after read */ + GM_IN16(IoC, Port, GM_TX_IRQ_SRC, &RegVal); + Status |= (SK_U64)RegVal; + } + + /* this register is self-clearing after read */ + GM_IN16(IoC, Port, GM_TR_IRQ_SRC, &RegVal); + /* Rx overflow interrupt register bits (LoByte)*/ + Status |= (SK_U64)((SK_U8)RegVal) << 48; + /* Tx overflow interrupt register bits (HiByte)*/ + Status |= (SK_U64)(RegVal >> 8) << 16; + + *pStatus = Status; + + return(0); +} /* SkGmOverflowStatus */ + + +#ifndef SK_SLIM +/****************************************************************************** + * + * SkGmCableDiagStatus() - Starts / Gets status of cable diagnostic test + * + * Description: + * starts the cable diagnostic test if 'StartTest' is true + * gets the results if 'StartTest' is true + * + * NOTE: this test is meaningful only when link is down + * + * Returns: + * 0: success + * 1: no YUKON copper + * 2: test in progress + */ +int SkGmCableDiagStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL StartTest) /* flag for start / get result */ +{ + int i; + SK_U16 RegVal; + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PhyType != SK_PHY_MARV_COPPER) { + + return(1); + } + + if (StartTest) { + /* only start the cable test */ + if ((pPrt->PhyId1 & PHY_I1_REV_MSK) < 4) { + /* apply TDR workaround from Marvell */ + SkGmPhyWrite(pAC, IoC, Port, 29, 0x001e); + + SkGmPhyWrite(pAC, IoC, Port, 30, 0xcc00); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc800); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc400); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc000); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc100); + } + + /* set address to 0 for MDI[0] */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, 0); + + /* Read Cable Diagnostic Reg */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal); + + /* start Cable Diagnostic Test */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, + (SK_U16)(RegVal | PHY_M_CABD_ENA_TEST)); + + return(0); + } + + /* Read Cable Diagnostic Reg */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Cable Diag.=0x%04X\n", RegVal)); + + if ((RegVal & PHY_M_CABD_ENA_TEST) != 0) { + /* test is running */ + return(2); + } + + /* get the test results */ + for (i = 0; i < 4; i++) { + /* set address to i for MDI[i] */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, (SK_U16)i); + + /* get Cable Diagnostic values */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal); + + pPrt->PMdiPairLen[i] = (SK_U8)(RegVal & PHY_M_CABD_DIST_MSK); + + 
pPrt->PMdiPairSts[i] = (SK_U8)((RegVal & PHY_M_CABD_STAT_MSK) >> 13); + } + + return(0); +} /* SkGmCableDiagStatus */ +#endif /* !SK_SLIM */ +#endif /* YUKON */ + +/* End of file */ diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c index 4fe624b0dd25..c77cc14b3227 100644 --- a/drivers/net/skfp/drvfbi.c +++ b/drivers/net/skfp/drvfbi.c @@ -43,25 +43,6 @@ static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ; /* * valid configuration values are: */ -#ifdef ISA -const int opt_ints[] = {8, 3, 4, 5, 9, 10, 11, 12, 15} ; -const int opt_iops[] = {8, - 0x100, 0x120, 0x180, 0x1a0, 0x220, 0x240, 0x320, 0x340}; -const int opt_dmas[] = {4, 3, 5, 6, 7} ; -const int opt_eproms[] = {15, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, - 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc} ; -#endif -#ifdef EISA -const int opt_ints[] = {5, 9, 10, 11} ; -const int opt_dmas[] = {0, 5, 6, 7} ; -const int opt_eproms[] = {0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, - 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc} ; -#endif - -#ifdef MCA -int opt_ints[] = {3, 11, 10, 9} ; /* FM1 */ -int opt_eproms[] = {0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4, 0xd8, 0xdc} ; -#endif /* MCA */ /* * xPOS_ID:xxxx @@ -78,17 +59,9 @@ int opt_eproms[] = {0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4, 0xd8, 0xdc} ; */ #ifndef MULT_OEM #ifndef OEM_CONCEPT -#ifndef MCA const u_char oem_id[] = "xPOS_ID:xxxx" ; -#else -const u_char oem_id[] = "xPOSID1:xxxx" ; /* FM1 card id. */ -#endif #else /* OEM_CONCEPT */ -#ifndef MCA const u_char oem_id[] = OEM_ID ; -#else -const u_char oem_id[] = OEM_ID1 ; /* FM1 card id. */ -#endif /* MCA */ #endif /* OEM_CONCEPT */ #define ID_BYTE0 8 #define OEMID(smc,i) oem_id[ID_BYTE0 + i] @@ -109,23 +82,6 @@ extern int AIX_vpdReadByte() ; /* Prototype of a local function. */ static void smt_stop_watchdog(struct s_smc *smc); -#ifdef MCA -static int read_card_id() ; -static void DisableSlotAccess() ; -static void EnableSlotAccess() ; -#ifdef AIX -extern int attach_POS_addr() ; -extern int detach_POS_addr() ; -extern u_char read_POS() ; -extern void write_POS() ; -extern int AIX_vpdReadByte() ; -#else -#define read_POS(smc,a1,a2) ((u_char) inp(a1)) -#define write_POS(smc,a1,a2,a3) outp((a1),(a3)) -#endif -#endif /* MCA */ - - /* * FDDI card reset */ @@ -139,51 +95,6 @@ static void card_start(struct s_smc *smc) smt_stop_watchdog(smc) ; -#ifdef ISA - outpw(CSR_A,0) ; /* reset for all chips */ - for (i = 10 ; i ; i--) /* delay for PLC's */ - (void)inpw(ISR_A) ; - OUT_82c54_TIMER(3,COUNT(2) | RW_OP(3) | TMODE(2)) ; - /* counter 2, mode 2 */ - OUT_82c54_TIMER(2,97) ; /* LSB */ - OUT_82c54_TIMER(2,0) ; /* MSB ( 15.6 us ) */ - outpw(CSR_A,CS_CRESET) ; -#endif -#ifdef EISA - outpw(CSR_A,0) ; /* reset for all chips */ - for (i = 10 ; i ; i--) /* delay for PLC's */ - (void)inpw(ISR_A) ; - outpw(CSR_A,CS_CRESET) ; - smc->hw.led = (2<<6) ; - outpw(CSR_A,CS_CRESET | smc->hw.led) ; -#endif -#ifdef MCA - outp(ADDR(CARD_DIS),0) ; /* reset for all chips */ - for (i = 10 ; i ; i--) /* delay for PLC's */ - (void)inpw(ISR_A) ; - outp(ADDR(CARD_EN),0) ; - /* first I/O after reset must not be a access to FORMAC or PLC */ - - /* - * bus timeout (MCA) - */ - OUT_82c54_TIMER(3,COUNT(2) | RW_OP(3) | TMODE(3)) ; - /* counter 2, mode 3 */ - OUT_82c54_TIMER(2,(2*24)) ; /* 3.9 us * 2 square wave */ - OUT_82c54_TIMER(2,0) ; /* MSB */ - - /* POS 102 indicated an activ Check Line or Buss Error monitoring */ - if (inpw(CSA_A) & (POS_EN_CHKINT | POS_EN_BUS_ERR)) { - outp(ADDR(IRQ_CHCK_EN),0) ; - } - - if (!((i = inpw(CSR_A)) & CS_SAS)) { - if (!(i & 
CS_BYSTAT)) { - outp(ADDR(BYPASS(STAT_INS)),0) ;/* insert station */ - } - } - outpw(LEDR_A,LED_1) ; /* yellow */ -#endif /* MCA */ #ifdef PCI /* * make sure no transfer activity is pending @@ -253,15 +164,7 @@ void card_stop(struct s_smc *smc) { smt_stop_watchdog(smc) ; smc->hw.mac_ring_is_up = 0 ; /* ring down */ -#ifdef ISA - outpw(CSR_A,0) ; /* reset for all chips */ -#endif -#ifdef EISA - outpw(CSR_A,0) ; /* reset for all chips */ -#endif -#ifdef MCA - outp(ADDR(CARD_DIS),0) ; /* reset for all chips */ -#endif + #ifdef PCI /* * make sure no transfer activity is pending @@ -284,60 +187,6 @@ void mac1_irq(struct s_smc *smc, u_short stu, u_short stl) { int restart_tx = 0 ; again: -#ifndef PCI -#ifndef ISA -/* - * FORMAC+ bug modified the queue pointer if many read/write accesses happens!? - */ - if (stl & (FM_SPCEPDS | /* parit/coding err. syn.q.*/ - FM_SPCEPDA0 | /* parit/coding err. a.q.0 */ - FM_SPCEPDA1 | /* parit/coding err. a.q.1 */ - FM_SPCEPDA2)) { /* parit/coding err. a.q.2 */ - SMT_PANIC(smc,SMT_E0132, SMT_E0132_MSG) ; - } - if (stl & (FM_STBURS | /* tx buffer underrun syn.q.*/ - FM_STBURA0 | /* tx buffer underrun a.q.0 */ - FM_STBURA1 | /* tx buffer underrun a.q.1 */ - FM_STBURA2)) { /* tx buffer underrun a.q.2 */ - SMT_PANIC(smc,SMT_E0133, SMT_E0133_MSG) ; - } -#endif - if ( (stu & (FM_SXMTABT | /* transmit abort */ -#ifdef SYNC - FM_STXABRS | /* syn. tx abort */ -#endif /* SYNC */ - FM_STXABRA0)) || /* asyn. tx abort */ - (stl & (FM_SQLCKS | /* lock for syn. q. */ - FM_SQLCKA0)) ) { /* lock for asyn. q. */ - formac_tx_restart(smc) ; /* init tx */ - restart_tx = 1 ; - stu = inpw(FM_A(FM_ST1U)) ; - stl = inpw(FM_A(FM_ST1L)) ; - stu &= ~ (FM_STECFRMA0 | FM_STEFRMA0 | FM_STEFRMS) ; - if (stu || stl) - goto again ; - } - -#ifndef SYNC - if (stu & (FM_STECFRMA0 | /* end of chain asyn tx */ - FM_STEFRMA0)) { /* end of frame asyn tx */ - /* free tx_queue */ - smc->hw.n_a_send = 0 ; - if (++smc->hw.fp.tx_free < smc->hw.fp.tx_max) { - start_next_send(smc); - } - restart_tx = 1 ; - } -#else /* SYNC */ - if (stu & (FM_STEFRMA0 | /* end of asyn tx */ - FM_STEFRMS)) { /* end of sync tx */ - restart_tx = 1 ; - } -#endif /* SYNC */ - if (restart_tx) - llc_restart_tx(smc) ; -} -#else /* PCI */ /* * parity error: note encoding error is not possible in tag mode @@ -378,7 +227,7 @@ again: if (restart_tx) llc_restart_tx(smc) ; } -#endif /* PCI */ + /* * interrupt source= plc1 * this function is called in nwfbisr.asm @@ -387,10 +236,6 @@ void plc1_irq(struct s_smc *smc) { u_short st = inpw(PLC(PB,PL_INTR_EVENT)) ; -#if (defined(ISA) || defined(EISA)) - /* reset PLC Int. bits */ - outpw(PLC1_I,inpw(PLC1_I)) ; -#endif plc_irq(smc,PB,st) ; } @@ -402,10 +247,6 @@ void plc2_irq(struct s_smc *smc) { u_short st = inpw(PLC(PA,PL_INTR_EVENT)) ; -#if (defined(ISA) || defined(EISA)) - /* reset PLC Int. 
bits */ - outpw(PLC2_I,inpw(PLC2_I)) ; -#endif plc_irq(smc,PA,st) ; } @@ -446,43 +287,15 @@ void read_address(struct s_smc *smc, u_char *mac_addr) char PmdType ; int i ; -#if (defined(ISA) || defined(MCA)) - for (i = 0; i < 4 ;i++) { /* read mac address from board */ - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inpw(PR_A(i+SA_MAC))); - } - for (i = 4; i < 6; i++) { - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inpw(PR_A(i+SA_MAC+PRA_OFF))); - } -#endif -#ifdef EISA - /* - * Note: We get trouble on an Alpha machine if we make a inpw() - * instead of inp() - */ - for (i = 0; i < 4 ;i++) { /* read mac address from board */ - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inp(PR_A(i+SA_MAC))); - } - for (i = 4; i < 6; i++) { - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inp(PR_A(i+SA_MAC+PRA_OFF))); - } -#endif #ifdef PCI for (i = 0; i < 6; i++) { /* read mac address from board */ smc->hw.fddi_phys_addr.a[i] = bitrev8(inp(ADDR(B2_MAC_0+i))); } #endif -#ifndef PCI - ConnectorType = inpw(PR_A(SA_PMD_TYPE)) & 0xff ; - PmdType = inpw(PR_A(SA_PMD_TYPE+1)) & 0xff ; -#else + ConnectorType = inp(ADDR(B2_CONN_TYP)) ; PmdType = inp(ADDR(B2_PMD_TYP)) ; -#endif smc->y[PA].pmd_type[PMD_SK_CONN] = smc->y[PB].pmd_type[PMD_SK_CONN] = ConnectorType ; @@ -512,20 +325,12 @@ void init_board(struct s_smc *smc, u_char *mac_addr) card_start(smc) ; read_address(smc,mac_addr) ; -#ifndef PCI - if (inpw(CSR_A) & CS_SAS) -#else if (!(inp(ADDR(B0_DAS)) & DAS_AVAIL)) -#endif smc->s.sas = SMT_SAS ; /* Single att. station */ else smc->s.sas = SMT_DAS ; /* Dual att. station */ -#ifndef PCI - if (inpw(CSR_A) & CS_BYSTAT) -#else if (!(inp(ADDR(B0_DAS)) & DAS_BYP_ST)) -#endif smc->mib.fddiSMTBypassPresent = 0 ; /* without opt. bypass */ else @@ -538,42 +343,12 @@ void init_board(struct s_smc *smc, u_char *mac_addr) */ void sm_pm_bypass_req(struct s_smc *smc, int mode) { -#if (defined(ISA) || defined(EISA)) - int csra_v ; -#endif - DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ? "BP_INSERT" : "BP_DEINSERT",0) ; if (smc->s.sas != SMT_DAS) return ; -#if (defined(ISA) || defined(EISA)) - - csra_v = inpw(CSR_A) & ~CS_BYPASS ; -#ifdef EISA - csra_v |= smc->hw.led ; -#endif - - switch(mode) { - case BP_INSERT : - outpw(CSR_A,csra_v | CS_BYPASS) ; - break ; - case BP_DEINSERT : - outpw(CSR_A,csra_v) ; - break ; - } -#endif /* ISA / EISA */ -#ifdef MCA - switch(mode) { - case BP_INSERT : - outp(ADDR(BYPASS(STAT_INS)),0) ;/* insert station */ - break ; - case BP_DEINSERT : - outp(ADDR(BYPASS(STAT_BYP)),0) ; /* bypass station */ - break ; - } -#endif #ifdef PCI switch(mode) { case BP_INSERT : @@ -591,31 +366,14 @@ void sm_pm_bypass_req(struct s_smc *smc, int mode) */ int sm_pm_bypass_present(struct s_smc *smc) { -#ifndef PCI - return( (inpw(CSR_A) & CS_BYSTAT) ? FALSE : TRUE ) ; -#else return( (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE: FALSE) ; -#endif } void plc_clear_irq(struct s_smc *smc, int p) { SK_UNUSED(p) ; -#if (defined(ISA) || defined(EISA)) - switch (p) { - case PA : - /* reset PLC Int. bits */ - outpw(PLC2_I,inpw(PLC2_I)) ; - break ; - case PB : - /* reset PLC Int. 
bits */ - outpw(PLC1_I,inpw(PLC1_I)) ; - break ; - } -#else SK_UNUSED(smc) ; -#endif } @@ -645,51 +403,6 @@ static void led_indication(struct s_smc *smc, int led_event) phy = &smc->y[PB] ; mib_b = phy->mib ; -#ifdef EISA - /* Ring up = yellow led OFF*/ - if (led_event == LED_Y_ON) { - smc->hw.led |= CS_LED_1 ; - } - else if (led_event == LED_Y_OFF) { - smc->hw.led &= ~CS_LED_1 ; - } - else { - /* Link at Port A or B = green led ON */ - if (mib_a->fddiPORTPCMState == PC8_ACTIVE || - mib_b->fddiPORTPCMState == PC8_ACTIVE) { - smc->hw.led |= CS_LED_0 ; - } - else { - smc->hw.led &= ~CS_LED_0 ; - } - } -#endif -#ifdef MCA - led_state = inpw(LEDR_A) ; - - /* Ring up = yellow led OFF*/ - if (led_event == LED_Y_ON) { - led_state |= LED_1 ; - } - else if (led_event == LED_Y_OFF) { - led_state &= ~LED_1 ; - } - else { - led_state &= ~(LED_2|LED_0) ; - - /* Link at Port A = green led A ON */ - if (mib_a->fddiPORTPCMState == PC8_ACTIVE) { - led_state |= LED_2 ; - } - - /* Link at Port B/S = green led B ON */ - if (mib_b->fddiPORTPCMState == PC8_ACTIVE) { - led_state |= LED_0 ; - } - } - - outpw(LEDR_A, led_state) ; -#endif /* MCA */ #ifdef PCI led_state = 0 ; @@ -824,446 +537,6 @@ int set_oi_id_def(struct s_smc *smc) } #endif /* MULT_OEM */ - -#ifdef MCA -/************************ - * - * BEGIN_MANUAL_ENTRY() - * - * exist_board - * - * Check if an MCA board is present in the specified slot. - * - * int exist_board( - * struct s_smc *smc, - * int slot) ; - * In - * smc - A pointer to the SMT Context struct. - * - * slot - The number of the slot to inspect. - * Out - * 0 = No adapter present. - * 1 = Found FM1 adapter. - * - * Pseudo - * Read MCA ID - * for all valid OEM_IDs - * compare with ID read - * if equal, return 1 - * return(0 - * - * Note - * The smc pointer must be valid now. - * - * END_MANUAL_ENTRY() - * - ************************/ -#define LONG_CARD_ID(lo, hi) ((((hi) & 0xff) << 8) | ((lo) & 0xff)) -int exist_board(struct s_smc *smc, int slot) -{ -#ifdef MULT_OEM - SK_LOC_DECL(u_char,id[2]) ; - int idi ; -#endif /* MULT_OEM */ - - /* No longer valid. */ - if (smc == NULL) - return(0) ; - -#ifndef MULT_OEM - if (read_card_id(smc, slot) - == LONG_CARD_ID(OEMID(smc,0), OEMID(smc,1))) - return (1) ; /* Found FM adapter. */ - -#else /* MULT_OEM */ - idi = read_card_id(smc, slot) ; - id[0] = idi & 0xff ; - id[1] = idi >> 8 ; - - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; - - if (is_equal_num(&id[0],&OEMID(smc,0),2)) - return (1) ; - } -#endif /* MULT_OEM */ - return (0) ; /* No adapter found. */ -} - -/************************ - * - * read_card_id - * - * Read the MCA card id from the specified slot. - * In - * smc - A pointer to the SMT Context struct. - * CAVEAT: This pointer may be NULL and *must not* be used within this - * function. It's only purpose is for drivers that need some information - * for the inp() and outp() macros. - * - * slot - The number of the slot for which the card id is returned. - * Out - * Returns the card id read from the specified slot. If an illegal slot - * number is specified, the function returns zero. - * - ************************/ -static int read_card_id(struct s_smc *smc, int slot) -/* struct s_smc *smc ; Do not use. */ -{ - int card_id ; - - SK_UNUSED(smc) ; /* Make LINT happy. */ - if ((slot < 1) || (slot > 15)) /* max 16 slots, 0 = motherboard */ - return (0) ; /* Illegal slot number specified. 
*/ - - EnableSlotAccess(smc, slot) ; - - card_id = ((read_POS(smc,POS_ID_HIGH,slot - 1) & 0xff) << 8) | - (read_POS(smc,POS_ID_LOW,slot - 1) & 0xff) ; - - DisableSlotAccess(smc) ; - - return (card_id) ; -} - -/************************ - * - * BEGIN_MANUAL_ENTRY() - * - * get_board_para - * - * Get adapter configuration information. Fill all board specific - * parameters within the 'smc' structure. - * - * int get_board_para( - * struct s_smc *smc, - * int slot) ; - * In - * smc - A pointer to the SMT Context struct, to which this function will - * write some adapter configuration data. - * - * slot - The number of the slot, in which the adapter is installed. - * Out - * 0 = No adapter present. - * 1 = Ok. - * 2 = Adapter present, but card enable bit not set. - * - * END_MANUAL_ENTRY() - * - ************************/ -int get_board_para(struct s_smc *smc, int slot) -{ - int val ; - int i ; - - /* Check if adapter present & get type of adapter. */ - switch (exist_board(smc, slot)) { - case 0: /* Adapter not present. */ - return (0) ; - case 1: /* FM Rev. 1 */ - smc->hw.rev = FM1_REV ; - smc->hw.VFullRead = 0x0a ; - smc->hw.VFullWrite = 0x05 ; - smc->hw.DmaWriteExtraBytes = 8 ; /* 2 extra words. */ - break ; - } - smc->hw.slot = slot ; - - EnableSlotAccess(smc, slot) ; - - if (!(read_POS(smc,POS_102, slot - 1) & POS_CARD_EN)) { - DisableSlotAccess(smc) ; - return (2) ; /* Card enable bit not set. */ - } - - val = read_POS(smc,POS_104, slot - 1) ; /* I/O, IRQ */ - -#ifndef MEM_MAPPED_IO /* is defined by the operating system */ - i = val & POS_IOSEL ; /* I/O base addr. (0x0200 .. 0xfe00) */ - smc->hw.iop = (i + 1) * 0x0400 - 0x200 ; -#endif - i = ((val & POS_IRQSEL) >> 6) & 0x03 ; /* IRQ <0, 1> */ - smc->hw.irq = opt_ints[i] ; - - /* FPROM base addr. */ - i = ((read_POS(smc,POS_103, slot - 1) & POS_MSEL) >> 4) & 0x07 ; - smc->hw.eprom = opt_eproms[i] ; - - DisableSlotAccess(smc) ; - - /* before this, the smc->hw.iop must be set !!! */ - smc->hw.slot_32 = inpw(CSF_A) & SLOT_32 ; - - return (1) ; -} - -/* Enable access to specified MCA slot. */ -static void EnableSlotAccess(struct s_smc *smc, int slot) -{ - SK_UNUSED(slot) ; - -#ifndef AIX - SK_UNUSED(smc) ; - - /* System mode. */ - outp(POS_SYS_SETUP, POS_SYSTEM) ; - - /* Select slot. */ - outp(POS_CHANNEL_POS, POS_CHANNEL_BIT | (slot-1)) ; -#else - attach_POS_addr (smc) ; -#endif -} - -/* Disable access to MCA slot formerly enabled via EnableSlotAccess(). */ -static void DisableSlotAccess(struct s_smc *smc) -{ -#ifndef AIX - SK_UNUSED(smc) ; - - outp(POS_CHANNEL_POS, 0) ; -#else - detach_POS_addr (smc) ; -#endif -} -#endif /* MCA */ - -#ifdef EISA -#ifndef MEM_MAPPED_IO -#define SADDR(slot) (((slot)<<12)&0xf000) -#else /* MEM_MAPPED_IO */ -#define SADDR(slot) (smc->hw.iop) -#endif /* MEM_MAPPED_IO */ - -/************************ - * - * BEGIN_MANUAL_ENTRY() - * - * exist_board - * - * Check if an EISA board is present in the specified slot. - * - * int exist_board( - * struct s_smc *smc, - * int slot) ; - * In - * smc - A pointer to the SMT Context struct. - * - * slot - The number of the slot to inspect. - * Out - * 0 = No adapter present. - * 1 = Found adapter. - * - * Pseudo - * Read EISA ID - * for all valid OEM_IDs - * compare with ID read - * if equal, return 1 - * return(0 - * - * Note - * The smc pointer must be valid now. - * - ************************/ -int exist_board(struct s_smc *smc, int slot) -{ - int i ; -#ifdef MULT_OEM - SK_LOC_DECL(u_char,id[4]) ; -#endif /* MULT_OEM */ - - /* No longer valid. 
*/ - if (smc == NULL) - return(0); - - SK_UNUSED(slot) ; - -#ifndef MULT_OEM - for (i = 0 ; i < 4 ; i++) { - if (inp(SADDR(slot)+PRA(i)) != OEMID(smc,i)) - return(0) ; - } - return(1) ; -#else /* MULT_OEM */ - for (i = 0 ; i < 4 ; i++) - id[i] = inp(SADDR(slot)+PRA(i)) ; - - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; - - if (is_equal_num(&id[0],&OEMID(smc,0),4)) - return (1) ; - } - return (0) ; /* No adapter found. */ -#endif /* MULT_OEM */ -} - - -int get_board_para(struct s_smc *smc, int slot) -{ - int i ; - - if (!exist_board(smc,slot)) - return(0) ; - - smc->hw.slot = slot ; -#ifndef MEM_MAPPED_IO /* if defined by the operating system */ - smc->hw.iop = SADDR(slot) ; -#endif - - if (!(inp(C0_A(0))&CFG_CARD_EN)) { - return(2) ; /* CFG_CARD_EN bit not set! */ - } - - smc->hw.irq = opt_ints[(inp(C1_A(0)) & CFG_IRQ_SEL)] ; - smc->hw.dma = opt_dmas[((inp(C1_A(0)) & CFG_DRQ_SEL)>>3)] ; - - if ((i = inp(C2_A(0)) & CFG_EPROM_SEL) != 0x0f) - smc->hw.eprom = opt_eproms[i] ; - else - smc->hw.eprom = 0 ; - - smc->hw.DmaWriteExtraBytes = 8 ; - - return(1) ; -} -#endif /* EISA */ - -#ifdef ISA -#ifndef MULT_OEM -const u_char sklogo[6] = SKLOGO_STR ; -#define SIZE_SKLOGO(smc) sizeof(sklogo) -#define SKLOGO(smc,i) sklogo[i] -#else /* MULT_OEM */ -#define SIZE_SKLOGO(smc) smc->hw.oem_id->oi_logo_len -#define SKLOGO(smc,i) smc->hw.oem_id->oi_logo[i] -#endif /* MULT_OEM */ - - -int exist_board(struct s_smc *smc, HW_PTR port) -{ - int i ; -#ifdef MULT_OEM - int bytes_read ; - u_char board_logo[15] ; - SK_LOC_DECL(u_char,id[4]) ; -#endif /* MULT_OEM */ - - /* No longer valid. */ - if (smc == NULL) - return(0); - - SK_UNUSED(smc) ; -#ifndef MULT_OEM - for (i = SADDRL ; i < (signed) (SADDRL+SIZE_SKLOGO(smc)) ; i++) { - if ((u_char)inpw((PRA(i)+port)) != SKLOGO(smc,i-SADDRL)) { - return(0) ; - } - } - - /* check MAC address (S&K or other) */ - for (i = 0 ; i < 3 ; i++) { - if ((u_char)inpw((PRA(i)+port)) != OEMID(smc,i)) - return(0) ; - } - return(1) ; -#else /* MULT_OEM */ - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - board_logo[0] = (u_char)inpw((PRA(SADDRL)+port)) ; - bytes_read = 1 ; - - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; - - /* Test all read bytes with current OEM_entry */ - /* for (i=0; (i<bytes_read) && (i < SIZE_SKLOGO(smc)); i++) { */ - for (i = 0; i < bytes_read; i++) { - if (board_logo[i] != SKLOGO(smc,i)) - break ; - } - - /* If mismatch, switch to next OEM entry */ - if ((board_logo[i] != SKLOGO(smc,i)) && (i < bytes_read)) - continue ; - - --i ; - while (bytes_read < SIZE_SKLOGO(smc)) { - // inpw next byte SK_Logo - i++ ; - board_logo[i] = (u_char)inpw((PRA(SADDRL+i)+port)) ; - bytes_read++ ; - if (board_logo[i] != SKLOGO(smc,i)) - break ; - } - - for (i = 0 ; i < 3 ; i++) - id[i] = (u_char)inpw((PRA(i)+port)) ; - - if ((board_logo[i] == SKLOGO(smc,i)) - && (bytes_read == SIZE_SKLOGO(smc))) { - - if (is_equal_num(&id[0],&OEMID(smc,0),3)) - return(1); - } - } /* for */ - return(0) ; -#endif /* MULT_OEM */ -} - -int get_board_para(struct s_smc *smc, int slot) -{ - SK_UNUSED(smc) ; - SK_UNUSED(slot) ; - return(0) ; /* for ISA not supported */ -} -#endif /* ISA */ - -#ifdef PCI -#ifdef USE_BIOS_FUN -int exist_board(struct s_smc *smc, int slot) -{ - u_short dev_id ; - u_short ven_id ; - int found ; - int i ; - - found = FALSE ; /* 
make sure we returned with adatper not found*/ - /* if an empty oemids.h was included */ - -#ifdef MULT_OEM - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; -#endif - ven_id = OEMID(smc,0) + (OEMID(smc,1) << 8) ; - dev_id = OEMID(smc,2) + (OEMID(smc,3) << 8) ; - for (i = 0; i < slot; i++) { - if (pci_find_device(i,&smc->hw.pci_handle, - dev_id,ven_id) != 0) { - - found = FALSE ; - } else { - found = TRUE ; - } - } - if (found) { - return(1) ; /* adapter was found */ - } -#ifdef MULT_OEM - } -#endif - return(0) ; /* adapter was not found */ -} -#endif /* PCI */ -#endif /* USE_BIOS_FUNC */ - void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr) { int i ; diff --git a/drivers/net/skfp/h/mbuf.h b/drivers/net/skfp/h/mbuf.h index b339d1f2e0e5..f2aadcda9e7f 100644 --- a/drivers/net/skfp/h/mbuf.h +++ b/drivers/net/skfp/h/mbuf.h @@ -15,11 +15,7 @@ #ifndef _MBUF_ #define _MBUF_ -#ifndef PCI -#define M_SIZE 4550 -#else #define M_SIZE 4504 -#endif #ifndef MAX_MBUF #define MAX_MBUF 4 diff --git a/drivers/net/skfp/h/skfbi.h b/drivers/net/skfp/h/skfbi.h index ba347d6910f1..c1ba26c06d73 100644 --- a/drivers/net/skfp/h/skfbi.h +++ b/drivers/net/skfp/h/skfbi.h @@ -15,797 +15,11 @@ #ifndef _SKFBI_H_ #define _SKFBI_H_ -#ifdef SYNC -#define exist_board_far exist_board -#define get_board_para_far get_board_para -#endif - -/* - * physical address offset + IO-Port base address - */ -#ifndef PCI -#define ADDR(a) ((a)+smc->hw.iop) -#define ADDRS(smc,a) ((a)+(smc)->hw.iop) -#endif - /* - * FDDI-Fx (x := {I(SA), E(ISA), M(CA), P(CI)}) + * FDDI-Fx (x := {I(SA), P(CI)}) * address calculation & function defines */ -#ifdef EISA - -/* - * Configuration PROM: !! all 8-Bit IO's !! - * |<- MAC-Address ->| - * /-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-/ - * val: |PROD_ID0..3| | free | |00|00|5A|40| |nn|mm|00|00| - * /-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-/ - * IO- ^ ^ ^ ^ ^ - * port 0C80 0C83 0C88 0C90 0C98 - * | \ - * | \ - * | \______________________________________________ - * EISA Expansion Board Product ID: \ - * BIT: |7 6 5 4 3 2 1 0| \ - * | PROD_ID0 | PROD_ID1 | PROD_ID2 | PROD_ID3 | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |0| MAN_C0 | MAN_C1 | MAN_C2 | PROD1 | PROD0 | REV1 | REV0 | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * ^=reserved | product numb. | revision numb | - * MAN_Cx = compressed manufacterer code (x:=0..2) - * ASCII : 'A'..'Z' : 0x41..0x5A -> compr.(c-0x40) : 0x01..0x1A (5Bits!) - */ - -#ifndef MULT_OEM -#ifndef OEM_CONCEPT -#define MAN_C0 ('S'-0x40) -#define MAN_C1 ('K'-0x40) -#define MAN_C2 ('D'-0x40) -#define PROD_ID0 (u_char)((MAN_C0<<2) | (MAN_C1>>3)) -#define PROD_ID1 (u_char)(((MAN_C1<<5) & 0xff) | MAN_C2) -#define PROD_ID2 (u_char)(1) /* prod. nr. */ -#define PROD_ID3 (u_char)(0) /* rev. nr. */ - -#ifndef OEM_USER_DATA -#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata" -#endif -#else /* OEM_CONCEPT */ - -/* MAN_C(0|1|2) no longer present (ra). */ -#define PROD_ID0 (u_char)OEM_PROD_ID0 -#define PROD_ID1 (u_char)OEM_PROD_ID1 -#define PROD_ID2 (u_char)OEM_PROD_ID2 -#define PROD_ID3 (u_char)OEM_PROD_ID3 -#endif /* OEM_CONCEPT */ - -#define SKLOGO PROD_ID0, PROD_ID1, PROD_ID2, PROD_ID3 -#endif /* MULT_OEM */ - -#define SADDRL (0) /* start address SKLOGO */ -#define SA_MAC (0x10) /* start addr. 
MAC_AD within the PROM */ -#define PRA_OFF (4) -#define SA_PMD_TYPE (8) /* start addr. PMD-Type */ - -#define SKFDDI_PSZ 32 /* address PROM size */ - -/* - * address transmission from logical to physical offset address on board - */ -#define FMA(a) (0x0400|((a)<<1)) /* FORMAC+ (r/w) */ -#define P1A(a) (0x0800|((a)<<1)) /* PLC1 (r/w) */ -#define P2A(a) (0x0840|((a)<<1)) /* PLC2 (r/w) */ -#define TIA(a) (0x0880|((a)<<1)) /* Timer (r/w) */ -#define PRA(a) (0x0c80| (a)) /* configuration PROM */ -#define C0A(a) (0x0c84| (a)) /* config. RAM */ -#define C1A(a) (0x0ca0| (a)) /* IRQ-, DMA-nr., EPROM type */ -#define C2A(a) (0x0ca4| (a)) /* EPROM and PAGE selector */ - -#define CONF C0A(0) /* config RAM (card enable bit port) */ -#define PGRA C2A(0) /* Flash page register */ -#define CDID PRA(0) /* Card ID I/O port addr. offset */ - - -/* - * physical address offset + slot specific IO-Port base address - */ -#define FM_A(a) (FMA(a)+smc->hw.iop) /* FORMAC Plus physical addr */ -#define P1_A(a) (P1A(a)+smc->hw.iop) /* PLC1 (r/w) */ -#define P2_A(a) (P2A(a)+smc->hw.iop) /* PLC2 (r/w) */ -#define TI_A(a) (TIA(a)+smc->hw.iop) /* Timer (r/w) */ -#define PR_A(a) (PRA(a)+smc->hw.iop) /* config. PROM */ -#define C0_A(a) (C0A(a)+smc->hw.iop) /* config. RAM */ -#define C1_A(a) (C1A(a)+smc->hw.iop) /* config. RAM */ -#define C2_A(a) (C2A(a)+smc->hw.iop) /* config. RAM */ - - -#define CSRA 0x0008 /* control/status register address (r/w) */ -#define ISRA 0x0008 /* int. source register address (upper 8Bits) */ -#define PLC1I 0x001a /* clear PLC1 interrupt (write only) */ -#define PLC2I 0x0020 /* clear PLC2 interrupt (write only) */ -#define CSFA 0x001c /* control/status FIFO BUSY flags (read only) */ -#define RQAA 0x001c /* Request reg. (write only) */ -#define WCTA 0x001e /* word counter (r/w) */ -#define FFLAG 0x005e /* FLAG/V_FULL (FIFO almost full, write only)*/ - -#define CSR_A (CSRA+smc->hw.iop) /* control/status register address (r/w) */ -#ifdef UNIX -#define CSR_AS(smc) (CSRA+(smc)->hw.iop) /* control/status register address (r/w) */ -#endif -#define ISR_A (ISRA+smc->hw.iop) /* int. source register address (upper 8Bits) */ -#define PLC1_I (PLC1I+smc->hw.iop) /* clear PLC1 internupt (write only) */ -#define PLC2_I (PLC2I+smc->hw.iop) /* clear PLC2 interrupt (write only) */ -#define CSF_A (CSFA+smc->hw.iop) /* control/status FIFO BUSY flags (r/w) */ -#define RQA_A (RQAA+smc->hw.iop) /* Request reg. (write only) */ -#define WCT_A (WCTA+smc->hw.iop) /* word counter (r/w) */ -#define FFLAG_A (FFLAG+smc->hw.iop) /* FLAG/V_FULL (FIFO almost full, write only)*/ - -/* - * control/status register CSRA bits - */ -/* write */ -#define CS_CRESET 0x01 /* Card reset (0=reset) */ -#define CS_RESET_FIFO 0x02 /* FIFO reset (0=reset) */ -#define CS_IMSK 0x04 /* enable IRQ (1=enable, 0=disable) */ -#define CS_EN_IRQ_TC 0x08 /* enable IRQ from transfer counter */ -#define CS_BYPASS 0x20 /* bypass switch (0=remove, 1=insert)*/ -#define CS_LED_0 0x40 /* switch LED 0 */ -#define CS_LED_1 0x80 /* switch LED 1 */ -/* read */ -#define CS_BYSTAT 0x40 /* 0=Bypass exist, 1= ..not */ -#define CS_SAS 0x80 /* single attachement station (=1) */ - -/* - * control/status register CSFA bits (FIFO) - */ -#define CSF_MUX0 0x01 -#define CSF_MUX1 0x02 -#define CSF_HSREQ0 0x04 -#define CSF_HSREQ1 0x08 -#define CSF_HSREQ2 0x10 -#define CSF_BUSY_DMA 0x40 -#define CSF_BUSY_FIFO 0x80 - -/* - * Interrupt source register ISRA (upper 8 data bits) read only & low activ. 
- */ -#define IS_MINTR1 0x0100 /* FORMAC ST1U/L & ~IMSK1U/L*/ -#define IS_MINTR2 0x0200 /* FORMAC ST2U/L & ~IMSK2U/L*/ -#define IS_PLINT1 0x0400 /* PLC1 */ -#define IS_PLINT2 0x0800 /* PLC2 */ -#define IS_TIMINT 0x1000 /* Timer 82C54-2 */ -#define IS_TC 0x2000 /* transf. counter */ - -#define ALL_IRSR (IS_MINTR1|IS_MINTR2|IS_PLINT1|IS_PLINT2|IS_TIMINT|IS_TC) - -/* - * CONFIG<0> RAM (C0_A()) - */ -#define CFG_CARD_EN 0x01 /* card enable */ - -/* - * CONFIG<1> RAM (C1_A()) - */ -#define CFG_IRQ_SEL 0x03 /* IRQ select (4 nr.) */ -#define CFG_IRQ_TT 0x04 /* IRQ trigger type (LEVEL/EDGE) */ -#define CFG_DRQ_SEL 0x18 /* DMA requ. (4 nr.) */ -#define CFG_BOOT_EN 0x20 /* 0=BOOT-, 1=Application Software */ -#define CFG_PROG_EN 0x40 /* V_Prog for FLASH_PROM (1=on) */ - -/* - * CONFIG<2> RAM (C2_A()) - */ -#define CFG_EPROM_SEL 0x0f /* FPROM start address selection */ -#define CFG_PAGE 0xf0 /* FPROM page selection */ - - -#define READ_PROM(a) ((u_char)inp(a)) -#define GET_PAGE(i) outp(C2_A(0),((int)(i)<<4) | (inp(C2_A(0)) & ~CFG_PAGE)) -#define FPROM_SW() (inp(C1_A(0)) & CFG_BOOT_EN) - -#define MAX_PAGES 16 /* 16 pages */ -#define MAX_FADDR 0x2000 /* 8K per page */ -#define VPP_ON() outp(C1_A(0),inp(C1_A(0)) | CFG_PROG_EN) -#define VPP_OFF() outp(C1_A(0),inp(C1_A(0)) & ~CFG_PROG_EN) - -#define DMA_BUSY() (inpw(CSF_A) & CSF_BUSY_DMA) -#define FIFO_BUSY() (inpw(CSF_A) & CSF_BUSY_FIFO) -#define DMA_FIFO_BUSY() (inpw(CSF_A) & (CSF_BUSY_DMA | CSF_BUSY_FIFO)) -#define BUS_CHECK() - -#ifdef UNISYS -/* For UNISYS use another macro with drv_usecewait function */ -#define CHECK_DMA() {u_long k = 1000000; \ - while (k && (DMA_BUSY())) { k--; drv_usecwait(20); } \ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } -#else -#define CHECK_DMA() {u_long k = 1000000 ;\ - while (k && (DMA_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } -#endif - -#define CHECK_FIFO() {u_long k = 1000000 ;\ - while (k && (FIFO_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0019,HWM_E0019_MSG) ; } - -#define CHECK_DMA_FIFO() {u_long k = 1000000 ;\ - while (k && (DMA_FIFO_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; } - -#define GET_ISR() ~inpw(ISR_A) -#define CHECK_ISR() ~inpw(ISR_A) - -#ifndef UNIX -#ifndef WINNT -#define CLI_FBI() outpw(CSR_A,(inpw(CSR_A)&\ - (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|smc->hw.led) -#else /* WINNT */ -#define CLI_FBI() outpw(CSR_A,(l_inpw(CSR_A)&\ - (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|smc->hw.led) -#endif /* WINNT */ -#else /* UNIX */ -#define CLI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))&\ - (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|(smc)->hw.led) -#endif - -#ifndef UNIX -#define STI_FBI() outpw(CSR_A,(inpw(CSR_A)&\ - (CS_CRESET|CS_BYPASS|CS_RESET_FIFO))|CS_IMSK|smc->hw.led) -#else -#define STI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))&\ - (CS_CRESET|CS_BYPASS|CS_RESET_FIFO))|CS_IMSK|(smc)->hw.led) -#endif - -/* EISA DMA Controller */ -#define DMA_WRITE_SINGLE_MASK_BIT_M 0x0a /* Master DMA Controller */ -#define DMA_WRITE_SINGLE_MASK_BIT_S 0xd4 /* Slave DMA Controller */ -#define DMA_CLEAR_BYTE_POINTER_M 0x0c -#define DMA_CLEAR_BYTE_POINTER_S 0xd8 - -#endif /* EISA */ - -#ifdef MCA - -/* - * POS Register: !! all I/O's are 8-Bit !! 
- */ -#define POS_SYS_SETUP 0x94 /* system setup register */ -#define POS_SYSTEM 0xff /* system mode */ - -#define POS_CHANNEL_POS 0x96 /* register slot ID */ -#define POS_CHANNEL_BIT 0x08 /* mask for -"- */ - -#define POS_BASE 0x100 /* POS base address */ -#define POS_ID_LOW POS_BASE /* card ID low */ -#define POS_ID_HIGH (POS_BASE+1) /* card ID high */ -#define POS_102 (POS_BASE+2) /* card en., arbitration level .. */ -#define POS_103 (POS_BASE+3) /* FPROM addr, page */ -#define POS_104 (POS_BASE+4) /* I/O, IRQ */ -#define POS_105 (POS_BASE+5) /* POS_CHCK */ -#define POS_106 (POS_BASE+6) /* to read VPD */ -#define POS_107 (POS_BASE+7) /* added without function */ - -/* FM1 card IDs */ -#define FM1_CARD_ID0 0x83 -#define FM1_CARD_ID1 0 - -#define FM1_IBM_ID0 0x9c -#define FM1_IBM_ID1 0x8f - - -/* FM2 card IDs */ -#define FM2_CARD_ID0 0xab -#define FM2_CARD_ID1 0 - -#define FM2_IBM_ID0 0x7e -#define FM2_IBM_ID1 0x8f - -/* Board revision. */ -#define FM1_REV 0 -#define FM2_REV 1 - -#define MAX_SLOT 8 - -/* - * POS_102 - */ -#define POS_CARD_EN 0x01 /* card enable =1 */ -#define POS_SDAT_EN 0x02 /* enable 32-bit streaming data mode */ -#define POS_EN_CHKINT 0x04 /* enable int. from check line asserted */ -#define POS_EN_BUS_ERR 0x08 /* enable int. on invalid busmaster transf. */ -#define POS_FAIRNESS 0x10 /* fairnes on =1 */ -/* attention: arbitration level used with bit 0 POS 105 */ -#define POS_LARBIT 0xe0 /* arbitration level (0,0,0)->level = 0x8 - (1,1,1)->level = 0xf */ -/* - * POS_103 - */ -#define POS_PAGE 0x07 /* FPROM page selection */ -#define POS_BOOT_EN 0x08 /* boot PROM enable =1 */ -#define POS_MSEL 0x70 /* memory start address for FPROM mapping */ -#define PROG_EN 0x80 /* FM1: Vpp prog on/off */ -#define POS_SDR 0x80 /* FM2: Streaming data bit */ - -/* - * POS_104 - */ -#define POS_IOSEL 0x3f /* selected I/O base address */ -#define POS_IRQSEL 0xc0 /* selected interrupt */ - -/* - * POS_105 - */ -#define POS_CHCK 0x80 -#define POS_SYNC_ERR 0x20 /* FM2: synchronous error reporting */ -#define POS_PAR_DATA 0x10 /* FM2: data parity enable bit */ -#define POS_PAR_ADDR 0x08 /* FM2: address parity enable bit */ -#define POS_IRQHSEL 0x02 /* FM2: Highest bit for IRQ_selection */ -#define POS_HARBIT 0x01 /* Highest bit in Bus arbitration selection */ - -#define SA_MAC (0) /* start addr. MAC_AD within the PROM */ -#define PRA_OFF (0) -#define SA_PMD_TYPE (8) /* start addr. PMD-Type */ - -/* - * address transmission from logical to physical offset address on board - */ -#define FMA(a) (0x0100|((a)<<1)) /* FORMAC+ (r/w) */ -#define P2(a) (0x00c0|((a)<<1)) /* PLC2 (r/w) (DAS) */ -#define P1(a) (0x0080|((a)<<1)) /* PLC1 (r/w) */ -#define TI(a) (0x0060|((a)<<1)) /* Timer (r/w) */ -#define PR(a) (0x0040|((a)<<1)) /* configuration PROM */ -#define CS(a) (0x0020| (a)) /* control/status */ -#define FF(a) (0x0010|((a)<<1)) /* FIFO ASIC */ -#define CT(a) (0x0000|((a)<<1)) /* counter */ - -/* - * counter - */ -#define ACLA CT(0) /* address counter low */ -#define ACHA CT(1) /* address counter high */ -#define BCN CT(2) /* byte counter */ -#define MUX CT(3) /* MUX-register */ -#define WCN CT(0x08) /* word counter */ -#define FFLG CT(0x09) /* FIFO Flags */ - -/* - * test/control register (FM2 only) - */ -#define CNT_TST 0x018 /* Counter test control register */ -#define CNT_STP 0x01a /* Counter test step reg. (8 Bit) */ - -/* - * CS register (read only) - */ -#define CSRA CS(0) /* control/status register address */ -#define CSFA CS(2) /* control/status FIFO BUSY ... 
*/ -#define ISRA CS(4) /* first int. source register address */ -#define ISR2 CS(6) /* second int. source register address */ -#define LEDR CS(0x0c) /* LED register r/w */ -#define CSIL CS(0x10) /* I/O mapped POS_ID_low (100) */ -#define CSIH CS(0x12) /* - " - POS_ID_HIGH (101) */ -#define CSA CS(0x14) /* - " - POS_102 */ -#define CSM CS(0x0e) /* - " - POS_103 */ -#define CSM_FM1 CS(0x16) /* - " - POS_103 (copy in FM1) */ -#define CSI CS(0x18) /* - " - POS_104 */ -#define CSS CS(0x1a) /* - " - POS_105 */ -#define CSP_06 CS(0x1c) /* - " - POS_106 */ -#define WDOG_ST 0x1c /* Watchdog status (FM2 only) */ -#define WDOG_EN 0x1c /* Watchdog enabling (FM2 only, 8Bit) */ -#define WDOG_DIS 0x1e /* Watchdog disabling (FM2 only, 8Bit) */ - -#define PGRA CSM /* Flash page register */ - - -#define WCTA FF(0) /* word counter */ -#define FFLAG FF(1) /* FLAG/V_FULL (FIFO almost full, write only)*/ - -/* - * Timer register (FM2 only) - */ -#define RTM_CNT 0x28 /* RTM Counter */ -#define TI_DIV 0x60 /* Timer Prescaler */ -#define TI_CH1 0x62 /* Timer channel 1 counter */ -#define TI_STOP 0x64 /* Stop timer on channel 1 */ -#define TI_STRT 0x66 /* Start timer on channel 1 */ -#define TI_INI2 0x68 /* Timer: Bus master preemption */ -#define TI_CNT2 0x6a /* Timer */ -#define TI_INI3 0x6c /* Timer: Streaming data */ -#define TI_CNT3 0x6e /* Timer */ -#define WDOG_LO 0x70 /* Watchdog counter low */ -#define WDOG_HI 0x72 /* Watchdog counter high */ -#define RTM_PRE 0x74 /* restr. token prescaler */ -#define RTM_TIM 0x76 /* restr. token timer */ - -/* - * Recommended Timeout values (for FM2 timer only) - */ -#define TOUT_BM_PRE 188 /* 3.76 usec */ -#define TOUT_S_DAT 374 /* 7.48 usec */ - -/* - * CS register (write only) - */ -#define HSR(p) CS(0x18|(p)) /* Host request register */ - -#define RTM_PUT 0x36 /* restr. token counter write */ -#define RTM_GET 0x28 /* - " - clear */ -#define RTM_CLEAR 0x34 /* - " - read */ - -/* - * BCN Bit definitions - */ -#define BCN_BUSY 0x8000 /* DMA Busy flag */ -#define BCN_AZERO 0x4000 /* Almost zero flag (BCN < 4) */ -#define BCN_STREAM 0x2000 /* Allow streaming data (BCN >= 8) */ - -/* - * WCN Bit definitions - */ -#define WCN_ZERO 0x2000 /* Zero flag (counted to zero) */ -#define WCN_AZERO 0x1000 /* Almost zero flag (BCN < 4) */ - -/* - * CNT_TST Bit definitions - */ -#define CNT_MODE 0x01 /* Go into test mode */ -#define CNT_D32 0x02 /* 16/32 BIT test mode */ - -/* - * FIFO Flag FIFO Flags/Vfull register - */ -#define FF_VFULL 0x003f /* V_full value mask */ -#define FFLG_FULL 0x2000 /* FULL flag */ -#define FFLG_A_FULL 0x1000 /* Almost full flag */ -#define FFLG_VFULL 0x0800 /* V_full Flag */ -#define FFLG_A_EMP 0x0400 /* almost empty flag */ -#define FFLG_EMP 0x0200 /* empty flag */ -#define FFLG_T_EMP 0x0100 /* totally empty flag */ - -/* - * WDOG Watchdog status register - */ -#define WDOG_ALM 0x01 /* Watchdog alarm Bit */ -#define WDOG_ACT 0x02 /* Watchdog active Bit */ - -/* - * CS(0) CONTROLS - */ -#define CS_CRESET 0x0001 -#define FIFO_RST 0x0002 -#define CS_IMSK 0x0004 -#define EN_IRQ_CHCK 0x0008 -#define EN_IRQ_TOKEN 0x0010 -#define EN_IRQ_TC 0x0020 -#define TOKEN_STATUS 0x0040 -#define RTM_CHANGE 0x0080 - -#define CS_SAS 0x0100 -#define CS_BYSTAT 0x0200 /* bypass connected (0=conn.) 
*/ -#define CS_BYPASS 0x0400 /* bypass on/off indication */ - -/* - * CS(2) FIFOSTAT - */ -#define HSREQ 0x0007 -#define BIGDIR 0x0008 -#define CSF_BUSY_FIFO 0x0010 -#define CSF_BUSY_DMA 0x0020 -#define SLOT_32 0x0040 - -#define LED_0 0x0001 -#define LED_1 0x0002 -#define LED_2 0x0100 - -#define MAX_PAGES 8 /* pages */ -#define MAX_FADDR 0x4000 /* 16K per page */ - -/* - * IRQ = ISRA || ISR2 ; - * - * ISRA = IRQ_OTH_EN && (IS_LAN | IS_BUS) ; - * ISR2 = IRQ_TC_EN && IS_TC ; - * - * IS_LAN = (IS_MINTR1 | IS_MINTR2 | IS_PLINT1 | IS_PLINT2 | IS_TIMINT) || - * (IRQ_EN_TOKEN && IS_TOKEN) ; - * IS_BUS = IRQ_CHCK_EN && (IS_BUSERR | IS_CHCK_L) ; - */ -/* - * ISRA !!! activ high !!! - */ -#define IS_MINTR1 0x0001 /* FORMAC ST1U/L & ~IMSK1U/L*/ -#define IS_MINTR2 0x0002 /* FORMAC ST2U/L & ~IMSK2U/L*/ -#define IS_PLINT1 0x0004 /* PLC1 */ -#define IS_PLINT2 0x0008 /* PLC2 */ -#define IS_TIMINT 0x0010 /* Timer 82C54-2 */ -#define IS_TOKEN 0x0020 /* restrictet token monitoring */ -#define IS_CHCK_L 0x0040 /* check line asserted */ -#define IS_BUSERR 0x0080 /* bus error */ -/* - * ISR2 - */ -#define IS_TC 0x0001 /* terminal count irq */ -#define IS_SFDBKRTN 0x0002 /* selected feedback return */ -#define IS_D16 0x0004 /* DS16 */ -#define IS_D32 0x0008 /* DS32 */ -#define IS_DPEI 0x0010 /* Data Parity Indication */ - -#define ALL_IRSR 0x00ff - -#define FM_A(a) ADDR(FMA(a)) /* FORMAC Plus physical addr */ -#define P1_A(a) ADDR(P1(a)) /* PLC1 (r/w) */ -#define P2_A(a) ADDR(P2(a)) /* PLC2 (r/w) (DAS) */ -#define TI_A(a) ADDR(TI(a)) /* Timer (r/w) FM1 only! */ -#define PR_A(a) ADDR(PR(a)) /* config. PROM */ -#define CS_A(a) ADDR(CS(a)) /* control/status */ - -#define ISR1_A ADDR(ISRA) /* first int. source register address */ -#define ISR2_A ADDR(ISR2) /* second -"- */ -#define CSR_A ADDR(CSRA) /* control/status register address */ -#define CSF_A ADDR(CSFA) /* control/status FIFO BUSY flags (r/w) */ - -#define CSIL_A ADDR(CSIL) /* I/O mapped POS_ID_low (102) */ -#define CSIH_A ADDR(CSIH) /* - " - POS_ID_HIGH (101) */ -#define CSA_A ADDR(CSA) /* - " - POS_102 */ -#define CSI_A ADDR(CSI) /* - " - POS_104 */ -#define CSM_A ADDR(CSM) /* - " - POS_103 */ -#define CSM_FM1_A ADDR(CSM_FM1) /* - " - POS_103 (2nd copy, FM1) */ -#define CSP_06_A ADDR(CSP_06) /* - " - POS_106 */ - -#define WCT_A ADDR(WCTA) /* word counter (r/w) */ -#define FFLAG_A ADDR(FFLAG) /* FLAG/V_FULL (FIFO almost full, write only)*/ - -#define ACL_A ADDR(ACLA) /* address counter low */ -#define ACH_A ADDR(ACHA) /* address counter high */ -#define BCN_A ADDR(BCN) /* byte counter */ -#define MUX_A ADDR(MUX) /* MUX-register */ - -#define ISR_A ADDR(ISRA) /* Interrupt Source Register */ -#define FIFO_RESET_A ADDR(FIFO_RESET) /* reset the FIFO */ -#define FIFO_EN_A ADDR(FIFO_EN) /* enable the FIFO */ - -#define WDOG_EN_A ADDR(WDOG_EN) /* reset and start the WDOG */ -#define WDOG_DIS_A ADDR(WDOG_DIS) /* disable the WDOG */ -/* - * all control reg. (read!) are 8 bit (except PAGE_RG_A and LEDR_A) - */ -#define HSR_A(p) ADDR(HSR(p)) /* Host request register */ - -#define STAT_BYP 0 /* bypass station */ -#define STAT_INS 2 /* insert station */ -#define BYPASS(o) CS(0x10|(o)) /* o=STAT_BYP || STAT_INS */ - -#define IRQ_TC_EN CS(0x0b) /* enable/disable IRQ on TC */ -#define IRQ_TC_DIS CS(0x0a) -#define IRQ_TOKEN_EN CS(9) /* enable/disable IRQ on restr. 
Token */ -#define IRQ_TOKEN_DIS CS(8) -#define IRQ_CHCK_EN CS(7) /* -"- IRQ after CHCK line */ -#define IRQ_CHCK_DIS CS(6) -#define IRQ_OTH_EN CS(5) /* -"- other IRQ's */ -#define IRQ_OTH_DIS CS(4) -#define FIFO_EN CS(3) /* disable (reset), enable FIFO */ -#define FIFO_RESET CS(2) -#define CARD_EN CS(1) /* disable (reset), enable card */ -#define CARD_DIS CS(0) - -#define LEDR_A ADDR(LEDR) /* D0=green, D1=yellow, D8=L2 */ -#define PAGE_RG_A ADDR(CSM) /* D<2..0> */ -#define IRQ_CHCK_EN_A ADDR(IRQ_CHCK_EN) -#define IRQ_CHCK_DIS_A ADDR(IRQ_CHCK_DIS) - -#define GET_PAGE(bank) outpw(PAGE_RG_A,(inpw(PAGE_RG_A) &\ - (~POS_PAGE)) |(int) (bank)) -#define VPP_ON() if (smc->hw.rev == FM1_REV) { \ - outpw(PAGE_RG_A, \ - (inpw(PAGE_RG_A) & POS_PAGE) | PROG_EN); \ - } -#define VPP_OFF() if (smc->hw.rev == FM1_REV) { \ - outpw(PAGE_RG_A,(inpw(PAGE_RG_A) & POS_PAGE)); \ - } - -#define SKFDDI_PSZ 16 /* address PROM size */ - -#define READ_PROM(a) ((u_char)inp(a)) - -#define GET_ISR() ~inpw(ISR1_A) -#ifndef TCI -#define CHECK_ISR() ~inpw(ISR1_A) -#define CHECK_ISR_SMP(iop) ~inpw((iop)+ISRA) -#else -#define CHECK_ISR() (~inpw(ISR1_A) | ~inpw(ISR2_A)) -#define CHECK_ISR_SMP(iop) (~inpw((iop)+ISRA) | ~inpw((iop)+ISR2)) -#endif - -#define DMA_BUSY() (inpw(CSF_A) & CSF_BUSY_DMA) -#define FIFO_BUSY() (inpw(CSF_A) & CSF_BUSY_FIFO) -#define DMA_FIFO_BUSY() (inpw(CSF_A) & (CSF_BUSY_DMA | CSF_BUSY_FIFO)) -#define BUS_CHECK() { int i ; \ - if ((i = GET_ISR()) & IS_BUSERR) \ - SMT_PANIC(smc,HWM_E0020,HWM_E0020_MSG) ; \ - if (i & IS_CHCK_L) \ - SMT_PANIC(smc,HWM_E0014,HWM_E0014_MSG) ; \ - } - -#define CHECK_DMA() { u_long k = 10000 ; \ - while (k && (DMA_BUSY())) { \ - k-- ; \ - BUS_CHECK() ; \ - } \ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } - -#define CHECK_FIFO() {u_long k = 1000000 ;\ - while (k && (FIFO_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0019,HWM_E0019_MSG) ; } - -#define CHECK_DMA_FIFO() {u_long k = 1000000 ;\ - while (k && (DMA_FIFO_BUSY())) { \ - k-- ;\ - BUS_CHECK() ; \ - } \ - if (!k) SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; } - -#ifndef UNIX -#define CLI_FBI() outp(ADDR(IRQ_OTH_DIS),0) -#else -#define CLI_FBI(smc) outp(ADDRS((smc),IRQ_OTH_DIS),0) -#endif - -#ifndef TCI -#define CLI_FBI_SMP(iop) outp((iop)+IRQ_OTH_DIS,0) -#else -#define CLI_FBI_SMP(iop) outp((iop)+IRQ_OTH_DIS,0) ;\ - outp((iop)+IRQ_TC_DIS,0) -#endif - -#ifndef UNIX -#define STI_FBI() outp(ADDR(IRQ_OTH_EN),0) -#else -#define STI_FBI(smc) outp(ADDRS((smc),IRQ_OTH_EN),0) -#endif - -/* - * Terminal count primitives - */ -#define CLI_TCI(smc) outp(ADDRS((smc),IRQ_TC_DIS),0) -#define STI_TCI(smc) outp(ADDRS((smc),IRQ_TC_EN),0) -#define CHECK_TC(smc,k) {(k) = 10000 ;\ - while ((k) && (~inpw(ISR2_A) & IS_TC)) (k)-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0018,HWM_E0018_MSG) ; } - -#endif /* MCA */ - -#ifdef ISA - -/* - * address transmission from logic NPADDR6-0 to physical offset address on board - */ -#define FMA(a) (0x8000|(((a)&0x07)<<1)|(((a)&0x78)<<7)) /* FORMAC+ (r/w) */ -#define PRA(a) (0x1000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PROM (read only)*/ -#define P1A(a) (0x4000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PLC1 (r/w) */ -#define P2A(a) (0x5000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PLC2 (r/w) */ -#define TIA(a) (0x6000|(((a)&0x03)<<1)) /* Timer (r/w) */ - -#define ISRA 0x0000 /* int. 
source register address (read only) */ -#define ACLA 0x0000 /* address counter low address (write only) */ -#define ACHA 0x0002 /* address counter high address (write only) */ -#define TRCA 0x0004 /* transfer counter address (write only) */ -#define PGRA 0x0006 /* page register address (write only) */ -#define RQAA 0x2000 /* Request reg. (write only) */ -#define CSRA 0x3000 /* control/status register address (r/w) */ - -/* - * physical address offset + IO-Port base address - */ -#define FM_A(a) (FMA(a)+smc->hw.iop) /* FORMAC Plus physical addr */ -#define PR_A(a) (PRA(a)+smc->hw.iop) /* PROM (read only)*/ -#define P1_A(a) (P1A(a)+smc->hw.iop) /* PLC1 (r/w) */ -#define P2_A(a) (P2A(a)+smc->hw.iop) /* PLC2 (r/w) */ -#define TI_A(a) (TIA(a)+smc->hw.iop) /* Timer (r/w) */ - -#define ISR_A (0x0000+smc->hw.iop) /* int. source register address (read only) */ -#define ACL_A (0x0000+smc->hw.iop) /* address counter low address (write only) */ -#define ACH_A (0x0002+smc->hw.iop) /* address counter high address (write only)*/ -#define TRC_A (0x0004+smc->hw.iop) /* transfer counter address (write only) */ -#define PGR_A (0x0006+smc->hw.iop) /* page register address (write only) */ -#define RQA_A (0x2000+smc->hw.iop) /* Request reg. (write only) */ -#define CSR_A (0x3000+smc->hw.iop) /* control/status register address (r/w) */ -#ifdef UNIX -#define CSR_AS(smc) (0x3000+(smc)->hw.iop) /* control/status register address */ -#endif -#define PLC1_I (0x3400+smc->hw.iop) /* clear PLC1 interrupt bit */ -#define PLC2_I (0x3800+smc->hw.iop) /* clear PLC2 interrupt bit */ - -#ifndef MULT_OEM -#ifndef OEM_CONCEPT -#define SKLOGO_STR "SKFDDI" -#else /* OEM_CONCEPT */ -#define SKLOGO_STR OEM_FDDI_LOGO -#endif /* OEM_CONCEPT */ -#endif /* MULT_OEM */ -#define SADDRL (24) /* start address SKLOGO */ -#define SA_MAC (0) /* start addr. MAC_AD within the PROM */ -#define PRA_OFF (0) -#define SA_PMD_TYPE (8) /* start addr. PMD-Type */ - -#define CDID (PRA(SADDRL)) /* Card ID int/O port addr. offset */ -#define NEXT_CDID ((PRA(SADDRL+1)) - CDID) - -#define SKFDDI_PSZ 32 /* address PROM size */ - -#define READ_PROM(a) ((u_char)inpw(a)) -#define GET_PAGE(i) outpw(PGR_A,(int)(i)) - -#define MAX_PAGES 16 /* 16 pages */ -#define MAX_FADDR 0x2000 /* 8K per page */ -#define VPP_OFF() outpw(CSR_A,(inpw(CSR_A) & (CS_CRESET|CS_BYPASS))) -#define VPP_ON() outpw(CSR_A,(inpw(CSR_A) & (CS_CRESET|CS_BYPASS)) | \ - CS_VPPSW) - -/* - * control/status register CSRA bits (log. addr: 0x3000) - */ -/* write */ -#define CS_CRESET 0x01 /* Card reset (0=reset) */ -#define CS_IMSK 0x02 /* enable IRQ (1=enable, 0=disable) */ -#define CS_RESINT1 0x04 /* PLINT1 reset */ -#define CS_VPPSW 0x10 /* 12V power switch (0=off, 1=on) */ -#define CS_BYPASS 0x20 /* bypass switch (0=remove, 1=insert)*/ -#define CS_RESINT2 0x40 /* PLINT2 reset */ -/* read */ -#define CS_BUSY 0x04 /* master transfer activ (=1) */ -#define CS_SW_EPROM 0x08 /* 0=Application Soft. 1=BOOT-EPROM */ -#define CS_BYSTAT 0x40 /* 0=Bypass exist, 1= ..not */ -#define CS_SAS 0x80 /* single attachement station (=1) */ - -/* - * Interrupt source register ISRA (log. addr: 0x0000) read only & low activ. 
- */ -#define IS_MINTR1 0x01 /* FORMAC ST1U/L && ~IMSK1U/L*/ -#define IS_MINTR2 0x02 /* FORMAC ST2U/L && ~IMSK2U/L*/ -#define IS_PLINT1 0x04 /* PLC1 */ -#define IS_PLINT2 0x08 /* PLC2 */ -#define IS_TIMINT 0x10 /* Timer 82C54-2 */ - -#define ALL_IRSR (IS_MINTR1|IS_MINTR2|IS_PLINT1|IS_PLINT2|IS_TIMINT) - -#define FPROM_SW() (inpw(CSR_A)&CS_SW_EPROM) -#define DMA_BUSY() (inpw(CSR_A)&CS_BUSY) -#define CHECK_FIFO() -#define BUS_CHECK() - -/* - * set Host Request register (wr.) - */ -#define SET_HRQ(qup) outpw(RQA_A+((qup)<<1),0) - -#ifndef UNIX -#ifndef WINNT -#define CLI_FBI() outpw(CSR_A,(inpw(CSR_A)&(CS_CRESET|CS_BYPASS|CS_VPPSW))) -#else -#define CLI_FBI() outpw(CSR_A,(l_inpw(CSR_A) & \ - (CS_CRESET|CS_BYPASS|CS_VPPSW))) -#endif -#else -#define CLI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))& \ - (CS_CRESET|CS_BYPASS|CS_VPPSW))) -#endif - -#ifndef UNIX -#define STI_FBI() outpw(CSR_A,(inpw(CSR_A) & \ - (CS_CRESET|CS_BYPASS|CS_VPPSW)) | CS_IMSK) -#else -#define STI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc)) & \ - (CS_CRESET|CS_BYPASS|CS_VPPSW)) | CS_IMSK) -#endif - -#define CHECK_DMA() {unsigned k = 10000 ;\ - while (k && (DMA_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } - -#define GET_ISR() ~inpw(ISR_A) - -#endif /* ISA */ - /*--------------------------------------------------------------------------*/ #ifdef PCI diff --git a/drivers/net/skfp/h/skfbiinc.h b/drivers/net/skfp/h/skfbiinc.h index 79d55ad2cd2a..ac2d7192f1ca 100644 --- a/drivers/net/skfp/h/skfbiinc.h +++ b/drivers/net/skfp/h/skfbiinc.h @@ -22,32 +22,6 @@ */ #define ERR_FLAGS (FS_MSRABT | FS_SEAC2 | FS_SFRMERR | FS_SFRMTY1) -#ifdef ISA -#define DMA_BUSY_CHECK CSRA -#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT) -#define HRQR (RQAA+(RQ_RRQ<<1)) -#define HRQW (RQAA+(RQ_WA2<<1)) -#define HRQA0 (RQAA+(RQ_WA0<<1)) -#define HRQSQ (RQAA+(RQ_WSQ<<1)) -#endif - -#ifdef EISA -#define DMA_BUSY_CHECK CSRA -#define DMA_HIGH_WORD 0x0400 -#define DMA_MASK_M 0x0a -#define DMA_MODE_M 0x0b -#define DMA_BYTE_PTR_M 0x0c -#define DMA_MASK_S 0x0d4 -#define DMA_MODE_S 0x0d6 -#define DMA_BYTE_PTR_S 0x0d8 -#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TC) -#endif /* EISA */ - -#ifdef MCA -#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ - IS_CHCK_L | IS_BUSERR) -#endif - #ifdef PCI #define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ IS_MINTR2 | IS_MINTR3 | IS_R1_P | \ diff --git a/drivers/net/skfp/h/targethw.h b/drivers/net/skfp/h/targethw.h index 22c4923241d3..626dc7263591 100644 --- a/drivers/net/skfp/h/targethw.h +++ b/drivers/net/skfp/h/targethw.h @@ -53,11 +53,6 @@ struct s_oem_ids { u_char oi_sub_id[4] ; /* sub id bytes, representation as */ /* defined by hardware, */ #endif -#ifdef ISA - u_char oi_logo_len ; /* the length of the adapter logo */ - u_char oi_logo[6] ; /* the adapter logo */ - u_char oi_reserved1 ; -#endif /* ISA */ } ; #endif /* MULT_OEM */ @@ -70,43 +65,17 @@ struct s_smt_hw { short dma ; /* DMA channel */ short irq ; /* IRQ level */ short eprom ; /* FLASH prom */ -#ifndef PCI - short DmaWriteExtraBytes ; /* add bytes for DMA write */ -#endif #ifndef SYNC u_short n_a_send ; /* pending send requests */ #endif -#if (defined(EISA) || defined(MCA) || defined(PCI)) +#if defined(PCI) short slot ; /* slot number */ short max_slots ; /* maximum number of slots */ -#endif - -#if (defined(PCI) || defined(MCA)) short wdog_used ; /* TRUE if the watch dog is used */ #endif -#ifdef MCA - short slot_32 ; /* 32bit slot (1) or 16bit slot (0) */ - short rev ; /* 
Board revision (FMx_REV). */ - short VFullRead ; /* V_full value for DMA read */ - short VFullWrite ; /* V_full value for DMA write */ -#endif - -#ifdef EISA - short led ; /* LED for FE card */ - - short dma_rmode ; /* read mode */ - short dma_wmode ; /* write mode */ - short dma_emode ; /* extend mode */ - - /* DMA controller channel dependent io addresses */ - u_short dma_base_word_count ; - u_short dma_base_address ; - u_short dma_base_address_page ; -#endif - #ifdef PCI u_short pci_handle ; /* handle to access the BIOS func */ u_long is_imask ; /* int maske for the int source reg */ diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c index e01f8a0f35c6..053151468f93 100644 --- a/drivers/net/skfp/hwt.c +++ b/drivers/net/skfp/hwt.c @@ -77,25 +77,10 @@ void hwt_start(struct s_smc *smc, u_long time) */ if (!cnt) cnt++ ; -#ifndef PCI - /* - * 6.25MHz -> CLK0 : T0 (cnt0 = 16us) -> OUT0 - * OUT0 -> CLK1 : T1 (cnt1) OUT1 -> ISRA(IS_TIMINT) - */ - OUT_82c54_TIMER(3,1<<6 | 3<<4 | 0<<1) ; /* counter 1, mode 0 */ - OUT_82c54_TIMER(1,cnt & 0xff) ; /* LSB */ - OUT_82c54_TIMER(1,(cnt>>8) & 0xff) ; /* MSB */ - /* - * start timer by switching counter 0 to mode 3 - * T0 resolution 16 us (CLK0=0.16us) - */ - OUT_82c54_TIMER(3,0<<6 | 3<<4 | 3<<1) ; /* counter 0, mode 3 */ - OUT_82c54_TIMER(0,100) ; /* LSB */ - OUT_82c54_TIMER(0,0) ; /* MSB */ -#else /* PCI */ + outpd(ADDR(B2_TI_INI), (u_long) cnt * 200) ; /* Load timer value. */ outpw(ADDR(B2_TI_CRTL), TIM_START) ; /* Start timer. */ -#endif /* PCI */ + smc->hw.timer_activ = TRUE ; } @@ -115,15 +100,8 @@ void hwt_start(struct s_smc *smc, u_long time) ************************/ void hwt_stop(struct s_smc *smc) { -#ifndef PCI - /* stop counter 0 by switching to mode 0 */ - OUT_82c54_TIMER(3,0<<6 | 3<<4 | 0<<1) ; /* counter 0, mode 0 */ - OUT_82c54_TIMER(0,0) ; /* LSB */ - OUT_82c54_TIMER(0,0) ; /* MSB */ -#else /* PCI */ outpw(ADDR(B2_TI_CRTL), TIM_STOP) ; outpw(ADDR(B2_TI_CRTL), TIM_CL_IRQ) ; -#endif /* PCI */ smc->hw.timer_activ = FALSE ; } @@ -168,11 +146,6 @@ void hwt_init(struct s_smc *smc) void hwt_restart(struct s_smc *smc) { hwt_stop(smc) ; -#ifndef PCI - OUT_82c54_TIMER(3,1<<6 | 3<<4 | 0<<1) ; /* counter 1, mode 0 */ - OUT_82c54_TIMER(1,1 ) ; /* LSB */ - OUT_82c54_TIMER(1,0 ) ; /* MSB */ -#endif } /************************ @@ -191,21 +164,12 @@ void hwt_restart(struct s_smc *smc) u_long hwt_read(struct s_smc *smc) { u_short tr ; -#ifndef PCI - u_short is ; -#else u_long is ; -#endif if (smc->hw.timer_activ) { hwt_stop(smc) ; -#ifndef PCI - OUT_82c54_TIMER(3,1<<6) ; /* latch command */ - tr = IN_82c54_TIMER(1) & 0xff ; - tr += (IN_82c54_TIMER(1) & 0xff)<<8 ; -#else /* PCI */ tr = (u_short)((inpd(ADDR(B2_TI_VAL))/200) & 0xffff) ; -#endif /* PCI */ + is = GET_ISR() ; /* Check if timer expired (or wraparound). 
*/ if ((tr > smc->hw.t_start) || (is & IS_TIMINT)) { diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c index efc639c013fd..ea85de918233 100644 --- a/drivers/net/skfp/pmf.c +++ b/drivers/net/skfp/pmf.c @@ -575,7 +575,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, int sp_len ; /* - * skip if errror + * skip if error */ if (pcon->pc_err) return ; diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index a7ef6c8b7721..7cf9b9f35dee 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -260,9 +260,7 @@ static int skfp_init_one(struct pci_dev *pdev, dev->set_multicast_list = &skfp_ctl_set_multicast_list; dev->set_mac_address = &skfp_ctl_set_mac_address; dev->do_ioctl = &skfp_ioctl; - dev->header_cache_update = NULL; /* not supported */ - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Initialize board structure with bus-specific info */ diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c index 75afc1f07ba0..ced2c8f76be7 100644 --- a/drivers/net/skfp/smt.c +++ b/drivers/net/skfp/smt.c @@ -1654,7 +1654,7 @@ static const struct smt_pdef { { SMT_P4053, 0, SWAP_SMT_P4053 } , } ; -#define N_SMT_PLEN (sizeof(smt_pdef)/sizeof(smt_pdef[0])) +#define N_SMT_PLEN ARRAY_SIZE(smt_pdef) int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short list[]) diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c index 16573aca8b62..6caf713b744c 100644 --- a/drivers/net/skfp/srf.c +++ b/drivers/net/skfp/srf.c @@ -43,7 +43,7 @@ static void clear_reported(struct s_smc *smc); static void smt_send_srf(struct s_smc *smc); static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index); -#define MAX_EVCS (sizeof(smc->evcs)/sizeof(smc->evcs[0])) +#define MAX_EVCS ARRAY_SIZE(smc->evcs) struct evc_init { u_char code ; @@ -67,7 +67,7 @@ static const struct evc_init evc_inits[] = { { SMT_EVENT_PORT_PATH_CHANGE, INDEX_PORT,NUMPHYS,SMT_P4053 } , } ; -#define MAX_INIT_EVC (sizeof(evc_inits)/sizeof(evc_inits[0])) +#define MAX_INIT_EVC ARRAY_SIZE(evc_inits) void smt_init_evc(struct s_smc *smc) { diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 776692946562..2aae9fe38c5a 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -410,9 +410,14 @@ static const struct skge_stat { { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, }; -static int skge_get_stats_count(struct net_device *dev) +static int skge_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(skge_stats); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(skge_stats); + default: + return -EOPNOTSUPP; + } } static void skge_get_ethtool_stats(struct net_device *dev, @@ -811,17 +816,14 @@ static const struct ethtool_ops skge_ethtool_ops = { .set_pauseparam = skge_set_pauseparam, .get_coalesce = skge_get_coalesce, .set_coalesce = skge_set_coalesce, - .get_sg = ethtool_op_get_sg, .set_sg = skge_set_sg, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = skge_set_tx_csum, .get_rx_csum = skge_get_rx_csum, .set_rx_csum = skge_set_rx_csum, .get_strings = skge_get_strings, .phys_id = skge_phys_id, - .get_stats_count = skge_get_stats_count, + .get_sset_count = skge_get_sset_count, .get_ethtool_stats = skge_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; /* @@ -2529,7 +2531,7 @@ static int skge_up(struct net_device *dev) skge_write32(hw, B0_IMSK, hw->intr_mask); spin_unlock_irq(&hw->hw_lock); - netif_poll_enable(dev); + napi_enable(&skge->napi); return 0; free_rx_ring: @@ -2559,7 +2561,7 @@ 
static int skge_down(struct net_device *dev) if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) del_timer_sync(&skge->link_timer); - netif_poll_disable(dev); + napi_disable(&skge->napi); netif_carrier_off(dev); spin_lock_irq(&hw->hw_lock); @@ -3045,14 +3047,13 @@ static void skge_tx_done(struct net_device *dev) } } -static int skge_poll(struct net_device *dev, int *budget) +static int skge_poll(struct napi_struct *napi, int to_do) { - struct skge_port *skge = netdev_priv(dev); + struct skge_port *skge = container_of(napi, struct skge_port, napi); + struct net_device *dev = skge->netdev; struct skge_hw *hw = skge->hw; struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; - unsigned long flags; - int to_do = min(dev->quota, *budget); int work_done = 0; skge_tx_done(dev); @@ -3083,20 +3084,16 @@ static int skge_poll(struct net_device *dev, int *budget) wmb(); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done >= to_do) - return 1; /* not done */ - - spin_lock_irqsave(&hw->hw_lock, flags); - __netif_rx_complete(dev); - hw->intr_mask |= napimask[skge->port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); - skge_read32(hw, B0_IMSK); - spin_unlock_irqrestore(&hw->hw_lock, flags); + if (work_done < to_do) { + spin_lock_irq(&hw->hw_lock); + __netif_rx_complete(dev, napi); + hw->intr_mask |= napimask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + } - return 0; + return work_done; } /* Parity errors seem to happen when Genesis is connected to a switch @@ -3253,8 +3250,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id) } if (status & (IS_XA1_F|IS_R1_F)) { + struct skge_port *skge = netdev_priv(hw->dev[0]); hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); - netif_rx_schedule(hw->dev[0]); + netif_rx_schedule(hw->dev[0], &skge->napi); } if (status & IS_PA_TO_TX1) @@ -3272,13 +3270,14 @@ static irqreturn_t skge_intr(int irq, void *dev_id) skge_mac_intr(hw, 0); if (hw->dev[1]) { + struct skge_port *skge = netdev_priv(hw->dev[1]); + if (status & (IS_XA2_F|IS_R2_F)) { hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); - netif_rx_schedule(hw->dev[1]); + netif_rx_schedule(hw->dev[1], &skge->napi); } if (status & IS_PA_TO_RX2) { - struct skge_port *skge = netdev_priv(hw->dev[1]); ++skge->net_stats.rx_over_errors; skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); } @@ -3553,7 +3552,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, return NULL; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->open = skge_up; dev->stop = skge_down; @@ -3570,8 +3568,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, SET_ETHTOOL_OPS(dev, &skge_ethtool_ops); dev->tx_timeout = skge_tx_timeout; dev->watchdog_timeo = TX_WATCHDOG; - dev->poll = skge_poll; - dev->weight = NAPI_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = skge_netpoll; #endif @@ -3581,6 +3577,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, dev->features |= NETIF_F_HIGHDMA; skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); skge->netdev = dev; skge->hw = hw; skge->msg_enable = netif_msg_init(debug, default_msg); @@ -3624,12 +3621,11 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, static void __devinit skge_show_addr(struct net_device *dev) { const struct skge_port *skge = netdev_priv(dev); + DECLARE_MAC_BUF(mac); if (netif_msg_probe(skge)) - 
printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO PFX "%s: addr %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } static int __devinit skge_probe(struct pci_dev *pdev, diff --git a/drivers/net/skge.h b/drivers/net/skge.h index edd71468220c..1a57bdd1ddf1 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -1351,8 +1351,6 @@ enum { PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ }; -#define PHY_M_PC_MDI_XMODE(x) ((((u16)(x)<<5) & PHY_M_PC_MDIX_MSK) - enum { PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ @@ -2448,6 +2446,7 @@ enum pause_status { struct skge_port { struct skge_hw *hw; struct net_device *netdev; + struct napi_struct napi; int port; u32 msg_enable; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a2f32151559e..68f728f0b600 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -31,6 +31,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/tcp.h> @@ -51,7 +52,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.16" +#define DRV_VERSION "1.19" #define PFX DRV_NAME " " /* @@ -99,10 +100,6 @@ static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); -static int idle_timeout = 100; -module_param(idle_timeout, int, 0); -MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)"); - static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ @@ -122,12 +119,15 @@ static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ @@ -151,8 +151,11 @@ static const char *yukon2_name[] = { "Extreme", /* 0xb5 */ "EC", /* 0xb6 */ "FE", /* 0xb7 */ + "FE+", /* 0xb8 */ }; +static void sky2_set_multicast(struct net_device *dev); + /* Access to external PHY */ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) { @@ -219,25 +222,30 @@ static void sky2_power_on(struct sky2_hw *hw) else sky2_write8(hw, B2_Y2_CLK_GATE, 0); - if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { + if (hw->flags & SKY2_HW_ADV_POWER_CTL) { + struct pci_dev *pdev = hw->pdev; u32 reg; - reg = sky2_pci_read32(hw, PCI_DEV_REG4); + pci_write_config_dword(pdev, PCI_DEV_REG3, 0); 
+ + pci_read_config_dword(pdev, PCI_DEV_REG4, &reg); /* set all bits to 0 except bits 15..12 and 8 */ reg &= P_ASPM_CONTROL_MSK; - sky2_pci_write32(hw, PCI_DEV_REG4, reg); + pci_write_config_dword(pdev, PCI_DEV_REG4, reg); - reg = sky2_pci_read32(hw, PCI_DEV_REG5); + pci_read_config_dword(pdev, PCI_DEV_REG5, &reg); /* set all bits to 0 except bits 28 & 27 */ reg &= P_CTL_TIM_VMAIN_AV_MSK; - sky2_pci_write32(hw, PCI_DEV_REG5, reg); + pci_write_config_dword(pdev, PCI_DEV_REG5, reg); - sky2_pci_write32(hw, PCI_CFG_REG_1, 0); + pci_write_config_dword(pdev, PCI_CFG_REG_1, 0); /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ reg = sky2_read32(hw, B2_GP_IO); reg |= GLB_GPIO_STAT_RACE_DIS; sky2_write32(hw, B2_GP_IO, reg); + + sky2_read32(hw, B2_GP_IO); } } @@ -288,10 +296,10 @@ static const u16 copper_fc_adv[] = { /* flow control to advertise bits when using 1000BaseX */ static const u16 fiber_fc_adv[] = { - [FC_BOTH] = PHY_M_P_BOTH_MD_X, + [FC_NONE] = PHY_M_P_NO_PAUSE_X, [FC_TX] = PHY_M_P_ASYM_MD_X, [FC_RX] = PHY_M_P_SYM_MD_X, - [FC_NONE] = PHY_M_P_NO_PAUSE_X, + [FC_BOTH] = PHY_M_P_BOTH_MD_X, }; /* flow control to GMA disable bits */ @@ -308,10 +316,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; - if (sky2->autoneg == AUTONEG_ENABLE - && !(hw->chip_id == CHIP_ID_YUKON_XL - || hw->chip_id == CHIP_ID_YUKON_EC_U - || hw->chip_id == CHIP_ID_YUKON_EX)) { + if (sky2->autoneg == AUTONEG_ENABLE && + !(hw->flags & SKY2_HW_NEWER_PHY)) { u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | @@ -331,9 +337,19 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); if (sky2_is_copper(hw)) { - if (hw->chip_id == CHIP_ID_YUKON_FE) { + if (!(hw->flags & SKY2_HW_GIGABIT)) { /* enable automatic crossover */ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + u16 spec; + + /* Enable Class A driver for FE+ A0 */ + spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); + spec |= PHY_M_FESC_SEL_CL_A; + gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); + } } else { /* disable energy detect */ ctrl &= ~PHY_M_PC_EN_DET_MSK; @@ -343,9 +359,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) /* downshift on PHY 88E1112 and 88E1149 is changed */ if (sky2->autoneg == AUTONEG_ENABLE - && (hw->chip_id == CHIP_ID_YUKON_XL - || hw->chip_id == CHIP_ID_YUKON_EC_U - || hw->chip_id == CHIP_ID_YUKON_EX)) { + && (hw->flags & SKY2_HW_NEWER_PHY)) { /* set downshift counter to 3x and enable downshift */ ctrl &= ~PHY_M_PC_DSC_MSK; ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; @@ -361,7 +375,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* special setup for PHY 88E1112 Fiber */ - if (hw->chip_id == CHIP_ID_YUKON_XL && !sky2_is_copper(hw)) { + if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. 
*/ @@ -452,7 +466,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) gma_write16(hw, port, GM_GP_CTRL, reg); - if (hw->chip_id != CHIP_ID_YUKON_FE) + if (hw->flags & SKY2_HW_GIGABIT) gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); @@ -476,6 +490,23 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); break; + case CHIP_ID_YUKON_FE_P: + /* Enable Link Partner Next Page */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_ENA_LIP_NP; + + /* disable Energy Detect and enable scrambler */ + ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ + ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | + PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | + PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); + + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + case CHIP_ID_YUKON_XL: pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); @@ -545,7 +576,13 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) /* set page register to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* apply workaround for integrated resistors calibration */ + gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); + gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); } else if (hw->chip_id != CHIP_ID_YUKON_EX) { + /* no effect on Yukon-XL */ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { @@ -567,25 +604,24 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) { + struct pci_dev *pdev = hw->pdev; u32 reg1; - static const u32 phy_power[] - = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; - - /* looks like this XL is back asswards .. 
*/ - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) - onoff = !onoff; + static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; + static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1); + /* Turn on/off phy power saving */ if (onoff) - /* Turn off phy power saving */ reg1 &= ~phy_power[port]; else reg1 |= phy_power[port]; - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - sky2_pci_read32(hw, PCI_DEV_REG1); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + if (onoff && hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) + reg1 |= coma_mode[port]; + + pci_write_config_dword(pdev, PCI_DEV_REG1, reg1); + pci_read_config_dword(pdev, PCI_DEV_REG1, &reg1); + udelay(100); } @@ -653,11 +689,9 @@ static void sky2_wol_init(struct sky2_port *sky2) sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* Turn on legacy PCI-Express PME mode */ - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1); reg1 |= PCI_Y2_PME_LEGACY; - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); @@ -666,25 +700,25 @@ static void sky2_wol_init(struct sky2_port *sky2) static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) { - if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) { + struct net_device *dev = hw->dev[port]; + + if (dev->mtu <= ETH_DATA_LEN) sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), - TX_STFW_ENA | - (hw->dev[port]->mtu > ETH_DATA_LEN) ? 
TX_JUMBO_ENA : TX_JUMBO_DIS); - } else { - if (hw->dev[port]->mtu > ETH_DATA_LEN) { - /* set Tx GMAC FIFO Almost Empty Threshold */ - sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), - (ECU_JUMBO_WM << 16) | ECU_AE_THR); + TX_JUMBO_DIS | TX_STFW_ENA); + + else if (hw->chip_id != CHIP_ID_YUKON_EC_U) + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_STFW_ENA | TX_JUMBO_ENA); + else { + /* set Tx GMAC FIFO Almost Empty Threshold */ + sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), + (ECU_JUMBO_WM << 16) | ECU_AE_THR); - sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), - TX_JUMBO_ENA | TX_STFW_DIS); + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_JUMBO_ENA | TX_STFW_DIS); - /* Can't do offload because of lack of store/forward */ - hw->dev[port]->features &= ~(NETIF_F_TSO | NETIF_F_SG - | NETIF_F_ALL_CSUM); - } else - sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), - TX_JUMBO_DIS | TX_STFW_ENA); + /* Can't do offload because of lack of store/forward */ + dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM); } } @@ -692,11 +726,12 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) { struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 reg; + u32 rx_reg; int i; const u8 *addr = hw->dev[port]->dev_addr; - sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); - sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); @@ -768,23 +803,30 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) /* Configure Rx MAC FIFO */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); - reg = GMF_OPER_ON | GMF_RX_F_FL_ON; - if (hw->chip_id == CHIP_ID_YUKON_EX) - reg |= GMF_RX_OVER_ON; + rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_FE_P) + rx_reg |= GMF_RX_OVER_ON; - sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); /* Flush Rx MAC FIFO on any flow control or error */ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ - sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); + reg = RX_GMF_FL_THR_DEF + 1; + /* Another magic mystery workaround from sk98lin */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 0x178; + sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); /* Configure Tx MAC FIFO */ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); - if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { + /* On chips without ram buffer, pause is controled by MAC level */ + if (sky2_read8(hw, B2_E_0) == 0) { sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); @@ -867,6 +909,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) return le; } +static void tx_init(struct sky2_port *sky2) +{ + struct sky2_tx_le *le; + + sky2->tx_prod = sky2->tx_cons = 0; + sky2->tx_tcpsum = 0; + sky2->tx_last_mss = 0; + + le = get_tx_le(sky2); + le->addr = 0; + le->opcode = OP_ADDR64 | HW_OWNER; + sky2->tx_addr64 = 0; +} + static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, struct sky2_tx_le *le) { @@ -963,19 +1019,15 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) */ static 
void rx_set_checksum(struct sky2_port *sky2) { - struct sky2_rx_le *le; - - if (sky2->hw->chip_id != CHIP_ID_YUKON_EX) { - le = sky2_next_rx(sky2); - le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); - le->ctrl = 0; - le->opcode = OP_TCPSTART | HW_OWNER; + struct sky2_rx_le *le = sky2_next_rx(sky2); - sky2_write32(sky2->hw, - Q_ADDR(rxqaddr[sky2->port], Q_CSR), - sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); - } + le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); + le->ctrl = 0; + le->opcode = OP_TCPSTART | HW_OWNER; + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } /* @@ -1077,7 +1129,7 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp u16 port = sky2->port; netif_tx_lock_bh(dev); - netif_poll_disable(sky2->hw->dev[0]); + napi_disable(&hw->napi); sky2->vlgrp = grp; if (grp) { @@ -1092,7 +1144,7 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp TX_VLAN_TAG_OFF); } - netif_poll_enable(sky2->hw->dev[0]); + napi_enable(&hw->napi); netif_tx_unlock_bh(dev); } #endif @@ -1171,7 +1223,8 @@ static int sky2_rx_start(struct sky2_port *sky2) sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); - rx_set_checksum(sky2); + if (!(hw->flags & SKY2_HW_NEW_LE)) + rx_set_checksum(sky2); /* Space needed for frame data + headers rounded up */ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); @@ -1242,7 +1295,7 @@ static int sky2_up(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; - u32 ramsize, imask; + u32 imask, ramsize; int cap, err = -ENOMEM; struct net_device *otherdev = hw->dev[sky2->port^1]; @@ -1255,9 +1308,9 @@ static int sky2_up(struct net_device *dev) struct sky2_port *osky2 = netdev_priv(otherdev); u16 cmd; - cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); + pci_read_config_word(hw->pdev, cap + PCI_X_CMD, &cmd); cmd &= ~PCI_X_CMD_MAX_SPLIT; - sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); + pci_write_config_word(hw->pdev, cap + PCI_X_CMD, cmd); sky2->rx_csum = 0; osky2->rx_csum = 0; @@ -1280,7 +1333,8 @@ static int sky2_up(struct net_device *dev) GFP_KERNEL); if (!sky2->tx_ring) goto err_out; - sky2->tx_prod = sky2->tx_cons = 0; + + tx_init(sky2); sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, &sky2->rx_le_map); @@ -1299,11 +1353,10 @@ static int sky2_up(struct net_device *dev) /* Register is number of 4K blocks on internal RAM buffer. 
*/ ramsize = sky2_read8(hw, B2_E_0) * 4; - printk(KERN_INFO PFX "%s: ram buffer %dK\n", dev->name, ramsize); - if (ramsize > 0) { u32 rxspace; + pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); if (ramsize < 16) rxspace = ramsize / 2; else @@ -1331,9 +1384,13 @@ static int sky2_up(struct net_device *dev) sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, TX_RING_SIZE - 1); + napi_enable(&hw->napi); + err = sky2_rx_start(sky2); - if (err) + if (err) { + napi_disable(&hw->napi); goto err_out; + } /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); @@ -1432,13 +1489,15 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* Check for TCP Segmentation Offload */ mss = skb_shinfo(skb)->gso_size; if (mss != 0) { - if (hw->chip_id != CHIP_ID_YUKON_EX) + + if (!(hw->flags & SKY2_HW_NEW_LE)) mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); if (mss != sky2->tx_last_mss) { le = get_tx_le(sky2); le->addr = cpu_to_le32(mss); - if (hw->chip_id == CHIP_ID_YUKON_EX) + + if (hw->flags & SKY2_HW_NEW_LE) le->opcode = OP_MSS | HW_OWNER; else le->opcode = OP_LRGLEN | HW_OWNER; @@ -1464,8 +1523,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* Handle TCP checksum offload */ if (skb->ip_summed == CHECKSUM_PARTIAL) { /* On Yukon EX (some versions) encoding change. */ - if (hw->chip_id == CHIP_ID_YUKON_EX - && hw->chip_rev != CHIP_REV_YU_EX_B0) + if (hw->flags & SKY2_HW_AUTO_TX_SUM) ctrl |= CALSUM; /* auto checksum */ else { const unsigned offset = skb_transport_offset(skb); @@ -1577,8 +1635,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) printk(KERN_DEBUG "%s: tx done %u\n", dev->name, idx); - sky2->net_stats.tx_packets++; - sky2->net_stats.tx_bytes += re->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += re->skb->len; dev_kfree_skb_any(re->skb); sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE); @@ -1621,6 +1679,8 @@ static int sky2_down(struct net_device *dev) /* Stop more packets from being queued */ netif_stop_queue(dev); + napi_disable(&hw->napi); + /* Disable port IRQ */ imask = sky2_read32(hw, B0_IMSK); imask &= ~portirq_msk[port]; @@ -1701,11 +1761,15 @@ static int sky2_down(struct net_device *dev) static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) { - if (!sky2_is_copper(hw)) + if (hw->flags & SKY2_HW_FIBRE_PHY) return SPEED_1000; - if (hw->chip_id == CHIP_ID_YUKON_FE) - return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10; + if (!(hw->flags & SKY2_HW_GIGABIT)) { + if (aux & PHY_M_PS_SPEED_100) + return SPEED_100; + else + return SPEED_10; + } switch (aux & PHY_M_PS_SPEED_MSK) { case PHY_M_PS_SPEED_1000: @@ -1738,13 +1802,13 @@ static void sky2_link_up(struct sky2_port *sky2) netif_carrier_on(sky2->netdev); + mod_timer(&hw->watchdog_timer, jiffies + 1); + /* Turn on link LED */ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); - if (hw->chip_id == CHIP_ID_YUKON_XL - || hw->chip_id == CHIP_ID_YUKON_EC_U - || hw->chip_id == CHIP_ID_YUKON_EX) { + if (hw->flags & SKY2_HW_NEWER_PHY) { u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ @@ -1831,7 +1895,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) /* Since the pause result bits seem to in different positions on * different chips. look at registers. 
*/ - if (!sky2_is_copper(hw)) { + if (hw->flags & SKY2_HW_FIBRE_PHY) { /* Shift for bits in fiber PHY */ advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); @@ -1942,7 +2006,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; - if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE) + if (new_mtu > ETH_DATA_LEN && + (hw->chip_id == CHIP_ID_YUKON_FE || + hw->chip_id == CHIP_ID_YUKON_FE_P)) return -EINVAL; if (!netif_running(dev)) { @@ -1955,11 +2021,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) dev->trans_start = jiffies; /* prevent tx timeout */ netif_stop_queue(dev); - netif_poll_disable(hw->dev[0]); + napi_disable(&hw->napi); synchronize_irq(hw->pdev->irq); - if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) + if (sky2_read8(hw, B2_E_0) == 0) sky2_set_tx_stfwd(hw, port); ctl = gma_read16(hw, port, GM_GP_CTRL); @@ -1982,12 +2048,16 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) err = sky2_rx_start(sky2); sky2_write32(hw, B0_IMSK, imask); + /* Unconditionally re-enable NAPI because even if we + * call dev_close() that will do a napi_disable(). + */ + napi_enable(&hw->napi); + if (err) dev_close(dev); else { gma_write16(hw, port, GM_GP_CTRL, ctl); - netif_poll_enable(hw->dev[0]); netif_wake_queue(dev); } @@ -2087,6 +2157,13 @@ static struct sk_buff *sky2_receive(struct net_device *dev, struct sky2_port *sky2 = netdev_priv(dev); struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; struct sk_buff *skb = NULL; + u16 count = (status & GMR_FS_LEN) >> 16; + +#ifdef SKY2_VLAN_TAG_USED + /* Account for vlan tag */ + if (sky2->vlgrp && (status & GMR_FS_VLAN)) + count -= VLAN_HLEN; +#endif if (unlikely(netif_msg_rx_status(sky2))) printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", @@ -2095,15 +2172,26 @@ static struct sk_buff *sky2_receive(struct net_device *dev, sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); + /* This chip has hardware problems that generates bogus status. + * So do only marginal checking and expect higher level protocols + * to handle crap frames. 
+ */ + if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && + sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && + length != count) + goto okay; + if (status & GMR_FS_ANY_ERR) goto error; if (!(status & GMR_FS_RX_OK)) goto resubmit; - if (status >> 16 != length) - goto len_mismatch; + /* if length reported by DMA does not match PHY, packet was truncated */ + if (length != count) + goto len_error; +okay: if (length < copybreak) skb = receive_copy(sky2, re, length); else @@ -2113,15 +2201,19 @@ resubmit: return skb; -len_mismatch: +len_error: /* Truncation of overlength packets causes PHY length to not match MAC length */ - ++sky2->net_stats.rx_length_errors; + ++dev->stats.rx_length_errors; + if (netif_msg_rx_err(sky2) && net_ratelimit()) + pr_info(PFX "%s: rx length error: status %#x length %d\n", + dev->name, status, length); + goto resubmit; error: - ++sky2->net_stats.rx_errors; + ++dev->stats.rx_errors; if (status & GMR_FS_RX_FF_OV) { - sky2->net_stats.rx_over_errors++; + dev->stats.rx_over_errors++; goto resubmit; } @@ -2130,11 +2222,11 @@ error: dev->name, status, length); if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE)) - sky2->net_stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & GMR_FS_FRAGMENT) - sky2->net_stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & GMR_FS_CRC_ERR) - sky2->net_stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; goto resubmit; } @@ -2152,15 +2244,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) } /* Process status response ring */ -static int sky2_status_intr(struct sky2_hw *hw, int to_do) +static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) { int work_done = 0; unsigned rx[2] = { 0, 0 }; - u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); rmb(); - - while (hw->st_idx != hwidx) { + do { struct sky2_port *sky2; struct sky2_status_le *le = hw->st_le + hw->st_idx; unsigned port = le->css & CSS_LINK_BIT; @@ -2181,12 +2271,12 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) ++rx[port]; skb = sky2_receive(dev, length, status); if (unlikely(!skb)) { - sky2->net_stats.rx_dropped++; + dev->stats.rx_dropped++; break; } /* This chip reports checksum status differently */ - if (hw->chip_id == CHIP_ID_YUKON_EX) { + if (hw->flags & SKY2_HW_NEW_LE) { if (sky2->rx_csum && (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && (le->css & CSS_TCPUDPCSOK)) @@ -2196,8 +2286,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) } skb->protocol = eth_type_trans(skb, dev); - sky2->net_stats.rx_packets++; - sky2->net_stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; dev->last_rx = jiffies; #ifdef SKY2_VLAN_TAG_USED @@ -2227,8 +2317,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) if (!sky2->rx_csum) break; - if (hw->chip_id == CHIP_ID_YUKON_EX) + /* If this happens then driver assuming wrong format */ + if (unlikely(hw->flags & SKY2_HW_NEW_LE)) { + if (net_ratelimit()) + printk(KERN_NOTICE "%s: unexpected" + " checksum status\n", + dev->name); break; + } /* Both checksum counters are programmed to start at * the same offset, so unless there is a problem they @@ -2265,7 +2361,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) printk(KERN_WARNING PFX "unknown status opcode 0x%x\n", le->opcode); } - } + } while (hw->st_idx != idx); /* Fully processed status ring so clear irq */ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); @@ -2326,7 +2422,11 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) static void sky2_hw_intr(struct 
sky2_hw *hw) { + struct pci_dev *pdev = hw->pdev; u32 status = sky2_read32(hw, B0_HWE_ISRC); + u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); + + status &= hwmsk; if (status & Y2_IS_TIST_OV) sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); @@ -2334,38 +2434,24 @@ static void sky2_hw_intr(struct sky2_hw *hw) if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; - pci_err = sky2_pci_read16(hw, PCI_STATUS); + pci_read_config_word(pdev, PCI_STATUS, &pci_err); if (net_ratelimit()) - dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n", + dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", pci_err); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - sky2_pci_write16(hw, PCI_STATUS, - pci_err | PCI_STATUS_ERROR_BITS); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + pci_write_config_word(pdev, PCI_STATUS, + pci_err | PCI_STATUS_ERROR_BITS); } if (status & Y2_IS_PCI_EXP) { /* PCI-Express uncorrectable Error occurred */ - u32 pex_err; - - pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); + int pos = pci_find_aer_capability(hw->pdev); + u32 err; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &err); if (net_ratelimit()) - dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n", - pex_err); - - /* clear the interrupt */ - sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - sky2_pci_write32(hw, PEX_UNC_ERR_STAT, - 0xffffffffUL); - sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - - if (pex_err & PEX_FATAL_ERRORS) { - u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); - hwmsk &= ~Y2_IS_PCI_EXP; - sky2_write32(hw, B0_HWE_IMSK, hwmsk); - } + dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + pci_cleanup_aer_uncorrect_error_status(pdev); } if (status & Y2_HWE_L1_MASK) @@ -2392,12 +2478,12 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) gma_read16(hw, port, GM_TX_IRQ_SRC); if (status & GM_IS_RX_FF_OR) { - ++sky2->net_stats.rx_fifo_errors; + ++dev->stats.rx_fifo_errors; sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); } if (status & GM_IS_TX_FF_UR) { - ++sky2->net_stats.tx_fifo_errors; + ++dev->stats.tx_fifo_errors; sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); } } @@ -2420,25 +2506,69 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port, sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); } -/* If idle then force a fake soft NAPI poll once a second - * to work around cases where sharing an edge triggered interrupt. 
- */ -static inline void sky2_idle_start(struct sky2_hw *hw) +static int sky2_rx_hung(struct net_device *dev) { - if (idle_timeout > 0) - mod_timer(&hw->idle_timer, - jiffies + msecs_to_jiffies(idle_timeout)); + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned rxq = rxqaddr[port]; + u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP)); + u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV)); + u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); + u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); + + /* If idle and MAC or PCI is stuck */ + if (sky2->check.last == dev->last_rx && + ((mac_rp == sky2->check.mac_rp && + mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || + /* Check if the PCI RX hang */ + (fifo_rp == sky2->check.fifo_rp && + fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { + printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n", + dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp, + sky2_read8(hw, Q_ADDR(rxq, Q_WP))); + return 1; + } else { + sky2->check.last = dev->last_rx; + sky2->check.mac_rp = mac_rp; + sky2->check.mac_lev = mac_lev; + sky2->check.fifo_rp = fifo_rp; + sky2->check.fifo_lev = fifo_lev; + return 0; + } } -static void sky2_idle(unsigned long arg) +static void sky2_watchdog(unsigned long arg) { struct sky2_hw *hw = (struct sky2_hw *) arg; - struct net_device *dev = hw->dev[0]; - if (__netif_rx_schedule_prep(dev)) - __netif_rx_schedule(dev); + /* Check for lost IRQ once a second */ + if (sky2_read32(hw, B0_ISRC)) { + napi_schedule(&hw->napi); + } else { + int i, active = 0; - mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + if (!netif_running(dev)) + continue; + ++active; + + /* For chips with Rx FIFO, check if stuck */ + if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) && + sky2_rx_hung(dev)) { + pr_info(PFX "%s: receiver hang detected\n", + dev->name); + schedule_work(&hw->restart_work); + return; + } + } + + if (active == 0) + return; + } + + mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); } /* Hardware/software error handling */ @@ -2469,11 +2599,12 @@ static void sky2_err_intr(struct sky2_hw *hw, u32 status) sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE); } -static int sky2_poll(struct net_device *dev0, int *budget) +static int sky2_poll(struct napi_struct *napi, int work_limit) { - struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; - int work_done; + struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); u32 status = sky2_read32(hw, B0_Y2_SP_EISR); + int work_done = 0; + u16 idx; if (unlikely(status & Y2_IS_ERROR)) sky2_err_intr(hw, status); @@ -2484,13 +2615,12 @@ static int sky2_poll(struct net_device *dev0, int *budget) if (status & Y2_IS_IRQ_PHY2) sky2_phy_intr(hw, 1); - work_done = sky2_status_intr(hw, min(dev0->quota, *budget)); - *budget -= work_done; - dev0->quota -= work_done; + while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { + work_done += sky2_status_intr(hw, work_limit - work_done, idx); - /* More work? */ - if (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)) - return 1; + if (work_done >= work_limit) + goto done; + } /* Bug/Errata workaround? * Need to kick the TX irq moderation timer. 
@@ -2499,16 +2629,16 @@ static int sky2_poll(struct net_device *dev0, int *budget) sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); } - netif_rx_complete(dev0); - + napi_complete(napi); sky2_read32(hw, B0_Y2_SP_LISR); - return 0; +done: + + return work_done; } static irqreturn_t sky2_intr(int irq, void *dev_id) { struct sky2_hw *hw = dev_id; - struct net_device *dev0 = hw->dev[0]; u32 status; /* Reading this mask interrupts as side effect */ @@ -2517,8 +2647,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) return IRQ_NONE; prefetch(&hw->st_le[hw->st_idx]); - if (likely(__netif_rx_schedule_prep(dev0))) - __netif_rx_schedule(dev0); + + napi_schedule(&hw->napi); return IRQ_HANDLED; } @@ -2527,25 +2657,31 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) static void sky2_netpoll(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); - struct net_device *dev0 = sky2->hw->dev[0]; - if (netif_running(dev) && __netif_rx_schedule_prep(dev0)) - __netif_rx_schedule(dev0); + napi_schedule(&sky2->hw->napi); } #endif /* Chip internal frequency for clock calculations */ -static inline u32 sky2_mhz(const struct sky2_hw *hw) +static u32 sky2_mhz(const struct sky2_hw *hw) { switch (hw->chip_id) { case CHIP_ID_YUKON_EC: case CHIP_ID_YUKON_EC_U: case CHIP_ID_YUKON_EX: - return 125; /* 125 Mhz */ + return 125; + case CHIP_ID_YUKON_FE: - return 100; /* 100 Mhz */ - default: /* YUKON_XL */ - return 156; /* 156 Mhz */ + return 100; + + case CHIP_ID_YUKON_FE_P: + return 50; + + case CHIP_ID_YUKON_XL: + return 156; + + default: + BUG(); } } @@ -2562,31 +2698,74 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) static int __devinit sky2_init(struct sky2_hw *hw) { + int rc; u8 t8; - /* Enable all clocks */ - sky2_pci_write32(hw, PCI_DEV_REG3, 0); + /* Enable all clocks and check for bad PCI access */ + rc = pci_write_config_dword(hw->pdev, PCI_DEV_REG3, 0); + if (rc) + return rc; sky2_write8(hw, B0_CTST, CS_RST_CLR); hw->chip_id = sky2_read8(hw, B2_CHIP_ID); - if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) { + hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; + + switch(hw->chip_id) { + case CHIP_ID_YUKON_XL: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY; + if (hw->chip_rev < 3) + hw->flags |= SKY2_HW_FIFO_HANG_CHECK; + + break; + + case CHIP_ID_YUKON_EC_U: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_ADV_POWER_CTL; + break; + + case CHIP_ID_YUKON_EX: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_ADV_POWER_CTL; + + /* New transmit checksum */ + if (hw->chip_rev != CHIP_REV_YU_EX_B0) + hw->flags |= SKY2_HW_AUTO_TX_SUM; + break; + + case CHIP_ID_YUKON_EC: + /* This rev is really old, and requires untested workarounds */ + if (hw->chip_rev == CHIP_REV_YU_EC_A1) { + dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); + return -EOPNOTSUPP; + } + hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK; + break; + + case CHIP_ID_YUKON_FE: + break; + + case CHIP_ID_YUKON_FE_P: + hw->flags = SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_AUTO_TX_SUM + | SKY2_HW_ADV_POWER_CTL; + break; + default: dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", hw->chip_id); return -EOPNOTSUPP; } - hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; + hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); + if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') + hw->flags |= SKY2_HW_FIBRE_PHY; - /* This rev is 
really old, and requires untested workarounds */ - if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { - dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n", - yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], - hw->chip_id, hw->chip_rev); - return -EOPNOTSUPP; - } - hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); hw->ports = 1; t8 = sky2_read8(hw, B2_Y2_HW_RES); if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { @@ -2599,8 +2778,10 @@ static int __devinit sky2_init(struct sky2_hw *hw) static void sky2_reset(struct sky2_hw *hw) { + struct pci_dev *pdev = hw->pdev; u16 status; - int i; + int i, cap; + u32 hwe_mask = Y2_HWE_ALL_MASK; /* disable ASF */ if (hw->chip_id == CHIP_ID_YUKON_EX) { @@ -2617,18 +2798,25 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B0_CTST, CS_RST_CLR); /* clear PCI errors, if any */ - status = sky2_pci_read16(hw, PCI_STATUS); - - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS); - + pci_read_config_word(pdev, PCI_STATUS, &status); + status |= PCI_STATUS_ERROR_BITS; + pci_write_config_word(pdev, PCI_STATUS, status); sky2_write8(hw, B0_CTST, CS_MRST_CLR); - /* clear any PEX errors */ - if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) - sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); + cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (cap) { + /* Check for advanced error reporting */ + pci_cleanup_aer_uncorrect_error_status(pdev); + pci_cleanup_aer_correct_error_status(pdev); + + /* If error bit is stuck on ignore it */ + if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) + dev_info(&pdev->dev, "ignoring stuck error report bit\n"); + else if (pci_enable_pcie_error_reporting(pdev)) + hwe_mask |= Y2_IS_PCI_EXP; + } sky2_power_on(hw); @@ -2642,8 +2830,6 @@ static void sky2_reset(struct sky2_hw *hw) | GMC_BYP_RETR_ON); } - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - /* Clear I2C IRQ noise */ sky2_write32(hw, B2_I2C_IRQ, 1); @@ -2682,7 +2868,7 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); } - sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK); + sky2_write32(hw, B0_HWE_IMSK, hwe_mask); for (i = 0; i < hw->ports; i++) sky2_gmac_reset(hw, i); @@ -2726,14 +2912,10 @@ static void sky2_restart(struct work_struct *work) struct net_device *dev; int i, err; - del_timer_sync(&hw->idle_timer); - rtnl_lock(); sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); - netif_poll_disable(hw->dev[0]); - for (i = 0; i < hw->ports; i++) { dev = hw->dev[i]; if (netif_running(dev)) @@ -2742,7 +2924,6 @@ static void sky2_restart(struct work_struct *work) sky2_reset(hw); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); - netif_poll_enable(hw->dev[0]); for (i = 0; i < hw->ports; i++) { dev = hw->dev[i]; @@ -2756,8 +2937,6 @@ static void sky2_restart(struct work_struct *work) } } - sky2_idle_start(hw); - rtnl_unlock(); } @@ -2784,7 +2963,9 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) sky2->wol = wol->wolopts; - if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) + if (hw->chip_id == CHIP_ID_YUKON_EC_U || + hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_FE_P) sky2_write32(hw, B0_CTST, sky2->wol ? 
Y2_HW_WOL_ON : Y2_HW_WOL_OFF); @@ -2802,7 +2983,7 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP; - if (hw->chip_id != CHIP_ID_YUKON_FE) + if (hw->flags & SKY2_HW_GIGABIT) modes |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; return modes; @@ -2822,13 +3003,6 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->supported = sky2_supported_modes(hw); ecmd->phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { - ecmd->supported = SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full - | SUPPORTED_Autoneg | SUPPORTED_TP; ecmd->port = PORT_TP; ecmd->speed = sky2->speed; } else { @@ -2895,8 +3069,10 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) sky2->autoneg = ecmd->autoneg; sky2->advertising = ecmd->advertising; - if (netif_running(dev)) + if (netif_running(dev)) { sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + } return 0; } @@ -2989,6 +3165,7 @@ static int sky2_nway_reset(struct net_device *dev) return -EINVAL; sky2_phy_reinit(sky2); + sky2_set_multicast(dev); return 0; } @@ -3014,9 +3191,14 @@ static void sky2_set_msglevel(struct net_device *netdev, u32 value) sky2->msg_enable = value; } -static int sky2_get_stats_count(struct net_device *dev) +static int sky2_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(sky2_stats); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sky2_stats); + default: + return -EOPNOTSUPP; + } } static void sky2_get_ethtool_stats(struct net_device *dev, @@ -3040,12 +3222,6 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) } } -static struct net_device_stats *sky2_get_stats(struct net_device *dev) -{ - struct sky2_port *sky2 = netdev_priv(dev); - return &sky2->net_stats; -} - static int sky2_set_mac_address(struct net_device *dev, void *p) { struct sky2_port *sky2 = netdev_priv(dev); @@ -3386,20 +3562,64 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, { const struct sky2_port *sky2 = netdev_priv(dev); const void __iomem *io = sky2->hw->regs; + unsigned int b; regs->version = 1; - memset(p, 0, regs->len); - memcpy_fromio(p, io, B3_RAM_ADDR); - - /* skip diagnostic ram region */ - memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 0x2000 - B3_RI_WTO_R1); + for (b = 0; b < 128; b++) { + /* This complicated switch statement is to make sure and + * only access regions that are unreserved. + * Some blocks are only valid on dual port cards. + * and block 3 has some special diagnostic registers that + * are poison. + */ + switch (b) { + case 3: + /* skip diagnostic ram region */ + memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); + break; - /* copy GMAC registers */ - memcpy_fromio(p + BASE_GMAC_1, io + BASE_GMAC_1, 0x1000); - if (sky2->hw->ports > 1) - memcpy_fromio(p + BASE_GMAC_2, io + BASE_GMAC_2, 0x1000); + /* dual port cards only */ + case 5: /* Tx Arbiter 2 */ + case 9: /* RX2 */ + case 14 ... 15: /* TX2 */ + case 17: case 19: /* Ram Buffer 2 */ + case 22 ... 23: /* Tx Ram Buffer 2 */ + case 25: /* Rx MAC Fifo 1 */ + case 27: /* Tx MAC Fifo 2 */ + case 31: /* GPHY 2 */ + case 40 ... 47: /* Pattern Ram 2 */ + case 52: case 54: /* TCP Segmentation 2 */ + case 112 ... 
116: /* GMAC 2 */ + if (sky2->hw->ports == 1) + goto reserved; + /* fall through */ + case 0: /* Control */ + case 2: /* Mac address */ + case 4: /* Tx Arbiter 1 */ + case 7: /* PCI express reg */ + case 8: /* RX1 */ + case 12 ... 13: /* TX1 */ + case 16: case 18:/* Rx Ram Buffer 1 */ + case 20 ... 21: /* Tx Ram Buffer 1 */ + case 24: /* Rx MAC Fifo 1 */ + case 26: /* Tx MAC Fifo 1 */ + case 28 ... 29: /* Descriptor and status unit */ + case 30: /* GPHY 1*/ + case 32 ... 39: /* Pattern Ram 1 */ + case 48: case 50: /* TCP Segmentation 1 */ + case 56 ... 60: /* PCI space */ + case 80 ... 84: /* GMAC 1 */ + memcpy_fromio(p, io, 128); + break; + default: +reserved: + memset(p, 0, 128); + } + p += 128; + io += 128; + } } /* In order to do Jumbo packets on these chips, need to turn off the @@ -3435,26 +3655,31 @@ static int sky2_get_eeprom_len(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); u16 reg2; - reg2 = sky2_pci_read32(sky2->hw, PCI_DEV_REG2); + pci_read_config_word(sky2->hw->pdev, PCI_DEV_REG2, &reg2); return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } -static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) +static u32 sky2_vpd_read(struct pci_dev *pdev, int cap, u16 offset) { - sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); - while (!(sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F)) - cpu_relax(); - return sky2_pci_read32(hw, cap + PCI_VPD_DATA); + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; } -static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) +static void sky2_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) { - sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); - sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + pci_write_config_word(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_dword(pdev, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); do { - cpu_relax(); - } while (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F); + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); } static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, @@ -3471,7 +3696,7 @@ static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom eeprom->magic = SKY2_EEPROM_MAGIC; while (length > 0) { - u32 val = sky2_vpd_read(sky2->hw, cap, offset); + u32 val = sky2_vpd_read(sky2->hw->pdev, cap, offset); int n = min_t(int, length, sizeof(val)); memcpy(data, &val, n); @@ -3501,10 +3726,10 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom int n = min_t(int, length, sizeof(val)); if (n < sizeof(val)) - val = sky2_vpd_read(sky2->hw, cap, offset); + val = sky2_vpd_read(sky2->hw->pdev, cap, offset); memcpy(&val, data, n); - sky2_vpd_write(sky2->hw, cap, offset, val); + sky2_vpd_write(sky2->hw->pdev, cap, offset, val); length -= n; data += n; @@ -3529,11 +3754,8 @@ static const struct ethtool_ops sky2_ethtool_ops = { .get_eeprom_len = sky2_get_eeprom_len, .get_eeprom = sky2_get_eeprom, .set_eeprom = sky2_set_eeprom, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = sky2_set_tx_csum, - .get_tso = ethtool_op_get_tso, .set_tso = sky2_set_tso, .get_rx_csum = sky2_get_rx_csum, .set_rx_csum = sky2_set_rx_csum, @@ -3545,9 +3767,8 @@ static 
const struct ethtool_ops sky2_ethtool_ops = { .get_pauseparam = sky2_get_pauseparam, .set_pauseparam = sky2_set_pauseparam, .phys_id = sky2_phys_id, - .get_stats_count = sky2_get_stats_count, + .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; #ifdef CONFIG_SKY2_DEBUG @@ -3558,7 +3779,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; const struct sky2_port *sky2 = netdev_priv(dev); - const struct sky2_hw *hw = sky2->hw; + struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned idx, last; int sop; @@ -3571,7 +3792,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); - netif_poll_disable(hw->dev[0]); + napi_disable(&hw->napi); last = sky2_read16(hw, STAT_PUT_IDX); if (hw->st_idx == last) @@ -3641,7 +3862,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); - netif_poll_enable(hw->dev[0]); + napi_enable(&hw->napi); return 0; } @@ -3666,42 +3887,34 @@ static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; + struct sky2_port *sky2 = netdev_priv(dev); - if (dev->open == sky2_up) { - struct sky2_port *sky2 = netdev_priv(dev); + if (dev->open != sky2_up || !sky2_debug) + return NOTIFY_DONE; - switch(event) { - case NETDEV_CHANGENAME: - if (!netif_running(dev)) - break; - /* fallthrough */ - case NETDEV_DOWN: - case NETDEV_GOING_DOWN: - if (sky2->debugfs) { - printk(KERN_DEBUG PFX "%s: remove debugfs\n", - dev->name); - debugfs_remove(sky2->debugfs); - sky2->debugfs = NULL; - } + switch(event) { + case NETDEV_CHANGENAME: + if (sky2->debugfs) { + sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, + sky2_debug, dev->name); + } + break; - if (event != NETDEV_CHANGENAME) - break; - /* fallthrough for changename */ - case NETDEV_UP: - if (sky2_debug) { - struct dentry *d; - d = debugfs_create_file(dev->name, S_IRUGO, - sky2_debug, dev, - &sky2_debug_fops); - if (d == NULL || IS_ERR(d)) - printk(KERN_INFO PFX - "%s: debugfs create failed\n", - dev->name); - else - sky2->debugfs = d; - } - break; + case NETDEV_GOING_DOWN: + if (sky2->debugfs) { + printk(KERN_DEBUG PFX "%s: remove debugfs\n", + dev->name); + debugfs_remove(sky2->debugfs); + sky2->debugfs = NULL; } + break; + + case NETDEV_UP: + sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, + sky2_debug, dev, + &sky2_debug_fops); + if (IS_ERR(sky2->debugfs)) + sky2->debugfs = NULL; } return NOTIFY_DONE; @@ -3752,29 +3965,20 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, return NULL; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->irq = hw->pdev->irq; dev->open = sky2_up; dev->stop = sky2_down; dev->do_ioctl = sky2_ioctl; dev->hard_start_xmit = sky2_xmit_frame; - dev->get_stats = sky2_get_stats; dev->set_multicast_list = sky2_set_multicast; dev->set_mac_address = sky2_set_mac_address; dev->change_mtu = sky2_change_mtu; SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); dev->tx_timeout = sky2_tx_timeout; dev->watchdog_timeo = TX_WATCHDOG; - if (port == 0) - dev->poll = sky2_poll; - dev->weight = NAPI_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER - /* Network console (only works on port 0) - * because netpoll makes assumptions about NAPI - */ - if (port == 0) - dev->poll_controller = 
sky2_netpoll; + dev->poll_controller = sky2_netpoll; #endif sky2 = netdev_priv(dev); @@ -3805,8 +4009,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, dev->features |= NETIF_F_HIGHDMA; #ifdef SKY2_VLAN_TAG_USED - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->vlan_rx_register = sky2_vlan_rx_register; + /* The workaround for FE+ status conflicts with VLAN tag detection. */ + if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && + sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_rx_register = sky2_vlan_rx_register; + } #endif /* read the mac address */ @@ -3819,12 +4027,11 @@ static void __devinit sky2_show_addr(struct net_device *dev) { const struct sky2_port *sky2 = netdev_priv(dev); + DECLARE_MAC_BUF(mac); if (netif_msg_probe(sky2)) - printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO PFX "%s: addr %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } /* Handle software interrupt used during MSI test */ @@ -3837,7 +4044,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id) return IRQ_NONE; if (status & Y2_IS_IRQ_SW) { - hw->msi = 1; + hw->flags |= SKY2_HW_USE_MSI; wake_up(&hw->msi_wait); sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); } @@ -3865,9 +4072,9 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw) sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); sky2_read8(hw, B0_CTST); - wait_event_timeout(hw->msi_wait, hw->msi, HZ/10); + wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); - if (!hw->msi) { + if (!(hw->flags & SKY2_HW_USE_MSI)) { /* MSI test failed, go back to INTx mode */ dev_info(&pdev->dev, "No interrupt generated using MSI, " "switching to INTx mode.\n"); @@ -3957,15 +4164,14 @@ static int __devinit sky2_probe(struct pci_dev *pdev, */ { u32 reg; - reg = sky2_pci_read32(hw, PCI_DEV_REG2); + pci_read_config_dword(pdev,PCI_DEV_REG2, &reg); reg &= ~PCI_REV_DESC; - sky2_pci_write32(hw, PCI_DEV_REG2, reg); + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); } #endif /* ring for status responses */ - hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, - &hw->st_dma); + hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma); if (!hw->st_le) goto err_out_iounmap; @@ -3985,6 +4191,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, err = -ENOMEM; goto err_out_free_pci; } + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); if (!disable_msi && pci_enable_msi(pdev) == 0) { err = sky2_test_msi(hw); @@ -4000,7 +4207,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out_free_netdev; } - err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED, + err = request_irq(pdev->irq, sky2_intr, + (hw->flags & SKY2_HW_USE_MSI) ? 
0 : IRQF_SHARED, dev->name, hw); if (err) { dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); @@ -4025,24 +4233,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev, sky2_show_addr(dev1); } - setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); + setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); INIT_WORK(&hw->restart_work, sky2_restart); - sky2_idle_start(hw); - pci_set_drvdata(pdev, hw); return 0; err_out_unregister: - if (hw->msi) + if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); unregister_netdev(dev); err_out_free_netdev: free_netdev(dev); err_out_free_pci: sky2_write8(hw, B0_CTST, CS_RST_SET); - pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); + pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); err_out_iounmap: iounmap(hw->regs); err_out_free_hw: @@ -4064,7 +4270,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) if (!hw) return; - del_timer_sync(&hw->idle_timer); + del_timer_sync(&hw->watchdog_timer); flush_scheduled_work(); @@ -4084,7 +4290,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) sky2_read8(hw, B0_CTST); free_irq(pdev->irq, hw); - if (hw->msi) + if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); pci_release_regions(pdev); @@ -4108,9 +4314,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) if (!hw) return 0; - del_timer_sync(&hw->idle_timer); - netif_poll_disable(hw->dev[0]); - for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); @@ -4153,8 +4356,10 @@ static int sky2_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D0, 0); /* Re-enable all clocks */ - if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U) - sky2_pci_write32(hw, PCI_DEV_REG3, 0); + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_EC_U || + hw->chip_id == CHIP_ID_YUKON_FE_P) + pci_write_config_dword(pdev, PCI_DEV_REG3, 0); sky2_reset(hw); @@ -4170,11 +4375,11 @@ static int sky2_resume(struct pci_dev *pdev) dev_close(dev); goto out; } + + sky2_set_multicast(dev); } } - netif_poll_enable(hw->dev[0]); - sky2_idle_start(hw); return 0; out: dev_err(&pdev->dev, "resume failed (%d)\n", err); @@ -4191,8 +4396,7 @@ static void sky2_shutdown(struct pci_dev *pdev) if (!hw) return; - del_timer_sync(&hw->idle_timer); - netif_poll_disable(hw->dev[0]); + napi_disable(&hw->napi); for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index dce4d276d443..49ee264064ab 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -18,14 +18,6 @@ enum { PCI_CFG_REG_1 = 0x94, }; -enum { - PEX_DEV_CAP = 0xe4, - PEX_DEV_CTRL = 0xe8, - PEX_DEV_STA = 0xea, - PEX_LNK_STAT = 0xf2, - PEX_UNC_ERR_STAT= 0x104, -}; - /* Yukon-2 */ enum pci_dev_reg_1 { PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ @@ -151,38 +143,6 @@ enum pci_cfg_reg1 { PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_PARITY) -enum pex_dev_ctrl { - PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */ - PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */ - PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */ - PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */ - PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */ - PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. 
Payload Size Mask */ - PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */ - PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */ - PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */ - PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */ - PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */ -}; -#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK) - -/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */ -enum pex_err { - PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */ - - PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */ - - PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */ - - PEX_COMP_TO = 1<<14, /* Completion Timeout */ - PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */ - PEX_POIS_TLP = 1<<12, /* Poisoned TLP */ - - PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */ - PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P), -}; - - enum csr_regs { B0_RAP = 0x0000, B0_CTST = 0x0004, @@ -419,7 +379,6 @@ enum { Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | - Y2_IS_PCI_EXP | Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, }; @@ -470,18 +429,24 @@ enum { CHIP_ID_YUKON_EX = 0xb5, /* Chip ID for YUKON-2 Extreme */ CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ - + CHIP_ID_YUKON_FE_P = 0xb8, /* Chip ID for YUKON-2 FE+ */ +}; +enum yukon_ec_rev { CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ - +}; +enum yukon_ec_u_rev { CHIP_REV_YU_EC_U_A0 = 1, CHIP_REV_YU_EC_U_A1 = 2, CHIP_REV_YU_EC_U_B0 = 3, - +}; +enum yukon_fe_rev { CHIP_REV_YU_FE_A1 = 1, CHIP_REV_YU_FE_A2 = 2, - +}; +enum yukon_fe_p_rev { + CHIP_REV_YU_FE2_A0 = 0, }; enum yukon_ex_rev { CHIP_REV_YU_EX_A0 = 1, @@ -1668,7 +1633,7 @@ enum { /* Receive Frame Status Encoding */ enum { - GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */ GMR_FS_VLAN = 1<<13, /* VLAN Packet */ GMR_FS_JABBER = 1<<12, /* Jabber Packet */ GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */ @@ -1729,6 +1694,10 @@ enum { GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, }; +/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ +enum { + TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ +}; /* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ enum { @@ -1840,6 +1809,28 @@ enum { /* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ enum { + GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */ + GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */ + GPC_SPEED = 3<<27, /* PHY speed (ro) */ + GPC_LINK = 1<<26, /* Link up (ro) */ + GPC_DUPLEX = 1<<25, /* Duplex (ro) */ + GPC_CLOCK = 1<<24, /* 125Mhz clock stable (ro) */ + + GPC_PDOWN = 1<<23, /* Internal regulator 2.5 power down */ + GPC_TSTMODE = 1<<22, /* Test mode */ + GPC_REG18 = 1<<21, /* Reg18 Power down */ + GPC_REG12SEL = 3<<19, /* Reg12 power setting */ + GPC_REG18SEL = 3<<17, /* Reg18 power setting */ + GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */ + + GPC_LEDMUX = 3<<14, /* LED Mux */ + GPC_INTPOL = 1<<13, /* Interrupt polarity */ + GPC_DETECT = 1<<12, /* Energy detect */ + GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */ + GPC_SLAVE = 1<<10, /* Slave mode */ + GPC_PAUSE = 1<<9, /* Pause enable */ + GPC_LEDCTL = 3<<6, /* GPHY Leds */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ GPC_RST_SET = 1<<0, /* Set GPHY Reset */ 
}; @@ -2017,6 +2008,14 @@ struct sky2_port { u16 rx_tag; struct vlan_group *vlgrp; #endif + struct { + unsigned long last; + u32 mac_rp; + u8 mac_lev; + u8 fifo_rp; + u8 fifo_lev; + } check; + dma_addr_t rx_le_map; dma_addr_t tx_le_map; @@ -2032,14 +2031,22 @@ struct sky2_port { #ifdef CONFIG_SKY2_DEBUG struct dentry *debugfs; #endif - struct net_device_stats net_stats; - }; struct sky2_hw { void __iomem *regs; struct pci_dev *pdev; + struct napi_struct napi; struct net_device *dev[2]; + unsigned long flags; +#define SKY2_HW_USE_MSI 0x00000001 +#define SKY2_HW_FIBRE_PHY 0x00000002 +#define SKY2_HW_GIGABIT 0x00000004 +#define SKY2_HW_NEWER_PHY 0x00000008 +#define SKY2_HW_FIFO_HANG_CHECK 0x00000010 +#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ +#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ +#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ u8 chip_id; u8 chip_rev; @@ -2050,15 +2057,14 @@ struct sky2_hw { u32 st_idx; dma_addr_t st_dma; - struct timer_list idle_timer; + struct timer_list watchdog_timer; struct work_struct restart_work; - int msi; wait_queue_head_t msi_wait; }; static inline int sky2_is_copper(const struct sky2_hw *hw) { - return !(hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P'); + return !(hw->flags & SKY2_HW_FIBRE_PHY); } /* Register accessor for memory mapped device */ @@ -2121,25 +2127,4 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); } - -/* PCI config space access */ -static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) -{ - return sky2_read32(hw, Y2_CFG_SPC + reg); -} - -static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) -{ - return sky2_read16(hw, Y2_CFG_SPC + reg); -} - -static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) -{ - sky2_write32(hw, Y2_CFG_SPC + reg, val); -} - -static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) -{ - sky2_write16(hw, Y2_CFG_SPC + reg, val); -} #endif diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 65bd20fac820..335b7cc80eba 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -639,8 +639,6 @@ static void sl_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 10; - SET_MODULE_OWNER(dev); - /* New-style flags. 
*/ dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST; } @@ -957,7 +955,7 @@ slip_close(struct tty_struct *tty) * STANDARD SLIP ENCAPSULATION * ************************************************************************/ -int +static int slip_esc(unsigned char *s, unsigned char *d, int len) { unsigned char *ptr = d; diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c index ae1ae343beed..d6abb68e6e2f 100644 --- a/drivers/net/smc-mca.c +++ b/drivers/net/smc-mca.c @@ -196,6 +196,7 @@ static int __init ultramca_probe(struct device *gen_dev) int tirq = 0; int base_addr = ultra_io[ultra_found]; int irq = ultra_irq[ultra_found]; + DECLARE_MAC_BUF(mac); if (base_addr || irq) { printk(KERN_INFO "Probing for SMC MCA adapter"); @@ -264,7 +265,6 @@ static int __init ultramca_probe(struct device *gen_dev) if(!dev) return -ENODEV; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, gen_dev); mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]); mca_device_set_claim(mca_dev, 1); @@ -331,10 +331,11 @@ static int __init ultramca_probe(struct device *gen_dev) reg4 = inb(ioaddr + 4) & 0x7f; outb(reg4, ioaddr + 4); - printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x,", slot + 1, ioaddr); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %s", + slot + 1, ioaddr, print_mac(mac, dev->dev_addr)); /* Switch from the station address to the alternate register set * and read the useful registers there. diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c index a52b22d7db65..00d6cf1af484 100644 --- a/drivers/net/smc-ultra.c +++ b/drivers/net/smc-ultra.c @@ -142,8 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &ultra_poll; #endif @@ -200,6 +198,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr) unsigned char num_pages, irqreg, addr, piomode; unsigned char idreg = inb(ioaddr + 7); unsigned char reg4 = inb(ioaddr + 4) & 0x7f; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -226,10 +225,11 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr) model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ"; - printk("%s: %s at %#3x,", dev->name, model_name, ioaddr); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: %s at %#3x, %s", dev->name, model_name, + ioaddr, print_mac(mac, dev->dev_addr)); /* Switch from the station address to the alternate register set and read the useful registers there. */ diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c index 88a30e56c64c..a5a91ace28cc 100644 --- a/drivers/net/smc-ultra32.c +++ b/drivers/net/smc-ultra32.c @@ -132,8 +132,6 @@ struct net_device * __init ultra32_probe(int unit) netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); - irq = dev->irq; /* EISA spec allows for up to 16 slots, but 8 is typical. 
*/ @@ -165,6 +163,7 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr) unsigned char idreg; unsigned char reg4; const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"}; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -205,10 +204,11 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr) model_name = "SMC Ultra32"; - printk("%s: %s at 0x%X,", dev->name, model_name, ioaddr); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: %s at 0x%X, %s", + dev->name, model_name, ioaddr, print_mac(mac, dev->dev_addr)); /* Switch from the station address to the alternate register set and read the useful registers there. */ diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index db43e42bee35..7c60df46fc65 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -115,13 +115,6 @@ struct smc911x_local { */ struct sk_buff *pending_tx_skb; - /* - * these are things that the kernel wants me to keep, so users - * can find out semi-useless statistics of how well the card is - * performing - */ - struct net_device_stats stats; - /* version/revision of the SMC911x chip */ u16 version; u16 revision; @@ -315,8 +308,8 @@ static void smc911x_reset(struct net_device *dev) if (lp->pending_tx_skb != NULL) { dev_kfree_skb (lp->pending_tx_skb); lp->pending_tx_skb = NULL; - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } } @@ -449,14 +442,14 @@ static inline void smc911x_rcv(struct net_device *dev) pkt_len = (status & RX_STS_PKT_LEN_) >> 16; if (status & RX_STS_ES_) { /* Deal with a bad packet */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (status & RX_STS_CRC_ERR_) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; else { if (status & RX_STS_LEN_ERR_) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & RX_STS_MCAST_) - lp->stats.multicast++; + dev->stats.multicast++; } /* Remove the bad packet data from the RX FIFO */ smc911x_drop_pkt(dev); @@ -467,7 +460,7 @@ static inline void smc911x_rcv(struct net_device *dev) if (unlikely(skb == NULL)) { PRINTK( "%s: Low memory, rcvd packet dropped.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; smc911x_drop_pkt(dev); return; } @@ -503,8 +496,8 @@ static inline void smc911x_rcv(struct net_device *dev) dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len-4; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len-4; #endif } } @@ -616,8 +609,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) printk("%s: No Tx free space %d < %d\n", dev->name, free, skb->len); lp->pending_tx_skb = NULL; - lp->stats.tx_errors++; - lp->stats.tx_dropped++; + dev->stats.tx_errors++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -667,8 +660,8 @@ static void smc911x_tx(struct net_device *dev) dev->name, (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16); tx_status = SMC_GET_TX_STS_FIFO(); - lp->stats.tx_packets++; - lp->stats.tx_bytes+=tx_status>>16; + dev->stats.tx_packets++; + dev->stats.tx_bytes+=tx_status>>16; DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", dev->name, (tx_status & 0xffff0000) >> 16, tx_status & 0x0000ffff); @@ -676,22 +669,22 @@ static void smc911x_tx(struct net_device *dev) * full-duplex mode */ if 
((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && !(tx_status & 0x00000306))) { - lp->stats.tx_errors++; + dev->stats.tx_errors++; } if (tx_status & TX_STS_MANY_COLL_) { - lp->stats.collisions+=16; - lp->stats.tx_aborted_errors++; + dev->stats.collisions+=16; + dev->stats.tx_aborted_errors++; } else { - lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; + dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; } /* carrier error only has meaning for half-duplex communication */ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && !lp->ctl_rfduplx) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } if (tx_status & TX_STS_LATE_COLL_) { - lp->stats.collisions++; - lp->stats.tx_aborted_errors++; + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; } } } @@ -1121,11 +1114,11 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) /* Handle various error conditions */ if (status & INT_STS_RXE_) { SMC_ACK_INT(INT_STS_RXE_); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } if (status & INT_STS_RXDFH_INT_) { SMC_ACK_INT(INT_STS_RXDFH_INT_); - lp->stats.rx_dropped+=SMC_GET_RX_DROP(); + dev->stats.rx_dropped+=SMC_GET_RX_DROP(); } /* Undocumented interrupt-what is the right thing to do here? */ if (status & INT_STS_RXDF_INT_) { @@ -1140,8 +1133,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) cr &= ~MAC_CR_RXEN_; SMC_SET_MAC_CR(cr); DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; } SMC_ACK_INT(INT_STS_RDFL_); } @@ -1152,8 +1145,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) SMC_SET_MAC_CR(cr); rx_overrun=1; DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; } SMC_ACK_INT(INT_STS_RDFO_); } @@ -1307,8 +1300,8 @@ smc911x_rx_dma_irq(int dma, void *data) dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; spin_lock_irqsave(&lp->lock, flags); pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; @@ -1568,19 +1561,6 @@ static int smc911x_close(struct net_device *dev) } /* - * Get the current statistics. - * This may be called with the card open or closed. 
- */ -static struct net_device_stats *smc911x_query_statistics(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); - - - return &lp->stats; -} - -/* * Ethtool support */ static int @@ -2056,7 +2036,6 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) dev->hard_start_xmit = smc911x_hard_start_xmit; dev->tx_timeout = smc911x_timeout; dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->get_stats = smc911x_query_statistics; dev->set_multicast_list = smc911x_set_multicast_list; dev->ethtool_ops = &smc911x_ethtool_ops; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2084,7 +2063,7 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) /* Grab the IRQ */ retval = request_irq(dev->irq, &smc911x_interrupt, - IRQF_SHARED | IRQF_TRIGGER_FALLING, dev->name, dev); + IRQF_SHARED | SMC_IRQ_SENSE, dev->name, dev); if (retval) goto err_out; @@ -2181,7 +2160,6 @@ static int smc911x_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto release_1; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->dma = (unsigned char)-1; diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h index 962a710459fc..16a0edc078fd 100644 --- a/drivers/net/smc911x.h +++ b/drivers/net/smc911x.h @@ -36,6 +36,12 @@ #define SMC_USE_PXA_DMA 1 #define SMC_USE_16BIT 0 #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING +#elif CONFIG_SH_MAGIC_PANEL_R2 + #define SMC_USE_SH_DMA 0 + #define SMC_USE_16BIT 0 + #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW #endif diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index 36c1ebadbf20..cb2698de5190 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c @@ -191,13 +191,6 @@ static struct devlist smc_devlist[] __initdata = { /* store this information for the driver.. */ struct smc_local { /* - these are things that the kernel wants me to keep, so users - can find out semi-useless statistics of how well the card is - performing - */ - struct net_device_stats stats; - - /* If I have to wait until memory is available to send a packet, I will store the skbuff here, until I get the desired memory. Then, I'll send it out and free it. @@ -249,12 +242,6 @@ static void smc_timeout(struct net_device *dev); static int smc_close(struct net_device *dev); /* - . This routine allows the proc file system to query the driver's - . statistics. -*/ -static struct net_device_stats * smc_query_statistics( struct net_device *dev); - -/* . Finally, a call to set promiscuous mode ( for TCPDUMP and related . programs ) and multicast modes. */ @@ -514,7 +501,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de if ( lp->saved_skb) { /* THIS SHOULD NEVER HAPPEN. */ - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); return 1; } @@ -744,8 +731,6 @@ struct net_device * __init smc_init(int unit) irq = dev->irq; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = smc_probe(dev, io); } else if (io != 0) { /* Don't probe at all. */ @@ -891,6 +876,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) word memory_info_register; word memory_cfg_register; + DECLARE_MAC_BUF(mac); + /* Grab the region so that no one else tries to probe our ioports. 
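A note on the address-printing change that recurs through these SMC/Ultra probes: the per-byte printk loops are replaced by the DECLARE_MAC_BUF()/print_mac() helpers visible in the hunks above and below. A minimal sketch of the pattern, assuming a hypothetical probe routine that has just read the station address from I/O space (the function name and register offsets here are illustrative only):

	/* Sketch only: read and print the station address with the helpers used in this series. */
	static void example_show_hwaddr(struct net_device *dev, int ioaddr)
	{
		DECLARE_MAC_BUF(mac);	/* stack buffer sized for "xx:xx:xx:xx:xx:xx" */
		int i;

		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = inb(ioaddr + 8 + i);

		printk(KERN_INFO "%s: station address %s\n",
		       dev->name, print_mac(mac, dev->dev_addr));
	}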
*/ if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -1046,10 +1033,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) /* . Print the Ethernet address */ - printk("ADDR: "); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i] ); - printk("%2.2x \n", dev->dev_addr[5] ); + printk("ADDR: %s\n", print_mac(mac, dev->dev_addr)); /* set the private data to zero by default */ memset(dev->priv, 0, sizeof(struct smc_local)); @@ -1067,7 +1051,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) dev->hard_start_xmit = smc_wait_to_send_packet; dev->tx_timeout = smc_timeout; dev->watchdog_timeo = HZ/20; - dev->get_stats = smc_query_statistics; dev->set_multicast_list = smc_set_multicast_list; return 0; @@ -1201,7 +1184,6 @@ static void smc_timeout(struct net_device *dev) */ static void smc_rcv(struct net_device *dev) { - struct smc_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int packet_number; word status; @@ -1245,13 +1227,13 @@ static void smc_rcv(struct net_device *dev) /* set multicast stats */ if ( status & RS_MULTICAST ) - lp->stats.multicast++; + dev->stats.multicast++; skb = dev_alloc_skb( packet_length + 5); if ( skb == NULL ) { printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; goto done; } @@ -1291,16 +1273,16 @@ static void smc_rcv(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev ); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += packet_length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; } else { /* error ... */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; - if ( status & RS_ALGNERR ) lp->stats.rx_frame_errors++; + if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++; if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) - lp->stats.rx_length_errors++; - if ( status & RS_BADCRC) lp->stats.rx_crc_errors++; + dev->stats.rx_length_errors++; + if ( status & RS_BADCRC) dev->stats.rx_crc_errors++; } done: @@ -1348,12 +1330,12 @@ static void smc_tx( struct net_device * dev ) tx_status = inw( ioaddr + DATA_1 ); PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); - lp->stats.tx_errors++; - if ( tx_status & TS_LOSTCAR ) lp->stats.tx_carrier_errors++; + dev->stats.tx_errors++; + if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; if ( tx_status & TS_LATCOL ) { printk(KERN_DEBUG CARDNAME ": Late collision occurred on last xmit.\n"); - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; } #if 0 if ( tx_status & TS_16COL ) { ... 
} @@ -1448,10 +1430,10 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) SMC_SELECT_BANK( 0 ); card_stats = inw( ioaddr + COUNTER ); /* single collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; card_stats >>= 4; /* multiple collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; /* these are for when linux supports these statistics */ @@ -1460,7 +1442,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) ": TX_BUFFER_EMPTY handled\n")); outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); mask &= ~IM_TX_EMPTY_INT; - lp->stats.tx_packets += lp->packets_waiting; + dev->stats.tx_packets += lp->packets_waiting; lp->packets_waiting = 0; } else if (status & IM_ALLOC_INT ) { @@ -1479,8 +1461,8 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) PRINTK2((CARDNAME": Handoff done successfully.\n")); } else if (status & IM_RX_OVRN_INT ) { - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); } else if (status & IM_EPH_INT ) { PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); @@ -1523,16 +1505,6 @@ static int smc_close(struct net_device *dev) return 0; } -/*------------------------------------------------------------ - . Get the current statistics. - . This may be called with the card open or closed. - .-------------------------------------------------------------*/ -static struct net_device_stats* smc_query_statistics(struct net_device *dev) { - struct smc_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /*----------------------------------------------------------- . smc_set_multicast_list . diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 01cc3c742c38..24e610e711e8 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -183,13 +183,6 @@ struct smc_local { struct sk_buff *pending_tx_skb; struct tasklet_struct tx_task; - /* - * these are things that the kernel wants me to keep, so users - * can find out semi-useless statistics of how well the card is - * performing - */ - struct net_device_stats stats; - /* version/revision of the SMC91x chip */ int version; @@ -332,8 +325,8 @@ static void smc_reset(struct net_device *dev) /* free any pending tx skb */ if (pending_skb) { dev_kfree_skb(pending_skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } /* @@ -512,13 +505,13 @@ static inline void smc_rcv(struct net_device *dev) } SMC_WAIT_MMU_BUSY(); SMC_SET_MMU_CMD(MC_RELEASE); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (status & RS_ALGNERR) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & (RS_TOOSHORT | RS_TOOLONG)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & RS_BADCRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else { struct sk_buff *skb; unsigned char *data; @@ -526,7 +519,7 @@ static inline void smc_rcv(struct net_device *dev) /* set multicast stats */ if (status & RS_MULTICAST) - lp->stats.multicast++; + dev->stats.multicast++; /* * Actual payload is packet_len - 6 (or 5 if odd byte). 
@@ -542,7 +535,7 @@ static inline void smc_rcv(struct net_device *dev) dev->name); SMC_WAIT_MMU_BUSY(); SMC_SET_MMU_CMD(MC_RELEASE); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -570,8 +563,8 @@ static inline void smc_rcv(struct net_device *dev) dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += data_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += data_len; } } @@ -644,8 +637,8 @@ static void smc_hardware_send_pkt(unsigned long data) packet_no = SMC_GET_AR(); if (unlikely(packet_no & AR_FAILED)) { printk("%s: Memory allocation failed.\n", dev->name); - lp->stats.tx_errors++; - lp->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; smc_special_unlock(&lp->lock); goto done; } @@ -688,8 +681,8 @@ static void smc_hardware_send_pkt(unsigned long data) smc_special_unlock(&lp->lock); dev->trans_start = jiffies; - lp->stats.tx_packets++; - lp->stats.tx_bytes += len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT); @@ -729,8 +722,8 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) numPages = ((skb->len & ~1) + (6 - 1)) >> 8; if (unlikely(numPages > 7)) { printk("%s: Far too big packet error.\n", dev->name); - lp->stats.tx_errors++; - lp->stats.tx_dropped++; + dev->stats.tx_errors++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -803,17 +796,17 @@ static void smc_tx(struct net_device *dev) dev->name, tx_status, packet_no); if (!(tx_status & ES_TX_SUC)) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & ES_LOSTCARR) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & (ES_LATCOL | ES_16COL)) { PRINTK("%s: %s occurred on last xmit\n", dev->name, (tx_status & ES_LATCOL) ? "late collision" : "too many collisions"); - lp->stats.tx_window_errors++; - if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) { + dev->stats.tx_window_errors++; + if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { printk(KERN_INFO "%s: unexpectedly large number of " "bad collisions. Please check duplex " "setting.\n", dev->name); @@ -1347,19 +1340,19 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) SMC_SELECT_BANK(2); /* single collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; card_stats >>= 4; /* multiple collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; } else if (status & IM_RX_OVRN_INT) { DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, ({ int eph_st; SMC_SELECT_BANK(0); eph_st = SMC_GET_EPH_STATUS(); SMC_SELECT_BANK(2); eph_st; }) ); SMC_ACK_INT(IM_RX_OVRN_INT); - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; } else if (status & IM_EPH_INT) { smc_eph_interrupt(dev); } else if (status & IM_MDINT) { @@ -1628,19 +1621,6 @@ static int smc_close(struct net_device *dev) } /* - * Get the current statistics. - * This may be called with the card open or closed. 
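The smc91x hunks around this point show the conversion applied throughout the series: the driver-private struct net_device_stats and its get_stats callback are dropped, and counters are bumped directly in the stats member that struct net_device now provides (the core returns dev->stats when no get_stats hook is set). A minimal sketch of the resulting pattern, with hypothetical names:

	/* Sketch: no private stats struct and no dev->get_stats hook needed. */
	static void example_count_rx(struct net_device *dev, unsigned int len)
	{
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}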
- */ -static struct net_device_stats *smc_query_statistics(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); - - return &lp->stats; -} - -/* * Ethtool support */ static int @@ -1842,9 +1822,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) { struct smc_local *lp = netdev_priv(dev); static int version_printed = 0; - int i, retval; + int retval; unsigned int val, revision_register; const char *version_string; + DECLARE_MAC_BUF(mac); DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); @@ -1965,7 +1946,6 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) dev->hard_start_xmit = smc_hard_start_xmit; dev->tx_timeout = smc_timeout; dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->get_stats = smc_query_statistics; dev->set_multicast_list = smc_set_multicast_list; dev->ethtool_ops = &smc_ethtool_ops; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2035,10 +2015,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) "set using ifconfig\n", dev->name); } else { /* Print the Ethernet address */ - printk("%s: Ethernet addr: ", dev->name); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x\n", dev->dev_addr[5]); + printk("%s: Ethernet addr: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } if (lp->phy_type == 0) { @@ -2212,7 +2190,6 @@ static int smc_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto out_release_io; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->dma = (unsigned char)-1; diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index f8429449dc1e..af9e6bf59552 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -284,6 +284,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #elif defined(CONFIG_SUPERH) #ifdef CONFIG_SOLUTION_ENGINE +#define SMC_IRQ_FLAGS (0) #define SMC_CAN_USE_8BIT 0 #define SMC_CAN_USE_16BIT 1 #define SMC_CAN_USE_32BIT 0 @@ -299,7 +300,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_CAN_USE_8BIT 1 #define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 +#define SMC_CAN_USE_32BIT 0 #define SMC_inb(a, r) inb((a) + (r)) #define SMC_inw(a, r) inw((a) + (r)) @@ -310,8 +311,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #endif /* BOARDS */ -#define set_irq_type(irq, type) do {} while (0) - #elif defined(CONFIG_M32R) #define SMC_CAN_USE_8BIT 0 diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 590b12c7246c..fab055ffcc90 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -795,6 +795,7 @@ spider_net_set_low_watermark(struct spider_net_card *card) static int spider_net_release_tx_chain(struct spider_net_card *card, int brutal) { + struct net_device *dev = card->netdev; struct spider_net_descr_chain *chain = &card->tx_chain; struct spider_net_descr *descr; struct spider_net_hw_descr *hwdescr; @@ -815,8 +816,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) status = spider_net_get_descr_status(hwdescr); switch (status) { case SPIDER_NET_DESCR_COMPLETE: - card->netdev_stats.tx_packets++; - card->netdev_stats.tx_bytes += descr->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += descr->skb->len; break; case SPIDER_NET_DESCR_CARDOWNED: @@ -835,11 +836,11 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) if (netif_msg_tx_err(card)) dev_err(&card->netdev->dev, "forcing end of tx descriptor " "with status x%02x\n", status); - 
card->netdev_stats.tx_errors++; + dev->stats.tx_errors++; break; default: - card->netdev_stats.tx_dropped++; + dev->stats.tx_dropped++; if (!brutal) { spin_unlock_irqrestore(&chain->lock, flags); return 1; @@ -919,7 +920,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) spider_net_release_tx_chain(card, 0); if (spider_net_prepare_tx_descr(card, skb) != 0) { - card->netdev_stats.tx_dropped++; + netdev->stats.tx_dropped++; netif_stop_queue(netdev); return NETDEV_TX_BUSY; } @@ -979,16 +980,12 @@ static void spider_net_pass_skb_up(struct spider_net_descr *descr, struct spider_net_card *card) { - struct spider_net_hw_descr *hwdescr= descr->hwdescr; - struct sk_buff *skb; - struct net_device *netdev; - u32 data_status, data_error; - - data_status = hwdescr->data_status; - data_error = hwdescr->data_error; - netdev = card->netdev; + struct spider_net_hw_descr *hwdescr = descr->hwdescr; + struct sk_buff *skb = descr->skb; + struct net_device *netdev = card->netdev; + u32 data_status = hwdescr->data_status; + u32 data_error = hwdescr->data_error; - skb = descr->skb; skb_put(skb, hwdescr->valid_size); /* the card seems to add 2 bytes of junk in front @@ -1015,8 +1012,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, } /* update netdevice statistics */ - card->netdev_stats.rx_packets++; - card->netdev_stats.rx_bytes += skb->len; + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; /* pass skb up to stack */ netif_receive_skb(skb); @@ -1184,6 +1181,7 @@ static int spider_net_resync_tail_ptr(struct spider_net_card *card) static int spider_net_decode_one_descr(struct spider_net_card *card) { + struct net_device *dev = card->netdev; struct spider_net_descr_chain *chain = &card->rx_chain; struct spider_net_descr *descr = chain->tail; struct spider_net_hw_descr *hwdescr = descr->hwdescr; @@ -1210,9 +1208,9 @@ spider_net_decode_one_descr(struct spider_net_card *card) (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || (status == SPIDER_NET_DESCR_FORCE_END) ) { if (netif_msg_rx_err(card)) - dev_err(&card->netdev->dev, + dev_err(&dev->dev, "dropping RX descriptor with state %d\n", status); - card->netdev_stats.rx_dropped++; + dev->stats.rx_dropped++; goto bad_desc; } @@ -1278,34 +1276,26 @@ bad_desc: * (using netif_receive_skb). If all/enough packets are up, the driver * reenables interrupts and returns 0. If not, 1 is returned. 
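The spider_net_poll rewrite that follows is the standard conversion from the old netdev->poll interface to struct napi_struct: the driver embeds a napi_struct in its card structure, registers it with netif_napi_add(), and the poll routine takes (napi, budget), returns the number of packets handled, and calls netif_rx_complete(dev, napi) and re-enables the RX interrupt when it finishes under budget. A minimal sketch of that shape, assuming a hypothetical my_card container and my_rx_one() helper:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_card *card = container_of(napi, struct my_card, napi);
		int done = 0;

		while (done < budget && my_rx_one(card))
			done++;

		if (done < budget) {
			netif_rx_complete(card->netdev, napi);
			my_rx_irq_on(card);		/* hypothetical helper */
		}
		return done;
	}

	/* Registered once at setup, enabled/disabled in open/stop:
	 *   netif_napi_add(netdev, &card->napi, my_poll, MY_NAPI_WEIGHT);
	 *   napi_enable(&card->napi);   napi_disable(&card->napi);
	 */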
*/ -static int -spider_net_poll(struct net_device *netdev, int *budget) +static int spider_net_poll(struct napi_struct *napi, int budget) { - struct spider_net_card *card = netdev_priv(netdev); - int packets_to_do, packets_done = 0; - int no_more_packets = 0; - - packets_to_do = min(*budget, netdev->quota); - - while (packets_to_do) { - if (spider_net_decode_one_descr(card)) { - packets_done++; - packets_to_do--; - } else { - /* no more packets for the stack */ - no_more_packets = 1; + struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); + struct net_device *netdev = card->netdev; + int packets_done = 0; + + while (packets_done < budget) { + if (!spider_net_decode_one_descr(card)) break; - } + + packets_done++; } if ((packets_done == 0) && (card->num_rx_ints != 0)) { - no_more_packets = spider_net_resync_tail_ptr(card); + if (!spider_net_resync_tail_ptr(card)) + packets_done = budget; spider_net_resync_head_ptr(card); } card->num_rx_ints = 0; - netdev->quota -= packets_done; - *budget -= packets_done; spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); @@ -1313,28 +1303,13 @@ spider_net_poll(struct net_device *netdev, int *budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ - if (no_more_packets) { - netif_rx_complete(netdev); + if (packets_done < budget) { + netif_rx_complete(netdev, napi); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; - return 0; } - return 1; -} - -/** - * spider_net_get_stats - get interface statistics - * @netdev: interface device structure - * - * returns the interface statistics residing in the spider_net_card struct - */ -static struct net_device_stats * -spider_net_get_stats(struct net_device *netdev) -{ - struct spider_net_card *card = netdev_priv(netdev); - struct net_device_stats *stats = &card->netdev_stats; - return stats; + return packets_done; } /** @@ -1441,17 +1416,14 @@ static void spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) { u32 error_reg1, error_reg2; - u32 mask_reg1, mask_reg2; u32 i; int show_error = 1; error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS); error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS); - mask_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1MSK); - mask_reg2 = spider_net_read_reg(card,SPIDER_NET_GHIINT2MSK); - error_reg1 &= mask_reg1; - error_reg2 &= mask_reg2; + error_reg1 &= SPIDER_NET_INT1_MASK_VALUE; + error_reg2 &= SPIDER_NET_INT2_MASK_VALUE; /* check GHIINT0STS ************************************/ if (status_reg) @@ -1563,7 +1535,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(card->netdev); + netif_rx_schedule(card->netdev, + &card->napi); } show_error = 0; break; @@ -1583,7 +1556,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(card->netdev); + netif_rx_schedule(card->netdev, + &card->napi); show_error = 0; break; @@ -1597,7 +1571,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(card->netdev); + netif_rx_schedule(card->netdev, + &card->napi); show_error = 0; break; @@ -1679,22 +1654,21 @@ spider_net_interrupt(int irq, void *ptr) { struct net_device *netdev = 
ptr; struct spider_net_card *card = netdev_priv(netdev); - u32 status_reg, mask_reg; + u32 status_reg; status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); - mask_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); - status_reg &= mask_reg; + status_reg &= SPIDER_NET_INT0_MASK_VALUE; if (!status_reg) return IRQ_NONE; if (status_reg & SPIDER_NET_RXINT ) { spider_net_rx_irq_off(card); - netif_rx_schedule(netdev); + netif_rx_schedule(netdev, &card->napi); card->num_rx_ints ++; } if (status_reg & SPIDER_NET_TXINT) - netif_rx_schedule(netdev); + netif_rx_schedule(netdev, &card->napi); if (status_reg & SPIDER_NET_LINKINT) spider_net_link_reset(netdev); @@ -2038,7 +2012,7 @@ spider_net_open(struct net_device *netdev) netif_start_queue(netdev); netif_carrier_on(netdev); - netif_poll_enable(netdev); + napi_enable(&card->napi); spider_net_enable_interrupts(card); @@ -2208,7 +2182,7 @@ spider_net_stop(struct net_device *netdev) { struct spider_net_card *card = netdev_priv(netdev); - netif_poll_disable(netdev); + napi_disable(&card->napi); netif_carrier_off(netdev); netif_stop_queue(netdev); del_timer_sync(&card->tx_timer); @@ -2300,7 +2274,6 @@ spider_net_setup_netdev_ops(struct net_device *netdev) netdev->open = &spider_net_open; netdev->stop = &spider_net_stop; netdev->hard_start_xmit = &spider_net_xmit; - netdev->get_stats = &spider_net_get_stats; netdev->set_multicast_list = &spider_net_set_multi; netdev->set_mac_address = &spider_net_set_mac; netdev->change_mtu = &spider_net_change_mtu; @@ -2308,9 +2281,6 @@ spider_net_setup_netdev_ops(struct net_device *netdev) /* tx watchdog */ netdev->tx_timeout = &spider_net_tx_timeout; netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; - /* NAPI */ - netdev->poll = &spider_net_poll; - netdev->weight = SPIDER_NET_NAPI_WEIGHT; /* HW VLAN */ #ifdef CONFIG_NET_POLL_CONTROLLER /* poll controller */ @@ -2337,7 +2307,6 @@ spider_net_setup_netdev(struct spider_net_card *card) struct sockaddr addr; const u8 *mac; - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &card->pdev->dev); pci_set_drvdata(card->pdev, netdev); @@ -2355,6 +2324,9 @@ spider_net_setup_netdev(struct spider_net_card *card) card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; + netif_napi_add(netdev, &card->napi, + spider_net_poll, SPIDER_NET_NAPI_WEIGHT); + spider_net_setup_netdev_ops(netdev); netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX; diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index dbbdb8cee3c6..a897beee7d5d 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -466,6 +466,8 @@ struct spider_net_card { struct pci_dev *pdev; struct mii_phy phy; + struct napi_struct napi; + int medium; void __iomem *regs; @@ -485,7 +487,6 @@ struct spider_net_card { /* for ethtool */ int msg_enable; - struct net_device_stats netdev_stats; struct spider_net_extra_stats spider_stats; struct spider_net_options options; diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index d940474e024a..85691d2a0be2 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c @@ -28,8 +28,6 @@ #include "spider_net.h" -#define SPIDER_NET_NUM_STATS 13 - static struct { const char str[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { @@ -147,9 +145,14 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev, ering->rx_pending = card->rx_chain.num_desc; } -static int spider_net_get_stats_count(struct net_device *netdev) +static int spider_net_get_sset_count(struct net_device *netdev, int sset) { - return 
SPIDER_NET_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } } static void spider_net_get_ethtool_stats(struct net_device *netdev, @@ -157,13 +160,13 @@ static void spider_net_get_ethtool_stats(struct net_device *netdev, { struct spider_net_card *card = netdev->priv; - data[0] = card->netdev_stats.tx_packets; - data[1] = card->netdev_stats.tx_bytes; - data[2] = card->netdev_stats.rx_packets; - data[3] = card->netdev_stats.rx_bytes; - data[4] = card->netdev_stats.tx_errors; - data[5] = card->netdev_stats.tx_dropped; - data[6] = card->netdev_stats.rx_dropped; + data[0] = netdev->stats.tx_packets; + data[1] = netdev->stats.tx_bytes; + data[2] = netdev->stats.rx_packets; + data[3] = netdev->stats.rx_bytes; + data[4] = netdev->stats.tx_errors; + data[5] = netdev->stats.tx_dropped; + data[6] = netdev->stats.rx_dropped; data[7] = card->spider_stats.rx_desc_error; data[8] = card->spider_stats.tx_timeouts; data[9] = card->spider_stats.alloc_rx_skb_error; @@ -188,11 +191,10 @@ const struct ethtool_ops spider_net_ethtool_ops = { .nway_reset = spider_net_ethtool_nway_reset, .get_rx_csum = spider_net_ethtool_get_rx_csum, .set_rx_csum = spider_net_ethtool_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_ringparam = spider_net_ethtool_get_ringparam, .get_strings = spider_net_get_strings, - .get_stats_count = spider_net_get_stats_count, + .get_sset_count = spider_net_get_sset_count, .get_ethtool_stats = spider_net_get_ethtool_stats, }; diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 8b6478663a56..bcc430bd9e49 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -155,7 +155,7 @@ static int full_duplex[MAX_UNITS] = {0, }; #if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) /* 64-bit dma_addr_t */ #define ADDR_64BITS /* This chip uses 64 bit addresses. 
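The starfire changes beginning here swap the raw u32/u64 descriptor and filter fields for the sparse-annotated __le32/__le64/__be16 types; nothing changes at runtime, but endianness-checking builds can now verify that every access is wrapped in cpu_to_le*/le*_to_cpu (or be16_to_cpu for the big-endian perfect-filter words). A small illustration of the idea, not taken from the driver itself:

	struct example_rx_desc {
		__le32 status;		/* stored little-endian, as the chip expects */
	};

	static inline unsigned int example_rx_len(const struct example_rx_desc *d)
	{
		return le32_to_cpu(d->status) & 0xffff;	/* low 16 bits hold the length */
	}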
*/ -#define netdrv_addr_t u64 +#define netdrv_addr_t __le64 #define cpu_to_dma(x) cpu_to_le64(x) #define dma_to_cpu(x) le64_to_cpu(x) #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit @@ -164,7 +164,7 @@ static int full_duplex[MAX_UNITS] = {0, }; #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit #define RX_DESC_ADDR_SIZE RxDescAddr64bit #else /* 32-bit dma_addr_t */ -#define netdrv_addr_t u32 +#define netdrv_addr_t __le32 #define cpu_to_dma(x) cpu_to_le32(x) #define dma_to_cpu(x) le32_to_cpu(x) #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit @@ -178,16 +178,13 @@ static int full_duplex[MAX_UNITS] = {0, }; #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) #ifdef HAVE_NETDEV_POLL -#define init_poll(dev) \ -do { \ - dev->poll = &netdev_poll; \ - dev->weight = max_interrupt_work; \ -} while (0) -#define netdev_rx(dev, ioaddr) \ +#define init_poll(dev, np) \ + netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work) +#define netdev_rx(dev, np, ioaddr) \ do { \ u32 intr_enable; \ - if (netif_rx_schedule_prep(dev)) { \ - __netif_rx_schedule(dev); \ + if (netif_rx_schedule_prep(dev, &np->napi)) { \ + __netif_rx_schedule(dev, &np->napi); \ intr_enable = readl(ioaddr + IntrEnable); \ intr_enable &= ~(IntrRxDone | IntrRxEmpty); \ writel(intr_enable, ioaddr + IntrEnable); \ @@ -204,12 +201,12 @@ do { \ } while (0) #define netdev_receive_skb(skb) netif_receive_skb(skb) #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid) -static int netdev_poll(struct net_device *dev, int *budget); +static int netdev_poll(struct napi_struct *napi, int budget); #else /* not HAVE_NETDEV_POLL */ -#define init_poll(dev) +#define init_poll(dev, np) #define netdev_receive_skb(skb) netif_rx(skb) #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid) -#define netdev_rx(dev, ioaddr) \ +#define netdev_rx(dev, np, ioaddr) \ do { \ int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \ __netdev_rx(dev, "a);\ @@ -497,7 +494,7 @@ enum intr_ctrl_bits { /* The Rx and Tx buffer descriptors. */ struct starfire_rx_desc { - dma_addr_t rxaddr; + netdrv_addr_t rxaddr; }; enum rx_desc_bits { RxDescValid=1, RxDescEndRing=2, @@ -505,25 +502,25 @@ enum rx_desc_bits { /* Completion queue entry. */ struct short_rx_done_desc { - u32 status; /* Low 16 bits is length. */ + __le32 status; /* Low 16 bits is length. */ }; struct basic_rx_done_desc { - u32 status; /* Low 16 bits is length. */ - u16 vlanid; - u16 status2; + __le32 status; /* Low 16 bits is length. */ + __le16 vlanid; + __le16 status2; }; struct csum_rx_done_desc { - u32 status; /* Low 16 bits is length. */ - u16 csum; /* Partial checksum */ - u16 status2; + __le32 status; /* Low 16 bits is length. */ + __le16 csum; /* Partial checksum */ + __le16 status2; }; struct full_rx_done_desc { - u32 status; /* Low 16 bits is length. */ - u16 status3; - u16 status2; - u16 vlanid; - u16 csum; /* partial checksum */ - u32 timestamp; + __le32 status; /* Low 16 bits is length. */ + __le16 status3; + __le16 status2; + __le16 vlanid; + __le16 csum; /* partial checksum */ + __le32 timestamp; }; /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */ #ifdef VLAN_SUPPORT @@ -540,15 +537,15 @@ enum rx_done_bits { /* Type 1 Tx descriptor. */ struct starfire_tx_desc_1 { - u32 status; /* Upper bits are status, lower 16 length. */ - u32 addr; + __le32 status; /* Upper bits are status, lower 16 length. */ + __le32 addr; }; /* Type 2 Tx descriptor. 
*/ struct starfire_tx_desc_2 { - u32 status; /* Upper bits are status, lower 16 length. */ - u32 reserved; - u64 addr; + __le32 status; /* Upper bits are status, lower 16 length. */ + __le32 reserved; + __le64 addr; }; #ifdef ADDR_64BITS @@ -566,9 +563,9 @@ enum tx_desc_bits { TxRingWrap=0x04000000, TxCalTCP=0x02000000, }; struct tx_done_desc { - u32 status; /* timestamp, index. */ + __le32 status; /* timestamp, index. */ #if 0 - u32 intrstatus; /* interrupt status */ + __le32 intrstatus; /* interrupt status */ #endif }; @@ -599,6 +596,8 @@ struct netdev_private { struct tx_done_desc *tx_done_q; dma_addr_t tx_done_q_dma; unsigned int tx_done; + struct napi_struct napi; + struct net_device *dev; struct net_device_stats stats; struct pci_dev *pci_dev; #ifdef VLAN_SUPPORT @@ -695,6 +694,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, void __iomem *base; int drv_flags, io_size; int boguscnt; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -720,7 +720,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); irq = pdev->irq; @@ -791,6 +790,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, dev->irq = irq; np = netdev_priv(dev); + np->dev = dev; np->base = base; spin_lock_init(&np->lock); pci_set_drvdata(pdev, dev); @@ -851,7 +851,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, dev->hard_start_xmit = &start_tx; dev->tx_timeout = tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - init_poll(dev); + init_poll(dev, np); dev->stop = &netdev_close; dev->get_stats = &get_stats; dev->set_multicast_list = &set_rx_mode; @@ -864,11 +864,9 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, if (register_netdev(dev)) goto err_out_cleardev; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, netdrv_tbl[chip_idx].name, base); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, netdrv_tbl[chip_idx].name, base, + print_mac(mac, dev->dev_addr), irq); if (drv_flags & CanHaveMII) { int phy, phy_idx = 0; @@ -965,7 +963,7 @@ static int netdev_open(struct net_device *dev) dev->name, dev->irq); /* Allocate the various queues. */ - if (np->queue_mem == 0) { + if (!np->queue_mem) { tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; @@ -1038,11 +1036,11 @@ static int netdev_open(struct net_device *dev) writew(0, ioaddr + PerfFilterTable + 4); writew(0, ioaddr + PerfFilterTable + 8); for (i = 1; i < 16; i++) { - u16 *eaddrs = (u16 *)dev->dev_addr; + __be16 *eaddrs = (__be16 *)dev->dev_addr; void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16; - writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4; - writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4; - writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8; + writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4; + writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4; + writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8; } /* Initialize other registers. 
*/ @@ -1056,6 +1054,9 @@ static int netdev_open(struct net_device *dev) writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); +#ifdef HAVE_NETDEV_POLL + napi_enable(&np->napi); +#endif netif_start_queue(dev); if (debug > 1) @@ -1330,7 +1331,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) handled = 1; if (intr_status & (IntrRxDone | IntrRxEmpty)) - netdev_rx(dev, ioaddr); + netdev_rx(dev, np, ioaddr); /* Scavenge the skbuff list based on the Tx-done queue. There are redundant checks here that may be cleaned up @@ -1470,13 +1471,16 @@ static int __netdev_rx(struct net_device *dev, int *quota) } #ifndef final_version /* Remove after testing. */ /* You will want this info for the initial debug. */ - if (debug > 5) - printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n", - skb->data[0], skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5], skb->data[6], skb->data[7], - skb->data[8], skb->data[9], skb->data[10], - skb->data[11], skb->data[12], skb->data[13]); + if (debug > 5) { + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG " Rx data %s %s" + " %2.2x%2.2x.\n", + print_mac(mac, &skb->data[0]), + print_mac(mac2, &skb->data[6]), + skb->data[12], skb->data[13]); + } #endif skb->protocol = eth_type_trans(skb, dev); @@ -1531,36 +1535,35 @@ static int __netdev_rx(struct net_device *dev, int *quota) #ifdef HAVE_NETDEV_POLL -static int netdev_poll(struct net_device *dev, int *budget) +static int netdev_poll(struct napi_struct *napi, int budget) { + struct netdev_private *np = container_of(napi, struct netdev_private, napi); + struct net_device *dev = np->dev; u32 intr_status; - struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; - int retcode = 0, quota = dev->quota; + int quota = budget; do { writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear); - retcode = __netdev_rx(dev, "a); - *budget -= (dev->quota - quota); - dev->quota = quota; - if (retcode) + if (__netdev_rx(dev, "a)) goto out; intr_status = readl(ioaddr + IntrStatus); } while (intr_status & (IntrRxDone | IntrRxEmpty)); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); intr_status = readl(ioaddr + IntrEnable); intr_status |= IntrRxDone | IntrRxEmpty; writel(intr_status, ioaddr + IntrEnable); out: if (debug > 5) - printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode); + printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", + budget - quota); /* Restart Rx engine if stopped. */ - return retcode; + return budget - quota; } #endif /* HAVE_NETDEV_POLL */ @@ -1764,26 +1767,26 @@ static void set_rx_mode(struct net_device *dev) } else if (dev->mc_count <= 14) { /* Use the 16 element perfect filter, skip first two entries. 
*/ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; - u16 *eaddrs; + __be16 *eaddrs; for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2; i++, mclist = mclist->next) { - eaddrs = (u16 *)mclist->dmi_addr; - writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8; + eaddrs = (__be16 *)mclist->dmi_addr; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; } - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (__be16 *)dev->dev_addr; while (i++ < 16) { - writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; } rx_mode |= AcceptBroadcast|PerfectFilter; } else { /* Must use a multicast hash table. */ void __iomem *filter_addr; - u16 *eaddrs; - u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ + __be16 *eaddrs; + __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; @@ -1791,17 +1794,17 @@ static void set_rx_mode(struct net_device *dev) /* The chip uses the upper 9 CRC bits as index into the hash table */ int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; - __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1]; + __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; *fptr |= cpu_to_le32(1 << (bit_nr & 31)); } /* Clear the perfect filter list, skip first two entries. 
*/ filter_addr = ioaddr + PerfFilterTable + 2 * 16; - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (__be16 *)dev->dev_addr; for (i = 2; i < 16; i++) { - writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; } for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++) writew(mc_filter[i], filter_addr); @@ -1904,6 +1907,9 @@ static int netdev_close(struct net_device *dev) int i; netif_stop_queue(dev); +#ifdef HAVE_NETDEV_POLL + napi_disable(&np->napi); +#endif if (debug > 1) { printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c index e6f90427160c..b65be5d70fec 100644 --- a/drivers/net/stnic.c +++ b/drivers/net/stnic.c @@ -112,7 +112,6 @@ static int __init stnic_probe(void) dev = alloc_ei_netdev(); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index b77ab6e8fd35..9b2a7f7bb258 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -311,7 +311,6 @@ struct net_device * __init sun3_82586_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); dev->irq = IE_IRQ; dev->base_addr = ioaddr; diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index f1548c033327..f8d46134daca 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -152,7 +152,6 @@ struct lance_private { struct lance_memory *mem; int new_rx, new_tx; /* The next free ring entry */ int old_tx, old_rx; /* ring entry to be processed */ - struct net_device_stats stats; /* These two must be longs for set_bit() */ long tx_full; long lock; @@ -241,7 +240,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); -static struct net_device_stats *lance_get_stats( struct net_device *dev ); static void set_multicast_list( struct net_device *dev ); /************************* End of Prototypes **************************/ @@ -274,7 +272,6 @@ struct net_device * __init sun3lance_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); if (!lance_probe(dev)) goto out; @@ -303,6 +300,7 @@ static int __init lance_probe( struct net_device *dev) static int did_version; volatile unsigned short *ioaddr_probe; unsigned short tmp1, tmp2; + DECLARE_MAC_BUF(mac); #ifdef CONFIG_SUN3 ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); @@ -378,8 +376,7 @@ static int __init lance_probe( struct net_device *dev) MEM->init.hwaddr[4] = dev->dev_addr[5]; MEM->init.hwaddr[5] = dev->dev_addr[4]; - for( i = 0; i < 6; ++i ) - printk( "%02x%s", dev->dev_addr[i], (i < 5) ? 
":" : "\n" ); + printk("%s\n", print_mac(mac, dev->dev_addr)); MEM->init.mode = 0x0000; MEM->init.filter[0] = 0x00000000; @@ -402,15 +399,12 @@ static int __init lance_probe( struct net_device *dev) dev->open = &lance_open; dev->hard_start_xmit = &lance_start_xmit; dev->stop = &lance_close; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &set_multicast_list; dev->set_mac_address = NULL; // KLUDGE -- REMOVE ME set_bit(__LINK_STATE_PRESENT, &dev->state); - memset( &lp->stats, 0, sizeof(lp->stats) ); - return 1; } @@ -535,7 +529,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) * little endian mode. */ REGA(CSR3) = CSR3_BSWP; - lp->stats.tx_errors++; + dev->stats.tx_errors++; if(lance_debug >= 2) { int i; @@ -596,17 +590,12 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) /* Fill in a Tx ring entry */ #if 0 if (lance_debug >= 2) { - u_char *p; - int i; - printk( "%s: TX pkt %d type 0x%04x from ", dev->name, - lp->new_tx, ((u_short *)skb->data)[6]); - for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = (u_char *)skb->data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" data at 0x%08x len %d\n", (int)skb->data, - (int)skb->len ); + printk( "%s: TX pkt %d type 0x%04x" + " from %s to %s" + " data at 0x%08x len %d\n", + dev->name, lp->new_tx, ((u_short *)skb->data)[6], + DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data), + (int)skb->data, (int)skb->len ); } #endif /* We're not prepared for the int until the last flags are set/reset. @@ -635,7 +624,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* Trigger an immediate send poll. */ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; @@ -713,12 +702,12 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) if (head->flag & TMD1_ERR) { int status = head->misc; - lp->stats.tx_errors++; - if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++; - if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++; - if (status & TMD3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (status & TMD3_LCOL) dev->stats.tx_window_errors++; if (status & (TMD3_UFLO | TMD3_BUFF)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk("%s: Tx FIFO error\n", dev->name); REGA(CSR0) = CSR0_STOP; @@ -731,9 +720,9 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) head->flag &= ~(TMD1_ENP | TMD1_STP); if(head->flag & (TMD1_ONE | TMD1_MORE)) - lp->stats.collisions++; + dev->stats.collisions++; - lp->stats.tx_packets++; + dev->stats.tx_packets++; DPRINTK(3, ("cleared tx ring %d\n", old_tx)); } old_tx = (old_tx +1) & TX_RING_MOD_MASK; @@ -753,8 +742,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) lance_rx( dev ); /* Log misc errors. */ - if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */ - if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ if (csr0 & CSR0_MERR) { DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " "status %04x.\n", dev->name, csr0 )); @@ -800,11 +789,11 @@ static int lance_rx( struct net_device *dev ) full-sized buffers it's possible for a jabber packet to use two buffers, with only the last correctly noting the error. */ if (status & RMD1_ENP) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet.*/ - if (status & RMD1_FRAM) lp->stats.rx_frame_errors++; - if (status & RMD1_OFLO) lp->stats.rx_over_errors++; - if (status & RMD1_CRC) lp->stats.rx_crc_errors++; - if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; head->flag &= (RMD1_ENP|RMD1_STP); } else { /* Malloc up new buffer, compatible with net-3. */ @@ -814,7 +803,7 @@ static int lance_rx( struct net_device *dev ) if (pkt_len < 60) { printk( "%s: Runt packet!\n", dev->name ); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { skb = dev_alloc_skb( pkt_len+2 ); @@ -822,7 +811,7 @@ static int lance_rx( struct net_device *dev ) DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", dev->name )); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; head->msg_length = 0; head->flag |= RMD1_OWN_CHIP; lp->new_rx = (lp->new_rx+1) & @@ -831,13 +820,14 @@ static int lance_rx( struct net_device *dev ) #if 0 if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head), *p; - printk( "%s: RX pkt %d type 0x%04x from ", dev->name, entry, ((u_short *)data)[6]); - for( p = &data[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); + u_char *data = PKTBUF_ADDR(head); + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2) + printk("%s: RX pkt %d type 0x%04x" + " from %s to %s", + dev->name, lp->new_tx, ((u_short *)data)[6], + print_mac(mac, &data[6]), print_mac(mac2, data)); + printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " "len %d at %08x\n", data[15], data[16], data[17], data[18], @@ -860,8 +850,8 @@ static int lance_rx( struct net_device *dev ) skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } @@ -898,14 +888,6 @@ static int lance_close( struct net_device *dev ) } -static struct net_device_stats *lance_get_stats( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index b3e0158def4f..fe3ac6f9ae89 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -1082,12 +1082,12 @@ static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) struct bigmac *bp; u8 bsizes, bsizes_more; int i; + DECLARE_MAC_BUF(mac); /* Get a new device struct for this interface. 
*/ dev = alloc_etherdev(sizeof(struct bigmac)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -1227,11 +1227,8 @@ static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) dev_set_drvdata(&bp->bigmac_sdev->ofdev.dev, bp); - printk(KERN_INFO "%s: BigMAC 100baseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index af0c9831074c..ff98f5d597f1 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -466,8 +466,8 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, #else int bar = 1; #endif - int phy, phy_idx = 0; - + int phy, phy_end, phy_idx = 0; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -485,7 +485,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, DRV_NAME)) @@ -547,19 +546,25 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, if (i) goto err_out_unmap_rx; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, pci_id_tbl[chip_idx].name, ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, + print_mac(mac, dev->dev_addr), irq); np->phys[0] = 1; /* Default setting */ np->mii_preamble_required++; + /* * It seems some phys doesn't deal well with address 0 being accessed - * first, so leave address zero to the end of the loop (32 & 31). + * first */ - for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { + if (sundance_pci_tbl[np->chip_id].device == 0x0200) { + phy = 0; + phy_end = 31; + } else { + phy = 1; + phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ + } + for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { int phyx = phy & 0x1f; int mii_status = mdio_read(dev, phyx, MII_BMSR); if (mii_status != 0xffff && mii_status != 0x0000) { @@ -1586,7 +1591,6 @@ static const struct ethtool_ops ethtool_ops = { .get_link = get_link, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 432803855034..53b8344a68ef 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -19,7 +19,7 @@ * * gem_change_mtu() and gem_set_multicast() are called with a read_lock() * help by net/core/dev.c, thus they can't schedule. That means they can't - * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock + * call napi_disable() neither, thus force gem_poll() to keep a spinlock * where it could have been dropped. 
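/*
 * A minimal sketch (not part of this patch) of the conversion the sungem
 * hunks below perform: the old dev->poll/quota interface becomes a
 * napi_struct poll method.  "mynic" is a hypothetical driver with an
 * embedded struct napi_struct; mynic_rx() and mynic_enable_irq() are
 * assumed helpers, not real kernel APIs.
 */
#include <linux/netdevice.h>

struct mynic {
	struct net_device *dev;
	struct napi_struct napi;
	/* device registers, rings, locks ... */
};

static int mynic_rx(struct mynic *np, int budget);	/* assumed: returns packets handled */
static void mynic_enable_irq(struct mynic *np);		/* assumed: unmasks rx interrupts */

static int mynic_poll(struct napi_struct *napi, int budget)
{
	struct mynic *np = container_of(napi, struct mynic, napi);
	struct net_device *dev = np->dev;
	int work_done;

	/* Handle at most 'budget' received packets. */
	work_done = mynic_rx(np, budget);

	if (work_done < budget) {
		/* Ring drained: leave polled mode and unmask interrupts.
		 * The old interface instead returned 0/1 and adjusted
		 * *budget and dev->quota by hand, as gem_poll() used to.
		 */
		netif_rx_complete(dev, napi);
		mynic_enable_irq(np);
	}
	return work_done;
}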
change_mtu especially would love also to * be able to msleep instead of horrid locked delays when resetting the HW, * but that read_lock() makes it impossible, unless I defer it's action to @@ -878,19 +878,20 @@ static int gem_rx(struct gem *gp, int work_to_do) return work_done; } -static int gem_poll(struct net_device *dev, int *budget) +static int gem_poll(struct napi_struct *napi, int budget) { - struct gem *gp = dev->priv; + struct gem *gp = container_of(napi, struct gem, napi); + struct net_device *dev = gp->dev; unsigned long flags; + int work_done; /* * NAPI locking nightmare: See comment at head of driver */ spin_lock_irqsave(&gp->lock, flags); + work_done = 0; do { - int work_to_do, work_done; - /* Handle anomalies */ if (gp->status & GREG_STAT_ABNORMAL) { if (gem_abnormal_irq(dev, gp, gp->status)) @@ -906,29 +907,25 @@ static int gem_poll(struct net_device *dev, int *budget) /* Run RX thread. We don't use any locking here, * code willing to do bad things - like cleaning the - * rx ring - must call netif_poll_disable(), which + * rx ring - must call napi_disable(), which * schedule_timeout()'s if polling is already disabled. */ - work_to_do = min(*budget, dev->quota); - - work_done = gem_rx(gp, work_to_do); - - *budget -= work_done; - dev->quota -= work_done; + work_done += gem_rx(gp, budget); - if (work_done >= work_to_do) - return 1; + if (work_done >= budget) + return work_done; spin_lock_irqsave(&gp->lock, flags); gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - __netif_rx_complete(dev); + __netif_rx_complete(dev, napi); gem_enable_ints(gp); spin_unlock_irqrestore(&gp->lock, flags); - return 0; + + return work_done; } static irqreturn_t gem_interrupt(int irq, void *dev_id) @@ -946,17 +943,17 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) spin_lock_irqsave(&gp->lock, flags); - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &gp->napi)) { u32 gem_status = readl(gp->regs + GREG_STAT); if (gem_status == 0) { - netif_poll_enable(dev); + napi_enable(&gp->napi); spin_unlock_irqrestore(&gp->lock, flags); return IRQ_NONE; } gp->status = gem_status; gem_disable_ints(gp); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &gp->napi); } spin_unlock_irqrestore(&gp->lock, flags); @@ -2284,7 +2281,7 @@ static void gem_reset_task(struct work_struct *work) mutex_lock(&gp->pm_mutex); - netif_poll_disable(gp->dev); + napi_disable(&gp->napi); spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); @@ -2307,7 +2304,7 @@ static void gem_reset_task(struct work_struct *work) spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - netif_poll_enable(gp->dev); + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); } @@ -2324,6 +2321,8 @@ static int gem_open(struct net_device *dev) if (!gp->asleep) rc = gem_do_start(dev); gp->opened = (rc == 0); + if (gp->opened) + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); @@ -2334,9 +2333,7 @@ static int gem_close(struct net_device *dev) { struct gem *gp = dev->priv; - /* Note: we don't need to call netif_poll_disable() here because - * our caller (dev_close) already did it for us - */ + napi_disable(&gp->napi); mutex_lock(&gp->pm_mutex); @@ -2358,7 +2355,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) mutex_lock(&gp->pm_mutex); - netif_poll_disable(dev); + napi_disable(&gp->napi); printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", dev->name, @@ -2482,7 +2479,7 @@ static int gem_resume(struct pci_dev *pdev) spin_unlock(&gp->tx_lock); spin_unlock_irqrestore(&gp->lock, 
flags); - netif_poll_enable(dev); + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); @@ -2968,7 +2965,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev, unsigned long gemreg_base, gemreg_len; struct net_device *dev; struct gem *gp; - int i, err, pci_using_dac; + int err, pci_using_dac; + DECLARE_MAC_BUF(mac); if (gem_version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -3026,7 +3024,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, err = -ENOMEM; goto err_disable_device; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); gp = dev->priv; @@ -3121,8 +3118,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, dev->get_stats = gem_get_stats; dev->set_multicast_list = gem_set_multicast; dev->do_ioctl = gem_ioctl; - dev->poll = gem_poll; - dev->weight = 64; + netif_napi_add(dev, &gp->napi, gem_poll, 64); dev->ethtool_ops = &gem_ethtool_ops; dev->tx_timeout = gem_tx_timeout; dev->watchdog_timeo = 5 * HZ; @@ -3154,12 +3150,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev, goto err_out_free_consistent; } - printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", - dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet " + "%s\n", + dev->name, print_mac(mac, dev->dev_addr)); if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 58cf87c5751e..76d760acc9e2 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -993,6 +993,7 @@ struct gem { u32 msg_enable; u32 status; + struct napi_struct napi; struct net_device_stats net_stats; int tx_fifo_sz; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 8b35f13318ea..120c8affe83d 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2664,6 +2664,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe struct net_device *dev; int i, qfe_slot = -1; int err = -ENODEV; + DECLARE_MAC_BUF(mac); if (is_qfe) { qp = quattro_sbus_find(sdev); @@ -2680,7 +2681,6 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe dev = alloc_etherdev(sizeof(struct happy_meal)); if (!dev) goto err_out; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); if (hme_version_printed++ == 0) @@ -2851,10 +2851,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", - dev->dev_addr[i], i == 5 ? ' ' : ':'); - printk("\n"); + printk("%s\n", print_mac(mac, dev->dev_addr)); return 0; @@ -2989,6 +2986,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, int i, qfe_slot = -1; char prom_name[64]; int err; + DECLARE_MAC_BUF(mac); /* Now make sure pci_dev cookie is there. */ #ifdef CONFIG_SPARC @@ -3022,7 +3020,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, err = -ENOMEM; if (!dev) goto err_out; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (hme_version_printed++ == 0) @@ -3203,10 +3200,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
' ' : ':'); - - printk("\n"); + printk("%s\n", print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 053b7cb0d944..26ade68aeabf 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -99,8 +99,7 @@ static char lancestr[] = "LANCE"; #include <asm/byteorder.h> /* Used by the checksum routines */ #include <asm/idprom.h> #include <asm/sbus.h> -#include <asm/openprom.h> -#include <asm/oplib.h> +#include <asm/prom.h> #include <asm/auxio.h> /* For tpe-link-test? setting */ #include <asm/irq.h> @@ -249,7 +248,6 @@ struct lance_private { int rx_new, tx_new; int rx_old, tx_old; - struct net_device_stats stats; struct sbus_dma *ledma; /* If set this points to ledma */ char tpe; /* cable-selection is TPE */ char auto_select; /* cable-selection by carrier */ @@ -520,17 +518,17 @@ static void lance_rx_dvma(struct net_device *dev) /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb(len + 2); @@ -538,14 +536,14 @@ static void lance_rx_dvma(struct net_device *dev) if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = RX_NEXT(entry); return; } - lp->stats.rx_bytes += len; + dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ @@ -555,7 +553,7 @@ static void lance_rx_dvma(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } /* Return the packet to the pool */ @@ -587,12 +585,12 @@ static void lance_tx_dvma(struct net_device *dev) if (bits & LE_T1_ERR) { u16 status = td->misc; - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", @@ -609,7 +607,7 @@ static void lance_tx_dvma(struct net_device *dev) * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -627,13 +625,13 @@ static void lance_tx_dvma(struct net_device *dev) /* One collision before packet was sent. */ if (bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. 
*/ if (bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = TX_NEXT(j); @@ -693,17 +691,17 @@ static void lance_rx_pio(struct net_device *dev) /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (sbus_readw(&rd->mblength) & 0xfff) - 4; skb = dev_alloc_skb(len + 2); @@ -711,14 +709,14 @@ static void lance_rx_pio(struct net_device *dev) if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); lp->rx_new = RX_NEXT(entry); return; } - lp->stats.rx_bytes += len; + dev->stats.rx_bytes += len; skb_reserve (skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ @@ -726,7 +724,7 @@ static void lance_rx_pio(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } /* Return the packet to the pool */ @@ -758,12 +756,12 @@ static void lance_tx_pio(struct net_device *dev) if (bits & LE_T1_ERR) { u16 status = sbus_readw(&td->misc); - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", @@ -780,7 +778,7 @@ static void lance_tx_pio(struct net_device *dev) * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -798,13 +796,13 @@ static void lance_tx_pio(struct net_device *dev) /* One collision before packet was sent. */ if (bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. 
*/ if (bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = TX_NEXT(j); @@ -845,10 +843,10 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) lp->tx(dev); if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { if (lp->dregs) { @@ -1128,7 +1126,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irq(&lp->lock); - lp->stats.tx_bytes += len; + dev->stats.tx_bytes += len; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { @@ -1171,13 +1169,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *lance_get_stats(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* taken from the depca driver */ static void lance_load_multicast(struct net_device *dev) { @@ -1326,16 +1317,17 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev, struct sbus_dev *lebuffer) { static unsigned version_printed; + struct device_node *dp = sdev->ofdev.node; struct net_device *dev; struct lance_private *lp; int i; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct lance_private) + 8); if (!dev) return -ENOMEM; lp = netdev_priv(dev); - memset(lp, 0, sizeof(*lp)); if (sparc_lance_debug && version_printed++ == 0) printk (KERN_INFO "%s", version); @@ -1389,54 +1381,46 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev, lp->rx = lance_rx_dvma; lp->tx = lance_tx_dvma; } - lp->busmaster_regval = prom_getintdefault(sdev->prom_node, - "busmaster-regval", - (LE_C3_BSWP | LE_C3_ACON | - LE_C3_BCON)); + lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval", + (LE_C3_BSWP | + LE_C3_ACON | + LE_C3_BCON)); lp->name = lancestr; lp->ledma = ledma; lp->burst_sizes = 0; if (lp->ledma) { - char prop[6]; + struct device_node *ledma_dp = ledma->sdev->ofdev.node; + const char *prop; unsigned int sbmask; u32 csr; /* Find burst-size property for ledma */ - lp->burst_sizes = prom_getintdefault(ledma->sdev->prom_node, - "burst-sizes", 0); + lp->burst_sizes = of_getintprop_default(ledma_dp, + "burst-sizes", 0); /* ledma may be capable of fast bursts, but sbus may not. */ - sbmask = prom_getintdefault(ledma->sdev->bus->prom_node, - "burst-sizes", DMA_BURSTBITS); + sbmask = of_getintprop_default(ledma_dp, "burst-sizes", + DMA_BURSTBITS); lp->burst_sizes &= sbmask; /* Get the cable-selection property */ - memset(prop, 0, sizeof(prop)); - prom_getstring(ledma->sdev->prom_node, "cable-selection", - prop, sizeof(prop)); - if (prop[0] == 0) { - int topnd, nd; + prop = of_get_property(ledma_dp, "cable-selection", NULL); + if (!prop || prop[0] == '\0') { + struct device_node *nd; - printk(KERN_INFO "SunLance: using auto-carrier-detection.\n"); + printk(KERN_INFO "SunLance: using " + "auto-carrier-detection.\n"); - /* Is this found at /options .attributes in all - * Prom versions? 
XXX - */ - topnd = prom_getchild(prom_root_node); - - nd = prom_searchsiblings(topnd, "options"); + nd = of_find_node_by_path("/options"); if (!nd) goto no_link_test; - if (!prom_node_has_property(nd, "tpe-link-test?")) + prop = of_get_property(nd, "tpe-link-test?", NULL); + if (!prop) goto no_link_test; - memset(prop, 0, sizeof(prop)); - prom_getstring(nd, "tpe-link-test?", prop, - sizeof(prop)); - if (strcmp(prop, "true")) { printk(KERN_NOTICE "SunLance: warning: overriding option " "'tpe-link-test?'\n"); @@ -1466,14 +1450,12 @@ no_link_test: lp->dregs = NULL; lp->dev = dev; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); dev->open = &lance_open; dev->stop = &lance_close; dev->hard_start_xmit = &lance_start_xmit; dev->tx_timeout = &lance_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->ethtool_ops = &sparc_lance_ethtool_ops; @@ -1497,12 +1479,8 @@ no_link_test: dev_set_drvdata(&sdev->ofdev.dev, lp); - printk(KERN_INFO "%s: LANCE ", dev->name); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ': ':'); - printk("\n"); + printk(KERN_INFO "%s: LANCE %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index 1b65ae8a1c7c..ff23c6489efd 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -260,31 +260,31 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) if (qe_status & CREG_STAT_EDEFER) { printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; } if (qe_status & CREG_STAT_CLOSS) { printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_carrier_errors++; + dev->stats.tx_errors++; + dev->stats.tx_carrier_errors++; } if (qe_status & CREG_STAT_ERETRIES) { printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_LCOLL) { printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.collisions++; + dev->stats.tx_errors++; + dev->stats.collisions++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_FUFLOW) { printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; mace_hwbug_workaround = 1; } @@ -297,104 +297,104 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) } if (qe_status & CREG_STAT_CCOFLOW) { - qep->net_stats.tx_errors += 256; - qep->net_stats.collisions += 256; + dev->stats.tx_errors += 256; + dev->stats.collisions += 256; } if (qe_status & CREG_STAT_TXDERROR) { printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXLERR) { printk(KERN_ERR "%s: Transmit late error.\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXPERR) { printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXSERR) { printk(KERN_ERR "%s: Transmit DMA sbus error 
ack.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RCCOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.collisions += 256; + dev->stats.rx_errors += 256; + dev->stats.collisions += 256; } if (qe_status & CREG_STAT_RUOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_over_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_over_errors += 256; } if (qe_status & CREG_STAT_MCOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_missed_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_missed_errors += 256; } if (qe_status & CREG_STAT_RXFOFLOW) { printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_over_errors++; + dev->stats.rx_errors++; + dev->stats.rx_over_errors++; } if (qe_status & CREG_STAT_RLCOLL) { printk(KERN_ERR "%s: Late receive collision.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.collisions++; + dev->stats.rx_errors++; + dev->stats.collisions++; } if (qe_status & CREG_STAT_FCOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_frame_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_frame_errors += 256; } if (qe_status & CREG_STAT_CECOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_crc_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_crc_errors += 256; } if (qe_status & CREG_STAT_RXDROP) { printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_dropped++; - qep->net_stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + dev->stats.rx_missed_errors++; } if (qe_status & CREG_STAT_RXSMALL) { printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; } if (qe_status & CREG_STAT_RXLERR) { printk(KERN_ERR "%s: Receive late error.\n", dev->name); - qep->net_stats.rx_errors++; + dev->stats.rx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXPERR) { printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXSERR) { printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } @@ -409,6 +409,7 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) static void qe_rx(struct sunqe *qep) { struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; + struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; @@ -428,14 +429,14 @@ static void qe_rx(struct sunqe *qep) /* Check for errors. 
*/ if (len < ETH_ZLEN) { - qep->net_stats.rx_errors++; - qep->net_stats.rx_length_errors++; - qep->net_stats.rx_dropped++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev->stats.rx_dropped++; } else { skb = dev_alloc_skb(len + 2); if (skb == NULL) { drops++; - qep->net_stats.rx_dropped++; + dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); skb_put(skb, len); @@ -444,8 +445,8 @@ static void qe_rx(struct sunqe *qep) skb->protocol = eth_type_trans(skb, qep->dev); netif_rx(skb); qep->dev->last_rx = jiffies; - qep->net_stats.rx_packets++; - qep->net_stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } } end_rxd->rx_addr = this_qbuf_dvma; @@ -603,8 +604,8 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); - qep->net_stats.tx_packets++; - qep->net_stats.tx_bytes += len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; if (TX_BUFFS_AVAIL(qep) <= 0) { /* Halt the net queue and enable tx interrupts. @@ -622,13 +623,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *qe_get_stats(struct net_device *dev) -{ - struct sunqe *qep = (struct sunqe *) dev->priv; - - return &qep->net_stats; -} - static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = (struct sunqe *) dev->priv; @@ -898,13 +892,11 @@ static int __init qec_ether_init(struct sbus_dev *sdev) /* Stop this QE. */ qe_stop(qe); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); dev->open = qe_open; dev->stop = qe_close; dev->hard_start_xmit = qe_start_xmit; - dev->get_stats = qe_get_stats; dev->set_multicast_list = qe_set_multicast; dev->tx_timeout = qe_tx_timeout; dev->watchdog_timeo = 5*HZ; diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h index af34f36111ed..347c8ddc1592 100644 --- a/drivers/net/sunqe.h +++ b/drivers/net/sunqe.h @@ -342,7 +342,6 @@ struct sunqe { __u32 buffers_dvma; /* DVMA visible address. */ struct sunqec *parent; u8 mconfig; /* Base MACE mconfig value */ - struct net_device_stats net_stats; /* Statistical counters */ struct sbus_dev *qe_sdev; /* QE's SBUS device struct */ struct net_device *dev; /* QE's netdevice struct */ int channel; /* Who am I? 
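/*
 * A minimal sketch (not part of this patch) of the pattern the statistics
 * hunks above follow (sun3lance, sunlance, sunqe): struct net_device now
 * embeds its own net_device_stats, so the per-driver copy and the
 * get_stats method are dropped and the core falls back to &dev->stats.
 * "mydrv_private" and mydrv_count_rx() are hypothetical names.
 */
#include <linux/netdevice.h>

struct mydrv_private {
	/* struct net_device_stats stats;  <-- removed; counters live in dev->stats */
	spinlock_t lock;
	/* rings, register base, ... */
};

static void mydrv_count_rx(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_packets++;		/* was lp->stats.rx_packets++ */
	dev->stats.rx_bytes += len;		/* was lp->stats.rx_bytes += len */
}
/* ...and the driver's get_stats() implementation plus the dev->get_stats
 * assignment are simply deleted, exactly as in the hunks above. */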
*/ diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index 8a667c13faef..ff1028a597df 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c @@ -12,6 +12,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> +#include <linux/mutex.h> #include <asm/vio.h> #include <asm/ldc.h> @@ -458,6 +459,22 @@ static int vnet_nack(struct vnet_port *port, void *msgbuf) return 0; } +static int handle_mcast(struct vnet_port *port, void *msgbuf) +{ + struct vio_net_mcast_info *pkt = msgbuf; + + if (pkt->tag.stype != VIO_SUBTYPE_ACK) + printk(KERN_ERR PFX "%s: Got unexpected MCAST reply " + "[%02x:%02x:%04x:%08x]\n", + port->vp->dev->name, + pkt->tag.type, + pkt->tag.stype, + pkt->tag.stype_env, + pkt->tag.sid); + + return 0; +} + static void maybe_tx_wakeup(struct vnet *vp) { struct net_device *dev = vp->dev; @@ -497,6 +514,8 @@ static void vnet_event(void *arg, int event) vio_link_state_change(vio, event); spin_unlock_irqrestore(&vio->lock, flags); + if (event == LDC_EVENT_RESET) + vio_port_up(vio); return; } @@ -541,7 +560,10 @@ static void vnet_event(void *arg, int event) err = vnet_nack(port, &msgbuf); } } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { - err = vio_control_pkt_engine(vio, &msgbuf); + if (msgbuf.tag.stype_env == VNET_MCAST_INFO) + err = handle_mcast(port, &msgbuf); + else + err = vio_control_pkt_engine(vio, &msgbuf); if (err) break; } else { @@ -728,9 +750,122 @@ static int vnet_close(struct net_device *dev) return 0; } +static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) +{ + struct vnet_mcast_entry *m; + + for (m = vp->mcast_list; m; m = m->next) { + if (!memcmp(m->addr, addr, ETH_ALEN)) + return m; + } + return NULL; +} + +static void __update_mc_list(struct vnet *vp, struct net_device *dev) +{ + struct dev_addr_list *p; + + for (p = dev->mc_list; p; p = p->next) { + struct vnet_mcast_entry *m; + + m = __vnet_mc_find(vp, p->dmi_addr); + if (m) { + m->hit = 1; + continue; + } + + if (!m) { + m = kzalloc(sizeof(*m), GFP_ATOMIC); + if (!m) + continue; + memcpy(m->addr, p->dmi_addr, ETH_ALEN); + m->hit = 1; + + m->next = vp->mcast_list; + vp->mcast_list = m; + } + } +} + +static void __send_mc_list(struct vnet *vp, struct vnet_port *port) +{ + struct vio_net_mcast_info info; + struct vnet_mcast_entry *m, **pp; + int n_addrs; + + memset(&info, 0, sizeof(info)); + + info.tag.type = VIO_TYPE_CTRL; + info.tag.stype = VIO_SUBTYPE_INFO; + info.tag.stype_env = VNET_MCAST_INFO; + info.tag.sid = vio_send_sid(&port->vio); + info.set = 1; + + n_addrs = 0; + for (m = vp->mcast_list; m; m = m->next) { + if (m->sent) + continue; + m->sent = 1; + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } + + info.set = 0; + + n_addrs = 0; + pp = &vp->mcast_list; + while ((m = *pp) != NULL) { + if (m->hit) { + m->hit = 0; + pp = &m->next; + continue; + } + + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + + *pp = m->next; + kfree(m); + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } +} + static void vnet_set_rx_mode(struct net_device *dev) { - /* XXX Implement multicast 
support XXX */ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); + if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + + if (port->switch_port) { + __update_mc_list(vp, dev); + __send_mc_list(vp, port); + } + } + spin_unlock_irqrestore(&vp->lock, flags); } static int vnet_change_mtu(struct net_device *dev, int new_mtu) @@ -771,7 +906,6 @@ static const struct ethtool_ops vnet_ethtool_ops = { .get_msglevel = vnet_get_msglevel, .set_msglevel = vnet_set_msglevel, .get_link = ethtool_op_get_link, - .get_perm_addr = ethtool_op_get_perm_addr, }; static void vnet_port_free_tx_bufs(struct vnet_port *port) @@ -875,6 +1009,115 @@ err_out: return err; } +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); + +static struct vnet * __devinit vnet_new(const u64 *local_mac) +{ + struct net_device *dev; + struct vnet *vp; + int err, i; + + dev = alloc_etherdev(sizeof(*vp)); + if (!dev) { + printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; + + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + vp = netdev_priv(dev); + + spin_lock_init(&vp->lock); + vp->dev = dev; + + INIT_LIST_HEAD(&vp->port_list); + for (i = 0; i < VNET_PORT_HASH_SIZE; i++) + INIT_HLIST_HEAD(&vp->port_hash[i]); + INIT_LIST_HEAD(&vp->list); + vp->local_mac = *local_mac; + + dev->open = vnet_open; + dev->stop = vnet_close; + dev->set_multicast_list = vnet_set_rx_mode; + dev->set_mac_address = vnet_set_mac_addr; + dev->tx_timeout = vnet_tx_timeout; + dev->ethtool_ops = &vnet_ethtool_ops; + dev->watchdog_timeo = VNET_TX_TIMEOUT; + dev->change_mtu = vnet_change_mtu; + dev->hard_start_xmit = vnet_start_xmit; + + err = register_netdev(dev); + if (err) { + printk(KERN_ERR PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_free_dev; + } + + printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); + + for (i = 0; i < 6; i++) + printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
'\n' : ':'); + + list_add(&vp->list, &vnet_list); + + return vp; + +err_out_free_dev: + free_netdev(dev); + + return ERR_PTR(err); +} + +static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac) +{ + struct vnet *iter, *vp; + + mutex_lock(&vnet_list_mutex); + vp = NULL; + list_for_each_entry(iter, &vnet_list, list) { + if (iter->local_mac == *local_mac) { + vp = iter; + break; + } + } + if (!vp) + vp = vnet_new(local_mac); + mutex_unlock(&vnet_list_mutex); + + return vp; +} + +static const char *local_mac_prop = "local-mac-address"; + +static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp, + u64 port_node) +{ + const u64 *local_mac = NULL; + u64 a; + + mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { + u64 target = mdesc_arc_target(hp, a); + const char *name; + + name = mdesc_get_property(hp, target, "name", NULL); + if (!name || strcmp(name, "network")) + continue; + + local_mac = mdesc_get_property(hp, target, + local_mac_prop, NULL); + if (local_mac) + break; + } + if (!local_mac) + return ERR_PTR(-ENODEV); + + return vnet_find_or_create(local_mac); +} + static struct ldc_channel_config vnet_ldc_cfg = { .event = vnet_event, .mtu = 64, @@ -887,6 +1130,14 @@ static struct vio_driver_ops vnet_vio_ops = { .handshake_complete = vnet_handshake_complete, }; +static void print_version(void) +{ + static int version_printed; + + if (version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + const char *remote_macaddr_prop = "remote-mac-address"; static int __devinit vnet_port_probe(struct vio_dev *vdev, @@ -899,14 +1150,17 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev, const u64 *rmac; int len, i, err, switch_port; - vp = dev_get_drvdata(vdev->dev.parent); - if (!vp) { - printk(KERN_ERR PFX "Cannot find port parent vnet.\n"); - return -ENODEV; - } + print_version(); hp = mdesc_grab(); + vp = vnet_find_parent(hp, vdev->mp); + if (IS_ERR(vp)) { + printk(KERN_ERR PFX "Cannot find port parent vnet.\n"); + err = PTR_ERR(vp); + goto err_out_put_mdesc; + } + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { @@ -947,6 +1201,7 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev, switch_port = 0; if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) switch_port = 1; + port->switch_port = switch_port; spin_lock_irqsave(&vp->lock, flags); if (switch_port) @@ -1013,7 +1268,7 @@ static struct vio_device_id vnet_port_match[] = { }, {}, }; -MODULE_DEVICE_TABLE(vio, vnet_match); +MODULE_DEVICE_TABLE(vio, vnet_port_match); static struct vio_driver vnet_port_driver = { .id_table = vnet_port_match, @@ -1025,139 +1280,14 @@ static struct vio_driver vnet_port_driver = { } }; -const char *local_mac_prop = "local-mac-address"; - -static int __devinit vnet_probe(struct vio_dev *vdev, - const struct vio_device_id *id) -{ - static int vnet_version_printed; - struct mdesc_handle *hp; - struct net_device *dev; - struct vnet *vp; - const u64 *mac; - int err, i, len; - - if (vnet_version_printed++ == 0) - printk(KERN_INFO "%s", version); - - hp = mdesc_grab(); - - mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len); - if (!mac) { - printk(KERN_ERR PFX "vnet lacks %s property.\n", - local_mac_prop); - err = -ENODEV; - goto err_out; - } - - dev = alloc_etherdev(sizeof(*vp)); - if (!dev) { - printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); - err = -ENOMEM; - goto err_out; - } - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff; - - 
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - SET_NETDEV_DEV(dev, &vdev->dev); - - vp = netdev_priv(dev); - - spin_lock_init(&vp->lock); - vp->dev = dev; - vp->vdev = vdev; - - INIT_LIST_HEAD(&vp->port_list); - for (i = 0; i < VNET_PORT_HASH_SIZE; i++) - INIT_HLIST_HEAD(&vp->port_hash[i]); - - dev->open = vnet_open; - dev->stop = vnet_close; - dev->set_multicast_list = vnet_set_rx_mode; - dev->set_mac_address = vnet_set_mac_addr; - dev->tx_timeout = vnet_tx_timeout; - dev->ethtool_ops = &vnet_ethtool_ops; - dev->watchdog_timeo = VNET_TX_TIMEOUT; - dev->change_mtu = vnet_change_mtu; - dev->hard_start_xmit = vnet_start_xmit; - - err = register_netdev(dev); - if (err) { - printk(KERN_ERR PFX "Cannot register net device, " - "aborting.\n"); - goto err_out_free_dev; - } - - printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); - - dev_set_drvdata(&vdev->dev, vp); - - mdesc_release(hp); - - return 0; - -err_out_free_dev: - free_netdev(dev); - -err_out: - mdesc_release(hp); - return err; -} - -static int vnet_remove(struct vio_dev *vdev) -{ - - struct vnet *vp = dev_get_drvdata(&vdev->dev); - - if (vp) { - /* XXX unregister port, or at least check XXX */ - unregister_netdevice(vp->dev); - dev_set_drvdata(&vdev->dev, NULL); - } - return 0; -} - -static struct vio_device_id vnet_match[] = { - { - .type = "network", - }, - {}, -}; -MODULE_DEVICE_TABLE(vio, vnet_match); - -static struct vio_driver vnet_driver = { - .id_table = vnet_match, - .probe = vnet_probe, - .remove = vnet_remove, - .driver = { - .name = "vnet", - .owner = THIS_MODULE, - } -}; - static int __init vnet_init(void) { - int err = vio_register_driver(&vnet_driver); - - if (!err) { - err = vio_register_driver(&vnet_port_driver); - if (err) - vio_unregister_driver(&vnet_driver); - } - - return err; + return vio_register_driver(&vnet_port_driver); } static void __exit vnet_exit(void) { vio_unregister_driver(&vnet_port_driver); - vio_unregister_driver(&vnet_driver); } module_init(vnet_init); diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h index 1c887302d46d..d347a5bf24b0 100644 --- a/drivers/net/sunvnet.h +++ b/drivers/net/sunvnet.h @@ -30,6 +30,8 @@ struct vnet_port { struct hlist_node hash; u8 raddr[ETH_ALEN]; + u8 switch_port; + u8 __pad; struct vnet *vp; @@ -53,6 +55,13 @@ static inline unsigned int vnet_hashfn(u8 *mac) return val & (VNET_PORT_HASH_MASK); } +struct vnet_mcast_entry { + u8 addr[ETH_ALEN]; + u8 sent; + u8 hit; + struct vnet_mcast_entry *next; +}; + struct vnet { /* Protects port_list and port_hash. 
*/ spinlock_t lock; @@ -60,11 +69,15 @@ struct vnet { struct net_device *dev; u32 msg_enable; - struct vio_dev *vdev; struct list_head port_list; struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; + + struct vnet_mcast_entry *mcast_list; + + struct list_head list; + u64 local_mac; }; #endif /* _SUNVNET_H */ diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 75655add3f34..a679f4310ce1 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -414,6 +414,9 @@ enum tc35815_timer_state { struct tc35815_local { struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + /* statistics */ struct net_device_stats stats; struct { @@ -566,7 +569,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); #ifdef TC35815_NAPI static int tc35815_rx(struct net_device *dev, int limit); -static int tc35815_poll(struct net_device *dev, int *budget); +static int tc35815_poll(struct napi_struct *napi, int budget); #else static void tc35815_rx(struct net_device *dev); #endif @@ -626,7 +629,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) return -ENODEV; } #else -static int __devinit tc35815_read_plat_dev_addr(struct device *dev) +static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) { return -ENODEV; } @@ -682,9 +685,9 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, dev_err(&pdev->dev, "unable to alloc new ethernet\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); lp = dev->priv; + lp->dev = dev; /* enable device (incl. PCI PM wakeup), and bus-mastering */ rc = pci_enable_device (pdev); @@ -738,8 +741,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, dev->tx_timeout = tc35815_tx_timeout; dev->watchdog_timeo = TC35815_TX_TIMEOUT; #ifdef TC35815_NAPI - dev->poll = tc35815_poll; - dev->weight = NAPI_WEIGHT; + netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = tc35815_poll_controller; @@ -748,8 +750,6 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, dev->irq = pdev->irq; dev->base_addr = (unsigned long) ioaddr; - /* dev->priv/lp zeroed and aligned in alloc_etherdev */ - lp = dev->priv; spin_lock_init(&lp->lock); lp->pci_dev = pdev; lp->boardtype = ent->driver_data; @@ -1237,6 +1237,10 @@ tc35815_open(struct net_device *dev) return -EAGAIN; } +#ifdef TC35815_NAPI + napi_enable(&lp->napi); +#endif + /* Reset the hardware here. Don't forget to set the station address. 
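/*
 * A minimal sketch (not part of this patch) of the open/close side of the
 * same NAPI conversion, continuing the hypothetical "mynic" driver from the
 * earlier sketch: napi_enable()/napi_disable() become the driver's explicit
 * responsibility, which is what the tc35815 hunks here add.  The poll
 * handler is registered once at probe time with
 * netif_napi_add(dev, &np->napi, mynic_poll, 64).
 */
#include <linux/netdevice.h>

struct mynic {
	struct net_device *dev;
	struct napi_struct napi;
	/* registers, rings, ... */
};

static int mynic_open(struct net_device *dev)
{
	struct mynic *np = netdev_priv(dev);

	napi_enable(&np->napi);		/* before the first interrupt can schedule a poll */
	/* reset the chip, request_irq(), netif_start_queue() ... */
	return 0;
}

static int mynic_close(struct net_device *dev)
{
	struct mynic *np = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&np->napi);	/* waits for any poll in progress to finish */
	/* mask interrupts, free_irq(), stop the chip ... */
	return 0;
}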
*/ spin_lock_irq(&lp->lock); tc35815_chip_init(dev); @@ -1436,6 +1440,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status) static irqreturn_t tc35815_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; + struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; #ifdef TC35815_NAPI @@ -1444,8 +1449,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) if (!(dmactl & DMA_IntMask)) { /* disable interrupts */ tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); - if (netif_rx_schedule_prep(dev)) - __netif_rx_schedule(dev); + if (netif_rx_schedule_prep(dev, &lp->napi)) + __netif_rx_schedule(dev, &lp->napi); else { printk(KERN_ERR "%s: interrupt taken in poll\n", dev->name); @@ -1726,13 +1731,12 @@ tc35815_rx(struct net_device *dev) } #ifdef TC35815_NAPI -static int -tc35815_poll(struct net_device *dev, int *budget) +static int tc35815_poll(struct napi_struct *napi, int budget) { - struct tc35815_local *lp = dev->priv; + struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); + struct net_device *dev = lp->dev; struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; - int limit = min(*budget, dev->quota); int received = 0, handled; u32 status; @@ -1744,23 +1748,19 @@ tc35815_poll(struct net_device *dev, int *budget) handled = tc35815_do_interrupt(dev, status, limit); if (handled >= 0) { received += handled; - limit -= handled; - if (limit <= 0) + if (received >= budget) break; } status = tc_readl(&tr->Int_Src); } while (status); spin_unlock(&lp->lock); - dev->quota -= received; - *budget -= received; - if (limit <= 0) - return 1; - - netif_rx_complete(dev); - /* enable interrupts */ - tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); - return 0; + if (received < budget) { + netif_rx_complete(dev, napi); + /* enable interrupts */ + tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); + } + return received; } #endif @@ -1949,7 +1949,11 @@ static int tc35815_close(struct net_device *dev) { struct tc35815_local *lp = dev->priv; + netif_stop_queue(dev); +#ifdef TC35815_NAPI + napi_disable(&lp->napi); +#endif /* Flush the Tx and disable Rx here. */ @@ -2158,10 +2162,16 @@ static void tc35815_set_msglevel(struct net_device *dev, u32 datum) lp->msg_enable = datum; } -static int tc35815_get_stats_count(struct net_device *dev) +static int tc35815_get_sset_count(struct net_device *dev, int sset) { struct tc35815_local *lp = dev->priv; - return sizeof(lp->lstats) / sizeof(int); + + switch (sset) { + case ETH_SS_STATS: + return sizeof(lp->lstats) / sizeof(int); + default: + return -EOPNOTSUPP; + } } static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) @@ -2196,9 +2206,8 @@ static const struct ethtool_ops tc35815_ethtool_ops = { .get_msglevel = tc35815_get_msglevel, .set_msglevel = tc35815_set_msglevel, .get_strings = tc35815_get_strings, - .get_stats_count = tc35815_get_stats_count, + .get_sset_count = tc35815_get_sset_count, .get_ethtool_stats = tc35815_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c new file mode 100644 index 000000000000..8d04654f0c59 --- /dev/null +++ b/drivers/net/tehuti.c @@ -0,0 +1,2506 @@ +/* + * Tehuti Networks(R) Network Driver + * ethtool interface implementation + * Copyright (C) 2007 Tehuti Networks Ltd. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* + * RX HW/SW interaction overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * There are 2 types of RX communication channels between driver and NIC. + * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming + * traffic. This Fifo is filled by SW and is read by HW. Each descriptor holds + * info about buffer's location, size and ID. An ID field is used to identify a + * buffer when it's returned with data via RXD Fifo (see below) + * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is + * filled by HW and is read by SW. Each descriptor holds status and ID. + * HW pops descriptor from RXF Fifo, stores ID, fills buffer with incoming data, + * via dma moves it into host memory, builds new RXD descriptor with same ID, + * pushes it into RXD Fifo and raises interrupt to indicate new RX data. + * + * Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos. + * One holds 1.5K packets and another - 26K packets. Depending on incoming + * packet size, HW decides on a RXF Fifo to pop buffer from. When packet is + * filled with data, HW builds new RXD descriptor for it and pushes it into the single + * RXD Fifo. + * + * RX SW Data Structures + * ~~~~~~~~~~~~~~~~~~~~~ + * skb db - used to keep track of all skbs owned by SW and their dma addresses. + * For RX case, ownership lasts from allocating new empty skb for RXF until + * accepting full skb from RXD and passing it to OS. Each RXF Fifo has its own + * skb db. Implemented as array with bitmask. + * fifo - keeps info about fifo's size and location, relevant HW registers, + * usage and skb db. Each RXD and RXF Fifo has its own fifo structure. + * Implemented as simple struct. + * + * RX SW Execution Flow + * ~~~~~~~~~~~~~~~~~~~~ + * Upon initialization (ifconfig up) driver creates RX fifos and initializes + * relevant registers. At the end of init phase, driver enables interrupts. + * NIC sees that there are no RXF buffers and raises + * RD_INTR interrupt, isr fills skbs and Rx begins. + * Driver has two receive operation modes: + * NAPI - interrupt-driven mixed with polling + * interrupt-driven only + * + * Interrupt-driven only flow is as follows. When a buffer is ready, HW raises + * interrupt and isr is called. isr collects all available packets + * (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits. + + * Rx buffer allocation note + * ~~~~~~~~~~~~~~~~~~~~~~~~~ + * Driver takes care to feed such an amount of RxF descriptors that the respective amount of + * RxD descriptors cannot fill the entire RxD fifo. The main reason is lack of + * overflow check in Bordeaux for RxD fifo free/used size. 
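/*
 * A stand-alone toy model (not part of this patch) of the RXF/RXD scheme
 * described above: software hands out buffer IDs via the RXF fifo and the
 * hardware returns the same IDs, with data, via the RXD fifo; the "skb db"
 * is just a table indexed by that ID.  Plain C, no kernel dependencies,
 * all names invented for illustration.
 */
#include <stdio.h>

#define DB_SIZE 8

struct rx_buf {
	int in_use;
	char data[64];
};

static struct rx_buf skb_db[DB_SIZE];		/* "skb db": owned-buffer table, indexed by ID */

static int rxf_push(void)			/* SW -> HW: offer an empty buffer */
{
	int id;

	for (id = 0; id < DB_SIZE; id++) {
		if (!skb_db[id].in_use) {
			skb_db[id].in_use = 1;	/* the ID travels inside the RXF descriptor */
			return id;
		}
	}
	return -1;				/* no free buffer: the RXF fifo starves */
}

static void rxd_complete(int id)		/* HW -> SW: the same ID comes back */
{
	printf("packet landed in buffer %d: %s\n", id, skb_db[id].data);
	skb_db[id].in_use = 0;			/* ownership returns to SW, buffer can be re-posted */
}

int main(void)
{
	int id = rxf_push();

	if (id < 0)
		return 1;
	snprintf(skb_db[id].data, sizeof(skb_db[id].data), "hello");	/* stands in for the DMA write */
	rxd_complete(id);			/* as if an RXD descriptor had arrived */
	return 0;
}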
+ * FIXME: this is NOT fully implemented, more work should be done + * + */ + +#include "tehuti.h" +#include "tehuti_fw.h" + +static struct pci_device_id __devinitdata bdx_pci_tbl[] = { + {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, bdx_pci_tbl); + +/* Definitions needed by ISR or NAPI functions */ +static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f); +static void bdx_tx_cleanup(struct bdx_priv *priv); +static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget); + +/* Definitions needed by FW loading */ +static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size); + +/* Definitions needed by hw_start */ +static int bdx_tx_init(struct bdx_priv *priv); +static int bdx_rx_init(struct bdx_priv *priv); + +/* Definitions needed by bdx_close */ +static void bdx_rx_free(struct bdx_priv *priv); +static void bdx_tx_free(struct bdx_priv *priv); + +/* Definitions needed by bdx_probe */ +static void bdx_ethtool_ops(struct net_device *netdev); + +/************************************************************************* + * Print Info * + *************************************************************************/ + +static void print_hw_id(struct pci_dev *pdev) +{ + struct pci_nic *nic = pci_get_drvdata(pdev); + u16 pci_link_status = 0; + u16 pci_ctrl = 0; + + pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status); + pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl); + + printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME, + nic->port_num == 1 ? "" : ", 2-Port"); + printk(KERN_INFO + "tehuti: srom 0x%x fpga %d build %u lane# %d" + " max_pl 0x%x mrrs 0x%x\n", + readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF, + readl(nic->regs + FPGA_SEED), + GET_LINK_STATUS_LANES(pci_link_status), + GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl)); +} + +static void print_fw_id(struct pci_nic *nic) +{ + printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER)); +} + +static void print_eth_id(struct net_device *ndev) +{ + printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME, + (ndev->if_port == 0) ? 'A' : 'B'); + +} + +/************************************************************************* + * Code * + *************************************************************************/ + +#define bdx_enable_interrupts(priv) \ + do { WRITE_REG(priv, regIMR, IR_RUN); } while (0) +#define bdx_disable_interrupts(priv) \ + do { WRITE_REG(priv, regIMR, 0); } while (0) + +/* bdx_fifo_init + * create TX/RX descriptor fifo for host-NIC communication. 
+ * 1K extra space is allocated at the end of the fifo to simplify + * processing of descriptors that wraps around fifo's end + * @priv - NIC private structure + * @f - fifo to initialize + * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB + * @reg_XXX - offsets of registers relative to base address + * + * Returns 0 on success, negative value on failure + * + */ +static int +bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type, + u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR) +{ + u16 memsz = FIFO_SIZE * (1 << fsz_type); + + memset(f, 0, sizeof(struct fifo)); + /* pci_alloc_consistent gives us 4k-aligned memory */ + f->va = pci_alloc_consistent(priv->pdev, + memsz + FIFO_EXTRA_SPACE, &f->da); + if (!f->va) { + ERR("pci_alloc_consistent failed\n"); + RET(-ENOMEM); + } + f->reg_CFG0 = reg_CFG0; + f->reg_CFG1 = reg_CFG1; + f->reg_RPTR = reg_RPTR; + f->reg_WPTR = reg_WPTR; + f->rptr = 0; + f->wptr = 0; + f->memsz = memsz; + f->size_mask = memsz - 1; + WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type)); + WRITE_REG(priv, reg_CFG1, H32_64(f->da)); + + RET(0); +} + +/* bdx_fifo_free - free all resources used by fifo + * @priv - NIC private structure + * @f - fifo to release + */ +static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) +{ + ENTER; + if (f->va) { + pci_free_consistent(priv->pdev, + f->memsz + FIFO_EXTRA_SPACE, f->va, f->da); + f->va = NULL; + } + RET(); +} + +/* + * bdx_link_changed - notifies OS about hw link state. + * @bdx_priv - hw adapter structure + */ +static void bdx_link_changed(struct bdx_priv *priv) +{ + u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT; + + if (!link) { + if (netif_carrier_ok(priv->ndev)) { + netif_stop_queue(priv->ndev); + netif_carrier_off(priv->ndev); + ERR("%s: Link Down\n", priv->ndev->name); + } + } else { + if (!netif_carrier_ok(priv->ndev)) { + netif_wake_queue(priv->ndev); + netif_carrier_on(priv->ndev); + ERR("%s: Link Up\n", priv->ndev->name); + } + } +} + +static void bdx_isr_extra(struct bdx_priv *priv, u32 isr) +{ + if (isr & IR_RX_FREE_0) { + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + DBG("RX_FREE_0\n"); + } + + if (isr & IR_LNKCHG0) + bdx_link_changed(priv); + + if (isr & IR_PCIE_LINK) + ERR("%s: PCI-E Link Fault\n", priv->ndev->name); + + if (isr & IR_PCIE_TOUT) + ERR("%s: PCI-E Time Out\n", priv->ndev->name); + +} + +/* bdx_isr - Interrupt Service Routine for Bordeaux NIC + * @irq - interrupt number + * @ndev - network device + * @regs - CPU registers + * + * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise + * + * It reads ISR register to know interrupt reasons, and proceed them one by one. 
+ * Reasons of interest are: + * RX_DESC - new packet has arrived and RXD fifo holds its descriptor + * RX_FREE - number of free Rx buffers in RXF fifo gets low + * TX_FREE - packet was transmited and RXF fifo holds its descriptor + */ + +static irqreturn_t bdx_isr_napi(int irq, void *dev) +{ + struct net_device *ndev = dev; + struct bdx_priv *priv = ndev->priv; + u32 isr; + + ENTER; + isr = (READ_REG(priv, regISR) & IR_RUN); + if (unlikely(!isr)) { + bdx_enable_interrupts(priv); + return IRQ_NONE; /* Not our interrupt */ + } + + if (isr & IR_EXTRA) + bdx_isr_extra(priv, isr); + + if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { + if (likely(netif_rx_schedule_prep(ndev, &priv->napi))) { + __netif_rx_schedule(ndev, &priv->napi); + RET(IRQ_HANDLED); + } else { + /* NOTE: we get here if intr has slipped into window + * between these lines in bdx_poll: + * bdx_enable_interrupts(priv); + * return 0; + * currently intrs are disabled (since we read ISR), + * and we have failed to register next poll. + * so we read the regs to trigger chip + * and allow further interupts. */ + READ_REG(priv, regTXF_WPTR_0); + READ_REG(priv, regRXD_WPTR_0); + } + } + + bdx_enable_interrupts(priv); + RET(IRQ_HANDLED); +} + +static int bdx_poll(struct napi_struct *napi, int budget) +{ + struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi); + struct net_device *dev = priv->ndev; + int work_done; + + ENTER; + bdx_tx_cleanup(priv); + work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget); + if ((work_done < budget) || + (priv->napi_stop++ >= 30)) { + DBG("rx poll is done. backing to isr-driven\n"); + + /* from time to time we exit to let NAPI layer release + * device lock and allow waiting tasks (eg rmmod) to advance) */ + priv->napi_stop = 0; + + netif_rx_complete(dev, napi); + bdx_enable_interrupts(priv); + } + return work_done; +} + +/* bdx_fw_load - loads firmware to NIC + * @priv - NIC private structure + * Firmware is loaded via TXD fifo, so it must be initialized first. + * Firware must be loaded once per NIC not per PCI device provided by NIC (NIC + * can have few of them). So all drivers use semaphore register to choose one + * that will actually load FW to NIC. 
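A condensed sketch of that semaphore handshake may help here; it reuses the register names, the s_firmLoad image and the helper that bdx_fw_load below actually uses, with the settle delays and error reporting trimmed (this is an illustration, not the literal function):

	static int fw_load_sketch(struct bdx_priv *priv)
	{
		/* non-zero means we won the arbitration and must load FW */
		int master = READ_REG(priv, regINIT_SEMAPHORE);
		int i;

		if (master && !READ_REG(priv, regINIT_STATUS))
			/* push the firmware image through the TXD fifo */
			bdx_tx_push_desc_safe(priv, s_firmLoad, sizeof(s_firmLoad));

		/* every port, master or not, waits for FW to come up */
		for (i = 0; i < 200; i++) {
			if (READ_REG(priv, regINIT_STATUS))
				break;
			mdelay(2);
		}
		if (master)
			WRITE_REG(priv, regINIT_SEMAPHORE, 1);	/* release */

		return (i == 200) ? -EIO : 0;
	}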
+ */ + +static int bdx_fw_load(struct bdx_priv *priv) +{ + int master, i; + + ENTER; + master = READ_REG(priv, regINIT_SEMAPHORE); + if (!READ_REG(priv, regINIT_STATUS) && master) { + bdx_tx_push_desc_safe(priv, s_firmLoad, sizeof(s_firmLoad)); + mdelay(100); + } + for (i = 0; i < 200; i++) { + if (READ_REG(priv, regINIT_STATUS)) + break; + mdelay(2); + } + if (master) + WRITE_REG(priv, regINIT_SEMAPHORE, 1); + + if (i == 200) { + ERR("%s: firmware loading failed\n", priv->ndev->name); + DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n", + READ_REG(priv, regVPC), + READ_REG(priv, regVIC), READ_REG(priv, regINIT_STATUS), i); + RET(-EIO); + } else { + DBG("%s: firmware loading success\n", priv->ndev->name); + RET(0); + } +} + +static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv) +{ + u32 val; + + ENTER; + DBG("mac0=%x mac1=%x mac2=%x\n", + READ_REG(priv, regUNC_MAC0_A), + READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A)); + + val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]); + WRITE_REG(priv, regUNC_MAC2_A, val); + val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]); + WRITE_REG(priv, regUNC_MAC1_A, val); + val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]); + WRITE_REG(priv, regUNC_MAC0_A, val); + + DBG("mac0=%x mac1=%x mac2=%x\n", + READ_REG(priv, regUNC_MAC0_A), + READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A)); + RET(); +} + +/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines + * @priv - NIC private structure + */ +static int bdx_hw_start(struct bdx_priv *priv) +{ + int rc = -EIO; + struct net_device *ndev = priv->ndev; + + ENTER; + bdx_link_changed(priv); + + /* 10G overall max length (vlan, eth&ip header, ip payload, crc) */ + WRITE_REG(priv, regFRM_LENGTH, 0X3FE0); + WRITE_REG(priv, regPAUSE_QUANT, 0x96); + WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010); + WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010); + WRITE_REG(priv, regRX_FULLNESS, 0); + WRITE_REG(priv, regTX_FULLNESS, 0); + WRITE_REG(priv, regCTRLST, + regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA); + + WRITE_REG(priv, regVGLB, 0); + WRITE_REG(priv, regMAX_FRAME_A, + priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL); + + DBG("RDINTCM=%08x\n", priv->rdintcm); /*NOTE: test script uses this */ + WRITE_REG(priv, regRDINTCM0, priv->rdintcm); + WRITE_REG(priv, regRDINTCM2, 0); /*cpu_to_le32(rcm.val)); */ + + DBG("TDINTCM=%08x\n", priv->tdintcm); /*NOTE: test script uses this */ + WRITE_REG(priv, regTDINTCM0, priv->tdintcm); /* old val = 0x300064 */ + + /* Enable timer interrupt once in 2 secs. 
*/ + /*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */ + bdx_restore_mac(priv->ndev, priv); + + WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN | + GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB); + +#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED) + if ((rc = request_irq(priv->pdev->irq, &bdx_isr_napi, BDX_IRQ_TYPE, + ndev->name, ndev))) + goto err_irq; + bdx_enable_interrupts(priv); + + RET(0); + +err_irq: + RET(rc); +} + +static void bdx_hw_stop(struct bdx_priv *priv) +{ + ENTER; + bdx_disable_interrupts(priv); + free_irq(priv->pdev->irq, priv->ndev); + + netif_carrier_off(priv->ndev); + netif_stop_queue(priv->ndev); + + RET(); +} + +static int bdx_hw_reset_direct(void __iomem *regs) +{ + u32 val, i; + ENTER; + + /* reset sequences: read, write 1, read, write 0 */ + val = readl(regs + regCLKPLL); + writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL); + udelay(50); + val = readl(regs + regCLKPLL); + writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL); + + /* check that the PLLs are locked and reset ended */ + for (i = 0; i < 70; i++, mdelay(10)) + if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) { + /* do any PCI-E read transaction */ + readl(regs + regRXD_CFG0_0); + return 0; + } + ERR("tehuti: HW reset failed\n"); + return 1; /* failure */ +} + +static int bdx_hw_reset(struct bdx_priv *priv) +{ + u32 val, i; + ENTER; + + if (priv->port == 0) { + /* reset sequences: read, write 1, read, write 0 */ + val = READ_REG(priv, regCLKPLL); + WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8); + udelay(50); + val = READ_REG(priv, regCLKPLL); + WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST); + } + /* check that the PLLs are locked and reset ended */ + for (i = 0; i < 70; i++, mdelay(10)) + if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) { + /* do any PCI-E read transaction */ + READ_REG(priv, regRXD_CFG0_0); + return 0; + } + ERR("tehuti: HW reset failed\n"); + return 1; /* failure */ +} + +static int bdx_sw_reset(struct bdx_priv *priv) +{ + int i; + + ENTER; + /* 1. load MAC (obsolete) */ + /* 2. disable Rx (and Tx) */ + WRITE_REG(priv, regGMAC_RXF_A, 0); + mdelay(100); + /* 3. disable port */ + WRITE_REG(priv, regDIS_PORT, 1); + /* 4. disable queue */ + WRITE_REG(priv, regDIS_QU, 1); + /* 5. wait until hw is disabled */ + for (i = 0; i < 50; i++) { + if (READ_REG(priv, regRST_PORT) & 1) + break; + mdelay(10); + } + if (i == 50) + ERR("%s: SW reset timeout. continuing anyway\n", + priv->ndev->name); + + /* 6. disable intrs */ + WRITE_REG(priv, regRDINTCM0, 0); + WRITE_REG(priv, regTDINTCM0, 0); + WRITE_REG(priv, regIMR, 0); + READ_REG(priv, regISR); + + /* 7. reset queue */ + WRITE_REG(priv, regRST_QU, 1); + /* 8. reset port */ + WRITE_REG(priv, regRST_PORT, 1); + /* 9. zero all read and write pointers */ + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + WRITE_REG(priv, i, 0); + /* 10. unseet port disable */ + WRITE_REG(priv, regDIS_PORT, 0); + /* 11. unset queue disable */ + WRITE_REG(priv, regDIS_QU, 0); + /* 12. unset queue reset */ + WRITE_REG(priv, regRST_QU, 0); + /* 13. unset port reset */ + WRITE_REG(priv, regRST_PORT, 0); + /* 14. enable Rx */ + /* skiped. will be done later */ + /* 15. 
save MAC (obsolete) */ + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); + + RET(0); +} + +/* bdx_reset - performs right type of reset depending on hw type */ +static int bdx_reset(struct bdx_priv *priv) +{ + ENTER; + RET((priv->pdev->device == 0x3009) + ? bdx_hw_reset(priv) + : bdx_sw_reset(priv)); +} + +/** + * bdx_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int bdx_close(struct net_device *ndev) +{ + struct bdx_priv *priv = NULL; + + ENTER; + priv = ndev->priv; + + napi_disable(&priv->napi); + + bdx_reset(priv); + bdx_hw_stop(priv); + bdx_rx_free(priv); + bdx_tx_free(priv); + RET(0); +} + +/** + * bdx_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int bdx_open(struct net_device *ndev) +{ + struct bdx_priv *priv; + int rc; + + ENTER; + priv = ndev->priv; + bdx_reset(priv); + if (netif_running(ndev)) + netif_stop_queue(priv->ndev); + + if ((rc = bdx_tx_init(priv))) + goto err; + + if ((rc = bdx_rx_init(priv))) + goto err; + + if ((rc = bdx_fw_load(priv))) + goto err; + + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + + if ((rc = bdx_hw_start(priv))) + goto err; + + napi_enable(&priv->napi); + + print_fw_id(priv->nic); + + RET(0); + +err: + bdx_close(ndev); + RET(rc); +} + +static void __init bdx_firmware_endianess(void) +{ + int i; + for (i = 0; i < sizeof(s_firmLoad) / sizeof(u32); i++) + s_firmLoad[i] = CPU_CHIP_SWAP32(s_firmLoad[i]); +} + +static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct bdx_priv *priv = ndev->priv; + u32 data[3]; + int error; + + ENTER; + + DBG("jiffies=%ld cmd=%d\n", jiffies, cmd); + if (cmd != SIOCDEVPRIVATE) { + error = copy_from_user(data, ifr->ifr_data, sizeof(data)); + if (error) { + ERR("cant copy from user\n"); + RET(error); + } + DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); + } + + switch (data[0]) { + + case BDX_OP_READ: + data[2] = READ_REG(priv, data[1]); + DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], + data[2]); + error = copy_to_user(ifr->ifr_data, data, sizeof(data)); + if (error) + RET(error); + break; + + case BDX_OP_WRITE: + WRITE_REG(priv, data[1], data[2]); + DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]); + break; + + default: + RET(-EOPNOTSUPP); + } + return 0; +} + +static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + ENTER; + if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) + RET(bdx_ioctl_priv(ndev, ifr, cmd)); + else + RET(-EOPNOTSUPP); +} + +/* + * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid + * by passing VLAN filter table to hardware + * @ndev network device + * @vid VLAN vid + * @op add or kill operation + */ +static void 
__bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) +{ + struct bdx_priv *priv = ndev->priv; + u32 reg, bit, val; + + ENTER; + DBG2("vid=%d value=%d\n", (int)vid, enable); + if (unlikely(vid >= 4096)) { + ERR("tehuti: invalid VID: %u (> 4096)\n", vid); + RET(); + } + reg = regVLAN_0 + (vid / 32) * 4; + bit = 1 << vid % 32; + val = READ_REG(priv, reg); + DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit); + if (enable) + val |= bit; + else + val &= ~bit; + DBG2("new val %x\n", val); + WRITE_REG(priv, reg, val); + RET(); +} + +/* + * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table + * @ndev network device + * @vid VLAN vid to add + */ +static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) +{ + __bdx_vlan_rx_vid(ndev, vid, 1); +} + +/* + * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table + * @ndev network device + * @vid VLAN vid to kill + */ +static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) +{ + __bdx_vlan_rx_vid(ndev, vid, 0); +} + +/* + * bdx_vlan_rx_register - kernel hook for adding VLAN group + * @ndev network device + * @grp VLAN group + */ +static void +bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) +{ + struct bdx_priv *priv = ndev->priv; + + ENTER; + DBG("device='%s', group='%p'\n", ndev->name, grp); + priv->vlgrp = grp; + RET(); +} + +/** + * bdx_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int bdx_change_mtu(struct net_device *ndev, int new_mtu) +{ + ENTER; + + if (new_mtu == ndev->mtu) + RET(0); + + /* enforce minimum frame size */ + if (new_mtu < ETH_ZLEN) { + ERR("%s: %s mtu %d is less then minimal %d\n", + BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN); + RET(-EINVAL); + } + + ndev->mtu = new_mtu; + if (netif_running(ndev)) { + bdx_close(ndev); + bdx_open(ndev); + } + RET(0); +} + +static void bdx_setmulti(struct net_device *ndev) +{ + struct bdx_priv *priv = ndev->priv; + + u32 rxf_val = + GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN; + int i; + + ENTER; + /* IMF - imperfect (hash) rx multicat filter */ + /* PMF - perfect rx multicat filter */ + + /* FIXME: RXE(OFF) */ + if (ndev->flags & IFF_PROMISC) { + rxf_val |= GMAC_RX_FILTER_PRM; + } else if (ndev->flags & IFF_ALLMULTI) { + /* set IMF to accept all multicast frmaes */ + for (i = 0; i < MAC_MCST_HASH_NUM; i++) + WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); + } else if (ndev->mc_count) { + u8 hash; + struct dev_mc_list *mclist; + u32 reg, val; + + /* set IMF to deny all multicast frames */ + for (i = 0; i < MAC_MCST_HASH_NUM; i++) + WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0); + /* set PMF to deny all multicast frames */ + for (i = 0; i < MAC_MCST_NUM; i++) { + WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0); + WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0); + } + + /* use PMF to accept first MAC_MCST_NUM (15) addresses */ + /* TBD: sort addreses and write them in ascending order + * into RX_MAC_MCST regs. 
we skip this phase now and accept ALL + * multicast frames throu IMF */ + mclist = ndev->mc_list; + + /* accept the rest of addresses throu IMF */ + for (; mclist; mclist = mclist->next) { + hash = 0; + for (i = 0; i < ETH_ALEN; i++) + hash ^= mclist->dmi_addr[i]; + reg = regRX_MCST_HASH0 + ((hash >> 5) << 2); + val = READ_REG(priv, reg); + val |= (1 << (hash % 32)); + WRITE_REG(priv, reg, val); + } + + } else { + DBG("only own mac %d\n", ndev->mc_count); + rxf_val |= GMAC_RX_FILTER_AB; + } + WRITE_REG(priv, regGMAC_RXF_A, rxf_val); + /* enable RX */ + /* FIXME: RXE(ON) */ + RET(); +} + +static int bdx_set_mac(struct net_device *ndev, void *p) +{ + struct bdx_priv *priv = ndev->priv; + struct sockaddr *addr = p; + + ENTER; + /* + if (netif_running(dev)) + return -EBUSY + */ + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + bdx_restore_mac(ndev, priv); + RET(0); +} + +static int bdx_read_mac(struct bdx_priv *priv) +{ + u16 macAddress[3], i; + ENTER; + + macAddress[2] = READ_REG(priv, regUNC_MAC0_A); + macAddress[2] = READ_REG(priv, regUNC_MAC0_A); + macAddress[1] = READ_REG(priv, regUNC_MAC1_A); + macAddress[1] = READ_REG(priv, regUNC_MAC1_A); + macAddress[0] = READ_REG(priv, regUNC_MAC2_A); + macAddress[0] = READ_REG(priv, regUNC_MAC2_A); + for (i = 0; i < 3; i++) { + priv->ndev->dev_addr[i * 2 + 1] = macAddress[i]; + priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8; + } + RET(0); +} + +static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg) +{ + u64 val; + + val = READ_REG(priv, reg); + val |= ((u64) READ_REG(priv, reg + 8)) << 32; + return val; +} + +/*Do the statistics-update work*/ +static void bdx_update_stats(struct bdx_priv *priv) +{ + struct bdx_stats *stats = &priv->hw_stats; + u64 *stats_vector = (u64 *) stats; + int i; + int addr; + + /*Fill HW structure */ + addr = 0x7200; + /*First 12 statistics - 0x7200 - 0x72B0 */ + for (i = 0; i < 12; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x72C0); + /* 0x72C0-0x72E0 RSRV */ + addr = 0x72F0; + for (; i < 16; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x7330); + /* 0x7330-0x7360 RSRV */ + addr = 0x7370; + for (; i < 19; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x73A0); + /* 0x73A0-0x73B0 RSRV */ + addr = 0x73C0; + for (; i < 23; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x7400); + BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i); +} + +static struct net_device_stats *bdx_get_stats(struct net_device *ndev) +{ + struct bdx_priv *priv = ndev->priv; + struct net_device_stats *net_stat = &priv->net_stats; + return net_stat; +} + +static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, + u16 rxd_vlan); +static void print_rxfd(struct rxf_desc *rxfd); + +/************************************************************************* + * Rx DB * + *************************************************************************/ + +static void bdx_rxdb_destroy(struct rxdb *db) +{ + if (db) + vfree(db); +} + +static struct rxdb *bdx_rxdb_create(int nelem) +{ + struct rxdb *db; + int i; + + db = vmalloc(sizeof(struct rxdb) + + (nelem * sizeof(int)) + + (nelem * sizeof(struct rx_map))); + if (likely(db != NULL)) { + db->stack = (int *)(db + 1); + db->elems = (void *)(db->stack + nelem); + db->nelem = nelem; + db->top = nelem; + for (i = 0; i < nelem; i++) + db->stack[i] = nelem - i - 1; /* to make first allocs + 
close to db struct*/ + } + + return db; +} + +static inline int bdx_rxdb_alloc_elem(struct rxdb *db) +{ + BDX_ASSERT(db->top <= 0); + return db->stack[--(db->top)]; +} + +static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n) +{ + BDX_ASSERT((n < 0) || (n >= db->nelem)); + return db->elems + n; +} + +static inline int bdx_rxdb_available(struct rxdb *db) +{ + return db->top; +} + +static inline void bdx_rxdb_free_elem(struct rxdb *db, int n) +{ + BDX_ASSERT((n >= db->nelem) || (n < 0)); + db->stack[(db->top)++] = n; +} + +/************************************************************************* + * Rx Init * + *************************************************************************/ + +/* bdx_rx_init - initialize RX all related HW and SW resources + * @priv - NIC private structure + * + * Returns 0 on success, negative value on failure + * + * It creates rxf and rxd fifos, update relevant HW registers, preallocate + * skb for rx. It assumes that Rx is desabled in HW + * funcs are grouped for better cache usage + * + * RxD fifo is smaller then RxF fifo by design. Upon high load, RxD will be + * filled and packets will be dropped by nic without getting into host or + * cousing interrupt. Anyway, in that condition, host has no chance to proccess + * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles + */ + +/* TBD: ensure proper packet size */ + +static int bdx_rx_init(struct bdx_priv *priv) +{ + ENTER; + + if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size, + regRXD_CFG0_0, regRXD_CFG1_0, + regRXD_RPTR_0, regRXD_WPTR_0)) + goto err_mem; + if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size, + regRXF_CFG0_0, regRXF_CFG1_0, + regRXF_RPTR_0, regRXF_WPTR_0)) + goto err_mem; + if (! + (priv->rxdb = + bdx_rxdb_create(priv->rxf_fifo0.m.memsz / + sizeof(struct rxf_desc)))) + goto err_mem; + + priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN; + return 0; + +err_mem: + ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name); + return -ENOMEM; +} + +/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo + * @priv - NIC private structure + * @f - RXF fifo + */ +static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) +{ + struct rx_map *dm; + struct rxdb *db = priv->rxdb; + u16 i; + + ENTER; + DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db), + db->nelem - bdx_rxdb_available(db)); + while (bdx_rxdb_available(db) > 0) { + i = bdx_rxdb_alloc_elem(db); + dm = bdx_rxdb_addr_elem(db, i); + dm->dma = 0; + } + for (i = 0; i < db->nelem; i++) { + dm = bdx_rxdb_addr_elem(db, i); + if (dm->dma) { + pci_unmap_single(priv->pdev, + dm->dma, f->m.pktsz, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(dm->skb); + } + } +} + +/* bdx_rx_free - release all Rx resources + * @priv - NIC private structure + * It assumes that Rx is desabled in HW + */ +static void bdx_rx_free(struct bdx_priv *priv) +{ + ENTER; + if (priv->rxdb) { + bdx_rx_free_skbs(priv, &priv->rxf_fifo0); + bdx_rxdb_destroy(priv->rxdb); + priv->rxdb = NULL; + } + bdx_fifo_free(priv, &priv->rxf_fifo0.m); + bdx_fifo_free(priv, &priv->rxd_fifo0.m); + + RET(); +} + +/************************************************************************* + * Rx Engine * + *************************************************************************/ + +/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs + * @priv - nic's private structure + * @f - RXF fifo that needs skbs + * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. 
+ * skb's virtual and physical addresses are stored in skb db. + * To calculate free space, func uses cached values of RPTR and WPTR + * When needed, it also updates RPTR and WPTR. + */ + +/* TBD: do not update WPTR if no desc were written */ + +static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f) +{ + struct sk_buff *skb; + struct rxf_desc *rxfd; + struct rx_map *dm; + int dno, delta, idx; + struct rxdb *db = priv->rxdb; + + ENTER; + dno = bdx_rxdb_available(db) - 1; + while (dno > 0) { + if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) { + ERR("NO MEM: dev_alloc_skb failed\n"); + break; + } + skb->dev = priv->ndev; + skb_reserve(skb, NET_IP_ALIGN); + + idx = bdx_rxdb_alloc_elem(db); + dm = bdx_rxdb_addr_elem(db, idx); + dm->dma = pci_map_single(priv->pdev, + skb->data, f->m.pktsz, + PCI_DMA_FROMDEVICE); + dm->skb = skb; + rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ + rxfd->va_lo = idx; + rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma)); + rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma)); + rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz); + print_rxfd(rxfd); + + f->m.wptr += sizeof(struct rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + DBG("wrapped descriptor\n"); + } + } + dno--; + } + /*TBD: to do - delayed rxf wptr like in txd */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + RET(); +} + +static inline void +NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan, + struct sk_buff *skb) +{ + ENTER; + DBG("rxdd->flags.bits.vtag=%d vlgrp=%p\n", GET_RXD_VTAG(rxd_val1), + priv->vlgrp); + if (priv->vlgrp && GET_RXD_VTAG(rxd_val1)) { + DBG("%s: vlan rcv vlan '%x' vtag '%x', device name '%s'\n", + priv->ndev->name, + GET_RXD_VLAN_ID(rxd_vlan), + GET_RXD_VTAG(rxd_val1), + vlan_group_get_device(priv->vlgrp, + GET_RXD_VLAN_ID(rxd_vlan))->name); + /* NAPI variant of receive functions */ + vlan_hwaccel_receive_skb(skb, priv->vlgrp, + GET_RXD_VLAN_ID(rxd_vlan)); + } else { + netif_receive_skb(skb); + } +} + +static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) +{ + struct rxf_desc *rxfd; + struct rx_map *dm; + struct rxf_fifo *f; + struct rxdb *db; + struct sk_buff *skb; + int delta; + + ENTER; + DBG("priv=%p rxdd=%p\n", priv, rxdd); + f = &priv->rxf_fifo0; + db = priv->rxdb; + DBG("db=%p f=%p\n", db, f); + dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); + DBG("dm=%p\n", dm); + skb = dm->skb; + rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ + rxfd->va_lo = rxdd->va_lo; + rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma)); + rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma)); + rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz); + print_rxfd(rxfd); + + f->m.wptr += sizeof(struct rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + DBG("wrapped descriptor\n"); + } + } + RET(); +} + +/* bdx_rx_receive - recieves full packets from RXD fifo and pass them to OS + * NOTE: a special treatment is given to non-continous descriptors + * that start near the end, wraps around and continue at the beginning. a second + * part is copied right after the first, and then descriptor is interpreted as + * normal. 
fifo has an extra space to allow such operations + * @priv - nic's private structure + * @f - RXF fifo that needs skbs + */ + +/* TBD: replace memcpy func call by explicite inline asm */ + +static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget) +{ + struct sk_buff *skb, *skb2; + struct rxd_desc *rxdd; + struct rx_map *dm; + struct rxf_fifo *rxf_fifo; + int tmp_len, size; + int done = 0; + int max_done = BDX_MAX_RX_DONE; + struct rxdb *db = NULL; + /* Unmarshalled descriptor - copy of descriptor in host order */ + u32 rxd_val1; + u16 len; + u16 rxd_vlan; + + ENTER; + max_done = budget; + + priv->ndev->last_rx = jiffies; + f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR; + + size = f->m.wptr - f->m.rptr; + if (size < 0) + size = f->m.memsz + size; /* size is negative :-) */ + + while (size > 0) { + + rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr); + rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1); + + len = CPU_CHIP_SWAP16(rxdd->len); + + rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan); + + print_rxdd(rxdd, rxd_val1, len, rxd_vlan); + + tmp_len = GET_RXD_BC(rxd_val1) << 3; + BDX_ASSERT(tmp_len <= 0); + size -= tmp_len; + if (size < 0) /* test for partially arrived descriptor */ + break; + + f->m.rptr += tmp_len; + + tmp_len = f->m.rptr - f->m.memsz; + if (unlikely(tmp_len >= 0)) { + f->m.rptr = tmp_len; + if (tmp_len > 0) { + DBG("wrapped desc rptr=%d tmp_len=%d\n", + f->m.rptr, tmp_len); + memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len); + } + } + + if (unlikely(GET_RXD_ERR(rxd_val1))) { + DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1)); + priv->net_stats.rx_errors++; + bdx_recycle_skb(priv, rxdd); + continue; + } + + rxf_fifo = &priv->rxf_fifo0; + db = priv->rxdb; + dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); + skb = dm->skb; + + if (len < BDX_COPYBREAK && + (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) { + skb_reserve(skb2, NET_IP_ALIGN); + /*skb_put(skb2, len); */ + pci_dma_sync_single_for_cpu(priv->pdev, + dm->dma, rxf_fifo->m.pktsz, + PCI_DMA_FROMDEVICE); + memcpy(skb2->data, skb->data, len); + bdx_recycle_skb(priv, rxdd); + skb = skb2; + } else { + pci_unmap_single(priv->pdev, + dm->dma, rxf_fifo->m.pktsz, + PCI_DMA_FROMDEVICE); + bdx_rxdb_free_elem(db, rxdd->va_lo); + } + + priv->net_stats.rx_bytes += len; + + skb_put(skb, len); + skb->dev = priv->ndev; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->protocol = eth_type_trans(skb, priv->ndev); + + /* Non-IP packets aren't checksum-offloaded */ + if (GET_RXD_PKT_ID(rxd_val1) == 0) + skb->ip_summed = CHECKSUM_NONE; + + NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb); + + if (++done >= max_done) + break; + } + + priv->net_stats.rx_packets += done; + + /* FIXME: do smth to minimize pci accesses */ + WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); + + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + + RET(done); +} + +/************************************************************************* + * Debug / Temprorary Code * + *************************************************************************/ +static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, + u16 rxd_vlan) +{ + DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d " + "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d " + "va_lo %d va_hi %d\n", + GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1), + GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1), + GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1), + GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan), + GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo, + 
rxdd->va_hi); +} + +static void print_rxfd(struct rxf_desc *rxfd) +{ + DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n" + "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n", + rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len); +} + +/* + * TX HW/SW interaction overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * There are 2 types of TX communication channels betwean driver and NIC. + * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets + * 2) TX Data Fifo - TXD - holds descriptors of full buffers. + * + * Currently NIC supports TSO, checksuming and gather DMA + * UFO and IP fragmentation is on the way + * + * RX SW Data Structures + * ~~~~~~~~~~~~~~~~~~~~~ + * txdb - used to keep track of all skbs owned by SW and their dma addresses. + * For TX case, ownership lasts from geting packet via hard_xmit and until HW + * acknowledges sent by TXF descriptors. + * Implemented as cyclic buffer. + * fifo - keeps info about fifo's size and location, relevant HW registers, + * usage and skb db. Each RXD and RXF Fifo has its own fifo structure. + * Implemented as simple struct. + * + * TX SW Execution Flow + * ~~~~~~~~~~~~~~~~~~~~ + * OS calls driver's hard_xmit method with packet to sent. + * Driver creates DMA mappings, builds TXD descriptors and kicks HW + * by updating TXD WPTR. + * When packet is sent, HW write us TXF descriptor and SW frees original skb. + * To prevent TXD fifo overflow without reading HW registers every time, + * SW deploys "tx level" technique. + * Upon strart up, tx level is initialized to TXD fifo length. + * For every sent packet, SW gets its TXD descriptor sizei + * (from precalculated array) and substructs it from tx level. + * The size is also stored in txdb. When TXF ack arrives, SW fetch size of + * original TXD descriptor from txdb and adds it to tx level. + * When Tx level drops under some predefined treshhold, the driver + * stops the TX queue. When TX level rises above that level, + * the tx queue is enabled again. + * + * This technique avoids eccessive reading of RPTR and WPTR registers. + * As our benchmarks shows, it adds 1.5 Gbit/sec to NIS's throuput. 
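A condensed sketch of the "tx level" bookkeeping described above; it uses the driver's own fields (priv->tx_level, txd_sizes[], BDX_MIN_TX_LEVEL), while reclaimed_bytes is only a stand-in for the sum of descriptor sizes returned by TXF acknowledgements:

	/* hard_xmit side: account for the descriptor just queued */
	priv->tx_level -= txd_sizes[nr_frags].bytes;
	if (priv->tx_level < BDX_MIN_TX_LEVEL)
		netif_stop_queue(ndev);		/* TXD fifo nearly full */

	/* TXF cleanup side: sizes were remembered in the tx db, so they
	 * can be given back without touching HW pointer registers */
	priv->tx_level += reclaimed_bytes;
	if (netif_queue_stopped(ndev) && priv->tx_level >= BDX_MIN_TX_LEVEL)
		netif_wake_queue(ndev);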
+ */ + +/************************************************************************* + * Tx DB * + *************************************************************************/ +static inline int bdx_tx_db_size(struct txdb *db) +{ + int taken = db->wptr - db->rptr; + if (taken < 0) + taken = db->size + 1 + taken; /* (size + 1) equals memsz */ + + return db->size - taken; +} + +/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap + * @d - tx data base + * @ptr - read or write pointer + */ +static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) +{ + BDX_ASSERT(db == NULL || pptr == NULL); /* sanity */ + + BDX_ASSERT(*pptr != db->rptr && /* expect either read */ + *pptr != db->wptr); /* or write pointer */ + + BDX_ASSERT(*pptr < db->start || /* pointer has to be */ + *pptr >= db->end); /* in range */ + + ++*pptr; + if (unlikely(*pptr == db->end)) + *pptr = db->start; +} + +/* bdx_tx_db_inc_rptr - increment read pointer + * @d - tx data base + */ +static inline void bdx_tx_db_inc_rptr(struct txdb *db) +{ + BDX_ASSERT(db->rptr == db->wptr); /* can't read from empty db */ + __bdx_tx_db_ptr_next(db, &db->rptr); +} + +/* bdx_tx_db_inc_rptr - increment write pointer + * @d - tx data base + */ +static inline void bdx_tx_db_inc_wptr(struct txdb *db) +{ + __bdx_tx_db_ptr_next(db, &db->wptr); + BDX_ASSERT(db->rptr == db->wptr); /* we can not get empty db as + a result of write */ +} + +/* bdx_tx_db_init - creates and initializes tx db + * @d - tx data base + * @sz_type - size of tx fifo + * Returns 0 on success, error code otherwise + */ +static int bdx_tx_db_init(struct txdb *d, int sz_type) +{ + int memsz = FIFO_SIZE * (1 << (sz_type + 1)); + + d->start = vmalloc(memsz); + if (!d->start) + return -ENOMEM; + + /* + * In order to differentiate between db is empty and db is full + * states at least one element should always be empty in order to + * avoid rptr == wptr which means db is empty + */ + d->size = memsz / sizeof(struct tx_map) - 1; + d->end = d->start + d->size + 1; /* just after last element */ + + /* all dbs are created equally empty */ + d->rptr = d->start; + d->wptr = d->start; + + return 0; +} + +/* bdx_tx_db_close - closes tx db and frees all memory + * @d - tx data base + */ +static void bdx_tx_db_close(struct txdb *d) +{ + BDX_ASSERT(d == NULL); + + if (d->start) { + vfree(d->start); + d->start = NULL; + } +} + +/************************************************************************* + * Tx Engine * + *************************************************************************/ + +/* sizes of tx desc (including padding if needed) as function + * of skb's frag number */ +static struct { + u16 bytes; + u16 qwords; /* qword = 64 bit */ +} txd_sizes[MAX_SKB_FRAGS + 1]; + +/* txdb_map_skb - creates and stores dma mappings for skb's data blocks + * @priv - NIC private structure + * @skb - socket buffer to map + * + * It makes dma mappings for skb's data blocks and writes them to PBL of + * new tx descriptor. It also stores them in the tx db, so they could be + * unmaped after data was sent. It is reponsibility of a caller to make + * sure that there is enough space in the tx db. 
Last element holds pointer + * to skb itself and marked with zero length + */ +static inline void +bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, + struct txd_desc *txdd) +{ + struct txdb *db = &priv->txdb; + struct pbl *pbl = &txdd->pbl[0]; + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + + db->wptr->len = skb->len - skb->data_len; + db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data, + db->wptr->len, PCI_DMA_TODEVICE); + pbl->len = CPU_CHIP_SWAP32(db->wptr->len); + pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma)); + pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma)); + DBG("=== pbl len: 0x%x ================\n", pbl->len); + DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo); + DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi); + bdx_tx_db_inc_wptr(db); + + for (i = 0; i < nr_frags; i++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[i]; + db->wptr->len = frag->size; + db->wptr->addr.dma = + pci_map_page(priv->pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + + pbl++; + pbl->len = CPU_CHIP_SWAP32(db->wptr->len); + pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma)); + pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma)); + bdx_tx_db_inc_wptr(db); + } + + /* add skb clean up info. */ + db->wptr->len = -txd_sizes[nr_frags].bytes; + db->wptr->addr.skb = skb; + bdx_tx_db_inc_wptr(db); +} + +/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags + * number of frags is used as index to fetch correct descriptors size, + * instead of calculating it each time */ +static void __init init_txd_sizes(void) +{ + int i, lwords; + + /* 7 - is number of lwords in txd with one phys buffer + * 3 - is number of lwords used for every additional phys buffer */ + for (i = 0; i < MAX_SKB_FRAGS + 1; i++) { + lwords = 7 + (i * 3); + if (lwords & 1) + lwords++; /* pad it with 1 lword */ + txd_sizes[i].qwords = lwords >> 1; + txd_sizes[i].bytes = lwords << 2; + } +} + +/* bdx_tx_init - initialize all Tx related stuff. + * Namely, TXD and TXF fifos, database etc */ +static int bdx_tx_init(struct bdx_priv *priv) +{ + if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size, + regTXD_CFG0_0, + regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0)) + goto err_mem; + if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size, + regTXF_CFG0_0, + regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0)) + goto err_mem; + + /* The TX db has to keep mappings for all packets sent (on TxD) + * and not yet reclaimed (on TxF) */ + if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size))) + goto err_mem; + + priv->tx_level = BDX_MAX_TX_LEVEL; +#ifdef BDX_DELAY_WPTR + priv->tx_update_mark = priv->tx_level - 1024; +#endif + return 0; + +err_mem: + ERR("tehuti: %s: Tx init failed\n", priv->ndev->name); + return -ENOMEM; +} + +/* + * bdx_tx_space - calculates avalable space in TX fifo + * @priv - NIC private structure + * Returns avaliable space in TX fifo in bytes + */ +static inline int bdx_tx_space(struct bdx_priv *priv) +{ + struct txd_fifo *f = &priv->txd_fifo0; + int fsize; + + f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR; + fsize = f->m.rptr - f->m.wptr; + if (fsize <= 0) + fsize = f->m.memsz + fsize; + return (fsize); +} + +/* bdx_tx_transmit - send packet to NIC + * @skb - packet to send + * ndev - network device assigned to NIC + * Return codes: + * o NETDEV_TX_OK everything ok. 
+ * o NETDEV_TX_BUSY Cannot transmit packet, try later + * Usually a bug, means queue start/stop flow control is broken in + * the driver. Note: the driver must NOT put the skb in its DMA ring. + * o NETDEV_TX_LOCKED Locking failed, please retry quickly. + */ +static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct bdx_priv *priv = ndev->priv; + struct txd_fifo *f = &priv->txd_fifo0; + int txd_checksum = 7; /* full checksum */ + int txd_lgsnd = 0; + int txd_vlan_id = 0; + int txd_vtag = 0; + int txd_mss = 0; + + int nr_frags = skb_shinfo(skb)->nr_frags; + struct txd_desc *txdd; + int len; + unsigned long flags; + + ENTER; + local_irq_save(flags); + if (!spin_trylock(&priv->tx_lock)) { + local_irq_restore(flags); + DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", + BDX_DRV_NAME, ndev->name); + return NETDEV_TX_LOCKED; + } + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ + txdd = (struct txd_desc *)(f->m.va + f->m.wptr); + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + txd_checksum = 0; + + if (skb_shinfo(skb)->gso_size) { + txd_mss = skb_shinfo(skb)->gso_size; + txd_lgsnd = 1; + DBG("skb %p skb len %d gso size = %d\n", skb, skb->len, + txd_mss); + } + + if (vlan_tx_tag_present(skb)) { + /*Cut VLAN ID to 12 bits */ + txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12); + txd_vtag = 1; + } + + txdd->length = CPU_CHIP_SWAP16(skb->len); + txdd->mss = CPU_CHIP_SWAP16(txd_mss); + txdd->txd_val1 = + CPU_CHIP_SWAP32(TXD_W1_VAL + (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag, + txd_lgsnd, txd_vlan_id)); + DBG("=== TxD desc =====================\n"); + DBG("=== w1: 0x%x ================\n", txdd->txd_val1); + DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length); + + bdx_tx_map_skb(priv, skb, txdd); + + /* increment TXD write pointer. In case of + fifo wrapping copy reminder of the descriptor + to the beginning */ + f->m.wptr += txd_sizes[nr_frags].bytes; + len = f->m.wptr - f->m.memsz; + if (unlikely(len >= 0)) { + f->m.wptr = len; + if (len > 0) { + BDX_ASSERT(len > f->m.memsz); + memcpy(f->m.va, f->m.va + f->m.memsz, len); + } + } + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */ + + priv->tx_level -= txd_sizes[nr_frags].bytes; + BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); +#ifdef BDX_DELAY_WPTR + if (priv->tx_level > priv->tx_update_mark) { + /* Force memory writes to complete before letting h/w + know there are new descriptors to fetch. + (might be needed on platforms like IA64) + wmb(); */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + } else { + if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) { + priv->tx_noupd = 0; + WRITE_REG(priv, f->m.reg_WPTR, + f->m.wptr & TXF_WPTR_WR_PTR); + } + } +#else + /* Force memory writes to complete before letting h/w + know there are new descriptors to fetch. + (might be needed on platforms like IA64) + wmb(); */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + +#endif + ndev->trans_start = jiffies; + + priv->net_stats.tx_packets++; + priv->net_stats.tx_bytes += skb->len; + + if (priv->tx_level < BDX_MIN_TX_LEVEL) { + DBG("%s: %s: TX Q STOP level %d\n", + BDX_DRV_NAME, ndev->name, priv->tx_level); + netif_stop_queue(ndev); + } + + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; +} + +/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. 
+ * @priv - bdx adapter + * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS + * that those packets were sent + */ +static void bdx_tx_cleanup(struct bdx_priv *priv) +{ + struct txf_fifo *f = &priv->txf_fifo0; + struct txdb *db = &priv->txdb; + int tx_level = 0; + + ENTER; + f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK; + BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */ + + while (f->m.wptr != f->m.rptr) { + f->m.rptr += BDX_TXF_DESC_SZ; + f->m.rptr &= f->m.size_mask; + + /* unmap all the fragments */ + /* first has to come tx_maps containing dma */ + BDX_ASSERT(db->rptr->len == 0); + do { + BDX_ASSERT(db->rptr->addr.dma == 0); + pci_unmap_page(priv->pdev, db->rptr->addr.dma, + db->rptr->len, PCI_DMA_TODEVICE); + bdx_tx_db_inc_rptr(db); + } while (db->rptr->len > 0); + tx_level -= db->rptr->len; /* '-' koz len is negative */ + + /* now should come skb pointer - free it */ + dev_kfree_skb_irq(db->rptr->addr.skb); + bdx_tx_db_inc_rptr(db); + } + + /* let h/w know which TXF descriptors were cleaned */ + BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz); + WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); + + /* We reclaimed resources, so in case the Q is stopped by xmit callback, + * we resume the transmition and use tx_lock to synchronize with xmit.*/ + spin_lock(&priv->tx_lock); + priv->tx_level += tx_level; + BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); +#ifdef BDX_DELAY_WPTR + if (priv->tx_noupd) { + priv->tx_noupd = 0; + WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR, + priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR); + } +#endif + + if (unlikely(netif_queue_stopped(priv->ndev) + && netif_carrier_ok(priv->ndev) + && (priv->tx_level >= BDX_MIN_TX_LEVEL))) { + DBG("%s: %s: TX Q WAKE level %d\n", + BDX_DRV_NAME, priv->ndev->name, priv->tx_level); + netif_wake_queue(priv->ndev); + } + spin_unlock(&priv->tx_lock); +} + +/* bdx_tx_free_skbs - frees all skbs from TXD fifo. + * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod + */ +static void bdx_tx_free_skbs(struct bdx_priv *priv) +{ + struct txdb *db = &priv->txdb; + + ENTER; + while (db->rptr != db->wptr) { + if (likely(db->rptr->len)) + pci_unmap_page(priv->pdev, db->rptr->addr.dma, + db->rptr->len, PCI_DMA_TODEVICE); + else + dev_kfree_skb(db->rptr->addr.skb); + bdx_tx_db_inc_rptr(db); + } + RET(); +} + +/* bdx_tx_free - frees all Tx resources */ +static void bdx_tx_free(struct bdx_priv *priv) +{ + ENTER; + bdx_tx_free_skbs(priv); + bdx_fifo_free(priv, &priv->txd_fifo0.m); + bdx_fifo_free(priv, &priv->txf_fifo0.m); + bdx_tx_db_close(&priv->txdb); +} + +/* bdx_tx_push_desc - push descriptor to TxD fifo + * @priv - NIC private structure + * @data - desc's data + * @size - desc's size + * + * Pushes desc to TxD fifo and overlaps it if needed. + * NOTE: this func does not check for available space. this is responsibility + * of the caller. Neither does it check that data size is smaller then + * fifo size. 
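For comparison, the in-place descriptor writers in this file (bdx_rx_alloc_skbs, bdx_tx_transmit) handle the same wrap case differently: the descriptor is built past the nominal fifo end and the overhang is copied back to the start, which is what the extra tail allocated in bdx_fifo_init is for. A sketch of that pattern, with bytes standing in for the size of the descriptor just written:

	/* wrap-around step after writing 'bytes' at f->m.wptr */
	f->m.wptr += bytes;
	delta = f->m.wptr - f->m.memsz;
	if (delta >= 0) {			/* ran past the nominal end */
		f->m.wptr = delta;
		if (delta > 0)			/* move the overhang back */
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
	}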
+ */ +static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) +{ + struct txd_fifo *f = &priv->txd_fifo0; + int i = f->m.memsz - f->m.wptr; + + if (size == 0) + return; + + if (i > size) { + memcpy(f->m.va + f->m.wptr, data, size); + f->m.wptr += size; + } else { + memcpy(f->m.va + f->m.wptr, data, i); + f->m.wptr = size - i; + memcpy(f->m.va, data + i, f->m.wptr); + } + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); +} + +/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way + * @priv - NIC private structure + * @data - desc's data + * @size - desc's size + * + * NOTE: this func does check for available space and, if neccessary, waits for + * NIC to read existing data before writing new one. + */ +static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size) +{ + int timer = 0; + ENTER; + + while (size > 0) { + /* we substruct 8 because when fifo is full rptr == wptr + which also means that fifo is empty, we can understand + the difference, but could hw do the same ??? :) */ + int avail = bdx_tx_space(priv) - 8; + if (avail <= 0) { + if (timer++ > 300) { /* prevent endless loop */ + DBG("timeout while writing desc to TxD fifo\n"); + break; + } + udelay(50); /* give hw a chance to clean fifo */ + continue; + } + avail = MIN(avail, size); + DBG("about to push %d bytes starting %p size %d\n", avail, + data, size); + bdx_tx_push_desc(priv, data, avail); + size -= avail; + data += avail; + } + RET(); +} + +/** + * bdx_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in bdx_pci_tbl + * + * Returns 0 on success, negative on failure + * + * bdx_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + * + * functions and their order used as explained in + * /usr/src/linux/Documentation/DMA-{API,mapping}.txt + * + */ + +/* TBD: netif_msg should be checked and implemented. I disable it for now */ +static int __devinit +bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *ndev; + struct bdx_priv *priv; + int err, pci_using_dac, port; + unsigned long pciaddr; + u32 regionSize; + struct pci_nic *nic; + + ENTER; + + nic = vmalloc(sizeof(*nic)); + if (!nic) + RET(-ENOMEM); + + /************** pci *****************/ + if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. 
*/ + RET(err); /* it's not a problem though */ + + if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && + !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { + pci_using_dac = 1; + } else { + if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || + (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { + printk(KERN_ERR "tehuti: No usable DMA configuration" + ", aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + if ((err = pci_request_regions(pdev, BDX_DRV_NAME))) + goto err_dma; + + pci_set_master(pdev); + + pciaddr = pci_resource_start(pdev, 0); + if (!pciaddr) { + err = -EIO; + ERR("tehuti: no MMIO resource\n"); + goto err_out_res; + } + if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) { + err = -EIO; + ERR("tehuti: MMIO resource (%x) too small\n", regionSize); + goto err_out_res; + } + + nic->regs = ioremap(pciaddr, regionSize); + if (!nic->regs) { + err = -EIO; + ERR("tehuti: ioremap failed\n"); + goto err_out_res; + } + + if (pdev->irq < 2) { + err = -EIO; + ERR("tehuti: invalid irq (%d)\n", pdev->irq); + goto err_out_iomap; + } + pci_set_drvdata(pdev, nic); + + if (pdev->device == 0x3014) + nic->port_num = 2; + else + nic->port_num = 1; + + print_hw_id(pdev); + + bdx_hw_reset_direct(nic->regs); + + nic->irq_type = IRQ_INTX; +#ifdef BDX_MSI + if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { + if ((err = pci_enable_msi(pdev))) + ERR("Tehuti: Can't eneble msi. error is %d\n", err); + else + nic->irq_type = IRQ_MSI; + } else + DBG("HW does not support MSI\n"); +#endif + + /************** netdev **************/ + for (port = 0; port < nic->port_num; port++) { + if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) { + err = -ENOMEM; + printk(KERN_ERR "tehuti: alloc_etherdev failed\n"); + goto err_out_iomap; + } + + ndev->open = bdx_open; + ndev->stop = bdx_close; + ndev->hard_start_xmit = bdx_tx_transmit; + ndev->do_ioctl = bdx_ioctl; + ndev->set_multicast_list = bdx_setmulti; + ndev->get_stats = bdx_get_stats; + ndev->change_mtu = bdx_change_mtu; + ndev->set_mac_address = bdx_set_mac; + ndev->tx_queue_len = BDX_NDEV_TXQ_LEN; + ndev->vlan_rx_register = bdx_vlan_rx_register; + ndev->vlan_rx_add_vid = bdx_vlan_rx_add_vid; + ndev->vlan_rx_kill_vid = bdx_vlan_rx_kill_vid; + + bdx_ethtool_ops(ndev); /* ethtool interface */ + + /* these fields are used for info purposes only + * so we can have them same for all ports of the board */ + ndev->if_port = port; + ndev->base_addr = pciaddr; + ndev->mem_start = pciaddr; + ndev->mem_end = pciaddr + regionSize; + ndev->irq = pdev->irq; + ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO + | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER + /*| NETIF_F_FRAGLIST */ + ; + + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + + /************** priv ****************/ + priv = nic->priv[port] = ndev->priv; + + memset(priv, 0, sizeof(struct bdx_priv)); + priv->pBdxRegs = nic->regs + port * 0x8000; + priv->port = port; + priv->pdev = pdev; + priv->ndev = ndev; + priv->nic = nic; + priv->msg_enable = BDX_DEF_MSG_ENABLE; + + netif_napi_add(ndev, &priv->napi, bdx_poll, 64); + + if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) { + DBG("HW statistics not supported\n"); + priv->stats_flag = 0; + } else { + priv->stats_flag = 1; + } + + /* Initialize fifo sizes. */ + priv->txd_size = 2; + priv->txf_size = 2; + priv->rxd_size = 2; + priv->rxf_size = 3; + + /* Initialize the initial coalescing registers. 
*/ + priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12); + priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12); + + /* ndev->xmit_lock spinlock is not used. + * Private priv->tx_lock is used for synchronization + * between transmit and TX irq cleanup. In addition + * set multicast list callback has to use priv->tx_lock. + */ +#ifdef BDX_LLTX + ndev->features |= NETIF_F_LLTX; +#endif + spin_lock_init(&priv->tx_lock); + + /*bdx_hw_reset(priv); */ + if (bdx_read_mac(priv)) { + printk(KERN_ERR "tehuti: load MAC address failed\n"); + goto err_out_iomap; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + if ((err = register_netdev(ndev))) { + printk(KERN_ERR "tehuti: register_netdev failed\n"); + goto err_out_free; + } + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + print_eth_id(ndev); + } + RET(0); + +err_out_free: + free_netdev(ndev); +err_out_iomap: + iounmap(nic->regs); +err_out_res: + pci_release_regions(pdev); +err_dma: + pci_disable_device(pdev); + vfree(nic); + + RET(err); +} + +/****************** Ethtool interface *********************/ +/* get strings for tests */ +static const char + bdx_test_names[][ETH_GSTRING_LEN] = { + "No tests defined" +}; + +/* get strings for statistics counters */ +static const char + bdx_stat_names[][ETH_GSTRING_LEN] = { + "InUCast", /* 0x7200 */ + "InMCast", /* 0x7210 */ + "InBCast", /* 0x7220 */ + "InPkts", /* 0x7230 */ + "InErrors", /* 0x7240 */ + "InDropped", /* 0x7250 */ + "FrameTooLong", /* 0x7260 */ + "FrameSequenceErrors", /* 0x7270 */ + "InVLAN", /* 0x7280 */ + "InDroppedDFE", /* 0x7290 */ + "InDroppedIntFull", /* 0x72A0 */ + "InFrameAlignErrors", /* 0x72B0 */ + + /* 0x72C0-0x72E0 RSRV */ + + "OutUCast", /* 0x72F0 */ + "OutMCast", /* 0x7300 */ + "OutBCast", /* 0x7310 */ + "OutPkts", /* 0x7320 */ + + /* 0x7330-0x7360 RSRV */ + + "OutVLAN", /* 0x7370 */ + "InUCastOctects", /* 0x7380 */ + "OutUCastOctects", /* 0x7390 */ + + /* 0x73A0-0x73B0 RSRV */ + + "InBCastOctects", /* 0x73C0 */ + "OutBCastOctects", /* 0x73D0 */ + "InOctects", /* 0x73E0 */ + "OutOctects", /* 0x73F0 */ +}; + +/* + * bdx_get_settings - get device-specific settings + * @netdev + * @ecmd + */ +static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev->priv; + + rdintcm = priv->rdintcm; + tdintcm = priv->tdintcm; + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->speed = SPEED_10000; + ecmd->duplex = DUPLEX_FULL; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */ + ecmd->autoneg = AUTONEG_DISABLE; + + /* PCK_TH measures in multiples of FIFO bytes + We translate to packets */ + ecmd->maxtxpkt = + ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); + ecmd->maxrxpkt = + ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); + + return 0; +} + +/* + * bdx_get_drvinfo - report driver information + * @netdev + * @drvinfo + */ +static void +bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct bdx_priv *priv = netdev->priv; + + strncat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); + strncat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); + strncat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strncat(drvinfo->bus_info, pci_name(priv->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_stats = ((priv->stats_flag) ? 
+ (sizeof(bdx_stat_names) / ETH_GSTRING_LEN) : 0); + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +/* + * bdx_get_rx_csum - report whether receive checksums are turned on or off + * @netdev + */ +static u32 bdx_get_rx_csum(struct net_device *netdev) +{ + return 1; /* always on */ +} + +/* + * bdx_get_tx_csum - report whether transmit checksums are turned on or off + * @netdev + */ +static u32 bdx_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +/* + * bdx_get_coalesce - get interrupt coalescing parameters + * @netdev + * @ecoal + */ +static int +bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev->priv; + + rdintcm = priv->rdintcm; + tdintcm = priv->tdintcm; + + /* PCK_TH measures in multiples of FIFO bytes + We translate to packets */ + ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT; + ecoal->rx_max_coalesced_frames = + ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); + + ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT; + ecoal->tx_max_coalesced_frames = + ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); + + /* adaptive parameters ignored */ + return 0; +} + +/* + * bdx_set_coalesce - set interrupt coalescing parameters + * @netdev + * @ecoal + */ +static int +bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev->priv; + int rx_coal; + int tx_coal; + int rx_max_coal; + int tx_max_coal; + + /* Check for valid input */ + rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT; + tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT; + rx_max_coal = ecoal->rx_max_coalesced_frames; + tx_max_coal = ecoal->tx_max_coalesced_frames; + + /* Translate from packets to multiples of FIFO bytes */ + rx_max_coal = + (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1) + / PCK_TH_MULT); + tx_max_coal = + (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1) + / PCK_TH_MULT); + + if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) + || (rx_max_coal > 0xF) || (tx_max_coal > 0xF)) + return -EINVAL; + + rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm), + GET_RXF_TH(priv->rdintcm), rx_max_coal); + tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0, + tx_max_coal); + + priv->rdintcm = rdintcm; + priv->tdintcm = tdintcm; + + WRITE_REG(priv, regRDINTCM0, rdintcm); + WRITE_REG(priv, regTDINTCM0, tdintcm); + + return 0; +} + +/* Convert RX fifo size to number of pending packets */ +static inline int bdx_rx_fifo_size_to_packets(int rx_size) +{ + return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc)); +} + +/* Convert TX fifo size to number of pending packets */ +static inline int bdx_tx_fifo_size_to_packets(int tx_size) +{ + return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ); +} + +/* + * bdx_get_ringparam - report ring sizes + * @netdev + * @ring + */ +static void +bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct bdx_priv *priv = netdev->priv; + + /*max_pending - the maximum-sized FIFO we allow */ + ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3); + ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3); + ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size); + ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size); +} + +/* + * bdx_set_ringparam - set ring sizes + * @netdev + * @ring + */ 
+static int +bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct bdx_priv *priv = netdev->priv; + int rx_size = 0; + int tx_size = 0; + + for (; rx_size < 4; rx_size++) { + if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending) + break; + } + if (rx_size == 4) + rx_size = 3; + + for (; tx_size < 4; tx_size++) { + if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending) + break; + } + if (tx_size == 4) + tx_size = 3; + + /*Is there anything to do? */ + if ((rx_size == priv->rxf_size) + && (tx_size == priv->txd_size)) + return 0; + + priv->rxf_size = rx_size; + if (rx_size > 1) + priv->rxd_size = rx_size - 1; + else + priv->rxd_size = rx_size; + + priv->txf_size = priv->txd_size = tx_size; + + if (netif_running(netdev)) { + bdx_close(netdev); + bdx_open(netdev); + } + return 0; +} + +/* + * bdx_get_strings - return a set of strings that describe the requested objects + * @netdev + * @data + */ +static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *bdx_test_names, sizeof(bdx_test_names)); + break; + case ETH_SS_STATS: + memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names)); + break; + } +} + +/* + * bdx_get_stats_count - return number of 64bit statistics counters + * @netdev + */ +static int bdx_get_stats_count(struct net_device *netdev) +{ + struct bdx_priv *priv = netdev->priv; + BDX_ASSERT(sizeof(bdx_stat_names) / ETH_GSTRING_LEN + != sizeof(struct bdx_stats) / sizeof(u64)); + return ((priv->stats_flag) ? (sizeof(bdx_stat_names) / ETH_GSTRING_LEN) + : 0); +} + +/* + * bdx_get_ethtool_stats - return device's hardware L2 statistics + * @netdev + * @stats + * @data + */ +static void bdx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct bdx_priv *priv = netdev->priv; + + if (priv->stats_flag) { + + /* Update stats from HW */ + bdx_update_stats(priv); + + /* Copy data to user buffer */ + memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats)); + } +} + +/* + * bdx_ethtool_ops - ethtool interface implementation + * @netdev + */ +static void bdx_ethtool_ops(struct net_device *netdev) +{ + static struct ethtool_ops bdx_ethtool_ops = { + .get_settings = bdx_get_settings, + .get_drvinfo = bdx_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_coalesce = bdx_get_coalesce, + .set_coalesce = bdx_set_coalesce, + .get_ringparam = bdx_get_ringparam, + .set_ringparam = bdx_set_ringparam, + .get_rx_csum = bdx_get_rx_csum, + .get_tx_csum = bdx_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .get_tso = ethtool_op_get_tso, + .get_strings = bdx_get_strings, + .get_stats_count = bdx_get_stats_count, + .get_ethtool_stats = bdx_get_ethtool_stats, + }; + + SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops); +} + +/** + * bdx_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * bdx_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +static void __devexit bdx_remove(struct pci_dev *pdev) +{ + struct pci_nic *nic = pci_get_drvdata(pdev); + struct net_device *ndev; + int port; + + for (port = 0; port < nic->port_num; port++) { + ndev = nic->priv[port]->ndev; + unregister_netdev(ndev); + free_netdev(ndev); + } + + /*bdx_hw_reset_direct(nic->regs); */ +#ifdef BDX_MSI + if (nic->irq_type == IRQ_MSI) + pci_disable_msi(pdev); +#endif + + iounmap(nic->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + vfree(nic); + + RET(); +} + +static struct pci_driver bdx_pci_driver = { + .name = BDX_DRV_NAME, + .id_table = bdx_pci_tbl, + .probe = bdx_probe, + .remove = __devexit_p(bdx_remove), +}; + +/* + * print_driver_id - print parameters of the driver build + */ +static void __init print_driver_id(void) +{ + printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC, + BDX_DRV_VERSION); + printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME, + BDX_MSI_STRING); +} + +static int __init bdx_module_init(void) +{ + ENTER; + bdx_firmware_endianess(); + init_txd_sizes(); + print_driver_id(); + RET(pci_register_driver(&bdx_pci_driver)); +} + +module_init(bdx_module_init); + +static void __exit bdx_module_exit(void) +{ + ENTER; + pci_unregister_driver(&bdx_pci_driver); + RET(); +} + +module_exit(bdx_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(BDX_DRV_DESC); diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h new file mode 100644 index 000000000000..efd170f451b4 --- /dev/null +++ b/drivers/net/tehuti.h @@ -0,0 +1,564 @@ +/* + * Tehuti Networks(R) Network Driver + * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _TEHUTI_H +#define _TEHUTI_H + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/crc32.h> +#include <linux/uaccess.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sched.h> +#include <linux/tty.h> +#include <linux/if_vlan.h> +#include <linux/version.h> +#include <linux/interrupt.h> +#include <linux/vmalloc.h> +#include <asm/byteorder.h> + +/* Compile Time Switches */ +/* start */ +#define BDX_TSO +#define BDX_LLTX +#define BDX_DELAY_WPTR +/* #define BDX_MSI */ +/* end */ + +#if !defined CONFIG_PCI_MSI +# undef BDX_MSI +#endif + +#define BDX_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +/* ioctl ops */ +#define BDX_OP_READ 1 +#define BDX_OP_WRITE 2 + +/* RX copy break size */ +#define BDX_COPYBREAK 257 + +#define DRIVER_AUTHOR "Tehuti Networks(R)" +#define BDX_DRV_DESC "Tehuti Networks(R) Network Driver" +#define BDX_DRV_NAME "tehuti" +#define BDX_NIC_NAME "Tehuti 10 Giga TOE SmartNIC" +#define BDX_NIC2PORT_NAME "Tehuti 2-Port 10 Giga TOE SmartNIC" +#define BDX_DRV_VERSION "7.29.3" + +#ifdef BDX_MSI +# define BDX_MSI_STRING "msi " +#else +# define BDX_MSI_STRING "" +#endif + +/* netdev tx queue len for Luxor. 
default value is, btw, 1000 + * ifconfig eth1 txqueuelen 3000 - to change it at runtime */ +#define BDX_NDEV_TXQ_LEN 3000 + +#define FIFO_SIZE 4096 +#define FIFO_EXTRA_SPACE 1024 + +#define MIN(x, y) ((x) < (y) ? (x) : (y)) + +#if BITS_PER_LONG == 64 +# define H32_64(x) (u32) ((u64)(x) >> 32) +# define L32_64(x) (u32) ((u64)(x) & 0xffffffff) +#elif BITS_PER_LONG == 32 +# define H32_64(x) 0 +# define L32_64(x) ((u32) (x)) +#else /* BITS_PER_LONG == ?? */ +# error BITS_PER_LONG is undefined. Must be 64 or 32 +#endif /* BITS_PER_LONG */ + +#ifdef __BIG_ENDIAN +# define CPU_CHIP_SWAP32(x) swab32(x) +# define CPU_CHIP_SWAP16(x) swab16(x) +#else +# define CPU_CHIP_SWAP32(x) (x) +# define CPU_CHIP_SWAP16(x) (x) +#endif + +#define READ_REG(pp, reg) readl(pp->pBdxRegs + reg) +#define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg) + +#ifndef DMA_64BIT_MASK +# define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +# define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef NET_IP_ALIGN +# define NET_IP_ALIGN 2 +#endif + +#ifndef NETDEV_TX_OK +# define NETDEV_TX_OK 0 +#endif + +#define LUXOR_MAX_PORT 2 +#define BDX_MAX_RX_DONE 150 +#define BDX_TXF_DESC_SZ 16 +#define BDX_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16) +#define BDX_MIN_TX_LEVEL 256 +#define BDX_NO_UPD_PACKETS 40 + +struct pci_nic { + int port_num; + void __iomem *regs; + int irq_type; + struct bdx_priv *priv[LUXOR_MAX_PORT]; +}; + +enum { IRQ_INTX, IRQ_MSI, IRQ_MSIX }; + +#define PCK_TH_MULT 128 +#define INT_COAL_MULT 2 + +#define BITS_MASK(nbits) ((1<<nbits)-1) +#define GET_BITS_SHIFT(x, nbits, nshift) (((x)>>nshift)&BITS_MASK(nbits)) +#define BITS_SHIFT_MASK(nbits, nshift) (BITS_MASK(nbits)<<nshift) +#define BITS_SHIFT_VAL(x, nbits, nshift) (((x)&BITS_MASK(nbits))<<nshift) +#define BITS_SHIFT_CLEAR(x, nbits, nshift) \ + ((x)&(~BITS_SHIFT_MASK(nbits, nshift))) + +#define GET_INT_COAL(x) GET_BITS_SHIFT(x, 15, 0) +#define GET_INT_COAL_RC(x) GET_BITS_SHIFT(x, 1, 15) +#define GET_RXF_TH(x) GET_BITS_SHIFT(x, 4, 16) +#define GET_PCK_TH(x) GET_BITS_SHIFT(x, 4, 20) + +#define INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) \ + ((coal)|((coal_rc)<<15)|((rxf_th)<<16)|((pck_th)<<20)) + +struct fifo { + dma_addr_t da; /* physical address of fifo (used by HW) */ + char *va; /* virtual address of fifo (used by SW) */ + u32 rptr, wptr; /* cached values of RPTR and WPTR registers, + they're 32 bits on both 32 and 64 archs */ + u16 reg_CFG0, reg_CFG1; + u16 reg_RPTR, reg_WPTR; + u16 memsz; /* memory size allocated for fifo */ + u16 size_mask; + u16 pktsz; /* skb packet size to allocate */ + u16 rcvno; /* number of buffers that come from this RXF */ +}; + +struct txf_fifo { + struct fifo m; /* minimal set of variables used by all fifos */ +}; + +struct txd_fifo { + struct fifo m; /* minimal set of variables used by all fifos */ +}; + +struct rxf_fifo { + struct fifo m; /* minimal set of variables used by all fifos */ +}; + +struct rxd_fifo { + struct fifo m; /* minimal set of variables used by all fifos */ +}; + +struct rx_map { + u64 dma; + struct sk_buff *skb; +}; + +struct rxdb { + int *stack; + struct rx_map *elems; + int nelem; + int top; +}; + +union bdx_dma_addr { + dma_addr_t dma; + struct sk_buff *skb; +}; + +/* Entry in the db. 
+ * if len == 0 addr is dma + * if len != 0 addr is skb */ +struct tx_map { + union bdx_dma_addr addr; + int len; +}; + +/* tx database - implemented as circular fifo buffer*/ +struct txdb { + struct tx_map *start; /* points to the first element */ + struct tx_map *end; /* points just AFTER the last element */ + struct tx_map *rptr; /* points to the next element to read */ + struct tx_map *wptr; /* points to the next element to write */ + int size; /* number of elements in the db */ +}; + +/*Internal stats structure*/ +struct bdx_stats { + u64 InUCast; /* 0x7200 */ + u64 InMCast; /* 0x7210 */ + u64 InBCast; /* 0x7220 */ + u64 InPkts; /* 0x7230 */ + u64 InErrors; /* 0x7240 */ + u64 InDropped; /* 0x7250 */ + u64 FrameTooLong; /* 0x7260 */ + u64 FrameSequenceErrors; /* 0x7270 */ + u64 InVLAN; /* 0x7280 */ + u64 InDroppedDFE; /* 0x7290 */ + u64 InDroppedIntFull; /* 0x72A0 */ + u64 InFrameAlignErrors; /* 0x72B0 */ + + /* 0x72C0-0x72E0 RSRV */ + + u64 OutUCast; /* 0x72F0 */ + u64 OutMCast; /* 0x7300 */ + u64 OutBCast; /* 0x7310 */ + u64 OutPkts; /* 0x7320 */ + + /* 0x7330-0x7360 RSRV */ + + u64 OutVLAN; /* 0x7370 */ + u64 InUCastOctects; /* 0x7380 */ + u64 OutUCastOctects; /* 0x7390 */ + + /* 0x73A0-0x73B0 RSRV */ + + u64 InBCastOctects; /* 0x73C0 */ + u64 OutBCastOctects; /* 0x73D0 */ + u64 InOctects; /* 0x73E0 */ + u64 OutOctects; /* 0x73F0 */ +}; + +struct bdx_priv { + void __iomem *pBdxRegs; + struct net_device *ndev; + + struct napi_struct napi; + + /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */ + struct rxd_fifo rxd_fifo0; + struct rxf_fifo rxf_fifo0; + struct rxdb *rxdb; /* rx dbs to store skb pointers */ + int napi_stop; + struct vlan_group *vlgrp; + + /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */ + struct txd_fifo txd_fifo0; + struct txf_fifo txf_fifo0; + + struct txdb txdb; + int tx_level; +#ifdef BDX_DELAY_WPTR + int tx_update_mark; + int tx_noupd; +#endif + spinlock_t tx_lock; /* NETIF_F_LLTX mode */ + + /* rarely used */ + u8 port; + u32 msg_enable; + int stats_flag; + struct bdx_stats hw_stats; + struct net_device_stats net_stats; + struct pci_dev *pdev; + + struct pci_nic *nic; + + u8 txd_size; + u8 txf_size; + u8 rxd_size; + u8 rxf_size; + u32 rdintcm; + u32 tdintcm; +}; + +/* RX FREE descriptor - 64bit*/ +struct rxf_desc { + u32 info; /* Buffer Count + Info - described below */ + u32 va_lo; /* VAdr[31:0] */ + u32 va_hi; /* VAdr[63:32] */ + u32 pa_lo; /* PAdr[31:0] */ + u32 pa_hi; /* PAdr[63:32] */ + u32 len; /* Buffer Length */ +}; + +#define GET_RXD_BC(x) GET_BITS_SHIFT((x), 5, 0) +#define GET_RXD_RXFQ(x) GET_BITS_SHIFT((x), 2, 8) +#define GET_RXD_TO(x) GET_BITS_SHIFT((x), 1, 15) +#define GET_RXD_TYPE(x) GET_BITS_SHIFT((x), 4, 16) +#define GET_RXD_ERR(x) GET_BITS_SHIFT((x), 6, 21) +#define GET_RXD_RXP(x) GET_BITS_SHIFT((x), 1, 27) +#define GET_RXD_PKT_ID(x) GET_BITS_SHIFT((x), 3, 28) +#define GET_RXD_VTAG(x) GET_BITS_SHIFT((x), 1, 31) +#define GET_RXD_VLAN_ID(x) GET_BITS_SHIFT((x), 12, 0) +#define GET_RXD_CFI(x) GET_BITS_SHIFT((x), 1, 12) +#define GET_RXD_PRIO(x) GET_BITS_SHIFT((x), 3, 13) + +struct rxd_desc { + u32 rxd_val1; + u16 len; + u16 rxd_vlan; + u32 va_lo; + u32 va_hi; +}; + +/* PBL describes each virtual buffer to be */ +/* transmitted from the host.*/ +struct pbl { + u32 pa_lo; + u32 pa_hi; + u32 len; +}; + +/* First word for TXD descriptor. 
It means: type = 3 for regular Tx packet, + * hw_csum = 7 for ip+udp+tcp hw checksums */ +#define TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) \ + ((bc) | ((checksum)<<5) | ((vtag)<<8) | \ + ((lgsnd)<<9) | (0x30000) | ((vlan_id)<<20)) + +struct txd_desc { + u32 txd_val1; + u16 mss; + u16 length; + u32 va_lo; + u32 va_hi; + struct pbl pbl[0]; /* Fragments */ +} __attribute__ ((packed)); + +/* Register region size */ +#define BDX_REGS_SIZE 0x1000 + +/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */ +#define regTXD_CFG1_0 0x4000 +#define regRXF_CFG1_0 0x4010 +#define regRXD_CFG1_0 0x4020 +#define regTXF_CFG1_0 0x4030 +#define regTXD_CFG0_0 0x4040 +#define regRXF_CFG0_0 0x4050 +#define regRXD_CFG0_0 0x4060 +#define regTXF_CFG0_0 0x4070 +#define regTXD_WPTR_0 0x4080 +#define regRXF_WPTR_0 0x4090 +#define regRXD_WPTR_0 0x40A0 +#define regTXF_WPTR_0 0x40B0 +#define regTXD_RPTR_0 0x40C0 +#define regRXF_RPTR_0 0x40D0 +#define regRXD_RPTR_0 0x40E0 +#define regTXF_RPTR_0 0x40F0 +#define regTXF_RPTR_3 0x40FC + +/* hardware versioning */ +#define FW_VER 0x5010 +#define SROM_VER 0x5020 +#define FPGA_VER 0x5030 +#define FPGA_SEED 0x5040 + +/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */ +#define regISR regISR0 +#define regISR0 0x5100 + +#define regIMR regIMR0 +#define regIMR0 0x5110 + +#define regRDINTCM0 0x5120 +#define regRDINTCM2 0x5128 + +#define regTDINTCM0 0x5130 + +#define regISR_MSK0 0x5140 + +#define regINIT_SEMAPHORE 0x5170 +#define regINIT_STATUS 0x5180 + +#define regMAC_LNK_STAT 0x0200 +#define MAC_LINK_STAT 0x4 /* Link state */ + +#define regGMAC_RXF_A 0x1240 + +#define regUNC_MAC0_A 0x1250 +#define regUNC_MAC1_A 0x1260 +#define regUNC_MAC2_A 0x1270 + +#define regVLAN_0 0x1800 + +#define regMAX_FRAME_A 0x12C0 + +#define regRX_MAC_MCST0 0x1A80 +#define regRX_MAC_MCST1 0x1A84 +#define MAC_MCST_NUM 15 +#define regRX_MCST_HASH0 0x1A00 +#define MAC_MCST_HASH_NUM 8 + +#define regVPC 0x2300 +#define regVIC 0x2320 +#define regVGLB 0x2340 + +#define regCLKPLL 0x5000 + +/*for 10G only*/ +#define regREVISION 0x6000 +#define regSCRATCH 0x6004 +#define regCTRLST 0x6008 +#define regMAC_ADDR_0 0x600C +#define regMAC_ADDR_1 0x6010 +#define regFRM_LENGTH 0x6014 +#define regPAUSE_QUANT 0x6018 +#define regRX_FIFO_SECTION 0x601C +#define regTX_FIFO_SECTION 0x6020 +#define regRX_FULLNESS 0x6024 +#define regTX_FULLNESS 0x6028 +#define regHASHTABLE 0x602C +#define regMDIO_ST 0x6030 +#define regMDIO_CTL 0x6034 +#define regMDIO_DATA 0x6038 +#define regMDIO_ADDR 0x603C + +#define regRST_PORT 0x7000 +#define regDIS_PORT 0x7010 +#define regRST_QU 0x7020 +#define regDIS_QU 0x7030 + +#define regCTRLST_TX_ENA 0x0001 +#define regCTRLST_RX_ENA 0x0002 +#define regCTRLST_PRM_ENA 0x0010 +#define regCTRLST_PAD_ENA 0x0020 + +#define regCTRLST_BASE (regCTRLST_PAD_ENA|regCTRLST_PRM_ENA) + +#define regRX_FLT 0x1400 + +/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c*/ +#define TX_RX_CFG1_BASE 0xffffffff /*0-31 */ +#define TX_RX_CFG0_BASE 0xfffff000 /*31:12 */ +#define TX_RX_CFG0_RSVD 0x0ffc /*11:2 */ +#define TX_RX_CFG0_SIZE 0x0003 /*1:0 */ + +/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */ +#define TXF_WPTR_WR_PTR 0x7ff8 /*14:3 */ + +/* TXD TXF RXF RXD READ 0x00CO --- 0x00FC */ +#define TXF_RPTR_RD_PTR 0x7ff8 /*14:3 */ + +#define TXF_WPTR_MASK 0x7ff0 /* last 4 bits are dropped + * size is rounded to 16 */ + +/* regISR 0x0100 */ +/* regIMR 0x0110 */ +#define IMR_INPROG 0x80000000 /*31 */ +#define IR_LNKCHG1 0x10000000 /*28 */ +#define IR_LNKCHG0 0x08000000 /*27 */ +#define IR_GPIO 0x04000000 /*26 */ 
+#define IR_RFRSH 0x02000000 /*25 */ +#define IR_RSVD 0x01000000 /*24 */ +#define IR_SWI 0x00800000 /*23 */ +#define IR_RX_FREE_3 0x00400000 /*22 */ +#define IR_RX_FREE_2 0x00200000 /*21 */ +#define IR_RX_FREE_1 0x00100000 /*20 */ +#define IR_RX_FREE_0 0x00080000 /*19 */ +#define IR_TX_FREE_3 0x00040000 /*18 */ +#define IR_TX_FREE_2 0x00020000 /*17 */ +#define IR_TX_FREE_1 0x00010000 /*16 */ +#define IR_TX_FREE_0 0x00008000 /*15 */ +#define IR_RX_DESC_3 0x00004000 /*14 */ +#define IR_RX_DESC_2 0x00002000 /*13 */ +#define IR_RX_DESC_1 0x00001000 /*12 */ +#define IR_RX_DESC_0 0x00000800 /*11 */ +#define IR_PSE 0x00000400 /*10 */ +#define IR_TMR3 0x00000200 /*9 */ +#define IR_TMR2 0x00000100 /*8 */ +#define IR_TMR1 0x00000080 /*7 */ +#define IR_TMR0 0x00000040 /*6 */ +#define IR_VNT 0x00000020 /*5 */ +#define IR_RxFL 0x00000010 /*4 */ +#define IR_SDPERR 0x00000008 /*3 */ +#define IR_TR 0x00000004 /*2 */ +#define IR_PCIE_LINK 0x00000002 /*1 */ +#define IR_PCIE_TOUT 0x00000001 /*0 */ + +#define IR_EXTRA (IR_RX_FREE_0 | IR_LNKCHG0 | IR_PSE | \ + IR_TMR0 | IR_PCIE_LINK | IR_PCIE_TOUT) +#define IR_RUN (IR_EXTRA | IR_RX_DESC_0 | IR_TX_FREE_0) +#define IR_ALL 0xfdfffff7 + +#define IR_LNKCHG0_ofst 27 + +#define GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */ +#define GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */ +#define GMAC_RX_FILTER_RSV0 0x0200 /* reserved */ +#define GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */ +#define GMAC_RX_FILTER_AOF 0x0080 /* accept over run */ +#define GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */ +#define GMAC_RX_FILTER_ARUNT 0x0020 /* accept under run */ +#define GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */ +#define GMAC_RX_FILTER_AM 0x0008 /* accept multicast */ +#define GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */ +#define GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscous mode */ + +#define MAX_FRAME_AB_VAL 0x3fff /* 13:0 */ + +#define CLKPLL_PLLLKD 0x0200 /*9 */ +#define CLKPLL_RSTEND 0x0100 /*8 */ +#define CLKPLL_SFTRST 0x0001 /*0 */ + +#define CLKPLL_LKD (CLKPLL_PLLLKD|CLKPLL_RSTEND) + +/* + * PCI-E Device Control Register (Offset 0x88) + * Source: Luxor Data Sheet, 7.1.3.3.3 + */ +#define PCI_DEV_CTRL_REG 0x88 +#define GET_DEV_CTRL_MAXPL(x) GET_BITS_SHIFT(x, 3, 5) +#define GET_DEV_CTRL_MRRS(x) GET_BITS_SHIFT(x, 3, 12) + +/* + * PCI-E Link Status Register (Offset 0x92) + * Source: Luxor Data Sheet, 7.1.3.3.7 + */ +#define PCI_LINK_STATUS_REG 0x92 +#define GET_LINK_STATUS_LANES(x) GET_BITS_SHIFT(x, 6, 4) + +/* Debugging Macros */ + +#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args) +#define DBG2(fmt, args...) \ + printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) + +#define BDX_ASSERT(x) BUG_ON(x) + +#ifdef DEBUG + +#define ENTER do { \ + printk(KERN_ERR "%s:%-5d: ENTER\n", __FUNCTION__, __LINE__); \ +} while (0) + +#define RET(args...) do { \ + printk(KERN_ERR "%s:%-5d: RETURN\n", __FUNCTION__, __LINE__); \ +return args; } while (0) + +#define DBG(fmt, args...) \ + printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) +#else +#define ENTER do { } while (0) +#define RET(args...) return args +#define DBG(fmt, args...) do { } while (0) +#endif + +#endif /* _BDX__H */ diff --git a/drivers/net/tehuti_fw.h b/drivers/net/tehuti_fw.h new file mode 100644 index 000000000000..2c603a8a4383 --- /dev/null +++ b/drivers/net/tehuti_fw.h @@ -0,0 +1,10712 @@ +/* + * Tehuti Networks(R) Network Driver + * Copyright (C) 2007 Tehuti Networks Ltd. 
All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* Loading Firmware */ +/* INT_MEM Ver */ +static u32 s_firmLoad[] = { + 0x000f0002, + 0x40718000, + 0x0000002d, + 0xc0000000, + 0x000f0002, + 0x00718001, + 0x0000002d, + 0xc0800000, + 0x000f0002, + 0x00718002, + 0x0000002d, + 0xc1000000, + 0x000f0002, + 0x00718003, + 0x0000002d, + 0xc1800000, + 0x000f0002, + 0x00718004, + 0x0000002d, + 0xc2000000, + 0x000f0002, + 0x00718005, + 0x0000002d, + 0xc2800000, + 0x000f0002, + 0x00718006, + 0x0000002d, + 0xc3000000, + 0x000f0002, + 0x00718007, + 0x0000002d, + 0xc3800000, + 0x000f0002, + 0x00718008, + 0x0000002d, + 0xc4000000, + 0x000f0002, + 0x00718009, + 0x0000002d, + 0xc4800000, + 0x000f0002, + 0x0071800a, + 0x0000002d, + 0xc5000000, + 0x000f0002, + 0x0071800b, + 0x0000002d, + 0xc5800000, + 0x000f0002, + 0x0071800c, + 0x0000002d, + 0xc6000000, + 0x000f0002, + 0x0071800d, + 0x0000002d, + 0xc6800000, + 0x000f0002, + 0x0071800e, + 0x0000002d, + 0xc7000000, + 0x000f0002, + 0x0071800f, + 0x0000002d, + 0xc7800000, + 0x000f0002, + 0x00718010, + 0x0000002d, + 0xc8000000, + 0x000f0002, + 0x00718011, + 0x0000002d, + 0xc8800000, + 0x000f0002, + 0x00718012, + 0x0000002d, + 0xc9000000, + 0x000f0002, + 0x00718013, + 0x0000002d, + 0xc9800000, + 0x000f0002, + 0x00718014, + 0x0000002d, + 0xca000000, + 0x000f0002, + 0x00718015, + 0x0000002d, + 0xca800000, + 0x000f0002, + 0x00718016, + 0x0000002d, + 0xcb000000, + 0x000f0002, + 0x00718017, + 0x0000002d, + 0xcb800000, + 0x000f0002, + 0x00718018, + 0x0000002d, + 0xcc000000, + 0x000f0002, + 0x00718019, + 0x0000002d, + 0xcc800000, + 0x000f0002, + 0x0071801a, + 0x0000002d, + 0xcd000000, + 0x000f0002, + 0x0071801b, + 0x0000002d, + 0xcd800000, + 0x000f0002, + 0x0071801c, + 0x0000002d, + 0xce000000, + 0x000f0002, + 0x0071801d, + 0x0000002d, + 0xce800000, + 0x000f0002, + 0x0071801e, + 0x0000002d, + 0xcf000000, + 0x000f0002, + 0x0071801f, + 0x0000002d, + 0xcf800000, + 0x000f0002, + 0x00718020, + 0x0000002d, + 0xd0000000, + 0x000f0002, + 0x00718021, + 0x0000002d, + 0xd0800000, + 0x000f0002, + 0x00718022, + 0x0000002d, + 0xd1000000, + 0x000f0002, + 0x00718023, + 0x0000002d, + 0xd1800000, + 0x000f0002, + 0x00718024, + 0x0000002d, + 0xd2000000, + 0x000f0002, + 0x00718025, + 0x0000002d, + 0xd2800000, + 0x000f0002, + 0x00718026, + 0x0000002d, + 0xd3000000, + 0x000f0002, + 0x00718027, + 0x0000002d, + 0xd3800000, + 0x000f0002, + 0x00718028, + 0x0000002d, + 0xd4000000, + 0x000f0002, + 0x00718029, + 0x0000002d, + 0xd4800000, + 0x000f0002, + 0x0071802a, + 0x0000002d, + 0xd5000000, + 0x000f0002, + 0x0071802b, + 0x0000002d, + 0xd5800000, + 0x000f0002, + 0x0071802c, + 0x0000002d, + 0xd6000000, + 0x000f0002, + 0x0071802d, + 0x0000002d, + 0xd6800000, + 0x000f0002, + 0x0071802e, + 0x0000002d, + 0xd7000000, + 0x000f0002, + 0x0071802f, + 0x0000002d, + 0xd7800000, + 0x000f0002, + 0x00718030, + 0x0000002d, + 0xd8000000, + 0x000f0002, + 0x00718031, + 0x0000002d, + 0xd8800000, + 0x000f0002, + 0x00718032, + 0x0000002d, + 0xd9000000, + 0x000f0002, + 0x00718033, + 0x0000002d, + 0xd9800000, + 0x000f0002, + 0x00718034, + 0x0000002d, + 0xda000000, + 0x000f0002, + 0x00718035, + 0x0000002d, + 0xda800000, + 0x000f0002, + 0x00718036, + 0x0000002d, + 0xdb000000, + 0x000f0002, + 0x00718037, + 0x0000002d, + 0xdb800000, + 0x000f0002, + 0x00718038, + 0x0000007b, + 0xdd608000, + 
0x000f0002, + 0x00718039, + 0x0000002d, + 0xdd000000, + 0x000f0002, + 0x0071803a, + 0x0000002d, + 0xdb800000, + 0x000f0002, + 0x0071803b, + 0x0000002d, + 0xdd000000, + 0x000f0002, + 0x0071803c, + 0x0000002d, + 0xdd000000, + 0x000f0002, + 0x0071803d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718040, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718041, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718042, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718043, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718044, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718045, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718046, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718047, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718048, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718049, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718050, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718051, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718052, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718053, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718054, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718055, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718056, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718057, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718058, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718059, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718060, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718061, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718062, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718063, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718064, + 0x0000002d, + 0xdb000000, + 0x000f0002, + 0x00718065, + 0x0000003f, + 0xdd000104, + 0x000f0002, + 0x00718066, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718067, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718068, + 0x0000003f, + 0xdd000804, + 0x000f0002, + 0x00718069, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071806a, + 0x0000003f, + 0xdd003004, + 0x000f0002, + 0x0071806b, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071806c, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071806d, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x0071806e, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071806f, + 0x0000003f, + 0xdd003d04, + 0x000f0002, + 0x00718070, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718071, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718072, + 0x0000003f, + 0xdd000704, + 0x000f0002, + 0x00718073, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718074, + 0x0000003f, + 0xdd002884, + 0x000f0002, + 0x00718075, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718076, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718077, + 0x0000003f, + 0xdd003704, + 0x000f0002, + 0x00718078, 
+ 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718079, + 0x0000003f, + 0xdd002904, + 0x000f0002, + 0x0071807a, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071807b, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071807c, + 0x0000003f, + 0xdd04aa04, + 0x000f0002, + 0x0071807d, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071807e, + 0x0000003f, + 0xdd002804, + 0x000f0002, + 0x0071807f, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718080, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718081, + 0x0000003f, + 0xdd003104, + 0x000f0002, + 0x00718082, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718083, + 0x0000003f, + 0xdd002b84, + 0x000f0002, + 0x00718084, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718085, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718086, + 0x0000003f, + 0xdd01e404, + 0x000f0002, + 0x00718087, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718088, + 0x0000003f, + 0xd7800084, + 0x000f0002, + 0x00718089, + 0x0000003f, + 0xd7980001, + 0x000f0002, + 0x0071808a, + 0x00000059, + 0xd78037ef, + 0x000f0002, + 0x0071808b, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071808c, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071808d, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071808e, + 0x0000002d, + 0xd7d6027f, + 0x000f0002, + 0x0071808f, + 0x00000018, + 0x17ff0081, + 0x000f0002, + 0x00718090, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718091, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718092, + 0x0000002d, + 0xd7d800b8, + 0x000f0002, + 0x00718093, + 0x00000018, + 0x17eb0081, + 0x000f0002, + 0x00718094, + 0x0000003f, + 0xdd002904, + 0x000f0002, + 0x00718095, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718096, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718097, + 0x0000003f, + 0xdd04aa84, + 0x000f0002, + 0x00718098, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718099, + 0x0000003f, + 0xdd002b04, + 0x000f0002, + 0x0071809a, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071809b, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071809c, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x0071809d, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071809e, + 0x0000003f, + 0xdd002984, + 0x000f0002, + 0x0071809f, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180a0, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180a1, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x007180a2, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180a3, + 0x0000003f, + 0xdd002a04, + 0x000f0002, + 0x007180a4, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180a5, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180a6, + 0x0000003f, + 0xdd009184, + 0x000f0002, + 0x007180a7, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180a8, + 0x0000003f, + 0xd6801984, + 0x000f0002, + 0x007180a9, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x007180aa, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007180ab, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007180ac, + 0x0000003f, + 0xdd002b04, + 0x000f0002, + 0x007180ad, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180ae, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180af, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x007180b0, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180b1, + 0x0000003f, + 0xdd002a84, + 0x000f0002, + 0x007180b2, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180b3, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180b4, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x007180b5, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180b6, + 0x0000003f, + 0xd6800c84, + 0x000f0002, + 0x007180b7, + 0x0000003f, + 
0xd6800001, + 0x000f0002, + 0x007180b8, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007180b9, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007180ba, + 0x0000003f, + 0xdd002a84, + 0x000f0002, + 0x007180bb, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180bc, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180bd, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x007180be, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180bf, + 0x0000003f, + 0xd6800f84, + 0x000f0002, + 0x007180c0, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x007180c1, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007180c2, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007180c3, + 0x0000003f, + 0xdd002a04, + 0x000f0002, + 0x007180c4, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180c5, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180c6, + 0x0000003f, + 0xdd001184, + 0x000f0002, + 0x007180c7, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180c8, + 0x0000003f, + 0xdd002884, + 0x000f0002, + 0x007180c9, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180ca, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180cb, + 0x0000003f, + 0xdd003784, + 0x000f0002, + 0x007180cc, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180cd, + 0x0000002d, + 0xd3800000, + 0x000f0002, + 0x007180ce, + 0x0000003f, + 0xd2003780, + 0x000f0002, + 0x007180cf, + 0x0000003f, + 0xd1800404, + 0x000f0002, + 0x007180d0, + 0x0000003f, + 0xd1840001, + 0x000f0002, + 0x007180d1, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180d2, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180d3, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180d4, + 0x0000003f, + 0xd17fff84, + 0x000f0002, + 0x007180d5, + 0x0000003f, + 0xd17fff81, + 0x000f0002, + 0x007180d6, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180d7, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180d8, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180d9, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180da, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180db, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007180dc, + 0x0000003f, + 0xd6800784, + 0x000f0002, + 0x007180dd, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x007180de, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007180df, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007180e0, + 0x00000049, + 0xdd003b63, + 0x000f0002, + 0x007180e1, + 0x00000059, + 0xdd003b76, + 0x000f0002, + 0x007180e2, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180e3, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180e4, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180e5, + 0x0000002d, + 0xdd06027f, + 0x000f0002, + 0x007180e6, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x007180e7, + 0x00000045, + 0xdd003139, + 0x000f0002, + 0x007180e8, + 0x00000094, + 0x000b313b, + 0x000f0002, + 0x007180e9, + 0x00000094, + 0x0009313d, + 0x000f0002, + 0x007180ea, + 0x00000094, + 0x0007313f, + 0x000f0002, + 0x007180eb, + 0x00000094, + 0x00053b76, + 0x000f0002, + 0x007180ec, + 0x00000009, + 0xc1ed3d7a, + 0x000f0002, + 0x007180ed, + 0x0000003f, + 0xd200b780, + 0x000f0002, + 0x007180ee, + 0x0000003f, + 0xdd002884, + 0x000f0002, + 0x007180ef, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007180f0, + 0x00000069, + 0xdd003264, + 0x000f0002, + 0x007180f1, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007180f2, + 0x0000003f, + 0xd6800784, + 0x000f0002, + 0x007180f3, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x007180f4, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007180f5, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007180f6, + 0x00000049, + 0xdd003b63, + 0x000f0002, 
+ 0x007180f7, + 0x00000059, + 0xdd003b76, + 0x000f0002, + 0x007180f8, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180f9, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180fa, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180fb, + 0x0000002d, + 0xdd06027f, + 0x000f0002, + 0x007180fc, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x007180fd, + 0x00000045, + 0xdd00313a, + 0x000f0002, + 0x007180fe, + 0x00000018, + 0x1d2d3b76, + 0x000f0002, + 0x007180ff, + 0x00000045, + 0xdd00313c, + 0x000f0002, + 0x00718100, + 0x00000018, + 0x1d133b76, + 0x000f0002, + 0x00718101, + 0x00000045, + 0xdd00313e, + 0x000f0002, + 0x00718102, + 0x00000018, + 0x1d1b3b76, + 0x000f0002, + 0x00718103, + 0x0000003f, + 0xdd003004, + 0x000f0002, + 0x00718104, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718105, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718106, + 0x0000003f, + 0xdd000104, + 0x000f0002, + 0x00718107, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718108, + 0x00000009, + 0xc52d3d7a, + 0x000f0002, + 0x00718109, + 0x00000029, + 0xd2010064, + 0x000f0002, + 0x0071810a, + 0x0000003f, + 0xdd002884, + 0x000f0002, + 0x0071810b, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071810c, + 0x00000069, + 0xdd003264, + 0x000f0002, + 0x0071810d, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071810e, + 0x00000009, + 0xc2293d7a, + 0x000f0002, + 0x0071810f, + 0x00000029, + 0xd2000064, + 0x000f0002, + 0x00718110, + 0x0000003f, + 0xdd002884, + 0x000f0002, + 0x00718111, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718112, + 0x00000069, + 0xdd003264, + 0x000f0002, + 0x00718113, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718114, + 0x00000049, + 0xdd003b63, + 0x000f0002, + 0x00718115, + 0x00000059, + 0xdd003b76, + 0x000f0002, + 0x00718116, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718117, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718118, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718119, + 0x0000002d, + 0xdd06027f, + 0x000f0002, + 0x0071811a, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x0071811b, + 0x00000045, + 0xdd00313a, + 0x000f0002, + 0x0071811c, + 0x00000018, + 0x1d0f3b76, + 0x000f0002, + 0x0071811d, + 0x0000003f, + 0xdd003004, + 0x000f0002, + 0x0071811e, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071811f, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718120, + 0x0000003f, + 0xdd000104, + 0x000f0002, + 0x00718121, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718122, + 0x00000009, + 0xc52d3d7a, + 0x000f0002, + 0x00718123, + 0x0000002d, + 0xd1080082, + 0x000f0002, + 0x00718124, + 0x00000008, + 0x23c33d7a, + 0x000f0002, + 0x00718125, + 0x00000049, + 0xd6003b0a, + 0x000f0002, + 0x00718126, + 0x0000003f, + 0xd3000004, + 0x000f0002, + 0x00718127, + 0x0000003f, + 0xd3040001, + 0x000f0002, + 0x00718128, + 0x0000002f, + 0xd6814085, + 0x000f0002, + 0x00718129, + 0x0000003f, + 0xd4ffff84, + 0x000f0002, + 0x0071812a, + 0x0000003f, + 0xd4800781, + 0x000f0002, + 0x0071812b, + 0x0000003f, + 0xd1ffff84, + 0x000f0002, + 0x0071812c, + 0x0000003f, + 0xd1800001, + 0x000f0002, + 0x0071812d, + 0x00000049, + 0xdd003666, + 0x000f0002, + 0x0071812e, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x0071812f, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x00718130, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x00718131, + 0x00000069, + 0xdd003b69, + 0x000f0002, + 0x00718132, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x00718133, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x00718134, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x00718135, + 0x00000069, + 0xdd003b69, + 0x000f0002, + 0x00718136, + 
0x00000061, + 0xf600046c, + 0x000f0002, + 0x00718137, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x00718138, + 0x00000018, + 0x3d6b0081, + 0x000f0002, + 0x00718139, + 0x00000049, + 0xd600058b, + 0x000f0002, + 0x0071813a, + 0x0000002f, + 0xd6810106, + 0x000f0002, + 0x0071813b, + 0x0000002d, + 0xd2000000, + 0x000f0002, + 0x0071813c, + 0x00000021, + 0xd20000e4, + 0x000f0002, + 0x0071813d, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071813e, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x0071813f, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x00718140, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x00718141, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718142, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718143, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x00718144, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x00718145, + 0x0000002d, + 0xd1000000, + 0x000f0002, + 0x00718146, + 0x00000049, + 0xd17a33e4, + 0x000f0002, + 0x00718147, + 0x0000002f, + 0xd1710162, + 0x000f0002, + 0x00718148, + 0x0000002f, + 0xd1610162, + 0x000f0002, + 0x00718149, + 0x00000049, + 0xd14033e3, + 0x000f0002, + 0x0071814a, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071814b, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x0071814c, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x0071814d, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x0071814e, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x0071814f, + 0x00000069, + 0xd8003162, + 0x000f0002, + 0x00718150, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x00718151, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x00718152, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718153, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x00718154, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x00718155, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x00718156, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718157, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718158, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x00718159, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x0071815a, + 0x0000002d, + 0xd1000000, + 0x000f0002, + 0x0071815b, + 0x0000002d, + 0xd16c07e4, + 0x000f0002, + 0x0071815c, + 0x00000049, + 0xd14033e3, + 0x000f0002, + 0x0071815d, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071815e, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x0071815f, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x00718160, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x00718161, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718162, + 0x00000069, + 0xd8003162, + 0x000f0002, + 0x00718163, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x00718164, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x00718165, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718166, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x00718167, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x00718168, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x00718169, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x0071816a, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x0071816b, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x0071816c, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x0071816d, + 0x0000002d, + 0xd1000000, + 0x000f0002, + 0x0071816e, + 0x00000049, + 0xd17833e4, + 0x000f0002, + 0x0071816f, + 0x0000002f, + 0xd1710162, + 0x000f0002, + 0x00718170, + 0x0000002f, + 0xd1610162, + 0x000f0002, + 0x00718171, + 0x00000049, + 0xd14033e3, + 0x000f0002, + 0x00718172, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718173, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x00718174, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x00718175, + 0x00000049, + 0xd800366c, 
+ 0x000f0002, + 0x00718176, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718177, + 0x00000069, + 0xd8003162, + 0x000f0002, + 0x00718178, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x00718179, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x0071817a, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x0071817b, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x0071817c, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x0071817d, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x0071817e, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x0071817f, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x00718180, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x00718181, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x00718182, + 0x0000002d, + 0xd1000000, + 0x000f0002, + 0x00718183, + 0x0000002d, + 0xd16807e4, + 0x000f0002, + 0x00718184, + 0x00000049, + 0xd14033e3, + 0x000f0002, + 0x00718185, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718186, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x00718187, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x00718188, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x00718189, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x0071818a, + 0x00000069, + 0xd8003162, + 0x000f0002, + 0x0071818b, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x0071818c, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x0071818d, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x0071818e, + 0x00000008, + 0x22790081, + 0x000f0002, + 0x0071818f, + 0x00000049, + 0xd600060c, + 0x000f0002, + 0x00718190, + 0x0000002f, + 0xd6810106, + 0x000f0002, + 0x00718191, + 0x0000003f, + 0xd4800002, + 0x000f0002, + 0x00718192, + 0x0000003f, + 0xd4800084, + 0x000f0002, + 0x00718193, + 0x0000003f, + 0xd5000102, + 0x000f0002, + 0x00718194, + 0x0000003f, + 0xd5000184, + 0x000f0002, + 0x00718195, + 0x0000003f, + 0xd5800202, + 0x000f0002, + 0x00718196, + 0x0000003f, + 0xd5800204, + 0x000f0002, + 0x00718197, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718198, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x00718199, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x0071819a, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x0071819b, + 0x00000069, + 0xd80034e9, + 0x000f0002, + 0x0071819c, + 0x00000069, + 0xd800356a, + 0x000f0002, + 0x0071819d, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x0071819e, + 0x00000031, + 0xd600016c, + 0x000f0002, + 0x0071819f, + 0x00000041, + 0xd48034eb, + 0x000f0002, + 0x007181a0, + 0x00000041, + 0xd500356b, + 0x000f0002, + 0x007181a1, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007181a2, + 0x00000018, + 0x37eb0081, + 0x000f0002, + 0x007181a3, + 0x00000049, + 0xd6003b0d, + 0x000f0002, + 0x007181a4, + 0x00000049, + 0xd6803b07, + 0x000f0002, + 0x007181a5, + 0x0000002d, + 0xd1000000, + 0x000f0002, + 0x007181a6, + 0x00000049, + 0xdd003666, + 0x000f0002, + 0x007181a7, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181a8, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181a9, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181aa, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181ab, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181ac, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181ad, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181ae, + 0x00000069, + 0xdd003b76, + 0x000f0002, + 0x007181af, + 0x00000061, + 0xf600046c, + 0x000f0002, + 0x007181b0, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007181b1, + 0x00000018, + 0x37eb0081, + 0x000f0002, + 0x007181b2, + 0x00000049, + 0xd6003b0e, + 0x000f0002, + 0x007181b3, + 0x00000049, + 0xd6803b08, + 0x000f0002, + 0x007181b4, + 0x00000049, + 0xd5803b09, + 0x000f0002, + 
0x007181b5, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181b6, + 0x0000002d, + 0xdd06017f, + 0x000f0002, + 0x007181b7, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x007181b8, + 0x00000049, + 0xd800366c, + 0x000f0002, + 0x007181b9, + 0x00000069, + 0xd80035eb, + 0x000f0002, + 0x007181ba, + 0x00000069, + 0xd80033e7, + 0x000f0002, + 0x007181bb, + 0x00000069, + 0xd80037ef, + 0x000f0002, + 0x007181bc, + 0x00000041, + 0xd60007ec, + 0x000f0002, + 0x007181bd, + 0x00000041, + 0xd580086b, + 0x000f0002, + 0x007181be, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007181bf, + 0x00000018, + 0x37ed0081, + 0x000f0002, + 0x007181c0, + 0x0000003f, + 0xd4ffff80, + 0x000f0002, + 0x007181c1, + 0x0000003f, + 0xd5020004, + 0x000f0002, + 0x007181c2, + 0x0000003f, + 0xd5180001, + 0x000f0002, + 0x007181c3, + 0x00000049, + 0xdd003b6a, + 0x000f0002, + 0x007181c4, + 0x00000069, + 0xdd003b69, + 0x000f0002, + 0x007181c5, + 0x00000069, + 0xdd003b69, + 0x000f0002, + 0x007181c6, + 0x00000021, + 0xd50000ea, + 0x000f0002, + 0x007181c7, + 0x00000049, + 0xdd0e3b6a, + 0x000f0002, + 0x007181c8, + 0x00000035, + 0xd104007a, + 0x000f0002, + 0x007181c9, + 0x00000018, + 0x37f50081, + 0x000f0002, + 0x007181ca, + 0x0000003f, + 0xd4ffff80, + 0x000f0002, + 0x007181cb, + 0x0000003f, + 0xd5040004, + 0x000f0002, + 0x007181cc, + 0x0000003f, + 0xd5180001, + 0x000f0002, + 0x007181cd, + 0x00000069, + 0xdd003b69, + 0x000f0002, + 0x007181ce, + 0x00000069, + 0xdd003b69, + 0x000f0002, + 0x007181cf, + 0x00000049, + 0xdd003b6a, + 0x000f0002, + 0x007181d0, + 0x00000051, + 0xf50000ea, + 0x000f0002, + 0x007181d1, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181d2, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181d3, + 0x00000049, + 0xdd0e3b6a, + 0x000f0002, + 0x007181d4, + 0x00000035, + 0xd104807a, + 0x000f0002, + 0x007181d5, + 0x00000018, + 0x37f50081, + 0x000f0002, + 0x007181d6, + 0x0000003f, + 0xc77f7f04, + 0x000f0002, + 0x007181d7, + 0x0000003f, + 0xc77f7f01, + 0x000f0002, + 0x007181d8, + 0x0000003f, + 0xd6804000, + 0x000f0002, + 0x007181d9, + 0x0000003f, + 0xd103c000, + 0x000f0002, + 0x007181da, + 0x00000025, + 0xde2000e2, + 0x000f0002, + 0x007181db, + 0x00000049, + 0xde80274e, + 0x000f0002, + 0x007181dc, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181dd, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181de, + 0x00000035, + 0xd10000e2, + 0x000f0002, + 0x007181df, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007181e0, + 0x00000018, + 0x37f50081, + 0x000f0002, + 0x007181e1, + 0x0000003f, + 0xdd003004, + 0x000f0002, + 0x007181e2, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007181e3, + 0x00000069, + 0xdd001d3a, + 0x000f0002, + 0x007181e4, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007181e5, + 0x0000007d, + 0xc760a713, + 0x000f0002, + 0x007181e6, + 0x00000031, + 0xc0800041, + 0x000f0002, + 0x007181e7, + 0x00000031, + 0xc4000048, + 0x000f0002, + 0x007181e8, + 0x00000031, + 0xc2800045, + 0x000f0002, + 0x007181e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f4, + 0x0000002d, 
+ 0xdb000000, + 0x000f0002, + 0x007181f5, + 0x0000003f, + 0xdd003004, + 0x000f0002, + 0x007181f6, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007181f7, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007181f8, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x007181f9, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007181fa, + 0x0000002d, + 0xd3800000, + 0x000f0002, + 0x007181fb, + 0x0000003f, + 0xdd000404, + 0x000f0002, + 0x007181fc, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007181fd, + 0x00000069, + 0xdd000a14, + 0x000f0002, + 0x007181fe, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x007181ff, + 0x00000049, + 0xd1043394, + 0x000f0002, + 0x00718200, + 0x0000003f, + 0xdd000484, + 0x000f0002, + 0x00718201, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718202, + 0x00000069, + 0xdd003162, + 0x000f0002, + 0x00718203, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718204, + 0x0000003f, + 0xdd000504, + 0x000f0002, + 0x00718205, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718206, + 0x00000069, + 0xdd000a95, + 0x000f0002, + 0x00718207, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718208, + 0x00000049, + 0xd1043395, + 0x000f0002, + 0x00718209, + 0x0000003f, + 0xdd000584, + 0x000f0002, + 0x0071820a, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071820b, + 0x00000069, + 0xdd003162, + 0x000f0002, + 0x0071820c, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071820d, + 0x0000003f, + 0xdd000604, + 0x000f0002, + 0x0071820e, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071820f, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718210, + 0x0000003f, + 0xdd000084, + 0x000f0002, + 0x00718211, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718212, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718213, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718214, + 0x00000069, + 0xdd000b16, + 0x000f0002, + 0x00718215, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718216, + 0x0000003f, + 0xdd001004, + 0x000f0002, + 0x00718217, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718218, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718219, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x0071821a, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071821b, + 0x0000003f, + 0xdd001084, + 0x000f0002, + 0x0071821c, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071821d, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071821e, + 0x0000003f, + 0xdd018004, + 0x000f0002, + 0x0071821f, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718220, + 0x0000003f, + 0xdd001104, + 0x000f0002, + 0x00718221, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718222, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718223, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718224, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718225, + 0x0000003f, + 0xdd001184, + 0x000f0002, + 0x00718226, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718227, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718228, + 0x0000003f, + 0xdd160004, + 0x000f0002, + 0x00718229, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071822a, + 0x0000003f, + 0xdd001204, + 0x000f0002, + 0x0071822b, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071822c, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071822d, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x0071822e, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071822f, + 0x0000003f, + 0xdd001284, + 0x000f0002, + 0x00718230, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718231, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718232, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718233, + 0x00000069, + 0xdd003d7a, + 
0x000f0002, + 0x00718234, + 0x0000003f, + 0xdd001304, + 0x000f0002, + 0x00718235, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718236, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718237, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718238, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718239, + 0x0000003f, + 0xdd001384, + 0x000f0002, + 0x0071823a, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071823b, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071823c, + 0x0000003f, + 0xdd050004, + 0x000f0002, + 0x0071823d, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071823e, + 0x0000003f, + 0xdd002004, + 0x000f0002, + 0x0071823f, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718240, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718241, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718242, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718243, + 0x0000003f, + 0xdd002084, + 0x000f0002, + 0x00718244, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718245, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718246, + 0x0000003f, + 0xdd019004, + 0x000f0002, + 0x00718247, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718248, + 0x0000003f, + 0xdd002104, + 0x000f0002, + 0x00718249, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071824a, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071824b, + 0x0000003f, + 0xdd000084, + 0x000f0002, + 0x0071824c, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071824d, + 0x0000003f, + 0xdd002184, + 0x000f0002, + 0x0071824e, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071824f, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718250, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718251, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718252, + 0x0000003f, + 0xdd002204, + 0x000f0002, + 0x00718253, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718254, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718255, + 0x0000003f, + 0xdd000284, + 0x000f0002, + 0x00718256, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718257, + 0x0000003f, + 0xdd002284, + 0x000f0002, + 0x00718258, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718259, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071825a, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x0071825b, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071825c, + 0x0000003f, + 0xdd002304, + 0x000f0002, + 0x0071825d, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071825e, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071825f, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718260, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718261, + 0x0000003f, + 0xdd001804, + 0x000f0002, + 0x00718262, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718263, + 0x00000069, + 0xdd000b97, + 0x000f0002, + 0x00718264, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718265, + 0x00000049, + 0xd1043397, + 0x000f0002, + 0x00718266, + 0x0000003f, + 0xdd001884, + 0x000f0002, + 0x00718267, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718268, + 0x00000069, + 0xdd003162, + 0x000f0002, + 0x00718269, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071826a, + 0x0000003f, + 0xdd001904, + 0x000f0002, + 0x0071826b, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071826c, + 0x00000069, + 0xdd000c18, + 0x000f0002, + 0x0071826d, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071826e, + 0x00000049, + 0xd1043398, + 0x000f0002, + 0x0071826f, + 0x0000003f, + 0xdd001984, + 0x000f0002, + 0x00718270, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718271, + 0x00000069, + 0xdd003162, + 0x000f0002, + 0x00718272, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718273, 
+ 0x0000003f, + 0xdd001a04, + 0x000f0002, + 0x00718274, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718275, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718276, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x00718277, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718278, + 0x0000003f, + 0xdd001a84, + 0x000f0002, + 0x00718279, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071827a, + 0x00000069, + 0xdd000b16, + 0x000f0002, + 0x0071827b, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071827c, + 0x0000003f, + 0xdd001c04, + 0x000f0002, + 0x0071827d, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071827e, + 0x00000069, + 0xdd000c99, + 0x000f0002, + 0x0071827f, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718280, + 0x0000003f, + 0xdd001c84, + 0x000f0002, + 0x00718281, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718282, + 0x00000069, + 0xdd000d1a, + 0x000f0002, + 0x00718283, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718284, + 0x0000003f, + 0xdd001d04, + 0x000f0002, + 0x00718285, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718286, + 0x00000069, + 0xdd000d9b, + 0x000f0002, + 0x00718287, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718288, + 0x00000049, + 0xd104339b, + 0x000f0002, + 0x00718289, + 0x0000003f, + 0xdd001d84, + 0x000f0002, + 0x0071828a, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071828b, + 0x00000069, + 0xdd003162, + 0x000f0002, + 0x0071828c, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x0071828d, + 0x0000003f, + 0xdd001e04, + 0x000f0002, + 0x0071828e, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071828f, + 0x00000069, + 0xdd000e1c, + 0x000f0002, + 0x00718290, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718291, + 0x0000003f, + 0xdd000104, + 0x000f0002, + 0x00718292, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x00718293, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718294, + 0x0000003f, + 0xdd000f04, + 0x000f0002, + 0x00718295, + 0x00000069, + 0xdd003d7a, + 0x000f0002, + 0x00718296, + 0x0000007d, + 0xc760a713, + 0x000f0002, + 0x00718297, + 0x00000031, + 0xc0800041, + 0x000f0002, + 0x00718298, + 0x00000031, + 0xc4000048, + 0x000f0002, + 0x00718299, + 0x00000031, + 0xc2800045, + 0x000f0002, + 0x0071829a, + 0x00000031, + 0xd680006d, +/* BRDX_INIT_SDRAM */ + 0x000f000f, + 0x00700064, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000040, + 0x00000100, + 0x00000400, + 0x00000064, + 0x00000054, + 0x00000000, + 0x00002400, + 0x00002800, + 0x00000400, + 0x00002880, + 0x00000180, + 0x00000003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000051, + 0x0000017d, + 0x00000008, + 0x00000051, + 0x0000005d, + 0x00000000, + 0x00000009, + 0x00005000, + 0x00000000, + 0x00000000, +/* BRDX_INIT */ + 0x000f000f, + 0x007001f4, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000040, + 0x00000100, + 0x00000400, + 0x00000064, + 0x00000054, + 0x00000000, + 0x00002400, + 0x00002800, + 0x00000400, + 0x00002880, + 0x00000180, + 0x00000003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000051, + 0x0000017d, + 0x00000008, + 0x00000051, + 0x0000005d, + 0x00000000, + 0x00000009, + 0x00005000, + 0x00000000, + 0x00000000, +/* ZERO_INIT */ + 0x000f0002, + 0x00700000, + 0x00000001, + 0x00000000, +/* ZERO_INIT */ + 0x000f0002, + 0x00700000, + 0x00000001, + 0x00000000, +/* Loading operational Firmware */ + 0x000f0002, + 0x00718000, + 0x00000025, + 0xdd0e0002, + 0x000f0002, + 0x00718001, + 0x00000004, + 0x01d13b76, + 0x000f0002, + 0x00718002, + 0x00000025, + 0xdd0e0082, + 0x000f0002, + 0x00718003, + 0x00000004, + 0x02893b76, + 0x000f0002, + 0x00718004, + 0x00000025, + 0xdd0e0102, 
+ 0x000f0002, + 0x00718005, + 0x00000004, + 0x02853b76, + 0x000f0002, + 0x00718006, + 0x00000025, + 0xdd0e0182, + 0x000f0002, + 0x00718007, + 0x00000004, + 0x03fd3b76, + 0x000f0002, + 0x00718008, + 0x00000009, + 0xcf813b76, + 0x000f0002, + 0x00718009, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071800a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071800b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071800c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071800d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071800e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071800f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718010, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718011, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718012, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718013, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718014, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718015, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718016, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718017, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718018, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718019, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071801a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071801b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071801c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071801d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071801e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071801f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718020, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718021, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718022, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718023, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718024, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718025, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718026, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718027, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718028, + 0x00000049, + 0xc0003b00, + 0x000f0002, + 0x00718029, + 0x00000049, + 0xc0803b02, + 0x000f0002, + 0x0071802a, + 0x00000049, + 0xc1003b03, + 0x000f0002, + 0x0071802b, + 0x00000049, + 0xc1803b04, + 0x000f0002, + 0x0071802c, + 0x00000029, + 0xdf600076, + 0x000f0002, + 0x0071802d, + 0x00000049, + 0xdf443b7d, + 0x000f0002, + 0x0071802e, + 0x00000079, + 0xfd609076, + 0x000f0002, + 0x0071802f, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718030, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718031, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718032, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718033, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718034, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718035, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718036, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718037, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718038, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718039, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071803f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718040, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718041, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718042, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718043, + 0x00000000, + 0x00000000, + 0x000f0002, + 
0x00718044, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718045, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718046, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718047, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718048, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718049, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071804f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718050, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718051, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718052, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718053, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718054, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718055, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718056, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718057, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718058, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718059, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071805f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718060, + 0x0000003f, + 0xdf000003, + 0x000f0002, + 0x00718061, + 0x0000002d, + 0xdde00f81, + 0x000f0002, + 0x00718062, + 0x0000003f, + 0xdd800283, + 0x000f0002, + 0x00718063, + 0x0000002d, + 0xdd040180, + 0x000f0002, + 0x00718064, + 0x0000007d, + 0xfd150080, + 0x000f0002, + 0x00718065, + 0x0000007a, + 0x10203b76, + 0x000f0002, + 0x00718066, + 0x0000007a, + 0x30207b76, + 0x000f0002, + 0x00718067, + 0x00000021, + 0xd0240060, + 0x000f0002, + 0x00718068, + 0x0000003f, + 0xdd000104, + 0x000f0002, + 0x00718069, + 0x0000003f, + 0xdd400281, + 0x000f0002, + 0x0071806a, + 0x00000079, + 0xdd31bb03, + 0x000f0002, + 0x0071806b, + 0x00000079, + 0xdd31fb04, + 0x000f0002, + 0x0071806c, + 0x00000079, + 0xdd31bb76, + 0x000f0002, + 0x0071806d, + 0x00000079, + 0xdd31fb76, + 0x000f0002, + 0x0071806e, + 0x00000079, + 0xfd210101, + 0x000f0002, + 0x0071806f, + 0x0000007d, + 0xfd2b4081, + 0x000f0002, + 0x00718070, + 0x00000040, + 0x3d003002, + 0x000f0002, + 0x00718071, + 0x00000048, + 0x1d003b02, + 0x000f0002, + 0x00718072, + 0x00000079, + 0xdd217b76, + 0x000f0002, + 0x00718073, + 0x0000002d, + 0xdd04057f, + 0x000f0002, + 0x00718074, + 0x00000018, + 0x3d7f3b76, + 0x000f0002, + 0x00718075, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718076, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718077, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718078, + 0x00000021, + 0xe3371f76, + 0x000f0002, + 0x00718079, + 0x00000049, + 0xdd003b79, + 0x000f0002, + 0x0071807a, + 0x00000079, + 0xdd21bb76, + 0x000f0002, + 0x0071807b, + 0x00000049, + 0xdd003b79, + 0x000f0002, + 0x0071807c, + 0x00000079, + 0xdd21bb76, + 0x000f0002, + 0x0071807d, + 0x00000049, + 0xdd003b79, + 0x000f0002, + 0x0071807e, + 0x00000079, + 0xdd21bb76, + 0x000f0002, + 0x0071807f, + 0x00000079, + 0xfd609076, + 0x000f0002, + 0x00718080, + 0x00000079, + 0xdd21fb76, + 0x000f0002, + 0x00718081, + 0x0000003f, + 0xdf000083, + 0x000f0002, + 0x00718082, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718083, + 0x00000000, 
+ 0x00000000, + 0x000f0002, + 0x00718084, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718085, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718086, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718087, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718088, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718089, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071808a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071808b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071808c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071808d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071808e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071808f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718090, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718091, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718092, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718093, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718094, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718095, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718096, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718097, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718098, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718099, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071809a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071809b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071809c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071809d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071809e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071809f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180a0, + 0x0000002d, + 0xd0080803, + 0x000f0002, + 0x007180a1, + 0x00000008, + 0x21b5fb76, + 0x000f0002, + 0x007180a2, + 0x00000079, + 0xdd010081, + 0x000f0002, + 0x007180a3, + 0x00000079, + 0xdd018102, + 0x000f0002, + 0x007180a4, + 0x0000007d, + 0xfd018083, + 0x000f0002, + 0x007180a5, + 0x00000079, + 0xd0903b76, + 0x000f0002, + 0x007180a6, + 0x00000049, + 0xd0583b7a, + 0x000f0002, + 0x007180a7, + 0x00000049, + 0xd2043b00, + 0x000f0002, + 0x007180a8, + 0x0000003d, + 0xdd400003, + 0x000f0002, + 0x007180a9, + 0x00000024, + 0x32000264, + 0x000f0002, + 0x007180aa, + 0x0000003d, + 0xdd7ff803, + 0x000f0002, + 0x007180ab, + 0x00000029, + 0xd020017a, + 0x000f0002, + 0x007180ac, + 0x00000049, + 0xd0a43b0e, + 0x000f0002, + 0x007180ad, + 0x0000002d, + 0xdd0442ff, + 0x000f0002, + 0x007180ae, + 0x00000018, + 0x3d7f3b76, + 0x000f0002, + 0x007180af, + 0x0000002d, + 0xdd060082, + 0x000f0002, + 0x007180b0, + 0x00000038, + 0x300001e0, + 0x000f0002, + 0x007180b1, + 0x00000039, + 0xd0000160, + 0x000f0002, + 0x007180b2, + 0x00000079, + 0xd1b13b7c, + 0x000f0002, + 0x007180b3, + 0x0000002d, + 0xdde00fe3, + 0x000f0002, + 0x007180b4, + 0x00000049, + 0xd08030e4, + 0x000f0002, + 0x007180b5, + 0x00000079, + 0xdd317b7c, + 0x000f0002, + 0x007180b6, + 0x00000079, + 0xdd313b7c, + 0x000f0002, + 0x007180b7, + 0x0000007d, + 0xfd374082, + 0x000f0002, + 0x007180b8, + 0x00000008, + 0x017d3b76, + 0x000f0002, + 0x007180b9, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180ba, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180bb, + 0x00000049, + 0xdd000387, + 0x000f0002, + 0x007180bc, + 0x00000079, + 0xdd310408, + 0x000f0002, + 0x007180bd, + 0x00000079, + 0xdd317d7a, + 0x000f0002, + 0x007180be, + 0x00000049, + 0xd3003b7c, + 0x000f0002, + 0x007180bf, + 0x00000079, + 0xd381bb7c, + 0x000f0002, + 0x007180c0, + 0x0000007f, + 0xd101b27c, + 0x000f0002, + 0x007180c1, + 0x00000048, + 0x51003264, + 0x000f0002, + 0x007180c2, + 0x0000003d, + 0xdd400003, + 
0x000f0002, + 0x007180c3, + 0x00000008, + 0x01953162, + 0x000f0002, + 0x007180c4, + 0x00000021, + 0xd3000666, + 0x000f0002, + 0x007180c5, + 0x00000020, + 0x538000e7, + 0x000f0002, + 0x007180c6, + 0x0000003f, + 0xdd000800, + 0x000f0002, + 0x007180c7, + 0x00000079, + 0xdd01b366, + 0x000f0002, + 0x007180c8, + 0x00000079, + 0xdd01b3e7, + 0x000f0002, + 0x007180c9, + 0x00000075, + 0xf1018662, + 0x000f0002, + 0x007180ca, + 0x00000075, + 0xd201b164, + 0x000f0002, + 0x007180cb, + 0x00000078, + 0x1d007b76, + 0x000f0002, + 0x007180cc, + 0x00000025, + 0xdd0001e3, + 0x000f0002, + 0x007180cd, + 0x00000008, + 0x01b13162, + 0x000f0002, + 0x007180ce, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180cf, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180d0, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180d1, + 0x00000021, + 0xe3379f63, + 0x000f0002, + 0x007180d2, + 0x00000049, + 0xd3003b7c, + 0x000f0002, + 0x007180d3, + 0x00000079, + 0xd381bb7c, + 0x000f0002, + 0x007180d4, + 0x0000007f, + 0xd101b27c, + 0x000f0002, + 0x007180d5, + 0x00000048, + 0x51003264, + 0x000f0002, + 0x007180d6, + 0x00000075, + 0xd201b164, + 0x000f0002, + 0x007180d7, + 0x00000078, + 0x1d007b76, + 0x000f0002, + 0x007180d8, + 0x0000003f, + 0xdd000004, + 0x000f0002, + 0x007180d9, + 0x00000079, + 0xdd01c081, + 0x000f0002, + 0x007180da, + 0x00000079, + 0xfd609076, + 0x000f0002, + 0x007180db, + 0x0000002d, + 0xdd080803, + 0x000f0002, + 0x007180dc, + 0x00000078, + 0x3d01c081, + 0x000f0002, + 0x007180dd, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007180de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007180e8, + 0x00000049, + 0xd18e3b03, + 0x000f0002, + 0x007180e9, + 0x0000002f, + 0xd18100e3, + 0x000f0002, + 0x007180ea, + 0x0000003f, + 0xd1801803, + 0x000f0002, + 0x007180eb, + 0x00000049, + 0xd1043b03, + 0x000f0002, + 0x007180ec, + 0x0000003f, + 0xdd800203, + 0x000f0002, + 0x007180ed, + 0x00000049, + 0xd2043b02, + 0x000f0002, + 0x007180ee, + 0x00000049, + 0xd2843b00, + 0x000f0002, + 0x007180ef, + 0x00000025, + 0xdd0000e2, + 0x000f0002, + 0x007180f0, + 0x00000094, + 0x00134162, + 0x000f0002, + 0x007180f1, + 0x00000094, + 0x000b4362, + 0x000f0002, + 0x007180f2, + 0x00000094, + 0x001548e2, + 0x000f0002, + 0x007180f3, + 0x00000094, + 0x001b4962, + 0x000f0002, + 0x007180f4, + 0x00000094, + 0x002f4076, + 0x000f0002, + 0x007180f5, + 0x00000009, + 0xcf813d7a, + 0x000f0002, + 0x007180f6, + 0x0000001d, + 0xfd2b80e5, + 0x000f0002, + 0x007180f7, + 0x00000030, + 0x31838063, + 0x000f0002, + 0x007180f8, + 0x00000030, + 0x11828063, + 0x000f0002, + 0x007180f9, + 0x0000001d, + 0xfd2580e5, + 0x000f0002, + 0x007180fa, + 0x00000030, + 0x31830063, + 0x000f0002, + 0x007180fb, + 0x00000030, + 0x11820063, + 0x000f0002, + 0x007180fc, + 0x0000002f, + 0xd18100e3, + 0x000f0002, + 0x007180fd, + 0x0000001d, + 0xfd0980e5, + 0x000f0002, + 0x007180fe, + 0x00000030, + 0x3183d363, + 0x000f0002, + 0x007180ff, + 0x00000030, + 0x1183d263, + 0x000f0002, + 0x00718100, + 0x0000002f, + 0xd18100e3, + 0x000f0002, + 0x00718101, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x00718102, 
+ 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x00718103, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x00718104, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x00718105, + 0x00000025, + 0xd2000264, + 0x000f0002, + 0x00718106, + 0x00000018, + 0x7d77fd7a, + 0x000f0002, + 0x00718107, + 0x00000049, + 0xde203b63, + 0x000f0002, + 0x00718108, + 0x00000049, + 0xde803b79, + 0x000f0002, + 0x00718109, + 0x00000021, + 0xd18000e3, + 0x000f0002, + 0x0071810a, + 0x00000009, + 0xcf813d7a, + 0x000f0002, + 0x0071810b, + 0x00000049, + 0xdd0031e3, + 0x000f0002, + 0x0071810c, + 0x00000069, + 0xdd0e3b78, + 0x000f0002, + 0x0071810d, + 0x00000061, + 0xdd003b76, + 0x000f0002, + 0x0071810e, + 0x0000003f, + 0xdd000184, + 0x000f0002, + 0x0071810f, + 0x0000003f, + 0xdd000001, + 0x000f0002, + 0x00718110, + 0x00000035, + 0xdd0000fa, + 0x000f0002, + 0x00718111, + 0x00000018, + 0x3b7f3b76, + 0x000f0002, + 0x00718112, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718113, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718114, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x00718115, + 0x00000021, + 0xd18000e3, + 0x000f0002, + 0x00718116, + 0x00000069, + 0xdd043b79, + 0x000f0002, + 0x00718117, + 0x00000061, + 0xf18000e3, + 0x000f0002, + 0x00718118, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x00718119, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x0071811a, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x0071811b, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x0071811c, + 0x00000025, + 0xd2000264, + 0x000f0002, + 0x0071811d, + 0x00000098, + 0x605dbb76, + 0x000f0002, + 0x0071811e, + 0x00000009, + 0xcf813d7a, + 0x000f0002, + 0x0071811f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718120, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718121, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718122, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718123, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718124, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718125, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718126, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718127, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718128, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718129, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071812a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071812b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071812c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071812d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071812e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071812f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718130, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718131, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718132, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718133, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718134, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718135, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718136, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718137, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718138, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718139, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071813a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071813b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071813c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071813d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071813e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071813f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718140, + 0x00000049, + 0xd4083b01, + 0x000f0002, + 0x00718141, + 0x00000009, + 
0xc28b3d7a, + 0x000f0002, + 0x00718142, + 0x0000003f, + 0xd4000380, + 0x000f0002, + 0x00718143, + 0x00000009, + 0xc28b3d7a, + 0x000f0002, + 0x00718144, + 0x00000049, + 0xd40e3b03, + 0x000f0002, + 0x00718145, + 0x0000003f, + 0xd6420000, + 0x000f0002, + 0x00718146, + 0x0000002f, + 0xd2814080, + 0x000f0002, + 0x00718147, + 0x0000002d, + 0xd2840365, + 0x000f0002, + 0x00718148, + 0x0000003f, + 0xd6800080, + 0x000f0002, + 0x00718149, + 0x0000003f, + 0xdd040004, + 0x000f0002, + 0x0071814a, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x0071814b, + 0x00000069, + 0xdd003b6d, + 0x000f0002, + 0x0071814c, + 0x00000069, + 0xdd003b6d, + 0x000f0002, + 0x0071814d, + 0x00000031, + 0xd303d265, + 0x000f0002, + 0x0071814e, + 0x00000049, + 0xde403b66, + 0x000f0002, + 0x0071814f, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x00718150, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x00718151, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x00718152, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x00718153, + 0x00000049, + 0xd5803b7e, + 0x000f0002, + 0x00718154, + 0x00000021, + 0xde4000e6, + 0x000f0002, + 0x00718155, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x00718156, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x00718157, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x00718158, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x00718159, + 0x00000049, + 0xd5003b7e, + 0x000f0002, + 0x0071815a, + 0x00000079, + 0xdd013b76, + 0x000f0002, + 0x0071815b, + 0x0000002d, + 0xdd04407f, + 0x000f0002, + 0x0071815c, + 0x00000018, + 0x3d7f3b76, + 0x000f0002, + 0x0071815d, + 0x00000079, + 0xdd01bb76, + 0x000f0002, + 0x0071815e, + 0x00000075, + 0xfd0180e8, + 0x000f0002, + 0x0071815f, + 0x00000094, + 0x00154168, + 0x000f0002, + 0x00718160, + 0x00000094, + 0x002544e8, + 0x000f0002, + 0x00718161, + 0x00000094, + 0x002341e8, + 0x000f0002, + 0x00718162, + 0x00000094, + 0x00174568, + 0x000f0002, + 0x00718163, + 0x00000094, + 0x00154268, + 0x000f0002, + 0x00718164, + 0x00000094, + 0x002742e8, + 0x000f0002, + 0x00718165, + 0x00000094, + 0x002d4368, + 0x000f0002, + 0x00718166, + 0x00000094, + 0x002d4468, + 0x000f0002, + 0x00718167, + 0x00000094, + 0x003343e8, + 0x000f0002, + 0x00718168, + 0x00000004, + 0x03973b76, + 0x000f0002, + 0x00718169, + 0x0000000d, + 0xe30dc165, + 0x000f0002, + 0x0071816a, + 0x00000030, + 0x32030076, + 0x000f0002, + 0x0071816b, + 0x00000030, + 0x12020076, + 0x000f0002, + 0x0071816c, + 0x0000003f, + 0xd4800000, + 0x000f0002, + 0x0071816d, + 0x0000003f, + 0xd1808000, + 0x000f0002, + 0x0071816e, + 0x0000000d, + 0xe33fc165, + 0x000f0002, + 0x0071816f, + 0x00000030, + 0x32046076, + 0x000f0002, + 0x00718170, + 0x00000030, + 0x12044076, + 0x000f0002, + 0x00718171, + 0x0000003f, + 0xd4828000, + 0x000f0002, + 0x00718172, + 0x0000003f, + 0xd1808000, + 0x000f0002, + 0x00718173, + 0x0000000d, + 0xe33fc165, + 0x000f0002, + 0x00718174, + 0x00000030, + 0x32042076, + 0x000f0002, + 0x00718175, + 0x00000030, + 0x12040076, + 0x000f0002, + 0x00718176, + 0x0000003f, + 0xd4820000, + 0x000f0002, + 0x00718177, + 0x0000000d, + 0xe363c165, + 0x000f0002, + 0x00718178, + 0x00000030, + 0x32032076, + 0x000f0002, + 0x00718179, + 0x00000030, + 0x12030076, + 0x000f0002, + 0x0071817a, + 0x0000003f, + 0xd4830000, + 0x000f0002, + 0x0071817b, + 0x00000049, + 0xd2803b76, + 0x000f0002, + 0x0071817c, + 0x0000000d, + 0xe30dc165, + 0x000f0002, + 0x0071817d, + 0x00000030, + 0x32038076, + 0x000f0002, + 0x0071817e, + 0x00000030, + 0x12028076, + 0x000f0002, + 0x0071817f, + 0x0000003f, + 0xd4810000, + 0x000f0002, + 0x00718180, + 0x0000003f, + 0xd6020000, + 0x000f0002, 
+ 0x00718181, + 0x0000003f, + 0xd1810000, + 0x000f0002, + 0x00718182, + 0x0000000d, + 0xe33fc165, + 0x000f0002, + 0x00718183, + 0x00000030, + 0x32042076, + 0x000f0002, + 0x00718184, + 0x00000030, + 0x12040076, + 0x000f0002, + 0x00718185, + 0x0000003f, + 0xd4820000, + 0x000f0002, + 0x00718186, + 0x00000041, + 0xd48034eb, + 0x000f0002, + 0x00718187, + 0x00000079, + 0xdd01bb6a, + 0x000f0002, + 0x00718188, + 0x00000079, + 0xdd01bb76, + 0x000f0002, + 0x00718189, + 0x0000003f, + 0xd2001803, + 0x000f0002, + 0x0071818a, + 0x0000003f, + 0xd1810000, + 0x000f0002, + 0x0071818b, + 0x00000075, + 0xfd018063, + 0x000f0002, + 0x0071818c, + 0x00000098, + 0x80693b64, + 0x000f0002, + 0x0071818d, + 0x00000051, + 0xf20000e4, + 0x000f0002, + 0x0071818e, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x0071818f, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x00718190, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x00718191, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x00718192, + 0x0000002d, + 0xdd04407f, + 0x000f0002, + 0x00718193, + 0x00000018, + 0x3d7f3b76, + 0x000f0002, + 0x00718194, + 0x00000049, + 0xd3c03b38, + 0x000f0002, + 0x00718195, + 0x00000049, + 0xdd003b64, + 0x000f0002, + 0x00718196, + 0x00000051, + 0xf20000e4, + 0x000f0002, + 0x00718197, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x00718198, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x00718199, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x0071819a, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x0071819b, + 0x00000049, + 0xd3a03b38, + 0x000f0002, + 0x0071819c, + 0x00000019, + 0xdd61bb76, + 0x000f0002, + 0x0071819d, + 0x00000049, + 0xdd003b67, + 0x000f0002, + 0x0071819e, + 0x00000075, + 0xf1818263, + 0x000f0002, + 0x0071819f, + 0x00000041, + 0xd48034eb, + 0x000f0002, + 0x007181a0, + 0x00000079, + 0xdd01bb6a, + 0x000f0002, + 0x007181a1, + 0x00000079, + 0xdd01bb76, + 0x000f0002, + 0x007181a2, + 0x0000003f, + 0xd2001803, + 0x000f0002, + 0x007181a3, + 0x0000003f, + 0xd1808000, + 0x000f0002, + 0x007181a4, + 0x00000075, + 0xfd018063, + 0x000f0002, + 0x007181a5, + 0x00000098, + 0x80373b64, + 0x000f0002, + 0x007181a6, + 0x00000051, + 0xf20000e4, + 0x000f0002, + 0x007181a7, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x007181a8, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x007181a9, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007181aa, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007181ab, + 0x0000002d, + 0xdd04407f, + 0x000f0002, + 0x007181ac, + 0x00000018, + 0x3d7f3b76, + 0x000f0002, + 0x007181ad, + 0x00000049, + 0xd3803b38, + 0x000f0002, + 0x007181ae, + 0x00000019, + 0xdd6fbb76, + 0x000f0002, + 0x007181af, + 0x00000049, + 0xdd003b67, + 0x000f0002, + 0x007181b0, + 0x00000075, + 0xf1818263, + 0x000f0002, + 0x007181b1, + 0x00000041, + 0xd48034eb, + 0x000f0002, + 0x007181b2, + 0x00000079, + 0xdd01bb6a, + 0x000f0002, + 0x007181b3, + 0x00000079, + 0xdd01bb63, + 0x000f0002, + 0x007181b4, + 0x00000075, + 0xfd018063, + 0x000f0002, + 0x007181b5, + 0x00000098, + 0x80173b64, + 0x000f0002, + 0x007181b6, + 0x00000049, + 0xde403b64, + 0x000f0002, + 0x007181b7, + 0x0000003f, + 0xd6800184, + 0x000f0002, + 0x007181b8, + 0x0000003f, + 0xd6800001, + 0x000f0002, + 0x007181b9, + 0x00000035, + 0xd68000ed, + 0x000f0002, + 0x007181ba, + 0x00000018, + 0x37ff0081, + 0x000f0002, + 0x007181bb, + 0x0000002d, + 0xdd04407f, + 0x000f0002, + 0x007181bc, + 0x00000018, + 0x3d7f3b76, + 0x000f0002, + 0x007181bd, + 0x00000021, + 0xd20000e4, + 0x000f0002, + 0x007181be, + 0x00000019, + 0xd3ef7b7e, + 0x000f0002, + 0x007181bf, + 0x00000075, + 0xf1818263, + 0x000f0002, + 0x007181c0, + 
0x0000003f, + 0xd6800000, + 0x000f0002, + 0x007181c1, + 0x0000003f, + 0xdd040004, + 0x000f0002, + 0x007181c2, + 0x0000003f, + 0xdd180001, + 0x000f0002, + 0x007181c3, + 0x00000069, + 0xdd003b6d, + 0x000f0002, + 0x007181c4, + 0x00000069, + 0xdd003b6d, + 0x000f0002, + 0x007181c5, + 0x0000003f, + 0xdd000000, + 0x000f0002, + 0x007181c6, + 0x0000002d, + 0xdd540180, + 0x000f0002, + 0x007181c7, + 0x00000079, + 0xf3e08076, + 0x000f0002, + 0x007181c8, + 0x00000049, + 0xd600367a, + 0x000f0002, + 0x007181c9, + 0x00000079, + 0xdd01fb76, + 0x000f0002, + 0x007181ca, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181cb, + 0x00000049, + 0xdd003b03, + 0x000f0002, + 0x007181cc, + 0x00000059, + 0xfd003b76, + 0x000f0002, + 0x007181cd, + 0x0000003f, + 0xdd801c03, + 0x000f0002, + 0x007181ce, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181cf, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181d0, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007181d1, + 0x0000002d, + 0xdd06027f, + 0x000f0002, + 0x007181d2, + 0x00000018, + 0x1d7f3d7a, + 0x000f0002, + 0x007181d3, + 0x00000031, + 0xd483886b, + 0x000f0002, + 0x007181d4, + 0x00000079, + 0xdd01bb6a, + 0x000f0002, + 0x007181d5, + 0x00000079, + 0xf1819076, + 0x000f0002, + 0x007181d6, + 0x00000075, + 0xfd018063, + 0x000f0002, + 0x007181d7, + 0x00000098, + 0x8053bb76, + 0x000f0002, + 0x007181d8, + 0x00000019, + 0xdd7f7b79, + 0x000f0002, + 0x007181d9, + 0x00000075, + 0xf1818263, + 0x000f0002, + 0x007181da, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181db, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181dc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181dd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181f9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181fa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181fb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181fc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181fd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007181fe, + 0x0000003f, + 0xdd800203, + 0x000f0002, + 0x007181ff, + 0x00000049, + 0xd1843b02, 
+ 0x000f0002, + 0x00718200, + 0x00000049, + 0xdd003b03, + 0x000f0002, + 0x00718201, + 0x00000069, + 0xdd003b7a, + 0x000f0002, + 0x00718202, + 0x00000049, + 0xdd003b79, + 0x000f0002, + 0x00718203, + 0x00000065, + 0xf1800263, + 0x000f0002, + 0x00718204, + 0x00000018, + 0x7d7d3b76, + 0x000f0002, + 0x00718205, + 0x00000009, + 0xcf813d7a, + 0x000f0002, + 0x00718206, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718207, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718208, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718209, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071820a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071820b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071820c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071820d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071820e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071820f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718210, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718211, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718212, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718213, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718214, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718215, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718216, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718217, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718218, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718219, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071821a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071821b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071821c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071821d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071821e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071821f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718220, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718221, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718222, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718223, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718224, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718225, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718226, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718227, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718228, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718229, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071822a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071822b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071822c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071822d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071822e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071822f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718230, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718231, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718232, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718233, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718234, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718235, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718236, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718237, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718238, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718239, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071823a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071823b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071823c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071823d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071823e, + 0x00000000, + 0x00000000, + 0x000f0002, + 
0x0071823f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718240, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718241, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718242, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718243, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718244, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718245, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718246, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718247, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718248, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718249, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071824a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071824b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071824c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071824d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071824e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071824f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718250, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718251, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718252, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718253, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718254, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718255, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718256, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718257, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718258, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718259, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071825a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071825b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071825c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071825d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071825e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071825f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718260, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718261, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718262, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718263, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718264, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718265, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718266, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718267, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718268, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718269, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071826a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071826b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071826c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071826d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071826e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071826f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718270, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718271, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718272, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718273, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718274, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718275, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718276, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718277, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718278, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718279, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071827a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071827b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071827c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071827d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071827e, + 0x00000000, 
+ 0x00000000, + 0x000f0002, + 0x0071827f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718280, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718281, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718282, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718283, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718284, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718285, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718286, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718287, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718288, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718289, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071828a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071828b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071828c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071828d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071828e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071828f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718290, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718291, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718292, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718293, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718294, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718295, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718296, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718297, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718298, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718299, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071829a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071829b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071829c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071829d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071829e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071829f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182a9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182aa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ab, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ac, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ad, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ae, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182af, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182b9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ba, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182bb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182bc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182bd, + 0x00000000, + 0x00000000, + 
0x000f0002, + 0x007182be, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182bf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182c9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ca, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182cb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182cc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182cd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ce, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182cf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182d9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182da, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182db, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182dc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182dd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182f9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182fa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182fb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182fc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182fd, 
+ 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182fe, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007182ff, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718300, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718301, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718302, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718303, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718304, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718305, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718306, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718307, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718308, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718309, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071830a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071830b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071830c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071830d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071830e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071830f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718310, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718311, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718312, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718313, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718314, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718315, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718316, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718317, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718318, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718319, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071831a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071831b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071831c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071831d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071831e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071831f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718320, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718321, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718322, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718323, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718324, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718325, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718326, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718327, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718328, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718329, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071832a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071832b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071832c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071832d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071832e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071832f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718330, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718331, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718332, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718333, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718334, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718335, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718336, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718337, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718338, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718339, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071833a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071833b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071833c, + 0x00000000, + 
0x00000000, + 0x000f0002, + 0x0071833d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071833e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071833f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718340, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718341, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718342, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718343, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718344, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718345, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718346, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718347, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718348, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718349, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071834a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071834b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071834c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071834d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071834e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071834f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718350, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718351, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718352, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718353, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718354, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718355, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718356, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718357, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718358, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718359, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071835a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071835b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071835c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071835d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071835e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071835f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718360, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718361, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718362, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718363, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718364, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718365, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718366, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718367, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718368, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718369, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071836a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071836b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071836c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071836d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071836e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071836f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718370, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718371, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718372, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718373, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718374, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718375, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718376, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718377, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718378, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718379, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071837a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071837b, + 0x00000000, + 0x00000000, + 0x000f0002, 
+ 0x0071837c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071837d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071837e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071837f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718380, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718381, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718382, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718383, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718384, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718385, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718386, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718387, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718388, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718389, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071838a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071838b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071838c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071838d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071838e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071838f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718390, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718391, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718392, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718393, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718394, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718395, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718396, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718397, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718398, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718399, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071839a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071839b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071839c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071839d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071839e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071839f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183a9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183aa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ab, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ac, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ad, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ae, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183af, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183b9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ba, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183bb, + 
0x00000000, + 0x00000000, + 0x000f0002, + 0x007183bc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183bd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183be, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183bf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183c9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ca, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183cb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183cc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183cd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ce, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183cf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183d9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183da, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183db, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183dc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183dd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183f9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183fa, + 0x00000000, + 0x00000000, 
+ 0x000f0002, + 0x007183fb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183fc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183fd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183fe, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007183ff, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718400, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718401, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718402, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718403, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718404, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718405, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718406, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718407, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718408, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718409, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071840a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071840b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071840c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071840d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071840e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071840f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718410, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718411, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718412, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718413, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718414, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718415, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718416, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718417, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718418, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718419, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071841a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071841b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071841c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071841d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071841e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071841f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718420, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718421, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718422, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718423, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718424, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718425, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718426, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718427, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718428, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718429, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071842a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071842b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071842c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071842d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071842e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071842f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718430, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718431, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718432, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718433, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718434, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718435, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718436, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718437, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718438, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718439, + 0x00000000, + 0x00000000, + 0x000f0002, + 
0x0071843a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071843b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071843c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071843d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071843e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071843f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718440, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718441, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718442, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718443, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718444, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718445, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718446, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718447, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718448, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718449, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071844a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071844b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071844c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071844d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071844e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071844f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718450, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718451, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718452, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718453, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718454, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718455, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718456, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718457, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718458, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718459, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071845a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071845b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071845c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071845d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071845e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071845f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718460, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718461, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718462, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718463, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718464, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718465, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718466, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718467, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718468, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718469, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071846a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071846b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071846c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071846d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071846e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071846f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718470, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718471, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718472, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718473, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718474, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718475, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718476, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718477, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718478, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718479, + 0x00000000, 
+ 0x00000000, + 0x000f0002, + 0x0071847a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071847b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071847c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071847d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071847e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071847f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718480, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718481, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718482, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718483, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718484, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718485, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718486, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718487, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718488, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718489, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071848a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071848b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071848c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071848d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071848e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071848f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718490, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718491, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718492, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718493, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718494, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718495, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718496, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718497, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718498, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718499, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071849a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071849b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071849c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071849d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071849e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071849f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184a9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184aa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ab, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ac, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ad, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ae, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184af, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184b8, + 0x00000000, + 0x00000000, + 
0x000f0002, + 0x007184b9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ba, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184bb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184bc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184bd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184be, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184bf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184c9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ca, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184cb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184cc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184cd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ce, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184cf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184d9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184da, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184db, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184dc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184dd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f8, 
+ 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184f9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184fa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184fb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184fc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184fd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184fe, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007184ff, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718500, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718501, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718502, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718503, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718504, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718505, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718506, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718507, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718508, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718509, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071850a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071850b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071850c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071850d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071850e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071850f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718510, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718511, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718512, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718513, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718514, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718515, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718516, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718517, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718518, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718519, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071851a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071851b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071851c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071851d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071851e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071851f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718520, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718521, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718522, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718523, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718524, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718525, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718526, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718527, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718528, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718529, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071852a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071852b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071852c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071852d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071852e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071852f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718530, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718531, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718532, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718533, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718534, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718535, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718536, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718537, + 0x00000000, + 
0x00000000, + 0x000f0002, + 0x00718538, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718539, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071853a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071853b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071853c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071853d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071853e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071853f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718540, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718541, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718542, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718543, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718544, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718545, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718546, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718547, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718548, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718549, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071854a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071854b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071854c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071854d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071854e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071854f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718550, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718551, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718552, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718553, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718554, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718555, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718556, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718557, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718558, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718559, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071855a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071855b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071855c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071855d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071855e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071855f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718560, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718561, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718562, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718563, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718564, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718565, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718566, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718567, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718568, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718569, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071856a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071856b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071856c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071856d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071856e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071856f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718570, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718571, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718572, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718573, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718574, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718575, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718576, + 0x00000000, + 0x00000000, + 0x000f0002, 
+ 0x00718577, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718578, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718579, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071857a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071857b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071857c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071857d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071857e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071857f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718580, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718581, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718582, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718583, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718584, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718585, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718586, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718587, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718588, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718589, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071858a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071858b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071858c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071858d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071858e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071858f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718590, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718591, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718592, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718593, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718594, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718595, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718596, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718597, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718598, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718599, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071859a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071859b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071859c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071859d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071859e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071859f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185a9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185aa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ab, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ac, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ad, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ae, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185af, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b6, + 
0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185b9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ba, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185bb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185bc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185bd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185be, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185bf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185c9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ca, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185cb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185cc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185cd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ce, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185cf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185d9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185da, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185db, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185dc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185dd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f5, + 0x00000000, + 0x00000000, 
+ 0x000f0002, + 0x007185f6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185f9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185fa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185fb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185fc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185fd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185fe, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007185ff, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718600, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718601, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718602, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718603, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718604, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718605, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718606, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718607, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718608, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718609, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071860a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071860b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071860c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071860d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071860e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071860f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718610, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718611, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718612, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718613, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718614, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718615, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718616, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718617, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718618, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718619, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071861a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071861b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071861c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071861d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071861e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071861f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718620, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718621, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718622, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718623, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718624, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718625, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718626, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718627, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718628, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718629, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071862a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071862b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071862c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071862d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071862e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071862f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718630, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718631, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718632, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718633, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718634, + 0x00000000, + 0x00000000, + 0x000f0002, + 
0x00718635, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718636, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718637, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718638, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718639, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071863a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071863b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071863c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071863d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071863e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071863f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718640, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718641, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718642, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718643, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718644, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718645, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718646, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718647, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718648, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718649, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071864a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071864b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071864c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071864d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071864e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071864f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718650, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718651, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718652, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718653, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718654, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718655, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718656, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718657, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718658, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718659, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071865a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071865b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071865c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071865d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071865e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071865f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718660, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718661, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718662, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718663, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718664, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718665, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718666, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718667, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718668, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718669, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071866a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071866b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071866c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071866d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071866e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071866f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718670, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718671, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718672, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718673, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718674, + 0x00000000, 
+ 0x00000000, + 0x000f0002, + 0x00718675, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718676, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718677, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718678, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718679, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071867a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071867b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071867c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071867d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071867e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071867f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718680, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718681, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718682, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718683, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718684, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718685, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718686, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718687, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718688, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718689, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071868a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071868b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071868c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071868d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071868e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071868f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718690, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718691, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718692, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718693, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718694, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718695, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718696, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718697, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718698, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718699, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071869a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071869b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071869c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071869d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071869e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071869f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186a9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186aa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ab, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ac, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ad, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ae, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186af, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b3, + 0x00000000, + 0x00000000, + 
0x000f0002, + 0x007186b4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186b9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ba, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186bb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186bc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186bd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186be, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186bf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186c9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ca, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186cb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186cc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186cd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ce, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186cf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186d9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186da, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186db, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186dc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186dd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186de, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186df, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186e9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ea, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186eb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ec, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ed, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ee, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ef, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f3, 
+ 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186f9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186fa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186fb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186fc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186fd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186fe, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007186ff, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718700, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718701, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718702, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718703, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718704, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718705, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718706, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718707, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718708, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718709, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071870a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071870b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071870c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071870d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071870e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071870f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718710, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718711, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718712, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718713, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718714, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718715, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718716, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718717, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718718, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718719, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071871a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071871b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071871c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071871d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071871e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071871f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718720, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718721, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718722, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718723, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718724, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718725, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718726, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718727, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718728, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718729, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071872a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071872b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071872c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071872d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071872e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071872f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718730, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718731, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718732, + 0x00000000, + 
0x00000000, + 0x000f0002, + 0x00718733, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718734, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718735, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718736, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718737, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718738, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718739, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071873a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071873b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071873c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071873d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071873e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071873f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718740, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718741, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718742, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718743, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718744, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718745, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718746, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718747, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718748, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718749, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071874a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071874b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071874c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071874d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071874e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071874f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718750, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718751, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718752, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718753, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718754, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718755, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718756, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718757, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718758, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718759, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071875a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071875b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071875c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071875d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071875e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071875f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718760, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718761, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718762, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718763, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718764, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718765, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718766, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718767, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718768, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718769, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071876a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071876b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071876c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071876d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071876e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071876f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718770, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718771, + 0x00000000, + 0x00000000, + 0x000f0002, 
+ 0x00718772, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718773, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718774, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718775, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718776, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718777, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718778, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718779, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071877a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071877b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071877c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071877d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071877e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071877f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718780, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718781, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718782, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718783, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718784, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718785, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718786, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718787, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718788, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718789, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071878a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071878b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071878c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071878d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071878e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071878f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718790, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718791, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718792, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718793, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718794, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718795, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718796, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718797, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718798, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x00718799, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071879a, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071879b, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071879c, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071879d, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071879e, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x0071879f, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a1, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187a9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187aa, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187ab, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187ac, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187ad, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187ae, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187af, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b0, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b1, + 
0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b2, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b3, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b4, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b5, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b6, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b7, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b8, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187b9, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187ba, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187bb, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187bc, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187bd, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187be, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187bf, + 0x00000000, + 0x00000000, + 0x000f0002, + 0x007187c0, + 0x00000079, + 0xfd609076, + 0x000f0002, + 0x007187c1, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007187c2, + 0x0000003d, + 0xf780006f, + 0x000f0002, + 0x007187c3, + 0x0000003d, + 0xf780006f, +/* FINISH INIT Descriptor */ + 0x000f0002, + 0x807187c4, + 0x0000003d, + 0xf780006f, +}; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 5ee14764fd74..30b1cca8144c 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -64,8 +64,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.78" -#define DRV_MODULE_RELDATE "July 11, 2007" +#define DRV_MODULE_VERSION "3.83" +#define DRV_MODULE_RELDATE "October 10, 2007" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -198,6 +198,10 @@ static struct pci_device_id tg3_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -312,6 +316,16 @@ static u32 tg3_read32(struct tg3 *tp, u32 off) return (readl(tp->regs + off)); } +static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->aperegs + off); +} + +static u32 tg3_ape_read32(struct tg3 *tp, u32 off) +{ + return (readl(tp->aperegs + off)); +} + static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; @@ -498,6 +512,73 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) spin_unlock_irqrestore(&tp->indirect_lock, flags); } +static void tg3_ape_lock_init(struct tg3 *tp) +{ + int i; + + /* Make sure the driver hasn't any stale locks. */ + for (i = 0; i < 8; i++) + tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i, + APE_LOCK_GRANT_DRIVER); +} + +static int tg3_ape_lock(struct tg3 *tp, int locknum) +{ + int i, off; + int ret = 0; + u32 status; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; + } + + off = 4 * locknum; + + tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER); + + /* Wait for up to 1 millisecond to acquire lock. 
*/ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off); + if (status == APE_LOCK_GRANT_DRIVER) + break; + udelay(10); + } + + if (status != APE_LOCK_GRANT_DRIVER) { + /* Revoke the lock request. */ + tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, + APE_LOCK_GRANT_DRIVER); + + ret = -EBUSY; + } + + return ret; +} + +static void tg3_ape_unlock(struct tg3 *tp, int locknum) +{ + int off; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_MEM: + break; + default: + return; + } + + off = 4 * locknum; + tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER); +} + static void tg3_disable_ints(struct tg3 *tp) { tw32(TG3PCI_MISC_HOST_CTRL, @@ -574,7 +655,7 @@ static void tg3_restart_ints(struct tg3 *tp) static inline void tg3_netif_stop(struct tg3 *tp) { tp->dev->trans_start = jiffies; /* prevent tx timeout */ - netif_poll_disable(tp->dev); + napi_disable(&tp->napi); netif_tx_disable(tp->dev); } @@ -585,7 +666,7 @@ static inline void tg3_netif_start(struct tg3 *tp) * so long as all callers are assured to have free tx slots * (such as after tg3_init_hw) */ - netif_poll_enable(tp->dev); + napi_enable(&tp->napi); tp->hw_status->status |= SD_STATUS_UPDATED; tg3_enable_ints(tp); } @@ -595,7 +676,8 @@ static void tg3_switch_clocks(struct tg3 *tp) u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); u32 orig_clock_ctrl; - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) + if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) return; orig_clock_ctrl = clock_ctrl; @@ -1400,6 +1482,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | CLOCK_CTRL_PWRDOWN_PLL133, 40); } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || + (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { /* do nothing */ } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && @@ -1444,7 +1527,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) } if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) tg3_power_down_phy(tp); tg3_frob_aux_power(tp); @@ -3471,11 +3555,9 @@ next_pkt_nopost: return received; } -static int tg3_poll(struct net_device *netdev, int *budget) +static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) { - struct tg3 *tp = netdev_priv(netdev); struct tg3_hw_status *sblk = tp->hw_status; - int done; /* handle link change and other phy events */ if (!(tp->tg3_flags & @@ -3493,44 +3575,59 @@ static int tg3_poll(struct net_device *netdev, int *budget) /* run TX completion thread */ if (sblk->idx[0].tx_consumer != tp->tx_cons) { tg3_tx(tp); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { - netif_rx_complete(netdev); - schedule_work(&tp->reset_task); - return 0; - } + if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + return work_done; } /* run RX thread, within the bounds set by NAPI. 
* All RX "locking" is done by ensuring outside - * code synchronizes with dev->poll() + * code synchronizes with tg3->napi.poll() */ - if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { - int orig_budget = *budget; - int work_done; + if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) + work_done += tg3_rx(tp, budget - work_done); - if (orig_budget > netdev->quota) - orig_budget = netdev->quota; + return work_done; +} - work_done = tg3_rx(tp, orig_budget); +static int tg3_poll(struct napi_struct *napi, int budget) +{ + struct tg3 *tp = container_of(napi, struct tg3, napi); + int work_done = 0; + struct tg3_hw_status *sblk = tp->hw_status; - *budget -= work_done; - netdev->quota -= work_done; - } + while (1) { + work_done = tg3_poll_work(tp, work_done, budget); - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { - tp->last_tag = sblk->status_tag; - rmb(); - } else - sblk->status &= ~SD_STATUS_UPDATED; + if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; - /* if no more work, tell net stack and NIC we're done */ - done = !tg3_has_work(tp); - if (done) { - netif_rx_complete(netdev); - tg3_restart_ints(tp); + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + /* tp->last_tag is used in tg3_restart_ints() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tp->last_tag = sblk->status_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + if (likely(!tg3_has_work(tp))) { + netif_rx_complete(tp->dev, napi); + tg3_restart_ints(tp); + break; + } } - return (done ? 0 : 1); + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + netif_rx_complete(tp->dev, napi); + schedule_work(&tp->reset_task); + return work_done; } static void tg3_irq_quiesce(struct tg3 *tp) @@ -3577,7 +3674,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(dev); /* schedule NAPI poll */ + netif_rx_schedule(dev, &tp->napi); return IRQ_HANDLED; } @@ -3602,7 +3699,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(dev); /* schedule NAPI poll */ + netif_rx_schedule(dev, &tp->napi); return IRQ_RETVAL(1); } @@ -3644,7 +3741,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) sblk->status &= ~SD_STATUS_UPDATED; if (likely(tg3_has_work(tp))) { prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); - netif_rx_schedule(dev); /* schedule NAPI poll */ + netif_rx_schedule(dev, &tp->napi); } else { /* No work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write @@ -3690,7 +3787,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (tg3_irq_sync(tp)) goto out; - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &tp->napi)) { prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); /* Update last_tag to mark that this status has been * seen. Because interrupt may be shared, we may be @@ -3698,7 +3795,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * if tg3_poll() is not scheduled. 
*/ tp->last_tag = sblk->status_tag; - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &tp->napi); } out: return IRQ_RETVAL(handled); @@ -3737,7 +3834,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) tg3_full_unlock(tp); del_timer_sync(&tp->timer); tp->irq_sync = 0; - netif_poll_enable(tp->dev); + napi_enable(&tp->napi); dev_close(tp->dev); tg3_full_lock(tp, 0); } @@ -3932,7 +4029,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->poll inside of a software + * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ @@ -4087,7 +4184,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->poll inside of a software + * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ @@ -4732,6 +4829,80 @@ static void tg3_disable_nvram_access(struct tg3 *tp) } } +static void tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int i; + u32 apedata; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (apedata != APE_FW_STATUS_READY) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. */ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); +} + /* tp->lock is held. */ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) { @@ -4759,6 +4930,10 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) break; }; } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. 
*/ @@ -4780,6 +4955,9 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) break; }; } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -4847,6 +5025,68 @@ static int tg3_poll_fw(struct tg3 *tp) return 0; } +/* Save PCI command register before chip reset */ +static void tg3_save_pci_state(struct tg3 *tp) +{ + u32 val; + + pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val); + tp->pci_cmd = val; +} + +/* Restore PCI state after chip reset */ +static void tg3_restore_pci_state(struct tg3 *tp) +{ + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. */ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); + + /* Make sure PCI-X relaxed ordering bit is clear. */ + if (tp->pcix_cap) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } +} + static void tg3_stop_fw(struct tg3 *); /* tp->lock is held. */ @@ -4863,9 +5103,17 @@ static int tg3_chip_reset(struct tg3 *tp) */ tp->nvram_lock_cnt = 0; + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) tw32(GRC_FASTBOOT_PC, 0); /* @@ -4961,50 +5209,14 @@ static int tg3_chip_reset(struct tg3 *tp) pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); } - /* Re-enable indirect register accesses. */ - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - - /* Set MAX PCI retry to zero. */ - val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) - val |= PCISTATE_RETRY_SAME_DMA; - pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); - - pci_restore_state(tp->pdev); + tg3_restore_pci_state(tp); tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; - /* Make sure PCI-X relaxed ordering bit is clear. 
*/ - pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); - val &= ~PCIX_CAPS_RELAXED_ORDERING; - pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); - - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { - u32 val; - - /* Chip reset on 5780 will reset MSI enable bit, - * so need to restore it. - */ - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - u16 ctrl; - - pci_read_config_word(tp->pdev, - tp->msi_cap + PCI_MSI_FLAGS, - &ctrl); - pci_write_config_word(tp->pdev, - tp->msi_cap + PCI_MSI_FLAGS, - ctrl | PCI_MSI_FLAGS_ENABLE); - val = tr32(MSGINT_MODE); - tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); - } - + val = 0; + if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) val = tr32(MEMARB_MODE); - tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); - - } else - tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { tg3_stop_fw(tp); @@ -5014,7 +5226,7 @@ static int tg3_chip_reset(struct tg3 *tp) tw32(GRC_MODE, tp->grc_mode); if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { - u32 val = tr32(0xc4); + val = tr32(0xc4); tw32(0xc4, val | (1 << 15)); } @@ -5043,7 +5255,7 @@ static int tg3_chip_reset(struct tg3 *tp) if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { - u32 val = tr32(0x7c00); + val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); } @@ -5069,7 +5281,8 @@ static int tg3_chip_reset(struct tg3 *tp) /* tp->lock is held. */ static void tg3_stop_fw(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { u32 val; int i; @@ -6126,14 +6339,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_write_sig_legacy(tp, RESET_KIND_INIT); + if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + } + /* This works around an issue with Athlon chipsets on * B3 tigon3 silicon. This bit has no effect on any * other revision. But do not set this on PCI Express - * chips. + * chips and don't even touch the clocks if the CPMU is present. */ - if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) - tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; - tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { + if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; + tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { @@ -6142,6 +6363,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3PCI_PCISTATE, val); } + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + /* Allow reads and writes to the + * APE register and memory space. + */ + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR; + tw32(TG3PCI_PCISTATE, val); + } + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { /* Enable some hw fixes. */ val = tr32(TG3PCI_MSI_DATA); @@ -6158,10 +6389,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - /* This value is determined during the probe time DMA - * engine test, tg3_test_dma. - */ - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + /* This value is determined during the probe time DMA + * engine test, tg3_test_dma. 
+ */ + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | GRC_MODE_4X_NIC_SEND_RINGS | @@ -6395,6 +6629,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | RDMAC_MODE_LNGREAD_ENAB); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + /* If statement applies to 5705 and 5750 PCI devices only */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || @@ -6556,22 +6795,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Enable host coalescing bug fix */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)) val |= (1 << 29); tw32_f(WDMAC_MODE, val); udelay(40); - if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { - val = tr32(TG3PCI_X_CAPS); + if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { - val &= ~PCIX_CAPS_BURST_MASK; - val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK); - val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; } - tw32(TG3PCI_X_CAPS, val); + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); } tw32_f(RDMAC_MODE, rdmac_mode); @@ -6580,7 +6825,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); - tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); @@ -6607,7 +6858,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(100); tp->rx_mode = RX_MODE_ENABLE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; tw32_f(MAC_RX_MODE, tp->rx_mode); @@ -6737,6 +6989,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) break; }; + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); return 0; @@ -7104,6 +7361,10 @@ static int tg3_open(struct net_device *dev) } else if (pci_enable_msi(tp->pdev) == 0) { u32 msi_mode; + /* Hardware bug - MSI won't work if INTX disabled. 
*/ + if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) + pci_intx(tp->pdev, 1); + msi_mode = tr32(MSGINT_MODE); tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); tp->tg3_flags2 |= TG3_FLG2_USING_MSI; @@ -7120,6 +7381,8 @@ static int tg3_open(struct net_device *dev) return err; } + napi_enable(&tp->napi); + tg3_full_lock(tp, 0); err = tg3_init_hw(tp, 1); @@ -7147,6 +7410,7 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); if (err) { + napi_disable(&tp->napi); free_irq(tp->pdev->irq, dev); if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { pci_disable_msi(tp->pdev); @@ -7172,6 +7436,8 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); + napi_disable(&tp->napi); + return err; } @@ -7433,6 +7699,7 @@ static int tg3_close(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); + napi_disable(&tp->napi); cancel_work_sync(&tp->reset_task); netif_stop_queue(dev); @@ -7968,7 +8235,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, buf = data; if (b_offset || odd_len) { buf = kmalloc(len, GFP_KERNEL); - if (buf == 0) + if (!buf) return -ENOMEM; if (b_offset) memcpy(buf, &start, 4); @@ -8048,7 +8315,8 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) tp->link_config.autoneg = cmd->autoneg; if (cmd->autoneg == AUTONEG_ENABLE) { - tp->link_config.advertising = cmd->advertising; + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); tp->link_config.speed = SPEED_INVALID; tp->link_config.duplex = DUPLEX_INVALID; } else { @@ -8136,10 +8404,12 @@ static int tg3_set_tso(struct net_device *dev, u32 value) } if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { - if (value) + if (value) { dev->features |= NETIF_F_TSO6; - else - dev->features &= ~NETIF_F_TSO6; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + dev->features |= NETIF_F_TSO_ECN; + } else + dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); } return ethtool_op_set_tso(dev, value); } @@ -8317,7 +8587,9 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ethtool_op_set_tx_ipv6_csum(dev, data); else ethtool_op_set_tx_csum(dev, data); @@ -8325,14 +8597,16 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) return 0; } -static int tg3_get_stats_count (struct net_device *dev) +static int tg3_get_sset_count (struct net_device *dev, int sset) { - return TG3_NUM_STATS; -} - -static int tg3_get_test_count (struct net_device *dev) -{ - return TG3_NUM_TEST; + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: + return TG3_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) @@ -8397,7 +8671,7 @@ static void tg3_get_ethtool_stats (struct net_device *dev, static int tg3_test_nvram(struct tg3 *tp) { u32 *buf, csum, magic; - int i, j, err = 0, size; + int i, j, k, err = 0, size; if (tg3_nvram_read_swab(tp, 0, &magic) != 0) return -EIO; @@ -8451,7 +8725,6 @@ static int tg3_test_nvram(struct tg3 *tp) u8 data[NVRAM_SELFBOOT_DATA_SIZE]; u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; u8 *buf8 = (u8 *) buf; - int j, k; /* Separate the parity bits and the data bytes. 
*/ for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { @@ -8812,7 +9085,9 @@ static int tg3_test_memory(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) mem_tbl = mem_tbl_5755; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) mem_tbl = mem_tbl_5906; @@ -9009,6 +9284,7 @@ out: static int tg3_test_loopback(struct tg3 *tp) { int err = 0; + u32 cpmuctrl = 0; if (!netif_running(tp->dev)) return TG3_LOOPBACK_FAILED; @@ -9017,8 +9293,40 @@ static int tg3_test_loopback(struct tg3 *tp) if (err) return TG3_LOOPBACK_FAILED; + if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { + int i; + u32 status; + + tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER); + + /* Wait for up to 40 microseconds to acquire lock. */ + for (i = 0; i < 4; i++) { + status = tr32(TG3_CPMU_MUTEX_GNT); + if (status == CPMU_MUTEX_GNT_DRIVER) + break; + udelay(10); + } + + if (status != CPMU_MUTEX_GNT_DRIVER) + return TG3_LOOPBACK_FAILED; + + cpmuctrl = tr32(TG3_CPMU_CTRL); + + /* Turn off power management based on link speed. */ + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE); + } + if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) err |= TG3_MAC_LOOPBACK_FAILED; + + if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { + tw32(TG3_CPMU_CTRL, cpmuctrl); + + /* Release the mutex */ + tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); + } + if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) err |= TG3_PHY_LOOPBACK_FAILED; @@ -9257,21 +9565,16 @@ static const struct ethtool_ops tg3_ethtool_ops = { .set_pauseparam = tg3_set_pauseparam, .get_rx_csum = tg3_get_rx_csum, .set_rx_csum = tg3_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = tg3_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = tg3_set_tso, - .self_test_count = tg3_get_test_count, .self_test = tg3_self_test, .get_strings = tg3_get_strings, .phys_id = tg3_phys_id, - .get_stats_count = tg3_get_stats_count, .get_ethtool_stats = tg3_get_ethtool_stats, .get_coalesce = tg3_get_coalesce, .set_coalesce = tg3_set_coalesce, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_sset_count = tg3_get_sset_count, }; static void __devinit tg3_get_eeprom_size(struct tg3 *tp) @@ -9529,6 +9832,81 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) } } +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case 
FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } + + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = 0x100000; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = 0x80000; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = 0x40000; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = 0x20000; + break; + } + } +} + static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) { tp->nvram_jedecnum = JEDEC_ATMEL; @@ -9568,8 +9946,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) tg3_get_5752_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tg3_get_5755_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); else @@ -9647,6 +10028,7 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) if ((tp->tg3_flags & TG3_FLAG_NVRAM) && (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && (tp->tg3_flags2 & TG3_FLG2_FLASH) && + !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr / tp->nvram_pagesize) << @@ -9661,6 +10043,7 @@ static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) if ((tp->tg3_flags & TG3_FLAG_NVRAM) && (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && (tp->tg3_flags2 & TG3_FLG2_FLASH) && + !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * @@ -9881,6 +10264,8 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -10051,8 +10436,12 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; tp->tg3_flags2 |= TG3_FLG2_IS_NIC; } - if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC) + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) + tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; return; } @@ -10169,10 +10558,16 @@ 
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; } + if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) + tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; + if (tp->tg3_flags & TG3_FLAG_WOL_CAP && + nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) + tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; + if (cfg2 & (1 << 17)) tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; @@ -10201,7 +10596,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) * firwmare access to the PHY hardware. */ err = 0; - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || + (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; } else { /* Now read the physical PHY_ID from the chip and verify @@ -10248,6 +10644,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) } if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { u32 bmsr, adv_reg, tg3_ctrl, mask; @@ -10499,6 +10896,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pci_chip_rev_id = (misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { + u32 prod_id_asic_rev; + + pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, + &prod_id_asic_rev); + tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK; + } /* Wrong chip ID in 5752 A0. This code can be removed later * as A0 is not in production. @@ -10618,6 +11022,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; @@ -10637,6 +11043,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; @@ -10654,6 +11062,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; @@ -10694,10 +11104,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) cacheline_sz_reg); } + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); + if (!tp->pcix_cap) { + printk(KERN_ERR PFX "Cannot find PCI-X " + "capability, aborting.\n"); + return -EIO; + } + } + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); - if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { + if (tp->pcix_cap && 
(pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { tp->tg3_flags |= TG3_FLAG_PCIX_MODE; /* If this is a 5700 BX chipset, and we are in PCI-X @@ -10708,7 +11128,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { u32 pm_reg; - u16 pci_cmd; tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; @@ -10716,11 +11135,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * space registers clobbered due to this bug. * So explicitly force the chip into D0 here. */ - pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, + pci_read_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, &pm_reg); pm_reg &= ~PCI_PM_CTRL_STATE_MASK; pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; - pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, + pci_write_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, pm_reg); /* Also, force SERR#/PERR# in PCI command. */ @@ -10818,6 +11239,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_get_eeprom_hw_cfg(tp); + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; + /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). * GPIO1 driven high will bring 5700's external PHY out of reset. * It is also used as eeprom write protect on LOMs. @@ -10884,7 +11319,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; @@ -11027,6 +11464,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->dev->hard_start_xmit = tg3_start_xmit; else @@ -11047,11 +11486,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tp->rx_std_max_post = 8; - /* By default, disable wake-on-lan. User can change this - * using ETHTOOL_SWOL. 
- */ - tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; - if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & PCIE_PWR_MGMT_L1_THRESH_MSK; @@ -11648,8 +12082,10 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case PHY_ID_BCM5780: return "5780"; case PHY_ID_BCM5755: return "5755"; case PHY_ID_BCM5787: return "5787"; + case PHY_ID_BCM5784: return "5784"; case PHY_ID_BCM5756: return "5722/5756"; case PHY_ID_BCM5906: return "5906"; + case PHY_ID_BCM5761: return "5761"; case PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -11807,7 +12243,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_free_res; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #if TG3_VLAN_TAG_USED @@ -11854,7 +12289,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, INIT_WORK(&tp->reset_task, tg3_reset_task); tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); - if (tp->regs == 0UL) { + if (!tp->regs) { printk(KERN_ERR PFX "Cannot map device registers, " "aborting.\n"); err = -ENOMEM; @@ -11874,9 +12309,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, dev->set_mac_address = tg3_set_mac_addr; dev->do_ioctl = tg3_ioctl; dev->tx_timeout = tg3_tx_timeout; - dev->poll = tg3_poll; + netif_napi_add(dev, &tp->napi, tg3_poll, 64); dev->ethtool_ops = &tg3_ethtool_ops; - dev->weight = 64; dev->watchdog_timeo = TG3_TX_TIMEOUT; dev->change_mtu = tg3_change_mtu; dev->irq = pdev->irq; @@ -11954,6 +12388,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) dev->features |= NETIF_F_TSO6; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + dev->features |= NETIF_F_TSO_ECN; } @@ -11978,7 +12414,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, */ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - pci_save_state(tp->pdev); tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); } @@ -11995,7 +12430,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) dev->features |= NETIF_F_IPV6_CSUM; tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; @@ -12007,11 +12444,27 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tg3_init_coal(tp); - /* Now that we have fully setup the chip, save away a snapshot - * of the PCI config space. We need to restore this after - * GRC_MISC_CFG core clock resets and some resume events. 
- */ - pci_save_state(tp->pdev); + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + printk(KERN_ERR PFX "Cannot find proper PCI device " + "base address for APE, aborting.\n"); + err = -ENODEV; + goto err_out_iounmap; + } + + tg3reg_base = pci_resource_start(pdev, 2); + tg3reg_len = pci_resource_len(pdev, 2); + + tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len); + if (tp->aperegs == 0UL) { + printk(KERN_ERR PFX "Cannot map APE registers, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + tg3_ape_lock_init(tp); + } pci_set_drvdata(pdev, dev); @@ -12019,7 +12472,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if (err) { printk(KERN_ERR PFX "Cannot register net device, " "aborting.\n"); - goto err_out_iounmap; + goto err_out_apeunmap; } printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ", @@ -12052,6 +12505,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, return 0; +err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + err_out_iounmap: if (tp->regs) { iounmap(tp->regs); @@ -12079,6 +12538,10 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) flush_scheduled_work(); unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } if (tp->regs) { iounmap(tp->regs); tp->regs = NULL; @@ -12096,6 +12559,12 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) struct tg3 *tp = netdev_priv(dev); int err; + /* PCI register 4 needs to be saved whether netif_running() or not. + * MSI address and data need to be saved if using MSI and + * netif_running(). + */ + pci_save_state(pdev); + if (!netif_running(dev)) return 0; @@ -12115,9 +12584,6 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; tg3_full_unlock(tp); - /* Save MSI address and data for resume. */ - pci_save_state(pdev); - err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); if (err) { tg3_full_lock(tp, 0); @@ -12145,15 +12611,20 @@ static int tg3_resume(struct pci_dev *pdev) struct tg3 *tp = netdev_priv(dev); int err; + pci_restore_state(tp->pdev); + if (!netif_running(dev)) return 0; - pci_restore_state(tp->pdev); - err = tg3_set_power_state(tp, PCI_D0); if (err) return err; + /* Hardware bug - MSI won't work if INTX disabled. 
*/ + if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && + (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) + pci_intx(tp->pdev, 1); + netif_device_attach(dev); tg3_full_lock(tp, 0); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index d84e75e7365d..6dbdad2b8f88 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -57,32 +57,7 @@ #define TG3PCI_IRQ_PIN 0x0000003d #define TG3PCI_MIN_GNT 0x0000003e #define TG3PCI_MAX_LAT 0x0000003f -#define TG3PCI_X_CAPS 0x00000040 -#define PCIX_CAPS_RELAXED_ORDERING 0x00020000 -#define PCIX_CAPS_SPLIT_MASK 0x00700000 -#define PCIX_CAPS_SPLIT_SHIFT 20 -#define PCIX_CAPS_BURST_MASK 0x000c0000 -#define PCIX_CAPS_BURST_SHIFT 18 -#define PCIX_CAPS_MAX_BURST_CPIOB 2 -#define TG3PCI_PM_CAP_PTR 0x00000041 -#define TG3PCI_X_COMMAND 0x00000042 -#define TG3PCI_X_STATUS 0x00000044 -#define TG3PCI_PM_CAP_ID 0x00000048 -#define TG3PCI_VPD_CAP_PTR 0x00000049 -#define TG3PCI_PM_CAPS 0x0000004a -#define TG3PCI_PM_CTRL_STAT 0x0000004c -#define TG3PCI_BR_SUPP_EXT 0x0000004e -#define TG3PCI_PM_DATA 0x0000004f -#define TG3PCI_VPD_CAP_ID 0x00000050 -#define TG3PCI_MSI_CAP_PTR 0x00000051 -#define TG3PCI_VPD_ADDR_FLAG 0x00000052 -#define VPD_ADDR_FLAG_WRITE 0x00008000 -#define TG3PCI_VPD_DATA 0x00000054 -#define TG3PCI_MSI_CAP_ID 0x00000058 -#define TG3PCI_NXT_CAP_PTR 0x00000059 -#define TG3PCI_MSI_CTRL 0x0000005a -#define TG3PCI_MSI_ADDR_LOW 0x0000005c -#define TG3PCI_MSI_ADDR_HIGH 0x00000060 +/* 0x40 --> 0x64 unused */ #define TG3PCI_MSI_DATA 0x00000064 /* 0x66 --> 0x68 unused */ #define TG3PCI_MISC_HOST_CTRL 0x00000068 @@ -133,6 +108,7 @@ #define CHIPREV_ID_5752_A1 0x6001 #define CHIPREV_ID_5714_A2 0x9002 #define CHIPREV_ID_5906_A1 0xc001 +#define CHIPREV_ID_5784_A0 0x5784000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -146,6 +122,9 @@ #define ASIC_REV_5755 0x0a #define ASIC_REV_5787 0x0b #define ASIC_REV_5906 0x0c +#define ASIC_REV_USE_PROD_ID_REG 0x0f +#define ASIC_REV_5784 0x5784 +#define ASIC_REV_5761 0x5761 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -213,6 +192,8 @@ #define PCISTATE_ROM_RETRY_ENABLE 0x00000040 #define PCISTATE_FLAT_VIEW 0x00000100 #define PCISTATE_RETRY_SAME_DMA 0x00002000 +#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 +#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 #define TG3PCI_CLOCK_CTRL 0x00000074 #define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 #define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 @@ -239,7 +220,9 @@ #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 #define DUAL_MAC_CTRL_CH_MASK 0x00000003 #define DUAL_MAC_CTRL_ID 0x00000004 -/* 0xbc --> 0x100 unused */ +#define TG3PCI_PRODID_ASICREV 0x000000bc +#define PROD_ID_ASIC_REV_MASK 0x0fffffff +/* 0xc0 --> 0x100 unused */ /* 0x100 --> 0x200 unused */ @@ -683,6 +666,7 @@ #define SNDDATAC_MODE 0x00001000 #define SNDDATAC_MODE_RESET 0x00000001 #define SNDDATAC_MODE_ENABLE 0x00000002 +#define SNDDATAC_MODE_CDELAY 0x00000010 /* 0x1004 --> 0x1400 unused */ /* Send BD ring selector */ @@ -865,7 +849,20 @@ #define RCVLSC_MODE_ATTN_ENABLE 0x00000004 #define RCVLSC_STATUS 0x00003404 #define RCVLSC_STATUS_ERROR_ATTN 0x00000004 -/* 0x3408 --> 0x3800 unused */ +/* 0x3408 --> 0x3600 unused */ + +/* CPMU registers */ +#define TG3_CPMU_CTRL 0x00003600 +#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 +#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400 +#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000 +/* 0x3604 --> 0x365c unused */ + +#define TG3_CPMU_MUTEX_REQ 0x0000365c +#define CPMU_MUTEX_REQ_DRIVER 
0x00001000 +#define TG3_CPMU_MUTEX_GNT 0x00003660 +#define CPMU_MUTEX_GNT_DRIVER 0x00001000 +/* 0x3664 --> 0x3800 unused */ /* Mbuf cluster free registers */ #define MBFREE_MODE 0x00003800 @@ -1045,7 +1042,10 @@ #define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100 #define RDMAC_MODE_LNGREAD_ENAB 0x00000200 #define RDMAC_MODE_SPLIT_ENABLE 0x00000800 +#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800 #define RDMAC_MODE_SPLIT_RESET 0x00001000 +#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000 +#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 #define RDMAC_MODE_FIFO_SIZE_128 0x00020000 #define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 #define RDMAC_STATUS 0x00004804 @@ -1151,6 +1151,8 @@ #define VCPU_STATUS_DRV_RESET 0x08000000 #define VCPU_CFGSHDW 0x00005104 +#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001 +#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004 #define VCPU_CFGSHDW_ASPM_DBNC 0x00001000 /* Mailboxes */ @@ -1474,6 +1476,22 @@ #define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 #define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 #define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 +#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003 +#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000 +#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002 +#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001 +#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003 +#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000 +#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002 +#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001 +#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001 +#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000 +#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002 +#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003 +#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001 +#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000 +#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002 +#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003 #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 #define FLASH_5752PAGE_SIZE_256 0x00000000 #define FLASH_5752PAGE_SIZE_512 0x10000000 @@ -1504,9 +1522,11 @@ #define ACCESS_ENABLE 0x00000001 #define ACCESS_WR_ENABLE 0x00000002 #define NVRAM_WRITE1 0x00007028 -/* 0x702c --> 0x7400 unused */ +/* 0x702c unused */ + +#define NVRAM_ADDR_LOCKOUT 0x00007030 +/* 0x7034 --> 0x7c00 unused */ -/* 0x7400 --> 0x7c00 unused */ #define PCIE_TRANSACTION_CFG 0x00007c04 #define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000 #define PCIE_TRANS_CFG_LOM 0x00000020 @@ -1552,6 +1572,7 @@ #define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000 #define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000 #define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000 +#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000 #define NIC_SRAM_DATA_VER 0x00000b5c #define NIC_SRAM_DATA_VER_SHIFT 16 @@ -1680,6 +1701,47 @@ #define MII_TG3_TEST1_TRIM_EN 0x0010 #define MII_TG3_TEST1_CRC_EN 0x8000 +/* APE registers. Accessible through BAR1 */ +#define TG3_APE_EVENT 0x000c +#define APE_EVENT_1 0x00000001 +#define TG3_APE_LOCK_REQ 0x002c +#define APE_LOCK_REQ_DRIVER 0x00001000 +#define TG3_APE_LOCK_GRANT 0x004c +#define APE_LOCK_GRANT_DRIVER 0x00001000 +#define TG3_APE_SEG_SIG 0x4000 +#define APE_SEG_SIG_MAGIC 0x41504521 + +/* APE shared memory. 
Accessible through BAR1 */ +#define TG3_APE_FW_STATUS 0x400c +#define APE_FW_STATUS_READY 0x00000100 +#define TG3_APE_HOST_SEG_SIG 0x4200 +#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 +#define TG3_APE_HOST_SEG_LEN 0x4204 +#define APE_HOST_SEG_LEN_MAGIC 0x0000001c +#define TG3_APE_HOST_INIT_COUNT 0x4208 +#define TG3_APE_HOST_DRIVER_ID 0x420c +#define APE_HOST_DRIVER_ID_MAGIC 0xf0035100 +#define TG3_APE_HOST_BEHAVIOR 0x4210 +#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 +#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 +#define APE_HOST_HEARTBEAT_INT_DISABLE 0 +#define APE_HOST_HEARTBEAT_INT_5SEC 5000 +#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 + +#define TG3_APE_EVENT_STATUS 0x4300 + +#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 +#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 +#define APE_EVENT_STATUS_STATE_START 0x00010000 +#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 +#define APE_EVENT_STATUS_STATE_WOL 0x00030000 +#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 +#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 + +/* APE convenience enumerations. */ +#define TG3_APE_LOCK_MEM 4 + + /* There are two ways to manage the TX descriptors on the tigon3. * Either the descriptors are in host DMA'able memory, or they * exist only in the cards on-chip SRAM. All 16 send bds are under @@ -2155,6 +2217,7 @@ struct tg3 { void (*write32_mbox) (struct tg3 *, u32, u32); void __iomem *regs; + void __iomem *aperegs; struct net_device *dev; struct pci_dev *pdev; @@ -2176,6 +2239,7 @@ struct tg3 { dma_addr_t tx_desc_mapping; /* begin "rx thread" cacheline section */ + struct napi_struct napi; void (*write32_rx_mbox) (struct tg3 *, u32, u32); u32 rx_rcb_ptr; @@ -2237,7 +2301,7 @@ struct tg3 { #define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 #define TG3_FLAG_10_100_ONLY 0x01000000 #define TG3_FLAG_PAUSE_AUTONEG 0x02000000 - +#define TG3_FLAG_CPMU_PRESENT 0x04000000 #define TG3_FLAG_40BIT_DMA_BUG 0x08000000 #define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 #define TG3_FLAG_SUPPORT_MSI 0x20000000 @@ -2279,6 +2343,9 @@ struct tg3 { #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 #define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000 + u32 tg3_flags3; +#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 +#define TG3_FLG3_ENABLE_APE 0x00000002 struct timer_list timer; u16 timer_counter; @@ -2309,7 +2376,7 @@ struct tg3 { u32 pwrmgmt_thresh; /* PCI block */ - u16 pci_chip_rev_id; + u32 pci_chip_rev_id; u8 pci_cacheline_sz; u8 pci_lat_timer; u8 pci_hdr_type; @@ -2317,6 +2384,7 @@ struct tg3 { int pm_cap; int msi_cap; + int pcix_cap; /* PHY info */ u32 phy_id; @@ -2335,6 +2403,8 @@ struct tg3 { #define PHY_ID_BCM5755 0xbc050cc0 #define PHY_ID_BCM5787 0xbc050ce0 #define PHY_ID_BCM5756 0xbc050ed0 +#define PHY_ID_BCM5784 0xbc050fa0 +#define PHY_ID_BCM5761 0xbc050fd0 #define PHY_ID_BCM5906 0xdc00ac40 #define PHY_ID_BCM8002 0x60010140 #define PHY_ID_INVALID 0xffffffff @@ -2345,6 +2415,7 @@ struct tg3 { #define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ u32 led_ctrl; + u32 pci_cmd; char board_part_number[24]; char fw_ver[16]; @@ -2363,7 +2434,8 @@ struct tg3 { (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ - (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM8002) + (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ + (X) == PHY_ID_BCM8002) struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 74eb12107e68..c99ce74a7aff 100644 --- 
a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -556,7 +556,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, rc = -ENOMEM; goto err_out_regions; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); priv = netdev_priv(dev); diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 9f1b6ab9c228..7224d368b2a7 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -156,7 +156,7 @@ static void print_rx_state(struct net_device *dev) ; static void print_tx_state(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); struct xl_tx_desc *txd ; u8 __iomem *xl_mmio = xl_priv->xl_mmio ; int i ; @@ -179,7 +179,7 @@ static void print_tx_state(struct net_device *dev) static void print_rx_state(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); struct xl_rx_desc *rxd ; u8 __iomem *xl_mmio = xl_priv->xl_mmio ; int i ; @@ -213,7 +213,7 @@ static void print_rx_state(struct net_device *dev) static u16 xl_ee_read(struct net_device *dev, int ee_addr) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; /* Wait for EEProm to not be busy */ @@ -245,7 +245,7 @@ static u16 xl_ee_read(struct net_device *dev, int ee_addr) static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; /* Wait for EEProm to not be busy */ @@ -305,11 +305,11 @@ static int __devinit xl_probe(struct pci_dev *pdev, pci_release_regions(pdev) ; return -ENOMEM ; } - xl_priv = dev->priv ; + xl_priv = netdev_priv(dev); #if XL_DEBUG printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n", - pdev, dev, dev->priv, (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start) ; + pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start); #endif dev->irq=pdev->irq; @@ -344,7 +344,6 @@ static int __devinit xl_probe(struct pci_dev *pdev, dev->set_multicast_list=&xl_set_rx_mode; dev->get_stats=&xl_get_stats ; dev->set_mac_address=&xl_set_mac_address ; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); pci_set_drvdata(pdev,dev) ; @@ -365,7 +364,7 @@ static int __devinit xl_probe(struct pci_dev *pdev, static int __devinit xl_init(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); printk(KERN_INFO "%s \n", version); printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", @@ -385,7 +384,7 @@ static int __devinit xl_init(struct net_device *dev) static int xl_hw_reset(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; unsigned long t ; u16 i ; @@ -568,7 +567,7 @@ static int xl_hw_reset(struct net_device *dev) static int xl_open(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; u8 i ; u16 hwaddr[3] ; /* Should be u8[6] but we get word return values */ @@ -641,14 +640,14 @@ static int xl_open(struct net_device *dev) * Now to set 
up the Rx and Tx buffer structures */ /* These MUST be on 8 byte boundaries */ - xl_priv->xl_tx_ring = kmalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL) ; + xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL); if (xl_priv->xl_tx_ring == NULL) { printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", dev->name); free_irq(dev->irq,dev); return -ENOMEM; } - xl_priv->xl_rx_ring = kmalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL) ; + xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL); if (xl_priv->xl_tx_ring == NULL) { printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", dev->name); @@ -656,8 +655,6 @@ static int xl_open(struct net_device *dev) kfree(xl_priv->xl_tx_ring); return -ENOMEM; } - memset(xl_priv->xl_tx_ring,0,sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) ; - memset(xl_priv->xl_rx_ring,0,sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) ; /* Setup Rx Ring */ for (i=0 ; i < XL_RX_RING_SIZE ; i++) { @@ -726,7 +723,7 @@ static int xl_open(struct net_device *dev) static int xl_open_hw(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; u16 vsoff ; char ver_str[33]; @@ -875,7 +872,7 @@ static int xl_open_hw(struct net_device *dev) static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */ { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); int prev_ring_loc ; prev_ring_loc = (xl_priv->rx_ring_tail + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1); @@ -890,7 +887,7 @@ static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on static void xl_rx(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; struct sk_buff *skb, *skb2 ; int frame_length = 0, copy_len = 0 ; @@ -997,7 +994,7 @@ static void xl_rx(struct net_device *dev) static void xl_reset(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; unsigned long t; @@ -1020,7 +1017,7 @@ static void xl_reset(struct net_device *dev) static void xl_freemem(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv ; + struct xl_private *xl_priv=netdev_priv(dev); int i ; for (i=0;i<XL_RX_RING_SIZE;i++) { @@ -1044,15 +1041,10 @@ static void xl_freemem(struct net_device *dev) static irqreturn_t xl_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct xl_private *xl_priv =(struct xl_private *)dev->priv; + struct xl_private *xl_priv =netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u16 intstatus, macstatus ; - if (!dev) { - printk(KERN_WARNING "Device structure dead, aaahhhh !\n") ; - return IRQ_NONE; - } - intstatus = readw(xl_mmio + MMIO_INTSTATUS) ; if (!(intstatus & 1)) /* We didn't generate the interrupt */ @@ -1171,7 +1163,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) static int xl_xmit(struct sk_buff *skb, struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); struct xl_tx_desc *txd ; int tx_head, tx_tail, tx_prev 
; unsigned long flags ; @@ -1232,7 +1224,7 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev) static void xl_dn_comp(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; struct xl_tx_desc *txd ; @@ -1268,7 +1260,7 @@ static void xl_dn_comp(struct net_device *dev) static int xl_close(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; unsigned long t ; @@ -1366,7 +1358,7 @@ static int xl_close(struct net_device *dev) static void xl_set_rx_mode(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); struct dev_mc_list *dmi ; unsigned char dev_mc_address[4] ; u16 options ; @@ -1407,7 +1399,7 @@ static void xl_set_rx_mode(struct net_device *dev) static void xl_srb_bh(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u8 srb_cmd, ret_code ; int i ; @@ -1476,14 +1468,14 @@ static void xl_srb_bh(struct net_device *dev) static struct net_device_stats * xl_get_stats(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); return (struct net_device_stats *) &xl_priv->xl_stats; } static int xl_set_mac_address (struct net_device *dev, void *addr) { struct sockaddr *saddr = addr ; - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); if (netif_running(dev)) { printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; @@ -1504,7 +1496,7 @@ static int xl_set_mac_address (struct net_device *dev, void *addr) static void xl_arb_cmd(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u8 arb_cmd ; u16 lan_status, lan_status_diff ; @@ -1632,7 +1624,7 @@ static void xl_arb_cmd(struct net_device *dev) static void xl_asb_cmd(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; if (xl_priv->asb_queued == 1) @@ -1663,7 +1655,7 @@ static void xl_asb_cmd(struct net_device *dev) */ static void xl_asb_bh(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u8 ret_code ; @@ -1691,7 +1683,7 @@ static void xl_asb_bh(struct net_device *dev) static void xl_srb_cmd(struct net_device *dev, int srb_cmd) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; switch (srb_cmd) { @@ -1748,7 +1740,7 @@ static void xl_srb_cmd(struct net_device *dev, int srb_cmd) static void xl_wait_misr_flags(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; int i ; @@ -1773,7 +1765,7 @@ static void xl_wait_misr_flags(struct net_device *dev) static int xl_change_mtu(struct net_device *dev, int 
mtu) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv; + struct xl_private *xl_priv = netdev_priv(dev); u16 max_mtu ; if (xl_priv->xl_ring_speed == 4) @@ -1795,7 +1787,7 @@ static int xl_change_mtu(struct net_device *dev, int mtu) static void __devexit xl_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); unregister_netdev(dev); iounmap(xl_priv->xl_mmio) ; diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c index 1bdd3beefbe5..124cfd4fbcf4 100644 --- a/drivers/net/tokenring/abyss.c +++ b/drivers/net/tokenring/abyss.c @@ -97,8 +97,9 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ static int versionprinted; struct net_device *dev; struct net_local *tp; - int i, ret, pci_irq_line; + int ret, pci_irq_line; unsigned long pci_ioaddr; + DECLARE_MAC_BUF(mac); if (versionprinted++ == 0) printk("%s", version); @@ -116,8 +117,6 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) { ret = -EBUSY; goto err_out_trdev; @@ -147,12 +146,9 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ } abyss_read_eeprom(dev); - - printk("%s: Ring Station Address: ", dev->name); - printk("%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - printk(":%2.2x", dev->dev_addr[i]); - printk("\n"); + + printk("%s: Ring Station Address: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); tp = netdev_priv(dev); tp->setnselout = abyss_setnselout_pins; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 1e8958ee2d0a..e494c63bfbd9 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -116,9 +116,6 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */ #define ENABLE_PAGING 1 #endif -#define FALSE 0 -#define TRUE (!FALSE) - /* changes the output format of driver initialization */ #define TR_VERBOSE 0 @@ -327,7 +324,7 @@ static void ibmtr_cleanup_card(struct net_device *dev) release_region(dev->base_addr, IBMTR_IO_EXTENT); { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); iounmap(ti->mmio); iounmap(ti->sram_virt); } @@ -384,7 +381,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0; void __iomem * t_mmio = NULL; - struct tok_info *ti = dev->priv; + struct tok_info *ti = netdev_priv(dev); void __iomem *cd_chanid; unsigned char *tchanid, ctemp; #ifndef PCMCIA @@ -392,6 +389,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) unsigned long timeout; static int version_printed; #endif + DECLARE_MAC_BUF(mac); /* Query the adapter PIO base port which will return * indication of where MMIO was placed. 
We also have a @@ -705,9 +703,8 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) channel_def[cardpresent - 1], adapter_def(ti->adapter_type)); DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n", irq, PIOaddr, ti->mapped_ram_size / 2); - DPRINTK("Hardware address : %02X:%02X:%02X:%02X:%02X:%02X\n", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + DPRINTK("Hardware address : %s\n", + print_mac(mac, dev->dev_addr)); if (ti->page_mask) DPRINTK("Shared RAM paging enabled. " "Page size: %uK Shared Ram size %dK\n", @@ -823,7 +820,7 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info) static int __devinit trdev_init(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); SET_PAGE(ti->srb_page); ti->open_failure = NO ; @@ -846,7 +843,7 @@ static int tok_init_card(struct net_device *dev) unsigned long i; PIOaddr = dev->base_addr; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); /* Special processing for first interrupt after reset */ ti->do_tok_int = FIRST_INT; /* Reset adapter */ @@ -868,7 +865,7 @@ static int tok_init_card(struct net_device *dev) /*****************************************************************************/ static int tok_open(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); int i; /*the case we were left in a failure state during a previous open */ @@ -927,7 +924,7 @@ static void tok_open_adapter(unsigned long dev_addr) struct tok_info *ti; int i; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); SET_PAGE(ti->init_srb_page); writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); for (i = 0; i < sizeof(struct dir_open_adapter); i++) @@ -962,7 +959,7 @@ static void tok_open_adapter(unsigned long dev_addr) static void open_sap(unsigned char type, struct net_device *dev) { int i; - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); SET_PAGE(ti->srb_page); for (i = 0; i < sizeof(struct dlc_open_sap); i++) @@ -986,7 +983,7 @@ static void open_sap(unsigned char type, struct net_device *dev) static void tok_set_multicast_list(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); struct dev_mc_list *mclist; unsigned char address[4]; @@ -1029,7 +1026,7 @@ static int tok_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tok_info *ti; unsigned long flags; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); netif_stop_queue(dev); @@ -1051,7 +1048,7 @@ static int tok_send_packet(struct sk_buff *skb, struct net_device *dev) static int tok_close(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); /* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */ /* unloading the module from memory, and then if a timer pops, ouch */ @@ -1094,7 +1091,7 @@ static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page static void dir_open_adapter (struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); unsigned char ret_code; __u16 err; @@ -1179,7 +1176,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id) #if TR_VERBOSE DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq); #endif - ti = (struct 
tok_info *) dev->priv; + ti = netdev_priv(dev); if (ti->sram_phys & 1) return IRQ_NONE; /* PCMCIA card extraction flag */ spin_lock(&(ti->lock)); @@ -1498,7 +1495,7 @@ static void initial_tok_int(struct net_device *dev) struct tok_info *ti; unsigned char init_status; /*BMS 12/2000*/ - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); ti->do_tok_int = NOT_FIRST; @@ -1542,7 +1539,7 @@ static void initial_tok_int(struct net_device *dev) ti->ring_speed = init_status & 0x01 ? 16 : 4; DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n", ti->ring_speed, (unsigned int)dev->mem_start); - ti->auto_speedsave=readb(ti->init_srb+INIT_STATUS_2_OFST)&4?TRUE:FALSE; + ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0; if (ti->open_mode == MANUAL) wake_up(&ti->wait_for_reset); else tok_open_adapter((unsigned long)dev); @@ -1560,7 +1557,7 @@ static void initial_tok_int(struct net_device *dev) static void tr_tx(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data; unsigned int hdr_len; __u32 dhb=0,dhb_base; @@ -1674,7 +1671,7 @@ static void tr_tx(struct net_device *dev) static void tr_rx(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); __u32 rbuffer; void __iomem *rbuf, *rbufdata, *llc; __u8 rbuffer_page = 0; @@ -1742,18 +1739,20 @@ static void tr_rx(struct net_device *dev) if (!IPv4_p) { void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data); - + u8 saddr[6]; + u8 daddr[6]; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + int i; + for (i = 0 ; i < 6 ; i++) + saddr[i] = readb(trhhdr + SADDR_OFST + i); + for (i = 0 ; i < 6 ; i++) + daddr[i] = readb(trhhdr + DADDR_OFST + i); DPRINTK("Probably non-IP frame received.\n"); DPRINTK("ssap: %02X dsap: %02X " - "saddr: %02X:%02X:%02X:%02X:%02X:%02X " - "daddr: %02X:%02X:%02X:%02X:%02X:%02X\n", + "saddr: %s daddr: %s\n", readb(llc + SSAP_OFST), readb(llc + DSAP_OFST), - readb(trhhdr+SADDR_OFST), readb(trhhdr+ SADDR_OFST+1), - readb(trhhdr+SADDR_OFST+2), readb(trhhdr+SADDR_OFST+3), - readb(trhhdr+SADDR_OFST+4), readb(trhhdr+SADDR_OFST+5), - readb(trhhdr+DADDR_OFST), readb(trhhdr+DADDR_OFST + 1), - readb(trhhdr+DADDR_OFST+2), readb(trhhdr+DADDR_OFST+3), - readb(trhhdr+DADDR_OFST+4), readb(trhhdr+DADDR_OFST+5)); + print_mac(mac, saddr), print_mac(mac2, daddr)); } #endif @@ -1846,7 +1845,7 @@ static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev) void tok_rerun(unsigned long dev_addr){ struct net_device *dev = (struct net_device *)dev_addr; - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); if ( ti->open_action == RESTART){ ti->do_tok_int = FIRST_INT; @@ -1868,7 +1867,7 @@ static void ibmtr_readlog(struct net_device *dev) { struct tok_info *ti; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); ti->readlog_pending = 0; SET_PAGE(ti->srb_page); @@ -1891,7 +1890,7 @@ static struct net_device_stats *tok_get_stats(struct net_device *dev) { struct tok_info *toki; - toki = (struct tok_info *) dev->priv; + toki = netdev_priv(dev); return (struct net_device_stats *) &toki->tr_stats; } @@ -1899,7 +1898,7 @@ static struct net_device_stats *tok_get_stats(struct net_device *dev) static int ibmtr_change_mtu(struct net_device *dev, int mtu) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); if
(ti->ring_speed == 16 && mtu > ti->maxmtu16) return -EINVAL; diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 5d849c089a3b..47d84cd28097 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -123,6 +123,7 @@ #include <linux/bitops.h> #include <linux/jiffies.h> +#include <net/net_namespace.h> #include <net/checksum.h> #include <asm/io.h> @@ -244,13 +245,12 @@ static int __devinit streamer_init_one(struct pci_dev *pdev, return -ENOMEM; } - SET_MODULE_OWNER(dev); - streamer_priv = dev->priv; + streamer_priv = netdev_priv(dev); #if STREAMER_NETWORK_MONITOR #ifdef CONFIG_PROC_FS if (!dev_streamer) - create_proc_read_entry("net/streamer_tr", 0, 0, + create_proc_read_entry("streamer_tr", 0, init_net.proc_net, streamer_proc_info, NULL); streamer_priv->next = dev_streamer; dev_streamer = streamer_priv; @@ -404,7 +404,7 @@ static void __devexit streamer_remove_one(struct pci_dev *pdev) return; } - streamer_priv=dev->priv; + streamer_priv=netdev_priv(dev); if (streamer_priv == NULL) { printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n"); return; @@ -423,7 +423,7 @@ static void __devexit streamer_remove_one(struct pci_dev *pdev) } } if (!dev_streamer) - remove_proc_entry("net/streamer_tr", NULL); + remove_proc_entry("streamer_tr", init_net.proc_net); } #endif #endif @@ -447,8 +447,11 @@ static int streamer_reset(struct net_device *dev) unsigned int uaa_addr; struct sk_buff *skb = NULL; __u16 misr; +#if STREAMER_DEBUG + DECLARE_MAC_BUF(mac); +#endif - streamer_priv = (struct streamer_private *) dev->priv; + streamer_priv = netdev_priv(dev); streamer_mmio = streamer_priv->streamer_mmio; writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL); @@ -575,11 +578,8 @@ static int streamer_reset(struct net_device *dev) dev->dev_addr[i+1]= addr & 0xff; } #if STREAMER_DEBUG - printk("Adapter address: "); - for (i = 0; i < 6; i++) { - printk("%02x:", dev->dev_addr[i]); - } - printk("\n"); + printk("Adapter address: %s\n", + print_mac(mac, dev->dev_addr)); #endif } return 0; @@ -587,7 +587,7 @@ static int streamer_reset(struct net_device *dev) static int streamer_open(struct net_device *dev) { - struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; + struct streamer_private *streamer_priv = netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; unsigned long flags; char open_error[255]; @@ -904,7 +904,7 @@ static int streamer_open(struct net_device *dev) static void streamer_rx(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; struct streamer_rx_desc *rx_desc; int rx_ring_last_received, length, frame_length, buffer_cnt = 0; @@ -1029,7 +1029,7 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u16 sisr; __u16 misr; @@ -1152,7 +1152,7 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id) static int streamer_xmit(struct sk_buff *skb, struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; unsigned long flags ; @@ -1203,7 +1203,7 @@ static 
int streamer_xmit(struct sk_buff *skb, struct net_device *dev) static int streamer_close(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; unsigned long flags; int i; @@ -1269,7 +1269,7 @@ static int streamer_close(struct net_device *dev) static void streamer_set_rx_mode(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u8 options = 0; struct dev_mc_list *dmi; @@ -1328,7 +1328,7 @@ static void streamer_set_rx_mode(struct net_device *dev) static void streamer_srb_bh(struct net_device *dev) { - struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; + struct streamer_private *streamer_priv = netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u16 srb_word; @@ -1493,14 +1493,14 @@ static void streamer_srb_bh(struct net_device *dev) static struct net_device_stats *streamer_get_stats(struct net_device *dev) { struct streamer_private *streamer_priv; - streamer_priv = (struct streamer_private *) dev->priv; + streamer_priv = netdev_priv(dev); return (struct net_device_stats *) &streamer_priv->streamer_stats; } static int streamer_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *saddr = addr; - struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; + struct streamer_private *streamer_priv = netdev_priv(dev); if (netif_running(dev)) { @@ -1525,7 +1525,7 @@ static int streamer_set_mac_address(struct net_device *dev, void *addr) static void streamer_arb_cmd(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u8 header_len; __u16 frame_len, buffer_len; @@ -1539,6 +1539,7 @@ static void streamer_arb_cmd(struct net_device *dev) #if STREAMER_NETWORK_MONITOR struct trh_hdr *mac_hdr; + DECLARE_MAC_BUF(mac); #endif writew(streamer_priv->arb, streamer_mmio + LAPA); @@ -1611,15 +1612,11 @@ static void streamer_arb_cmd(struct net_device *dev) dev->name); mac_hdr = tr_hdr(mac_frame); printk(KERN_WARNING - "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", - dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], - mac_hdr->daddr[2], mac_hdr->daddr[3], - mac_hdr->daddr[4], mac_hdr->daddr[5]); + "%s: MAC Frame Dest. Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->daddr)); printk(KERN_WARNING - "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", - dev->name, mac_hdr->saddr[0], mac_hdr->saddr[1], - mac_hdr->saddr[2], mac_hdr->saddr[3], - mac_hdr->saddr[4], mac_hdr->saddr[5]); + "%s: MAC Frame Srce. 
Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->saddr)); #endif netif_rx(mac_frame); @@ -1740,7 +1737,7 @@ drop_frame: static void streamer_asb_bh(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; if (streamer_priv->asb_queued == 1) @@ -1784,7 +1781,7 @@ static void streamer_asb_bh(struct net_device *dev) static int streamer_change_mtu(struct net_device *dev, int mtu) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u16 max_mtu; if (streamer_priv->streamer_ring_speed == 4) @@ -1848,12 +1845,14 @@ static int streamer_proc_info(char *buffer, char **start, off_t offset, static int sprintf_info(char *buffer, struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; struct streamer_adapter_addr_table sat; struct streamer_parameters_table spt; int size = 0; int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); for (i = 0; i < 14; i += 2) { @@ -1875,37 +1874,30 @@ static int sprintf_info(char *buffer, struct net_device *dev) size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n", - dev->name, dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5], sat.node_addr[0], sat.node_addr[1], - sat.node_addr[2], sat.node_addr[3], sat.node_addr[4], - sat.node_addr[5], sat.func_addr[0], sat.func_addr[1], - sat.func_addr[2], sat.func_addr[3]); + "%6s: %s : %s : %02x:%02x:%02x:%02x\n", + dev->name, print_mac(mac, dev->dev_addr), + print_mac(mac2, sat.node_addr), + sat.func_addr[0], sat.func_addr[1], + sat.func_addr[2], sat.func_addr[3]); size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name); size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name); size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n", + "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n", dev->name, spt.phys_addr[0], spt.phys_addr[1], spt.phys_addr[2], spt.phys_addr[3], - spt.up_node_addr[0], spt.up_node_addr[1], - spt.up_node_addr[2], spt.up_node_addr[3], - spt.up_node_addr[4], spt.up_node_addr[4], - spt.poll_addr[0], spt.poll_addr[1], spt.poll_addr[2], - spt.poll_addr[3], spt.poll_addr[4], spt.poll_addr[5], + print_mac(mac, spt.up_node_addr), + print_mac(mac2, spt.poll_addr), + ntohs(spt.acc_priority), ntohs(spt.auth_source_class), ntohs(spt.att_code)); size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. 
V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name); size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n", - dev->name, spt.source_addr[0], spt.source_addr[1], - spt.source_addr[2], spt.source_addr[3], - spt.source_addr[4], spt.source_addr[5], + "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n", + dev->name, print_mac(mac, spt.source_addr), ntohs(spt.beacon_type), ntohs(spt.major_vector), ntohs(spt.lan_status), ntohs(spt.local_ring), ntohs(spt.mon_error), ntohs(spt.frame_correl)); @@ -1914,14 +1906,12 @@ static int sprintf_info(char *buffer, struct net_device *dev) dev->name); size += sprintf(buffer + size, - "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n", + "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n", dev->name, ntohs(spt.beacon_transmit), - ntohs(spt.beacon_receive), spt.beacon_naun[0], - spt.beacon_naun[1], spt.beacon_naun[2], - spt.beacon_naun[3], spt.beacon_naun[4], - spt.beacon_naun[5], spt.beacon_phys[0], - spt.beacon_phys[1], spt.beacon_phys[2], - spt.beacon_phys[3]); + ntohs(spt.beacon_receive), + print_mac(mac, spt.beacon_naun), + spt.beacon_phys[0], spt.beacon_phys[1], + spt.beacon_phys[2], spt.beacon_phys[3]); return size; } #endif diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c index f8f4d74f01f1..5a4151362fc0 100644 --- a/drivers/net/tokenring/madgemc.c +++ b/drivers/net/tokenring/madgemc.c @@ -151,7 +151,8 @@ static int __devinit madgemc_probe(struct device *device) struct net_local *tp; struct card_info *card; struct mca_device *mdev = to_mca_device(device); - int ret = 0, i = 0; + int ret = 0; + DECLARE_MAC_BUF(mac); if (versionprinted++ == 0) printk("%s", version); @@ -168,7 +169,6 @@ static int __devinit madgemc_probe(struct device *device) goto getout; } - SET_MODULE_OWNER(dev); dev->dma = 0; card = kmalloc(sizeof(struct card_info), GFP_KERNEL); @@ -323,11 +323,8 @@ static int __devinit madgemc_probe(struct device *device) mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME); mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev); - printk("%s: Ring Station Address: ", dev->name); - printk("%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - printk(":%2.2x", dev->dev_addr[i]); - printk("\n"); + printk("%s: Ring Station Address: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); if (tmsdev_init(dev, device)) { printk("%s: unable to get memory for dev->priv.\n", @@ -690,14 +687,14 @@ static int madgemc_close(struct net_device *dev) static int madgemc_mcaproc(char *buf, int slot, void *d) { struct net_device *dev = (struct net_device *)d; - struct net_local *tp = dev->priv; + struct net_local *tp = netdev_priv(dev); struct card_info *curcard = tp->tmspriv; int len = 0; + DECLARE_MAC_BUF(mac); len += sprintf(buf+len, "-------\n"); if (curcard) { struct net_local *tp = netdev_priv(dev); - int i; len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev); len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize); @@ -717,11 +714,8 @@ static int madgemc_mcaproc(char *buf, int slot, void *d) } len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair"); - len += sprintf(buf+len, "Ring Station Address: "); - len += sprintf(buf+len, "%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - len += sprintf(buf+len, " %2.2x", dev->dev_addr[i]); - len += sprintf(buf+len, "\n"); + len += sprintf(buf+len, "Ring Station Address: %s\n", + print_mac(mac, dev->dev_addr)); } 
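[Editorial aside: the address-printing change repeated throughout these token-ring hunks is one pattern: DECLARE_MAC_BUF() declares an 18-byte scratch buffer on the stack and print_mac() formats a six-byte hardware address into it, so a single "%s" replaces the old run of "%2.2x" conversions. Below is a minimal usage sketch, assuming the 2.6.24-era helpers declared in <linux/if_ether.h>; the function name example_print_station_addr is hypothetical.]

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void example_print_station_addr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);		/* an 18-byte scratch buffer: "xx:xx:xx:xx:xx:xx" + NUL */

	/* print_mac() fills 'mac' and returns it, so it can be passed
	 * straight to a %s conversion.  Use one buffer per address printed
	 * in the same statement (mac, mac2, ...). */
	printk(KERN_INFO "%s: Ring Station Address: %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}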
else len += sprintf(buf+len, "Card not configured\n"); @@ -736,7 +730,7 @@ static int __devexit madgemc_remove(struct device *device) BUG_ON(!dev); - tp = dev->priv; + tp = netdev_priv(dev); card = tp->tmspriv; kfree(card); tp->tmspriv = NULL; diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 09b3cfb8e809..74c1f0f189f5 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -102,6 +102,7 @@ #include <linux/jiffies.h> #include <net/checksum.h> +#include <net/net_namespace.h> #include <asm/io.h> #include <asm/system.h> @@ -219,14 +220,14 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device goto op_release_dev; } - olympic_priv = dev->priv ; + olympic_priv = netdev_priv(dev) ; spin_lock_init(&olympic_priv->olympic_lock) ; init_waitqueue_head(&olympic_priv->srb_wait); init_waitqueue_head(&olympic_priv->trb_wait); #if OLYMPIC_DEBUG - printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv); + printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev)); #endif dev->irq=pdev->irq; dev->base_addr=pci_resource_start(pdev, 0); @@ -260,7 +261,6 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device dev->set_multicast_list=&olympic_set_rx_mode; dev->get_stats=&olympic_get_stats ; dev->set_mac_address=&olympic_set_mac_address ; - SET_MODULE_OWNER(dev) ; SET_NETDEV_DEV(dev, &pdev->dev); pci_set_drvdata(pdev,dev) ; @@ -268,9 +268,9 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name); if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */ char proc_name[20] ; - strcpy(proc_name,"net/olympic_") ; + strcpy(proc_name,"olympic_") ; strcat(proc_name,dev->name) ; - create_proc_read_entry(proc_name,0,NULL,olympic_proc_info,(void *)dev) ; + create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ; printk("Olympic: Network Monitor information: /proc/%s\n",proc_name); } return 0 ; @@ -297,7 +297,7 @@ static int __devinit olympic_init(struct net_device *dev) unsigned long t; unsigned int uaa_addr; - olympic_priv=(struct olympic_private *)dev->priv; + olympic_priv=netdev_priv(dev); olympic_mmio=olympic_priv->olympic_mmio; printk("%s \n", version); @@ -418,14 +418,15 @@ static int __devinit olympic_init(struct net_device *dev) writel(uaa_addr,olympic_mmio+LAPA); adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800)); + memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); + #if OLYMPIC_DEBUG - printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n", - readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2), - readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5)); + { + DECLARE_MAC_BUF(mac); + printk("adapter address: %s\n", print_mac(mac, dev->dev_addr)); + } #endif - memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); - olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12)); olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14)); @@ -435,11 +436,12 @@ static int __devinit olympic_init(struct net_device *dev) static int olympic_open(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; unsigned long flags, t; int i, open_finished = 1 ; 
u8 resp, err; + DECLARE_MAC_BUF(mac); DECLARE_WAITQUEUE(wait,current) ; @@ -567,14 +569,8 @@ static int olympic_open(struct net_device *dev) goto out; case 0x32: - printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, - olympic_priv->olympic_laa[0], - olympic_priv->olympic_laa[1], - olympic_priv->olympic_laa[2], - olympic_priv->olympic_laa[3], - olympic_priv->olympic_laa[4], - olympic_priv->olympic_laa[5]) ; + printk(KERN_WARNING "%s: Invalid LAA: %s\n", + dev->name, print_mac(mac, olympic_priv->olympic_laa)); goto out; default: @@ -704,30 +700,26 @@ static int olympic_open(struct net_device *dev) #endif if (olympic_priv->olympic_network_monitor) { - u8 __iomem *oat ; - u8 __iomem *opt ; - oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; - opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; - - printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5)); + u8 __iomem *oat; + u8 __iomem *opt; + int i; + u8 addr[6]; + DECLARE_MAC_BUF(mac); + oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr); + opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr); + + for (i = 0; i < 6; i++) + addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i); + printk("%s: Node Address: %s\n",dev->name, print_mac(mac, addr)); printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name, readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); - printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5)); + + for (i = 0; i < 6; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i); + printk("%s: NAUN Address: %s\n",dev->name, print_mac(mac, addr)); } netif_start_queue(dev); @@ -755,7 +747,7 @@ out: */ static void olympic_rx(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; struct olympic_rx_status *rx_status; struct olympic_rx_desc *rx_desc ; @@ -897,7 +889,7 @@ static void olympic_rx(struct net_device *dev) static void olympic_freemem(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); int i; for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) { @@ -930,7 +922,7 @@ static void olympic_freemem(struct net_device *dev) static irqreturn_t 
olympic_interrupt(int irq, void *dev_id) { struct net_device *dev= (struct net_device *)dev_id; - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; u32 sisr; u8 __iomem *adapter_check_area ; @@ -1046,7 +1038,7 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id) static int olympic_xmit(struct sk_buff *skb, struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; unsigned long flags ; @@ -1077,7 +1069,7 @@ static int olympic_xmit(struct sk_buff *skb, struct net_device *dev) static int olympic_close(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb; unsigned long t,flags; @@ -1147,7 +1139,7 @@ static int olympic_close(struct net_device *dev) static void olympic_set_rx_mode(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; u8 options = 0; u8 __iomem *srb; @@ -1215,7 +1207,7 @@ static void olympic_set_rx_mode(struct net_device *dev) static void olympic_srb_bh(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; u8 __iomem *srb; @@ -1361,14 +1353,14 @@ static void olympic_srb_bh(struct net_device *dev) static struct net_device_stats * olympic_get_stats(struct net_device *dev) { struct olympic_private *olympic_priv ; - olympic_priv=(struct olympic_private *) dev->priv; + olympic_priv=netdev_priv(dev); return (struct net_device_stats *) &olympic_priv->olympic_stats; } static int olympic_set_mac_address (struct net_device *dev, void *addr) { struct sockaddr *saddr = addr ; - struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); if (netif_running(dev)) { printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; @@ -1389,7 +1381,7 @@ static int olympic_set_mac_address (struct net_device *dev, void *addr) static void olympic_arb_cmd(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; u8 __iomem *arb_block, *asb_block, *srb ; u8 header_len ; @@ -1445,11 +1437,14 @@ static void olympic_arb_cmd(struct net_device *dev) mac_frame->protocol = tr_type_trans(mac_frame, dev); if (olympic_priv->olympic_network_monitor) { - struct trh_hdr *mac_hdr ; - printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; + struct trh_hdr *mac_hdr; + DECLARE_MAC_BUF(mac); + printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name); mac_hdr = tr_hdr(mac_frame); - printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; - printk(KERN_WARNING "%s: MAC Frame Srce. 
Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; + printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->daddr)); + printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->saddr)); } netif_rx(mac_frame); dev->last_rx = jiffies; @@ -1575,7 +1570,7 @@ drop_frame: static void olympic_asb_bh(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *arb_block, *asb_block ; arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; @@ -1615,7 +1610,7 @@ static void olympic_asb_bh(struct net_device *dev) static int olympic_change_mtu(struct net_device *dev, int mtu) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv; + struct olympic_private *olympic_priv = netdev_priv(dev); u16 max_mtu ; if (olympic_priv->olympic_ring_speed == 4) @@ -1637,33 +1632,31 @@ static int olympic_change_mtu(struct net_device *dev, int mtu) static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { struct net_device *dev = (struct net_device *)data ; - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; int size = 0 ; int len=0; off_t begin=0; off_t pos=0; - + u8 addr[6]; + u8 addr2[6]; + int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + size = sprintf(buffer, "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name); size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); - size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n", + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i); + + size += sprintf(buffer+size, "%6s: %s : %s : %02x:%02x:%02x:%02x\n", dev->name, - dev->dev_addr[0], - dev->dev_addr[1], - dev->dev_addr[2], - dev->dev_addr[3], - dev->dev_addr[4], - dev->dev_addr[5], - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5), + print_mac(mac, dev->dev_addr), + print_mac(mac2, addr), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), @@ -1673,25 +1666,20 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name) ; - - size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n", + + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct 
olympic_parameters_table, up_node_addr) + i); + for (i = 0 ; i < 6 ; i++) + addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i); + + size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n", dev->name, readb(opt+offsetof(struct olympic_parameters_table, phys_addr)), readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1), readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2), readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5), + print_mac(mac, addr), + print_mac(mac2, addr2), swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))), swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))), swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code)))); @@ -1699,14 +1687,11 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. 
V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name) ; - size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n", + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i); + size += sprintf(buffer+size, "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n", dev->name, - readb(opt+offsetof(struct olympic_parameters_table, source_addr)), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5), + print_mac(mac, addr), swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))), swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))), swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))), @@ -1717,16 +1702,13 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", dev->name) ; - size += sprintf(buffer+size, "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n", + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i); + size += sprintf(buffer+size, "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n", dev->name, swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))), swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5), + print_mac(mac, addr), readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)), readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1), readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2), @@ -1748,13 +1730,13 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt static void __devexit olympic_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev) ; - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); if (olympic_priv->olympic_network_monitor) { char proc_name[20] ; - strcpy(proc_name,"net/olympic_") ; + strcpy(proc_name,"olympic_") ; strcat(proc_name,dev->name) ; - remove_proc_entry(proc_name,NULL); + remove_proc_entry(proc_name,init_net.proc_net); } unregister_netdev(dev) ; iounmap(olympic_priv->olympic_mmio) ; diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c index cb7dbb63c9d9..ca6b65919b3d 100644 --- a/drivers/net/tokenring/proteon.c +++ b/drivers/net/tokenring/proteon.c @@ -122,11 +122,11 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) static int versionprinted; const unsigned *port; int j,err = 0; + DECLARE_MAC_BUF(mac); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (dev->base_addr) /* probe specific 
location */ err = proteon_probe1(dev, dev->base_addr); else { @@ -153,11 +153,8 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) proteon_read_eeprom(dev); - printk(KERN_DEBUG "proteon.c: Ring Station Address: "); - printk("%2.2x", dev->dev_addr[0]); - for (j = 1; j < 6; j++) - printk(":%2.2x", dev->dev_addr[j]); - printk("\n"); + printk(KERN_DEBUG "proteon.c: Ring Station Address: %s\n", + print_mac(mac, dev->dev_addr)); tp = netdev_priv(dev); tp->setnselout = proteon_setnselout_pins; diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c index 33afea31d87b..32e8d5a9f958 100644 --- a/drivers/net/tokenring/skisa.c +++ b/drivers/net/tokenring/skisa.c @@ -139,11 +139,11 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) static int versionprinted; const unsigned *port; int j, err = 0; + DECLARE_MAC_BUF(mac); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (dev->base_addr) /* probe specific location */ err = sk_isa_probe1(dev, dev->base_addr); else { @@ -170,11 +170,8 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) sk_isa_read_eeprom(dev); - printk(KERN_DEBUG "skisa.c: Ring Station Address: "); - printk("%2.2x", dev->dev_addr[0]); - for (j = 1; j < 6; j++) - printk(":%2.2x", dev->dev_addr[j]); - printk("\n"); + printk(KERN_DEBUG "skisa.c: Ring Station Address: %s\n", + print_mac(mac, dev->dev_addr)); tp = netdev_priv(dev); tp->setnselout = sk_isa_setnselout_pins; diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index f83bb5cb0d3d..93da3a36cde8 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -3583,8 +3583,6 @@ struct net_device __init *smctr_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); - if (unit >= 0) { sprintf(dev->name, "tr%d", unit); netdev_boot_setup_check(dev); diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index 12bd294045a7..d5fa36d36515 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -2124,7 +2124,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) /* Get the frame size (Byte swap for Intel). * Do this early (see workaround comment below) */ - Length = be16_to_cpu((unsigned short)rpl->FrameSize); + Length = be16_to_cpu(rpl->FrameSize); /* Check if the Frame_Start, Frame_End and * Frame_Complete bits are set. @@ -2140,7 +2140,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) * Length2 is there because there have also been * cases where the FrameSize was partially written */ - Length2 = be16_to_cpu((unsigned short)rpl->FrameSize); + Length2 = be16_to_cpu(rpl->FrameSize); if(Length == 0 || Length != Length2) { diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h index 2a16078ac3fd..7daf74e31ccd 100644 --- a/drivers/net/tokenring/tms380tr.h +++ b/drivers/net/tokenring/tms380tr.h @@ -476,13 +476,13 @@ typedef struct { * bytes = 0xC000 */ u_int32_t FunctAddr; /* High order bytes = 0xC000 */ - u_int16_t RxListSize; /* RPL size: 0 (=26), 14, 20 or + __be16 RxListSize; /* RPL size: 0 (=26), 14, 20 or * 26 bytes read by the adapter. * (Depending on the number of * fragments/list) */ - u_int16_t TxListSize; /* TPL size */ - u_int16_t BufSize; /* Is automatically rounded up to the + __be16 TxListSize; /* TPL size */ + __be16 BufSize; /* Is automatically rounded up to the * nearest nK boundary. 
*/ u_int16_t FullDuplex; @@ -580,14 +580,14 @@ typedef struct { /*--------------------- Send and Receive definitions -------------------*/ #pragma pack(1) typedef struct { - u_int16_t DataCount; /* Value 0, even and odd values are + __be16 DataCount; /* Value 0, even and odd values are * permitted; value is unaltered most * significant bit set: following * fragments last fragment: most * significant bit is not evaluated. * (???) */ - u_int32_t DataAddr; /* Pointer to frame data fragment; + __be32 DataAddr; /* Pointer to frame data fragment; * even or odd. */ } Fragment; @@ -679,7 +679,7 @@ typedef struct { typedef struct s_TPL TPL; struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */ - u_int32_t NextTPLAddr; /* Pointer to next TPL in chain; if + __be32 NextTPLAddr; /* Pointer to next TPL in chain; if * pointer is odd: this is the last * TPL. Pointing to itself can cause * problems! @@ -689,7 +689,7 @@ struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */ * significant bit first! Set by the * adapter: CSTAT_COMPLETE status. */ - u_int16_t FrameSize; /* Number of bytes to be transmitted + __be16 FrameSize; /* Number of bytes to be transmitted * as a frame including AC/FC, * Destination, Source, Routing field * not including CRC, FS, End Delimiter @@ -1020,7 +1020,7 @@ enum SKB_STAT { #pragma pack(1) typedef struct s_RPL RPL; struct s_RPL { /* Receive Parameter List */ - u_int32_t NextRPLAddr; /* Pointer to next RPL in chain + __be32 NextRPLAddr; /* Pointer to next RPL in chain * (normalized = physical 32 bit * address) if pointer is odd: this * is last RPL. Pointing to itself can @@ -1031,7 +1031,7 @@ struct s_RPL { /* Receive Parameter List */ * adapter in lists that start or end * a frame. */ - volatile u_int16_t FrameSize; /* Number of bytes received as a + volatile __be16 FrameSize; /* Number of bytes received as a * frame including AC/FC, Destination, * Source, Routing field not including * CRC, FS (Frame Status), End Delimiter diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c index 3b2f00b9b7bd..1c18f782f522 100644 --- a/drivers/net/tokenring/tmspci.c +++ b/drivers/net/tokenring/tmspci.c @@ -96,10 +96,11 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic static int versionprinted; struct net_device *dev; struct net_local *tp; - int i, ret; + int ret; unsigned int pci_irq_line; unsigned long pci_ioaddr; struct card_info *cardinfo = &card_info_table[ent->driver_data]; + DECLARE_MAC_BUF(mac); if (versionprinted++ == 0) printk("%s", version); @@ -115,7 +116,6 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic dev = alloc_trdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) { ret = -EBUSY; @@ -137,11 +137,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic tms_pci_read_eeprom(dev); - printk("%s: Ring Station Address: ", dev->name); - printk("%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - printk(":%2.2x", dev->dev_addr[i]); - printk("\n"); + printk("%s: Ring Station Address: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); ret = tmsdev_init(dev, &pdev->dev); if (ret) { @@ -149,7 +146,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic goto err_out_irq; } - tp = dev->priv; + tp = netdev_priv(dev); tp->setnselout = tms_pci_setnselout_pins; tp->sifreadb = tms_pci_sifreadb; @@ -210,7 
+207,7 @@ static void tms_pci_read_eeprom(struct net_device *dev) static unsigned short tms_pci_setnselout_pins(struct net_device *dev) { unsigned short val = 0; - struct net_local *tp = dev->priv; + struct net_local *tp = netdev_priv(dev); struct card_info *cardinfo = tp->tmspriv; if(tp->DataRate == SPEED_4) diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 1aabc91f6458..df10af7df7b8 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -47,7 +47,6 @@ #include <linux/rtnetlink.h> #include <linux/timer.h> #include <linux/platform_device.h> -#include <linux/etherdevice.h> #include <asm/system.h> #include <asm/io.h> @@ -79,6 +78,9 @@ struct tsi108_prv_data { void __iomem *regs; /* Base of normal regs */ void __iomem *phyregs; /* Base of register bank used for PHY access */ + struct net_device *dev; + struct napi_struct napi; + unsigned int phy; /* Index of PHY for this interface */ unsigned int irq_num; unsigned int id; @@ -837,13 +839,13 @@ static int tsi108_refill_rx(struct net_device *dev, int budget) return done; } -static int tsi108_poll(struct net_device *dev, int *budget) +static int tsi108_poll(struct napi_struct *napi, int budget) { - struct tsi108_prv_data *data = netdev_priv(dev); + struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi); + struct net_device *dev = data->dev; u32 estat = TSI_READ(TSI108_EC_RXESTAT); u32 intstat = TSI_READ(TSI108_EC_INTSTAT); - int total_budget = min(*budget, dev->quota); - int num_received = 0, num_filled = 0, budget_used; + int num_received = 0, num_filled = 0; intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT; @@ -852,7 +854,7 @@ static int tsi108_poll(struct net_device *dev, int *budget) TSI_WRITE(TSI108_EC_INTSTAT, intstat); if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT)) - num_received = tsi108_complete_rx(dev, total_budget); + num_received = tsi108_complete_rx(dev, budget); /* This should normally fill no more slots than the number of * packets received in tsi108_complete_rx(). The exception @@ -867,7 +869,7 @@ static int tsi108_poll(struct net_device *dev, int *budget) */ if (data->rxfree < TSI108_RXRING_LEN) - num_filled = tsi108_refill_rx(dev, total_budget * 2); + num_filled = tsi108_refill_rx(dev, budget * 2); if (intstat & TSI108_INT_RXERROR) { u32 err = TSI_READ(TSI108_EC_RXERR); @@ -890,14 +892,9 @@ static int tsi108_poll(struct net_device *dev, int *budget) spin_unlock_irq(&data->misclock); } - budget_used = max(num_received, num_filled / 2); - - *budget -= budget_used; - dev->quota -= budget_used; - - if (budget_used != total_budget) { + if (num_received < budget) { data->rxpending = 0; - netif_rx_complete(dev); + netif_rx_complete(dev, napi); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) @@ -906,14 +903,11 @@ static int tsi108_poll(struct net_device *dev, int *budget) TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT)); - - /* IRQs are level-triggered, so no need to re-check */ - return 0; } else { data->rxpending = 1; } - return 1; + return num_received; } static void tsi108_rx_int(struct net_device *dev) @@ -931,7 +925,7 @@ static void tsi108_rx_int(struct net_device *dev) * from tsi108_check_rxring(). */ - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &data->napi)) { /* Mask, rather than ack, the receive interrupts. The ack * will happen in tsi108_poll(). 
*/ @@ -942,7 +936,7 @@ static void tsi108_rx_int(struct net_device *dev) | TSI108_INT_RXTHRESH | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &data->napi); } else { if (!netif_running(dev)) { /* This can happen if an interrupt occurs while the @@ -1401,6 +1395,8 @@ static int tsi108_open(struct net_device *dev) TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma); tsi108_init_phy(dev); + napi_enable(&data->napi); + setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); mod_timer(&data->timer, jiffies + 1); @@ -1425,6 +1421,7 @@ static int tsi108_close(struct net_device *dev) struct tsi108_prv_data *data = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&data->napi); del_timer_sync(&data->timer); @@ -1543,6 +1540,7 @@ tsi108_init_one(struct platform_device *pdev) struct tsi108_prv_data *data = NULL; hw_info *einfo; int err = 0; + DECLARE_MAC_BUF(mac); einfo = pdev->dev.platform_data; @@ -1562,6 +1560,7 @@ tsi108_init_one(struct platform_device *pdev) printk("tsi108_eth%d: probe...\n", pdev->id); data = netdev_priv(dev); + data->dev = dev; pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", pdev->id, einfo->regs, einfo->phyregs, @@ -1597,9 +1596,8 @@ tsi108_init_one(struct platform_device *pdev) dev->set_mac_address = tsi108_set_mac; dev->set_multicast_list = tsi108_set_rx_mode; dev->get_stats = tsi108_get_stats; - dev->poll = tsi108_poll; + netif_napi_add(dev, &data->napi, tsi108_poll, 64); dev->do_ioctl = tsi108_do_ioctl; - dev->weight = 64; /* 64 is more suitable for GigE interface - klai */ /* Apparently, the Linux networking code won't use scatter-gather * if the hardware doesn't do checksums. However, it's faster */ dev->features = NETIF_F_HIGHDMA; - SET_MODULE_OWNER(dev); spin_lock_init(&data->txlock); spin_lock_init(&data->misclock); @@ -1632,10 +1629,8 @@ tsi108_init_one(struct platform_device *pdev) goto register_fail; } - printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); #ifdef DEBUG data->msg_enable = DEBUG; dump_eth_one(dev); diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index d380e0b3f05a..77d9dd7ea34f 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -264,10 +264,10 @@ struct de_srom_info_leaf { } __attribute__((packed)); struct de_desc { - u32 opts1; - u32 opts2; - u32 addr1; - u32 addr2; + __le32 opts1; + __le32 opts2; + __le32 addr1; + __le32 addr2; }; struct media_info { @@ -1670,8 +1670,6 @@ static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs, static const struct ethtool_ops de_ethtool_ops = { .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, .get_drvinfo = de_get_drvinfo, .get_regs_len = de_get_regs_len, .get_settings = de_get_settings, @@ -1773,8 +1771,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de) /* download entire eeprom */ for (i = 0; i < DE_EEPROM_WORDS; i++) - ((u16 *)ee_data)[i] = - le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size)); + ((__le16 *)ee_data)[i] = + cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size)); /* DEC now has a specification but early 
board makers just put the address in the first EEPROM locations. */ @@ -1931,6 +1929,7 @@ static int __devinit de_init_one (struct pci_dev *pdev, void __iomem *regs; unsigned long pciaddr; static int board_idx = -1; + DECLARE_MAC_BUF(mac); board_idx++; @@ -1944,7 +1943,6 @@ static int __devinit de_init_one (struct pci_dev *pdev, if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->open = de_open; dev->stop = de_close; @@ -2045,15 +2043,11 @@ static int __devinit de_init_one (struct pci_dev *pdev, goto err_out_iomap; /* print info about board and interface just registered */ - printk (KERN_INFO "%s: %s at 0x%lx, " - "%02x:%02x:%02x:%02x:%02x:%02x, " - "IRQ %d\n", + printk (KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n", dev->name, de->de21040 ? "21040" : "21041", dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 09902891a6e6..9b9cd83fb8b6 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -482,7 +482,7 @@ static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; #define c_char const char -#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((u_short *)(a))) +#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a))) /* ** MII Information @@ -756,10 +756,10 @@ struct de4x5_srom { /* Multiple of 4 for DC21040 */ /* Allows 512 byte alignment */ struct de4x5_desc { - volatile s32 status; - u32 des1; - u32 buf; - u32 next; + volatile __le32 status; + __le32 des1; + __le32 buf; + __le32 next; DESC_ALIGN }; @@ -1088,6 +1088,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) struct de4x5_private *lp = netdev_priv(dev); struct pci_dev *pdev = NULL; int i, status=0; + DECLARE_MAC_BUF(mac); gendev->driver_data = dev; @@ -1123,12 +1124,8 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) dev->base_addr = iobase; printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase); - printk(", h/w address "); status = get_hw_addr(dev); - for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */ - printk("%2.2x:", dev->dev_addr[i]); - } - printk("%2.2x,\n", dev->dev_addr[i]); + printk(", h/w address %s\n", print_mac(mac, dev->dev_addr)); if (status != 0) { printk(" which has an Ethernet PROM CRC error.\n"); @@ -1261,7 +1258,6 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) } /* The DE4X5-specific entries in the device structure. 
*/ - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, gendev); dev->open = &de4x5_open; dev->hard_start_xmit = &de4x5_queue_pkt; @@ -3946,7 +3942,7 @@ create_packet(struct net_device *dev, char *frame, int len) static int EISA_signature(char *name, struct device *device) { - int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); + int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); struct eisa_device *edev; *name = '\0'; @@ -3967,7 +3963,7 @@ EISA_signature(char *name, struct device *device) static int PCI_signature(char *name, struct de4x5_private *lp) { - int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); + int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); if (lp->chipset == DC21040) { strcpy(name, "DE434/5"); @@ -5073,7 +5069,7 @@ mii_get_phy(struct net_device *dev) { struct de4x5_private *lp = netdev_priv(dev); u_long iobase = dev->base_addr; - int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table); + int i, j, k, n, limit=ARRAY_SIZE(phy_info); int id; lp->active = 0; @@ -5469,19 +5465,16 @@ static void de4x5_dbg_srom(struct de4x5_srom *p) { int i; + DECLARE_MAC_BUF(mac); if (de4x5_debug & DEBUG_SROM) { printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id)); printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id)); printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc)); printk("SROM version: %02x\n", (u_char)(p->version)); - printk("# controllers: %02x\n", (u_char)(p->num_controllers)); + printk("# controllers: %02x\n", (u_char)(p->num_controllers)); - printk("Hardware Address: "); - for (i=0;i<ETH_ALEN-1;i++) { - printk("%02x:", (u_char)*(p->ieee_addr+i)); - } - printk("%02x\n", (u_char)*(p->ieee_addr+i)); + printk("Hardware Address: %s\n", print_mac(mac, p->ieee_addr)); printk("CRC checksum: %04x\n", (u_short)(p->chksum)); for (i=0; i<64; i++) { printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); @@ -5495,21 +5488,12 @@ static void de4x5_dbg_rx(struct sk_buff *skb, int len) { int i, j; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); if (de4x5_debug & DEBUG_RX) { - printk("R: %02x:%02x:%02x:%02x:%02x:%02x <- %02x:%02x:%02x:%02x:%02x:%02x len/SAP:%02x%02x [%d]\n", - (u_char)skb->data[0], - (u_char)skb->data[1], - (u_char)skb->data[2], - (u_char)skb->data[3], - (u_char)skb->data[4], - (u_char)skb->data[5], - (u_char)skb->data[6], - (u_char)skb->data[7], - (u_char)skb->data[8], - (u_char)skb->data[9], - (u_char)skb->data[10], - (u_char)skb->data[11], + printk("R: %s <- %s len/SAP:%02x%02x [%d]\n", + print_mac(mac, skb->data), print_mac(mac2, &skb->data[6]), (u_char)skb->data[12], (u_char)skb->data[13], len); diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index dab74feb44bc..ca90566d5bcd 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -362,6 +362,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, struct net_device *dev; u32 pci_pmr; int i, err; + DECLARE_MAC_BUF(mac); DMFE_DBUG(0, "dmfe_init_one()", 0); @@ -372,7 +373,6 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*db)); if (dev == NULL) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { @@ -471,13 +471,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, if (err) goto err_out_res; - printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,", - dev->name, - ent->driver_data >> 16, - pci_name(pdev)); - for (i = 0; i < 6; i++) - printk("%c%02x", i ? 
':' : ' ', dev->dev_addr[i]); - printk(", irq %d.\n", dev->irq); + printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, " + "%s, irq %d.\n", + dev->name, + ent->driver_data >> 16, + pci_name(pdev), + print_mac(mac, dev->dev_addr), + dev->irq); pci_set_master(pdev); diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 53efd6694e75..365331446387 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c @@ -103,28 +103,29 @@ int tulip_refill_rx(struct net_device *dev) void oom_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; - netif_rx_schedule(dev); + struct tulip_private *tp = netdev_priv(dev); + netif_rx_schedule(dev, &tp->napi); } -int tulip_poll(struct net_device *dev, int *budget) +int tulip_poll(struct napi_struct *napi, int budget) { - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = container_of(napi, struct tulip_private, napi); + struct net_device *dev = tp->dev; int entry = tp->cur_rx % RX_RING_SIZE; - int rx_work_limit = *budget; + int work_done = 0; +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION int received = 0; +#endif if (!netif_running(dev)) goto done; - if (rx_work_limit > dev->quota) - rx_work_limit = dev->quota; - #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION /* that one buffer is needed for mit activation; or might be a bug in the ring buffer code; check later -- JHS*/ - if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--; + if (budget >=RX_RING_SIZE) budget--; #endif if (tulip_debug > 4) @@ -144,14 +145,13 @@ int tulip_poll(struct net_device *dev, int *budget) while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { s32 status = le32_to_cpu(tp->rx_ring[entry].status); - if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) break; if (tulip_debug > 5) printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", dev->name, entry, status); - if (--rx_work_limit < 0) + if (work_done++ >= budget) goto not_done; if ((status & 0x38008300) != 0x0300) { @@ -238,7 +238,9 @@ int tulip_poll(struct net_device *dev, int *budget) tp->stats.rx_packets++; tp->stats.rx_bytes += pkt_len; } - received++; +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + received++; +#endif entry = (++tp->cur_rx) % RX_RING_SIZE; if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) @@ -296,17 +298,15 @@ done: #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ - dev->quota -= received; - *budget -= received; - tulip_refill_rx(dev); /* If RX ring is not full we are out of memory. */ - if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; + if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + goto oom; /* Remove us from polling list and enable RX intr. */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: @@ -320,28 +320,20 @@ done: * processed irqs. But it must not result in losing events. */ - return 0; + return work_done; not_done: - if (!received) { - - received = dev->quota; /* Not to happen */ - } - dev->quota -= received; - *budget -= received; - if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) tulip_refill_rx(dev); - if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; - - return 1; + if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + goto oom; + return work_done; oom: /* Executed with RX ints disabled */ - /* Start timer, stop polling, but do not enable rx interrupts. 
*/ mod_timer(&tp->oom_timer, jiffies+1); @@ -350,9 +342,9 @@ done: * before we did netif_rx_complete(). See? We would lose it. */ /* remove ourselves from the polling list */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); - return 0; + return work_done; } #else /* CONFIG_TULIP_NAPI */ @@ -534,7 +526,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance) rxd++; /* Mask RX intrs and add the device to poll list. */ iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &tp->napi); if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) break; diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 16f26a8364f0..3f69f53d7768 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h @@ -178,18 +178,18 @@ enum tulip_busconfig_bits { /* The Tulip Rx and Tx buffer descriptors. */ struct tulip_rx_desc { - s32 status; - s32 length; - u32 buffer1; - u32 buffer2; + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; }; struct tulip_tx_desc { - s32 status; - s32 length; - u32 buffer1; - u32 buffer2; /* We use only buffer 1. */ + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; /* We use only buffer 1. */ }; @@ -353,6 +353,7 @@ struct tulip_private { int chip_id; int revision; int flags; + struct napi_struct napi; struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ struct timer_list oom_timer; /* Out of memory timer. */ @@ -429,7 +430,7 @@ extern int tulip_rx_copybreak; irqreturn_t tulip_interrupt(int irq, void *dev_instance); int tulip_refill_rx(struct net_device *dev); #ifdef CONFIG_TULIP_NAPI -int tulip_poll(struct net_device *dev, int *budget); +int tulip_poll(struct napi_struct *napi, int budget); #endif diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index f87d76981ab7..ee08292bcf85 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -294,6 +294,10 @@ static void tulip_up(struct net_device *dev) int next_tick = 3*HZ; int i; +#ifdef CONFIG_TULIP_NAPI + napi_enable(&tp->napi); +#endif + /* Wake the chip from sleep/snooze mode. 
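The tulip_poll() rewrite above, together with the tsi108_eth hunk earlier and the typhoon hunks further down, follows one conversion recipe: the napi_struct moves into the driver's private data, dev->poll/dev->weight are replaced by netif_napi_add(), the poll routine takes (napi, budget) and returns the number of packets processed, and open()/close() bracket it with napi_enable()/napi_disable(). A condensed sketch of the new shape under those assumptions; the driver and symbol names are made up, and the receive loop and hardware interrupt masking are elided:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct net_device *dev;		/* back pointer, as tsi108 and typhoon add */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	struct net_device *dev = priv->dev;
	int work_done = 0;

	/* ... receive at most "budget" packets, bumping work_done for each ... */

	if (work_done < budget) {
		/* All caught up: leave the poll list and re-enable RX interrupts. */
		netif_rx_complete(dev, napi);
	}
	return work_done;	/* the old *budget / dev->quota bookkeeping is gone */
}

static irqreturn_t example_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct example_priv *priv = netdev_priv(dev);

	/* Mask RX interrupts in hardware here, then hand off to the poll loop. */
	if (netif_rx_schedule_prep(dev, &priv->napi))
		__netif_rx_schedule(dev, &priv->napi);
	return IRQ_HANDLED;
}

static void example_setup(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->dev = dev;
	/* Replaces the old dev->poll = example_poll; dev->weight = 16; pair. */
	netif_napi_add(dev, &priv->napi, example_poll, 16);
}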
*/ tulip_set_power_state (tp, 0, 0); @@ -322,8 +326,8 @@ static void tulip_up(struct net_device *dev) tp->dirty_rx = tp->dirty_tx = 0; if (tp->flags & MC_HASH_ONLY) { - u32 addr_low = le32_to_cpu(get_unaligned((u32 *)dev->dev_addr)); - u32 addr_high = le16_to_cpu(get_unaligned((u16 *)(dev->dev_addr+4))); + u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr)); + u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4))); if (tp->chip_id == AX88140) { iowrite32(0, ioaddr + CSR13); iowrite32(addr_low, ioaddr + CSR14); @@ -728,6 +732,10 @@ static void tulip_down (struct net_device *dev) flush_scheduled_work(); +#ifdef CONFIG_TULIP_NAPI + napi_disable(&tp->napi); +#endif + del_timer_sync (&tp->timer); #ifdef CONFIG_TULIP_NAPI del_timer_sync (&tp->oom_timer); @@ -1043,12 +1051,11 @@ static void set_rx_mode(struct net_device *dev) filterbit &= 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); if (tulip_debug > 2) { - printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:" - "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name, - mclist->dmi_addr[0], mclist->dmi_addr[1], - mclist->dmi_addr[2], mclist->dmi_addr[3], - mclist->dmi_addr[4], mclist->dmi_addr[5], - ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); + DECLARE_MAC_BUF(mac); + printk(KERN_INFO "%s: Added filter for %s" + " %8.8x bit %d.\n", + dev->name, print_mac(mac, mclist->dmi_addr), + ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); } } if (mc_filter[0] == tp->mc_filter[0] && @@ -1248,6 +1255,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, const char *chip_name = tulip_tbl[chip_idx].chip_name; unsigned int eeprom_missing = 0; unsigned int force_csr0 = 0; + DECLARE_MAC_BUF(mac); #ifndef MODULE static int did_version; /* Already printed version info. */ @@ -1337,7 +1345,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, " @@ -1436,13 +1443,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, do value = ioread32(ioaddr + CSR9); while (value < 0 && --boguscnt > 0); - put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i); + put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i); sum += value & 0xffff; } } else if (chip_idx == COMET) { /* No need to read the EEPROM. */ - put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr); - put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4)); + put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr); + put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4)); for (i = 0; i < 6; i ++) sum += dev->dev_addr[i]; } else { @@ -1471,14 +1478,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, sa_offset = 2; /* Grrr, damn Matrox boards. */ multiport_cnt = 4; } -#ifdef CONFIG_DDB5477 - if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) { - /* DDB5477 MAC address in first EEPROM locations. 
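The descriptor structures in this patch (de_desc, de4x5_desc, tulip_rx_desc/tulip_tx_desc above, and the dmfe, uli526x and typhoon layouts below) switch from plain u32/s32 fields to __le32, and the dev_addr/EEPROM accesses gain matching __le16/__le32 casts with cpu_to_le*()/le*_to_cpu() conversions. The annotation exists for sparse's endianness checking; a minimal sketch with made-up structure, field and bit names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rx_desc {
	__le32 status;		/* written by the NIC, always little-endian */
	__le32 buffer;		/* DMA address handed to the NIC */
};

#define EXAMPLE_DESC_OWNED	0x80000000	/* illustrative "owned by NIC" bit */

static void example_give_to_nic(struct example_rx_desc *d, u32 dma_addr)
{
	/* CPU-order values must be converted before the NIC sees them ... */
	d->buffer = cpu_to_le32(dma_addr);
	d->status = cpu_to_le32(EXAMPLE_DESC_OWNED);
}

static int example_nic_done(const struct example_rx_desc *d)
{
	/* ... and converted back before the CPU interprets them.  Assigning a
	 * bare u32 to a __le32 (or vice versa) now draws a sparse warning,
	 * which is what the type change buys. */
	return !(le32_to_cpu(d->status) & EXAMPLE_DESC_OWNED);
}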
*/ - sa_offset = 0; - /* No media table either */ - tp->flags &= ~HAS_MEDIA_TABLE; - } -#endif #ifdef CONFIG_MIPS_COBALT if ((pdev->bus->number == 0) && ((PCI_SLOT(pdev->devfn) == 7) || @@ -1614,8 +1613,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, dev->tx_timeout = tulip_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_TULIP_NAPI - dev->poll = tulip_poll; - dev->weight = 16; + netif_napi_add(dev, &tp->napi, tulip_poll, 16); #endif dev->stop = tulip_close; dev->get_stats = tulip_get_stats; @@ -1641,8 +1639,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, if (eeprom_missing) printk(" EEPROM not present,"); - for (i = 0; i < 6; i++) - printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]); + printk(" %s", print_mac(mac, dev->dev_addr)); printk(", IRQ %d.\n", irq); if (tp->chip_id == PNIC2) diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index ca2548eb7d63..76e55612430b 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -112,13 +112,13 @@ /* Structure/enum declaration ------------------------------- */ struct tx_desc { - u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ + __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ char *tx_buf_ptr; /* Data for us */ struct tx_desc *next_tx_desc; } __attribute__(( aligned(32) )); struct rx_desc { - u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ + __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ struct sk_buff *rx_skb_ptr; /* Data for us */ struct rx_desc *next_rx_desc; } __attribute__(( aligned(32) )); @@ -258,6 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, struct uli526x_board_info *db; /* board information structure */ struct net_device *dev; int i, err; + DECLARE_MAC_BUF(mac); ULI526X_DBUG(0, "uli526x_init_one()", 0); @@ -268,7 +269,6 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*db)); if (dev == NULL) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { @@ -344,7 +344,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, /* read 64 word srom data */ for (i = 0; i < 64; i++) - ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); + ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); /* Set Node address */ if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ @@ -373,11 +373,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, if (err) goto err_out_res; - printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev)); - - for (i = 0; i < 6; i++) - printk("%c%02x", i ? 
':' : ' ', dev->dev_addr[i]); - printk(", irq %d.\n", dev->irq); + printk(KERN_INFO "%s: ULi M%04lx at pci%s, %s, irq %d.\n", + dev->name,ent->driver_data >> 16,pci_name(pdev), + print_mac(mac, dev->dev_addr), dev->irq); pci_set_master(pdev); @@ -666,11 +664,6 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) unsigned long ioaddr = dev->base_addr; unsigned long flags; - if (!dev) { - ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0); - return IRQ_NONE; - } - spin_lock_irqsave(&db->lock, flags); outl(0, ioaddr + DCR7); @@ -1110,19 +1103,15 @@ static void uli526x_timer(unsigned long data) /* - * Dynamic reset the ULI526X board * Stop ULI526X board * Free Tx/Rx allocated memory - * Reset ULI526X board - * Re-initialize ULI526X board + * Init system variable */ -static void uli526x_dynamic_reset(struct net_device *dev) +static void uli526x_reset_prepare(struct net_device *dev) { struct uli526x_board_info *db = netdev_priv(dev); - ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); - /* Sopt MAC controller */ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ update_cr6(db->cr6_data, dev->base_addr); @@ -1141,6 +1130,22 @@ static void uli526x_dynamic_reset(struct net_device *dev) db->link_failed = 1; db->init=1; db->wait_reset = 0; +} + + +/* + * Dynamic reset the ULI526X board + * Stop ULI526X board + * Free Tx/Rx allocated memory + * Reset ULI526X board + * Re-initialize ULI526X board + */ + +static void uli526x_dynamic_reset(struct net_device *dev) +{ + ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); + + uli526x_reset_prepare(dev); /* Re-initialize ULI526X board */ uli526x_init(dev); @@ -1150,6 +1155,88 @@ static void uli526x_dynamic_reset(struct net_device *dev) } +#ifdef CONFIG_PM + +/* + * Suspend the interface. + */ + +static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + pci_power_t power_state; + int err; + + ULI526X_DBUG(0, "uli526x_suspend", 0); + + if (!netdev_priv(dev)) + return 0; + + pci_save_state(pdev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + uli526x_reset_prepare(dev); + + power_state = pci_choose_state(pdev, state); + pci_enable_wake(pdev, power_state, 0); + err = pci_set_power_state(pdev, power_state); + if (err) { + netif_device_attach(dev); + /* Re-initialize ULI526X board */ + uli526x_init(dev); + /* Restart upper layer interface */ + netif_wake_queue(dev); + } + + return err; +} + +/* + * Resume the interface. 
+ */ + +static int uli526x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + int err; + + ULI526X_DBUG(0, "uli526x_resume", 0); + + if (!netdev_priv(dev)) + return 0; + + pci_restore_state(pdev); + + if (!netif_running(dev)) + return 0; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + printk(KERN_WARNING "%s: Could not put device into D0\n", + dev->name); + return err; + } + + netif_device_attach(dev); + /* Re-initialize ULI526X board */ + uli526x_init(dev); + /* Restart upper layer interface */ + netif_wake_queue(dev); + + return 0; +} + +#else /* !CONFIG_PM */ + +#define uli526x_suspend NULL +#define uli526x_resume NULL + +#endif /* !CONFIG_PM */ + + /* * free all allocated rx buffer */ @@ -1512,7 +1599,6 @@ static void uli526x_process_mode(struct uli526x_board_info *db) case ULI526X_100MFD: phy_reg = 0x2100; break; } phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); - phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); } } } @@ -1689,6 +1775,8 @@ static struct pci_driver uli526x_driver = { .id_table = uli526x_pci_tbl, .probe = uli526x_init_one, .remove = __devexit_p(uli526x_remove_one), + .suspend = uli526x_suspend, + .resume = uli526x_resume, }; MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw"); diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 5824f6a35495..3c8e3b63be07 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -354,6 +354,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, int irq; int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; void __iomem *ioaddr; + DECLARE_MAC_BUF(mac); i = pci_enable_device(pdev); if (i) return i; @@ -370,7 +371,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, DRV_NAME)) @@ -381,7 +381,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, goto err_out_free_res; for (i = 0; i < 3; i++) - ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i)); + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); /* Reset the chip to erase previous misconfiguration. No hold time required! */ @@ -434,11 +434,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, if (i) goto err_out_cleardev; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, pci_id_tbl[chip_idx].name, ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, + print_mac(mac, dev->dev_addr), irq); if (np->drv_flags & CanHaveMII) { int phy, phy_idx = 0; @@ -1246,16 +1244,16 @@ static int netdev_rx(struct net_device *dev) } #ifndef final_version /* Remove after testing. */ /* You will want this info for the initial debug. 
*/ - if (debug > 5) - printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x " - "%d.%d.%d.%d.\n", - skb->data[0], skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5], skb->data[6], skb->data[7], - skb->data[8], skb->data[9], skb->data[10], - skb->data[11], skb->data[12], skb->data[13], - skb->data[14], skb->data[15], skb->data[16], - skb->data[17]); + if (debug > 5) { + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG " Rx data %s %s" + " %2.2x%2.2x %d.%d.%d.%d.\n", + print_mac(mac, &skb->data[0]), print_mac(mac2, &skb->data[6]), + skb->data[12], skb->data[13], + skb->data[14], skb->data[15], skb->data[16], skb->data[17]); + } #endif skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); @@ -1452,8 +1450,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 16a54e6b8d4f..70befe33e454 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -252,7 +252,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ goto tx_buf_fail; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -271,7 +270,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ dev->hard_start_xmit = &xircom_start_xmit; dev->stop = &xircom_close; dev->get_stats = &xircom_get_stats; - dev->priv = private; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &xircom_poll_controller; #endif @@ -1076,6 +1074,7 @@ static void read_mac_address(struct xircom_private *card) unsigned char j, tuple, link, data_id, data_count; unsigned long flags; int i; + DECLARE_MAC_BUF(mac); enter("read_mac_address"); @@ -1105,11 +1104,7 @@ static void read_mac_address(struct xircom_private *card) } } spin_unlock_irqrestore(&card->lock, flags); -#ifdef DEBUG - for (i = 0; i < 6; i++) - printk("%c%2.2X", i ? ':' : ' ', card->dev->dev_addr[i]); - printk("\n"); -#endif + pr_debug(" %s\n", print_mac(mac, card->dev->dev_addr)); leave("read_mac_address"); } diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index fc439f333350..c3f8e303c6c7 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c @@ -547,7 +547,6 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->base_addr = ioaddr; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 62b2b3005019..1f7644695976 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -62,6 +62,7 @@ #include <linux/if_ether.h> #include <linux/if_tun.h> #include <linux/crc32.h> +#include <net/net_namespace.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -109,7 +110,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* We won't see all dropped packets individually, so overrun * error is more appropriate. */ - tun->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } else { /* Single queue mode. * Driver handles dropping of all packets itself. 
*/ @@ -128,7 +129,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) return 0; drop: - tun->stats.tx_dropped++; + dev->stats.tx_dropped++; kfree_skb(skb); return 0; } @@ -158,23 +159,28 @@ tun_net_mclist(struct net_device *dev) struct tun_struct *tun = netdev_priv(dev); const struct dev_mc_list *mclist; int i; + DECLARE_MAC_BUF(mac); DBG(KERN_DEBUG "%s: tun_net_mclist: mc_count %d\n", dev->name, dev->mc_count); memset(tun->chr_filter, 0, sizeof tun->chr_filter); for (i = 0, mclist = dev->mc_list; i < dev->mc_count && mclist != NULL; i++, mclist = mclist->next) { add_multi(tun->net_filter, mclist->dmi_addr); - DBG(KERN_DEBUG "%s: tun_net_mclist: %x:%x:%x:%x:%x:%x\n", - dev->name, - mclist->dmi_addr[0], mclist->dmi_addr[1], mclist->dmi_addr[2], - mclist->dmi_addr[3], mclist->dmi_addr[4], mclist->dmi_addr[5]); + DBG(KERN_DEBUG "%s: tun_net_mclist: %s\n", + dev->name, print_mac(mac, mclist->dmi_addr)); } } -static struct net_device_stats *tun_net_stats(struct net_device *dev) +#define MIN_MTU 68 +#define MAX_MTU 65535 + +static int +tun_net_change_mtu(struct net_device *dev, int new_mtu) { - struct tun_struct *tun = netdev_priv(dev); - return &tun->stats; + if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU) + return -EINVAL; + dev->mtu = new_mtu; + return 0; } /* Initialize net device. */ @@ -188,6 +194,7 @@ static void tun_net_init(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; + dev->change_mtu = tun_net_change_mtu; /* Zero header length */ dev->type = ARPHRD_NONE; @@ -200,6 +207,7 @@ static void tun_net_init(struct net_device *dev) dev->set_multicast_list = tun_net_mclist; ether_setup(dev); + dev->change_mtu = tun_net_change_mtu; /* random address already created for us by tun_set_iff, use it */ memcpy(dev->dev_addr, tun->dev_addr, min(sizeof(tun->dev_addr), sizeof(dev->dev_addr)) ); @@ -249,14 +257,14 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, align = NET_IP_ALIGN; if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { - tun->stats.rx_dropped++; + tun->dev->stats.rx_dropped++; return -ENOMEM; } if (align) skb_reserve(skb, align); if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { - tun->stats.rx_dropped++; + tun->dev->stats.rx_dropped++; kfree_skb(skb); return -EFAULT; } @@ -278,8 +286,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, netif_rx_ni(skb); tun->dev->last_rx = jiffies; - tun->stats.rx_packets++; - tun->stats.rx_bytes += len; + tun->dev->stats.rx_packets++; + tun->dev->stats.rx_bytes += len; return count; } @@ -335,8 +343,8 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, skb_copy_datagram_iovec(skb, 0, iv, len); total += len; - tun->stats.tx_packets++; - tun->stats.tx_bytes += len; + tun->dev->stats.tx_packets++; + tun->dev->stats.tx_bytes += len; return total; } @@ -349,6 +357,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb; ssize_t len, ret = 0; + DECLARE_MAC_BUF(mac); if (!tun) return -EBADFD; @@ -403,16 +412,14 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, (addr[0] == 0x33 && addr[1] == 0x33)) && ((tun->if_flags & IFF_ALLMULTI) || (tun->chr_filter[bit_nr >> 5] & (1 << (bit_nr & 31)))))) { - DBG(KERN_DEBUG "%s: tun_chr_readv: accepted: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, addr[0], addr[1], addr[2], - addr[3], addr[4], addr[5]); + DBG(KERN_DEBUG "%s: tun_chr_readv: accepted: %s\n", + 
tun->dev->name, print_mac(mac, addr)); ret = tun_put_user(tun, skb, (struct iovec *) iv, len); kfree_skb(skb); break; } else { - DBG(KERN_DEBUG "%s: tun_chr_readv: rejected: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, addr[0], addr[1], addr[2], - addr[3], addr[4], addr[5]); + DBG(KERN_DEBUG "%s: tun_chr_readv: rejected: %s\n", + tun->dev->name, print_mac(mac, addr)); kfree_skb(skb); continue; } @@ -434,11 +441,9 @@ static void tun_setup(struct net_device *dev) tun->owner = -1; tun->group = -1; - SET_MODULE_OWNER(dev); dev->open = tun_net_open; dev->hard_start_xmit = tun_net_xmit; dev->stop = tun_net_close; - dev->get_stats = tun_net_stats; dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = free_netdev; } @@ -475,7 +480,7 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr) !capable(CAP_NET_ADMIN)) return -EPERM; } - else if (__dev_get_by_name(ifr->ifr_name)) + else if (__dev_get_by_name(&init_net, ifr->ifr_name)) return -EINVAL; else { char *name; @@ -557,6 +562,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, struct tun_struct *tun = file->private_data; void __user* argp = (void __user*)arg; struct ifreq ifr; + DECLARE_MAC_BUF(mac); if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) if (copy_from_user(&ifr, argp, sizeof ifr)) @@ -685,22 +691,16 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, /** Add the specified group to the character device's multicast filter * list. */ add_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data); - DBG(KERN_DEBUG "%s: add multi: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, - (u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1], - (u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3], - (u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]); + DBG(KERN_DEBUG "%s: add multi: %s\n", + tun->dev->name, print_mac(mac, ifr.ifr_hwaddr.sa_data)); return 0; case SIOCDELMULTI: /** Remove the specified group from the character device's multicast * filter list. 
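The tun.c hunks above drop the driver-private net_device_stats copy and the tun_net_stats()/dev->get_stats hook, counting straight into the stats structure embedded in struct net_device instead; the removal relies on the networking core's default get_stats, which reports the embedded &dev->stats. A minimal sketch of the resulting transmit-path pattern, with illustrative names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... queue skb to the hardware (or, as in tun, to the character
	 * device reader) ... */

	/* Count into the embedded counters; no private copy, no get_stats. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	return 0;
}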
*/ del_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data); - DBG(KERN_DEBUG "%s: del multi: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, - (u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1], - (u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3], - (u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]); + DBG(KERN_DEBUG "%s: del multi: %s\n", + tun->dev->name, print_mac(mac, ifr.ifr_hwaddr.sa_data)); return 0; default: diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 03587205546e..72e5e9be7e99 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -284,6 +284,7 @@ struct typhoon { struct basic_ring rxLoRing; struct pci_dev * pdev; struct net_device * dev; + struct napi_struct napi; spinlock_t state_lock; struct vlan_group * vlgrp; struct basic_ring rxHiRing; @@ -299,9 +300,9 @@ struct typhoon { const char * name; struct typhoon_shared * shared; dma_addr_t shared_dma; - u16 xcvr_select; - u16 wol_events; - u32 offload; + __le16 xcvr_select; + __le16 wol_events; + __le32 offload; /* unused stuff (future use) */ int capabilities; @@ -827,7 +828,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) first_txd->processFlags |= TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; first_txd->processFlags |= - cpu_to_le32(htons(vlan_tx_tag_get(skb)) << + cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) << TYPHOON_TX_PF_VLAN_TAG_SHIFT); } @@ -919,7 +920,7 @@ typhoon_set_rx_mode(struct net_device *dev) struct typhoon *tp = netdev_priv(dev); struct cmd_desc xp_cmd; u32 mc_filter[2]; - u16 filter; + __le16 filter; filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; if(dev->flags & IFF_PROMISC) { @@ -1130,7 +1131,7 @@ typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct typhoon *tp = netdev_priv(dev); struct cmd_desc xp_cmd; - int xcvr; + __le16 xcvr; int err; err = -EINVAL; @@ -1235,11 +1236,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = { .set_wol = typhoon_set_wol, .get_link = ethtool_op_get_link, .get_rx_csum = typhoon_get_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_ringparam = typhoon_get_ringparam, }; @@ -1538,7 +1536,7 @@ out_timeout: static u32 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, - volatile u32 * index) + volatile __le32 * index) { u32 lastRead = txRing->lastRead; struct tx_desc *tx; @@ -1574,7 +1572,7 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, static void typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, - volatile u32 * index) + volatile __le32 * index) { u32 lastRead; int numDesc = MAX_SKB_FRAGS + 1; @@ -1664,8 +1662,8 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx) } static int -typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, - volatile u32 * cleared, int budget) +typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready, + volatile __le32 * cleared, int budget) { struct rx_desc *rx; struct sk_buff *skb, *new_skb; @@ -1675,7 +1673,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, u32 rxaddr; int pkt_len; u32 idx; - u32 csum_bits; + __le32 csum_bits; int received; received = 0; @@ -1759,12 +1757,12 @@ typhoon_fill_free_ring(struct typhoon *tp) } static int -typhoon_poll(struct net_device *dev, int *total_budget) +typhoon_poll(struct napi_struct 
*napi, int budget) { - struct typhoon *tp = netdev_priv(dev); + struct typhoon *tp = container_of(napi, struct typhoon, napi); + struct net_device *dev = tp->dev; struct typhoon_indexes *indexes = tp->indexes; - int orig_budget = *total_budget; - int budget, work_done, done; + int work_done; rmb(); if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) @@ -1773,30 +1771,16 @@ typhoon_poll(struct net_device *dev, int *total_budget) if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); - if(orig_budget > dev->quota) - orig_budget = dev->quota; - - budget = orig_budget; work_done = 0; - done = 1; if(indexes->rxHiCleared != indexes->rxHiReady) { - work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, + work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, &indexes->rxHiCleared, budget); - budget -= work_done; } if(indexes->rxLoCleared != indexes->rxLoReady) { work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, - &indexes->rxLoCleared, budget); - } - - if(work_done) { - *total_budget -= work_done; - dev->quota -= work_done; - - if(work_done >= orig_budget) - done = 0; + &indexes->rxLoCleared, budget - work_done); } if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { @@ -1804,14 +1788,14 @@ typhoon_poll(struct net_device *dev, int *total_budget) typhoon_fill_free_ring(tp); } - if(done) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); } - return (done ? 0 : 1); + return work_done; } static irqreturn_t @@ -1828,10 +1812,10 @@ typhoon_interrupt(int irq, void *dev_instance) iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); - if(netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &tp->napi)) { iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(ioaddr); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &tp->napi); } else { printk(KERN_ERR "%s: Error, poll already scheduled\n", dev->name); @@ -1856,7 +1840,7 @@ typhoon_free_rx_rings(struct typhoon *tp) } static int -typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events) +typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) { struct pci_dev *pdev = tp->pdev; void __iomem *ioaddr = tp->ioaddr; @@ -1944,8 +1928,8 @@ typhoon_start_runtime(struct typhoon *tp) goto error_out; INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); - xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0])); - xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2])); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); if(err < 0) goto error_out; @@ -2119,9 +2103,13 @@ typhoon_open(struct net_device *dev) if(err < 0) goto out_sleep; + napi_enable(&tp->napi); + err = typhoon_start_runtime(tp); - if(err < 0) + if(err < 0) { + napi_disable(&tp->napi); goto out_irq; + } netif_start_queue(dev); return 0; @@ -2150,6 +2138,7 @@ typhoon_close(struct net_device *dev) struct typhoon *tp = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&tp->napi); if(typhoon_stop_runtime(tp, WaitSleep) < 0) printk(KERN_ERR "%s: unable to stop runtime\n", dev->name); @@ -2240,8 +2229,8 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state) } INIT_COMMAND_NO_RESPONSE(&xp_cmd, 
TYPHOON_CMD_SET_MAC_ADDRESS); - xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0])); - xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2])); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { printk(KERN_ERR "%s: unable to set mac address in suspend\n", dev->name); @@ -2327,8 +2316,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dma_addr_t shared_dma; struct cmd_desc xp_cmd; struct resp_desc xp_resp[3]; - int i; int err = 0; + DECLARE_MAC_BUF(mac); if(!did_version++) printk(KERN_INFO "%s", version); @@ -2340,7 +2329,6 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto error_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); err = pci_enable_device(pdev); @@ -2477,8 +2465,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto error_out_reset; } - *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); - *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); + *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); + *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); if(!is_valid_ether_addr(dev->dev_addr)) { printk(ERR_PFX "%s: Could not obtain valid ethernet address, " @@ -2521,8 +2509,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->stop = typhoon_close; dev->set_multicast_list = typhoon_set_rx_mode; dev->tx_timeout = typhoon_tx_timeout; - dev->poll = typhoon_poll; - dev->weight = 16; + netif_napi_add(dev, &tp->napi, typhoon_poll, 16); dev->watchdog_timeo = TX_TIMEOUT; dev->get_stats = typhoon_get_stats; dev->set_mac_address = typhoon_set_mac_address; @@ -2545,13 +2532,11 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - printk(KERN_INFO "%s: %s at %s 0x%llx, ", + printk(KERN_INFO "%s: %s at %s 0x%llx, %s\n", dev->name, typhoon_card_info[card_id].name, use_mmio ? "MMIO" : "IO", - (unsigned long long)pci_resource_start(pdev, use_mmio)); - for(i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x\n", dev->dev_addr[i]); + (unsigned long long)pci_resource_start(pdev, use_mmio), + print_mac(mac, dev->dev_addr)); /* xp_resp still contains the response to the READ_VERSIONS command. * For debugging, let the user know what version he has. diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h index 2f14a050051b..19df20889b82 100644 --- a/drivers/net/typhoon.h +++ b/drivers/net/typhoon.h @@ -64,19 +64,19 @@ struct transmit_ring { */ struct typhoon_indexes { /* The first four are written by the host, and read by the NIC */ - volatile u32 rxHiCleared; - volatile u32 rxLoCleared; - volatile u32 rxBuffReady; - volatile u32 respCleared; + volatile __le32 rxHiCleared; + volatile __le32 rxLoCleared; + volatile __le32 rxBuffReady; + volatile __le32 respCleared; /* The remaining are written by the NIC, and read by the host */ - volatile u32 txLoCleared; - volatile u32 txHiCleared; - volatile u32 rxLoReady; - volatile u32 rxBuffCleared; - volatile u32 cmdCleared; - volatile u32 respReady; - volatile u32 rxHiReady; + volatile __le32 txLoCleared; + volatile __le32 txHiCleared; + volatile __le32 rxLoReady; + volatile __u32 rxBuffCleared; /* AV: really? 
*/ + volatile __le32 cmdCleared; + volatile __le32 respReady; + volatile __le32 rxHiReady; } __attribute__ ((packed)); /* The host<->Typhoon interface @@ -100,31 +100,31 @@ struct typhoon_indexes { * be zero. */ struct typhoon_interface { - u32 ringIndex; - u32 ringIndexHi; - u32 txLoAddr; - u32 txLoAddrHi; - u32 txLoSize; - u32 txHiAddr; - u32 txHiAddrHi; - u32 txHiSize; - u32 rxLoAddr; - u32 rxLoAddrHi; - u32 rxLoSize; - u32 rxBuffAddr; - u32 rxBuffAddrHi; - u32 rxBuffSize; - u32 cmdAddr; - u32 cmdAddrHi; - u32 cmdSize; - u32 respAddr; - u32 respAddrHi; - u32 respSize; - u32 zeroAddr; - u32 zeroAddrHi; - u32 rxHiAddr; - u32 rxHiAddrHi; - u32 rxHiSize; + __le32 ringIndex; + __le32 ringIndexHi; + __le32 txLoAddr; + __le32 txLoAddrHi; + __le32 txLoSize; + __le32 txHiAddr; + __le32 txHiAddrHi; + __le32 txHiSize; + __le32 rxLoAddr; + __le32 rxLoAddrHi; + __le32 rxLoSize; + __le32 rxBuffAddr; + __le32 rxBuffAddrHi; + __le32 rxBuffSize; + __le32 cmdAddr; + __le32 cmdAddrHi; + __le32 cmdSize; + __le32 respAddr; + __le32 respAddrHi; + __le32 respSize; + __le32 zeroAddr; + __le32 zeroAddrHi; + __le32 rxHiAddr; + __le32 rxHiAddrHi; + __le32 rxHiSize; } __attribute__ ((packed)); /* The Typhoon transmit/fragment descriptor @@ -165,10 +165,10 @@ struct tx_desc { #define TYPHOON_RX_ERROR 0x40 #define TYPHOON_DESC_VALID 0x80 u8 numDesc; - u16 len; + __le16 len; u32 addr; u32 addrHi; - u32 processFlags; + __le32 processFlags; #define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001) #define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002) #define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004) @@ -197,12 +197,12 @@ struct tx_desc { struct tcpopt_desc { u8 flags; u8 numDesc; - u16 mss_flags; + __le16 mss_flags; #define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000) #define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000) - u32 respAddrLo; - u32 bytesTx; - u32 status; + __le32 respAddrLo; + __le32 bytesTx; + __le32 status; } __attribute__ ((packed)); /* The IPSEC Offload descriptor @@ -216,12 +216,12 @@ struct tcpopt_desc { struct ipsec_desc { u8 flags; u8 numDesc; - u16 ipsecFlags; + __le16 ipsecFlags; #define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000) #define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001) - u32 sa1; - u32 sa2; - u32 reserved; + __le32 sa1; + __le32 sa2; + __le32 reserved; } __attribute__ ((packed)); /* The Typhoon receive descriptor (Updated by NIC) @@ -239,10 +239,10 @@ struct ipsec_desc { struct rx_desc { u8 flags; u8 numDesc; - u16 frameLen; + __le16 frameLen; u32 addr; u32 addrHi; - u32 rxStatus; + __le32 rxStatus; #define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000) #define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001) #define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002) @@ -264,10 +264,10 @@ struct rx_desc { #define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100) #define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200) #define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400) - u16 filterResults; + __le16 filterResults; #define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff) #define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000) - u16 ipsecResults; + __le16 ipsecResults; #define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001) #define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002) #define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004) @@ -278,7 +278,7 @@ struct rx_desc { #define TYPHOON_RX_INNER_ESP_FAIL 
__constant_cpu_to_le16(0x0080) #define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100) #define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200) - u32 vlanTag; + __be32 vlanTag; } __attribute__ ((packed)); /* The Typhoon free buffer descriptor, used to give a buffer to the NIC @@ -292,8 +292,8 @@ struct rx_desc { * from the NIC */ struct rx_free { - u32 physAddr; - u32 physAddrHi; + __le32 physAddr; + __le32 physAddrHi; u32 virtAddr; u32 virtAddrHi; } __attribute__ ((packed)); @@ -312,7 +312,7 @@ struct rx_free { struct cmd_desc { u8 flags; u8 numDesc; - u16 cmd; + __le16 cmd; #define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001) #define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002) #define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003) @@ -339,9 +339,9 @@ struct cmd_desc { #define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067) #define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069) u16 seqNo; - u16 parm1; - u32 parm2; - u32 parm3; + __le16 parm1; + __le32 parm2; + __le32 parm3; } __attribute__ ((packed)); /* The Typhoon response descriptor, see command descriptor for details @@ -349,11 +349,11 @@ struct cmd_desc { struct resp_desc { u8 flags; u8 numDesc; - u16 cmd; - u16 seqNo; - u16 parm1; - u32 parm2; - u32 parm3; + __le16 cmd; + __le16 seqNo; + __le16 parm1; + __le32 parm2; + __le32 parm3; } __attribute__ ((packed)); #define INIT_COMMAND_NO_RESPONSE(x, command) \ @@ -386,31 +386,31 @@ struct resp_desc { struct stats_resp { u8 flags; u8 numDesc; - u16 cmd; - u16 seqNo; - u16 unused; - u32 txPackets; - u64 txBytes; - u32 txDeferred; - u32 txLateCollisions; - u32 txCollisions; - u32 txCarrierLost; - u32 txMultipleCollisions; - u32 txExcessiveCollisions; - u32 txFifoUnderruns; - u32 txMulticastTxOverflows; - u32 txFiltered; - u32 rxPacketsGood; - u64 rxBytesGood; - u32 rxFifoOverruns; - u32 BadSSD; - u32 rxCrcErrors; - u32 rxOversized; - u32 rxBroadcast; - u32 rxMulticast; - u32 rxOverflow; - u32 rxFiltered; - u32 linkStatus; + __le16 cmd; + __le16 seqNo; + __le16 unused; + __le32 txPackets; + __le64 txBytes; + __le32 txDeferred; + __le32 txLateCollisions; + __le32 txCollisions; + __le32 txCarrierLost; + __le32 txMultipleCollisions; + __le32 txExcessiveCollisions; + __le32 txFifoUnderruns; + __le32 txMulticastTxOverflows; + __le32 txFiltered; + __le32 rxPacketsGood; + __le64 rxBytesGood; + __le32 rxFifoOverruns; + __le32 BadSSD; + __le32 rxCrcErrors; + __le32 rxOversized; + __le32 rxBroadcast; + __le32 rxMulticast; + __le32 rxOverflow; + __le32 rxFiltered; + __le32 linkStatus; #define TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001) #define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001) #define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000) @@ -420,8 +420,8 @@ struct stats_resp { #define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004) #define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004) #define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000) - u32 unused2; - u32 unused3; + __le32 unused2; + __le32 unused3; } __attribute__ ((packed)); /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) @@ -509,17 +509,17 @@ struct sa_descriptor { */ struct typhoon_file_header { u8 tag[8]; - u32 version; - u32 numSections; - u32 startAddr; - u32 hmacDigest[5]; + __le32 version; + __le32 numSections; + __le32 startAddr; + __le32 hmacDigest[5]; } __attribute__ ((packed)); struct typhoon_section_header { - u32 len; + __le32 len; u16 checksum; u16 reserved; - u32 startAddr; + __le32 startAddr; } 
__attribute__ ((packed)); /* The Typhoon Register offsets diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index e4736a3b1b7a..d00e7d41f6a5 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -43,10 +43,6 @@ #undef DEBUG -#define DRV_DESC "QE UCC Gigabit Ethernet Controller" -#define DRV_NAME "ucc_geth" -#define DRV_VERSION "1.1" - #define ugeth_printk(level, format, arg...) \ printk(level format "\n", ## arg) @@ -64,9 +60,19 @@ #else #define ugeth_vdbg(fmt, args...) do { } while (0) #endif /* UGETH_VERBOSE_DEBUG */ +#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 +void uec_set_ethtool_ops(struct net_device *netdev); + static DEFINE_SPINLOCK(ugeth_lock); +static struct { + u32 msg_enable; +} debug = { -1 }; + +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); + static struct ucc_geth_info ugeth_primary_info = { .uf_info = { .bd_mem_part = MEM_PART_SYSTEM, @@ -104,6 +110,7 @@ static struct ucc_geth_info ugeth_primary_info = { .maxRetransmission = 0xf, .collisionWindow = 0x37, .receiveFlowControl = 1, + .transmitFlowControl = 1, .maxGroupAddrInHash = 4, .maxIndAddrInHash = 4, .prel = 7, @@ -139,7 +146,9 @@ static struct ucc_geth_info ugeth_primary_info = { .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, .largestexternallookupkeysize = QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, - .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE, + .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, @@ -281,7 +290,8 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, for (i = 0; i < num_entries; i++) { if ((snum = qe_get_snum()) < 0) { - ugeth_err("fill_init_enet_entries: Can not get SNUM."); + if (netif_msg_ifup(ugeth)) + ugeth_err("fill_init_enet_entries: Can not get SNUM."); return snum; } if ((i == 0) && skip_page_for_first_entry) @@ -291,8 +301,8 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth, init_enet_offset = qe_muram_alloc(thread_size, thread_alignment); if (IS_ERR_VALUE(init_enet_offset)) { - ugeth_err - ("fill_init_enet_entries: Can not allocate DPRAM memory."); + if (netif_msg_ifup(ugeth)) + ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); qe_put_snum((u8) snum); return -ENOMEM; } @@ -1200,7 +1210,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, return 0; } -static int init_flow_control_params(u32 automatic_flow_control_mode, +int init_flow_control_params(u32 automatic_flow_control_mode, int rx_flow_control_enable, int tx_flow_control_enable, u16 pause_period, @@ -1486,9 +1496,9 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); if (ret_val != 0) { - ugeth_err - ("%s: Preamble length must be between 3 and 7 inclusive.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", + __FUNCTION__); return ret_val; } @@ -1726,7 +1736,8 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) /* check if the UCC number is in range. 
*/ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { - ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); return -EINVAL; } @@ -1754,7 +1765,8 @@ static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode) /* check if the UCC number is in range. */ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { - ugeth_err("%s: ucc_num out of range.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: ucc_num out of range.", __FUNCTION__); return -EINVAL; } @@ -2136,7 +2148,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { if (ugeth->tx_skbuff[i][j]) { dma_unmap_single(NULL, - ((qe_bd_t *)bd)->buf, + ((struct qe_bd *)bd)->buf, (in_be32((u32 *)bd) & BD_LENGTH_MASK), DMA_TO_DEVICE); @@ -2306,7 +2318,9 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || (uf_info->bd_mem_part == MEM_PART_MURAM))) { - ugeth_err("%s: Bad memory partition value.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: Bad memory partition value.", + __FUNCTION__); return -EINVAL; } @@ -2315,9 +2329,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || (ug_info->bdRingLenRx[i] % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { - ugeth_err - ("%s: Rx BD ring length must be multiple of 4," - " no smaller than 8.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err + ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", + __FUNCTION__); return -EINVAL; } } @@ -2325,9 +2340,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) /* Tx BD lengths */ for (i = 0; i < ug_info->numQueuesTx; i++) { if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { - ugeth_err - ("%s: Tx BD ring length must be no smaller than 2.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err + ("%s: Tx BD ring length must be no smaller than 2.", + __FUNCTION__); return -EINVAL; } } @@ -2335,31 +2351,35 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) /* mrblr */ if ((uf_info->max_rx_buf_length == 0) || (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { - ugeth_err - ("%s: max_rx_buf_length must be non-zero multiple of 128.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err + ("%s: max_rx_buf_length must be non-zero multiple of 128.", + __FUNCTION__); return -EINVAL; } /* num Tx queues */ if (ug_info->numQueuesTx > NUM_TX_QUEUES) { - ugeth_err("%s: number of tx queues too large.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: number of tx queues too large.", __FUNCTION__); return -EINVAL; } /* num Rx queues */ if (ug_info->numQueuesRx > NUM_RX_QUEUES) { - ugeth_err("%s: number of rx queues too large.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: number of rx queues too large.", __FUNCTION__); return -EINVAL; } /* l2qt */ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { - ugeth_err - ("%s: VLAN priority table entry must not be" - " larger than number of Rx queues.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err + ("%s: VLAN priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); return -EINVAL; } } @@ -2367,26 +2387,29 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) /* l3qt */ for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) 
{ if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { - ugeth_err - ("%s: IP priority table entry must not be" - " larger than number of Rx queues.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err + ("%s: IP priority table entry must not be" + " larger than number of Rx queues.", + __FUNCTION__); return -EINVAL; } } if (ug_info->cam && !ug_info->ecamptr) { - ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", + __FUNCTION__); return -EINVAL; } if ((ug_info->numStationAddresses != UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && ug_info->rxExtendedFiltering) { - ugeth_err("%s: Number of station addresses greater than 1 " - "not allowed in extended parsing mode.", - __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: Number of station addresses greater than 1 " + "not allowed in extended parsing mode.", + __FUNCTION__); return -EINVAL; } @@ -2399,7 +2422,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); /* Initialize the general fast UCC block. */ if (ucc_fast_init(uf_info, &ugeth->uccf)) { - ugeth_err("%s: Failed to init uccf.", __FUNCTION__); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: Failed to init uccf.", __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2452,7 +2476,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) numThreadsRxNumerical = 8; break; default: - ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Bad number of Rx threads value.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -EINVAL; break; @@ -2475,7 +2501,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) numThreadsTxNumerical = 8; break; default: - ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Bad number of Tx threads value.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -EINVAL; break; @@ -2507,7 +2535,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* For more details see the hardware spec. 
*/ init_flow_control_params(ug_info->aufc, ug_info->receiveFlowControl, - 1, + ug_info->transmitFlowControl, ug_info->pausePeriod, ug_info->extensionField, &uf_regs->upsmr, @@ -2527,8 +2555,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ug_info->backToBackInterFrameGap, &ug_regs->ipgifg); if (ret_val != 0) { - ugeth_err("%s: IPGIFG initialization parameter too large.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: IPGIFG initialization parameter too large.", + __FUNCTION__); ucc_geth_memclean(ugeth); return ret_val; } @@ -2544,7 +2573,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) ug_info->collisionWindow, &ug_regs->hafdup); if (ret_val != 0) { - ugeth_err("%s: Half Duplex initialization parameter too large.", + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Half Duplex initialization parameter too large.", __FUNCTION__); ucc_geth_memclean(ugeth); return ret_val; @@ -2597,9 +2627,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) tx_bd_ring_offset[j]); } if (!ugeth->p_tx_bd_ring[j]) { - ugeth_err - ("%s: Can not allocate memory for Tx bd rings.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate memory for Tx bd rings.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2632,9 +2663,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) rx_bd_ring_offset[j]); } if (!ugeth->p_rx_bd_ring[j]) { - ugeth_err - ("%s: Can not allocate memory for Rx bd rings.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate memory for Rx bd rings.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2648,8 +2680,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) GFP_KERNEL); if (ugeth->tx_skbuff[j] == NULL) { - ugeth_err("%s: Could not allocate tx_skbuff", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Could not allocate tx_skbuff", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2679,8 +2712,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) GFP_KERNEL); if (ugeth->rx_skbuff[j] == NULL) { - ugeth_err("%s: Could not allocate rx_skbuff", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Could not allocate rx_skbuff", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2711,9 +2745,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2733,9 +2768,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) 32 * (numThreadsTxNumerical == 1), UCC_GETH_THREAD_DATA_ALIGNMENT); if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2761,9 +2797,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) sizeof(struct ucc_geth_send_queue_qd), UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for 
p_send_q_mem_reg.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2804,9 +2841,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) qe_muram_alloc(sizeof(struct ucc_geth_scheduler), UCC_GETH_SCHEDULER_ALIGNMENT); if (IS_ERR_VALUE(ugeth->scheduler_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_scheduler.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_scheduler.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2852,9 +2890,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) (struct ucc_geth_tx_firmware_statistics_pram), UCC_GETH_TX_STATISTICS_ALIGNMENT); if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_tx_fw_statistics_pram.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_tx_fw_statistics_pram.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2879,7 +2919,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) test = in_be16(&ugeth->p_tx_glbl_pram->temoder); /* Function code register value to be used later */ - function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL; + function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; /* Required for QE */ /* function code register */ @@ -2891,9 +2931,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2912,9 +2953,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) sizeof(struct ucc_geth_thread_data_rx), UCC_GETH_THREAD_DATA_ALIGNMENT); if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2935,9 +2977,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) (struct ucc_geth_rx_firmware_statistics_pram), UCC_GETH_RX_STATISTICS_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_rx_fw_statistics_pram.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_fw_statistics_pram.", __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -2957,9 +3000,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_rx_irq_coalescing_tbl.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_rx_irq_coalescing_tbl.", __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -3025,9 +3069,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) sizeof(struct 
ucc_geth_rx_prefetched_bds)), UCC_GETH_RX_BD_QUEUES_ALIGNMENT); if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -3102,8 +3147,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* initialize extended filtering */ if (ug_info->rxExtendedFiltering) { if (!ug_info->extendedFilteringChainPointer) { - ugeth_err("%s: Null Extended Filtering Chain Pointer.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Null Extended Filtering Chain Pointer.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -EINVAL; } @@ -3114,9 +3160,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for" - " p_exf_glbl_param.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for" + " p_exf_glbl_param.", __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -3161,9 +3208,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) */ if (!(ugeth->p_init_enet_param_shadow = kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { - ugeth_err - ("%s: Can not allocate memory for" - " p_UccInitEnetParamShadows.", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate memory for" + " p_UccInitEnetParamShadows.", __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -3196,8 +3244,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && (ug_info->largestexternallookupkeysize != QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { - ugeth_err("%s: Invalid largest External Lookup Key Size.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Invalid largest External Lookup Key Size.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -EINVAL; } @@ -3222,8 +3271,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* Rx needs one extra for terminator */ , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, ug_info->riscRx, 1)) != 0) { - ugeth_err("%s: Can not fill p_init_enet_param_shadow.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); ucc_geth_memclean(ugeth); return ret_val; } @@ -3237,8 +3287,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) sizeof(struct ucc_geth_thread_tx_pram), UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, ug_info->riscTx, 0)) != 0) { - ugeth_err("%s: Can not fill p_init_enet_param_shadow.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Can not fill p_init_enet_param_shadow.", + __FUNCTION__); ucc_geth_memclean(ugeth); return ret_val; } @@ -3246,8 +3297,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) /* Load Rx bds with buffers */ for (i = 0; i < ug_info->numQueuesRx; i++) { if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { - ugeth_err("%s: Can not fill Rx bds with buffers.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Can not fill Rx bds with buffers.", + __FUNCTION__); ucc_geth_memclean(ugeth); return ret_val; } @@ -3256,9 +3308,10 @@ static int ucc_geth_startup(struct ucc_geth_private 
*ugeth) /* Allocate InitEnet command parameter structure */ init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); if (IS_ERR_VALUE(init_enet_pram_offset)) { - ugeth_err - ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", - __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err + ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", + __FUNCTION__); ucc_geth_memclean(ugeth); return -ENOMEM; } @@ -3297,14 +3350,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return 0; } -/* returns a net_device_stats structure pointer */ -static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) -{ - struct ucc_geth_private *ugeth = netdev_priv(dev); - - return &(ugeth->stats); -} - /* ucc_geth_timeout gets called when a packet has not been * transmitted after a set amount of time. * For now, assume that clearing out all the structures, and @@ -3315,7 +3360,7 @@ static void ucc_geth_timeout(struct net_device *dev) ugeth_vdbg("%s: IN", __FUNCTION__); - ugeth->stats.tx_errors++; + dev->stats.tx_errors++; ugeth_dump_regs(ugeth); @@ -3343,7 +3388,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irq(&ugeth->lock); - ugeth->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* Start from the next BD that should be filled */ bd = ugeth->txBd[txQ]; @@ -3428,15 +3473,16 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit if (!skb || (!(bd_status & (R_F | R_L))) || (bd_status & R_ERRORS_FATAL)) { - ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x", - __FUNCTION__, __LINE__, (u32) skb); + if (netif_msg_rx_err(ugeth)) + ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", + __FUNCTION__, __LINE__, (u32) skb); if (skb) dev_kfree_skb_any(skb); ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; - ugeth->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { - ugeth->stats.rx_packets++; + dev->stats.rx_packets++; howmany++; /* Prep the skb for the packet */ @@ -3445,7 +3491,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit /* Tell the skb what kind of packet this is */ skb->protocol = eth_type_trans(skb, ugeth->dev); - ugeth->stats.rx_bytes += length; + dev->stats.rx_bytes += length; /* Send the packet up the stack */ #ifdef CONFIG_UGETH_NAPI netif_receive_skb(skb); @@ -3458,8 +3504,9 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit skb = get_new_skb(ugeth, bd); if (!skb) { - ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); - ugeth->stats.rx_dropped++; + if (netif_msg_rx_err(ugeth)) + ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); + dev->stats.rx_dropped++; break; } @@ -3501,7 +3548,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) break; - ugeth->stats.tx_packets++; + dev->stats.tx_packets++; /* Free the sk buffer associated with this TxBD */ dev_kfree_skb_irq(ugeth-> @@ -3527,41 +3574,31 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) } #ifdef CONFIG_UGETH_NAPI -static int ucc_geth_poll(struct net_device *dev, int *budget) +static int ucc_geth_poll(struct napi_struct *napi, int budget) { - struct ucc_geth_private *ugeth = netdev_priv(dev); + struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); + struct net_device *dev = ugeth->dev; struct ucc_geth_info *ug_info; - struct ucc_fast_private *uccf; - int howmany; - u8 i; - int rx_work_limit; - register u32 uccm; + int howmany, i; ug_info = 
ugeth->ug_info; - rx_work_limit = *budget; - if (rx_work_limit > dev->quota) - rx_work_limit = dev->quota; - howmany = 0; + for (i = 0; i < ug_info->numQueuesRx; i++) + howmany += ucc_geth_rx(ugeth, i, budget - howmany); - for (i = 0; i < ug_info->numQueuesRx; i++) { - howmany += ucc_geth_rx(ugeth, i, rx_work_limit); - } - - dev->quota -= howmany; - rx_work_limit -= howmany; - *budget -= howmany; + if (howmany < budget) { + struct ucc_fast_private *uccf; + u32 uccm; - if (rx_work_limit > 0) { - netif_rx_complete(dev); + netif_rx_complete(dev, napi); uccf = ugeth->uccf; uccm = in_be32(uccf->p_uccm); uccm |= UCCE_RX_EVENTS; out_be32(uccf->p_uccm, uccm); } - return (rx_work_limit > 0) ? 0 : 1; + return howmany; } #endif /* CONFIG_UGETH_NAPI */ @@ -3596,10 +3633,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) /* check for receive events that require processing */ if (ucce & UCCE_RX_EVENTS) { #ifdef CONFIG_UGETH_NAPI - if (netif_rx_schedule_prep(dev)) { - uccm &= ~UCCE_RX_EVENTS; + if (netif_rx_schedule_prep(dev, &ugeth->napi)) { + uccm &= ~UCCE_RX_EVENTS; out_be32(uccf->p_uccm, uccm); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &ugeth->napi); } #else rx_mask = UCCE_RXBF_SINGLE_MASK; @@ -3628,10 +3665,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) /* Errors and other events */ if (ucce & UCCE_OTHER) { if (ucce & UCCE_BSY) { - ugeth->stats.rx_errors++; + dev->stats.rx_errors++; } if (ucce & UCCE_TXE) { - ugeth->stats.tx_errors++; + dev->stats.tx_errors++; } } @@ -3649,29 +3686,36 @@ static int ucc_geth_open(struct net_device *dev) /* Test station address */ if (dev->dev_addr[0] & ENET_GROUP_ADDR) { - ugeth_err("%s: Multicast address used for station address" - " - is this what you wanted?", __FUNCTION__); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Multicast address used for station address" + " - is this what you wanted?", __FUNCTION__); return -EINVAL; } err = ucc_struct_init(ugeth); if (err) { - ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name); return err; } +#ifdef CONFIG_UGETH_NAPI + napi_enable(&ugeth->napi); +#endif err = ucc_geth_startup(ugeth); if (err) { - ugeth_err("%s: Cannot configure net device, aborting.", - dev->name); - return err; + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + goto out_err; } err = adjust_enet_interface(ugeth); if (err) { - ugeth_err("%s: Cannot configure net device, aborting.", - dev->name); - return err; + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot configure net device, aborting.", + dev->name); + goto out_err; } /* Set MACSTNADDR1, MACSTNADDR2 */ @@ -3687,8 +3731,9 @@ static int ucc_geth_open(struct net_device *dev) err = init_phy(dev); if (err) { - ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); - return err; + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); + goto out_err; } phy_start(ugeth->phydev); @@ -3697,22 +3742,30 @@ static int ucc_geth_open(struct net_device *dev) request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, "UCC Geth", dev); if (err) { - ugeth_err("%s: Cannot get IRQ for net device, aborting.", - dev->name); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot get IRQ for net device, aborting.", + dev->name); ucc_geth_stop(ugeth); - return err; + goto out_err; } err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); if (err) { - 
ugeth_err("%s: Cannot enable net device, aborting.", dev->name); + if (netif_msg_ifup(ugeth)) + ugeth_err("%s: Cannot enable net device, aborting.", dev->name); ucc_geth_stop(ugeth); - return err; + goto out_err; } netif_start_queue(dev); return err; + +out_err: +#ifdef CONFIG_UGETH_NAPI + napi_disable(&ugeth->napi); +#endif + return err; } /* Stops the kernel queue, and halts the controller */ @@ -3722,6 +3775,10 @@ static int ucc_geth_close(struct net_device *dev) ugeth_vdbg("%s: IN", __FUNCTION__); +#ifdef CONFIG_UGETH_NAPI + napi_disable(&ugeth->napi); +#endif + ucc_geth_stop(ugeth); phy_disconnect(ugeth->phydev); @@ -3732,8 +3789,6 @@ static int ucc_geth_close(struct net_device *dev) return 0; } -const struct ethtool_ops ucc_geth_ethtool_ops = { }; - static phy_interface_t to_phy_interface(const char *phy_connection_type) { if (strcasecmp(phy_connection_type, "mii") == 0) @@ -3790,6 +3845,13 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma return -ENODEV; ug_info = &ugeth_info[ucc_num]; + if (ug_info == NULL) { + if (netif_msg_probe(&debug)) + ugeth_err("%s: [%d] Missing additional data!", + __FUNCTION__, ucc_num); + return -ENODEV; + } + ug_info->uf_info.ucc_num = ucc_num; prop = of_get_property(np, "rx-clock", NULL); @@ -3868,15 +3930,10 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma ug_info->mdio_bus = res.start; - printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", - ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, - ug_info->uf_info.irq); - - if (ug_info == NULL) { - ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__, - ucc_num); - return -ENODEV; - } + if (netif_msg_probe(&debug)) + printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", + ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + ug_info->uf_info.irq); /* Create an ethernet device instance */ dev = alloc_etherdev(sizeof(*ugeth)); @@ -3892,33 +3949,31 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma /* Set the dev->base_addr to the gfar reg region */ dev->base_addr = (unsigned long)(ug_info->uf_info.regs); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, device); /* Fill in the dev structure */ + uec_set_ethtool_ops(dev); dev->open = ucc_geth_open; dev->hard_start_xmit = ucc_geth_start_xmit; dev->tx_timeout = ucc_geth_timeout; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_UGETH_NAPI - dev->poll = ucc_geth_poll; - dev->weight = UCC_GETH_DEV_WEIGHT; + netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); #endif /* CONFIG_UGETH_NAPI */ dev->stop = ucc_geth_close; - dev->get_stats = ucc_geth_get_stats; // dev->change_mtu = ucc_geth_change_mtu; dev->mtu = 1500; dev->set_multicast_list = ucc_geth_set_multi; - dev->ethtool_ops = &ucc_geth_ethtool_ops; - ugeth->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; + ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); ugeth->phy_interface = phy_interface; ugeth->max_speed = max_speed; err = register_netdev(dev); if (err) { - ugeth_err("%s: Cannot register net device, aborting.", - dev->name); + if (netif_msg_probe(ugeth)) + ugeth_err("%s: Cannot register net device, aborting.", + dev->name); free_netdev(dev); return err; } @@ -3972,7 +4027,8 @@ static int __init ucc_geth_init(void) if (ret) return ret; - printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); + if (netif_msg_drv(&debug)) + printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); for (i = 0; i < 8; i++) memcpy(&(ugeth_info[i]), &ugeth_primary_info, sizeof(ugeth_primary_info)); 
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index a29e1c3ca4b7..4fb95b3af948 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -30,6 +30,10 @@ #include "ucc_geth_mii.h" +#define DRV_DESC "QE UCC Gigabit Ethernet Controller" +#define DRV_NAME "ucc_geth" +#define DRV_VERSION "1.1" + #define NUM_TX_QUEUES 8 #define NUM_RX_QUEUES 8 #define NUM_BDS_IN_PREFETCHED_BDS 4 @@ -40,6 +44,7 @@ struct ucc_geth { struct ucc_fast uccf; + u8 res0[0x100 - sizeof(struct ucc_fast)]; u32 maccfg1; /* mac configuration reg. 1 */ u32 maccfg2; /* mac configuration reg. 2 */ @@ -896,6 +901,7 @@ struct ucc_geth_hardware_statistics { #define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 #define UCC_GETH_RX_BD_RING_SIZE_MIN 8 #define UCC_GETH_TX_BD_RING_SIZE_MIN 2 +#define UCC_GETH_BD_RING_SIZE_MAX 0xffff #define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD @@ -1135,6 +1141,7 @@ struct ucc_geth_info { int bro; int ecm; int receiveFlowControl; + int transmitFlowControl; u8 maxGroupAddrInHash; u8 maxIndAddrInHash; u8 prel; @@ -1178,7 +1185,7 @@ struct ucc_geth_private { struct ucc_geth_info *ug_info; struct ucc_fast_private *uccf; struct net_device *dev; - struct net_device_stats stats; /* linux network statistics */ + struct napi_struct napi; struct ucc_geth *ug_regs; struct ucc_geth_init_pram *p_init_enet_param_shadow; struct ucc_geth_exf_global_pram *p_exf_glbl_param; diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c new file mode 100644 index 000000000000..9a9622c13e2b --- /dev/null +++ b/drivers/net/ucc_geth_ethtool.c @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Description: QE UCC Gigabit Ethernet Ethtool API Set + * + * Author: Li Yang <leoli@freescale.com> + * + * Limitation: + * Can only get/set setttings of the first queue. + * Need to re-open the interface manually after changing some paramters. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> +#include <asm/types.h> +#include <asm/uaccess.h> + +#include "ucc_geth.h" +#include "ucc_geth_mii.h" + +static char hw_stat_gstrings[][ETH_GSTRING_LEN] = { + "tx-64-frames", + "tx-65-127-frames", + "tx-128-255-frames", + "rx-64-frames", + "rx-65-127-frames", + "rx-128-255-frames", + "tx-bytes-ok", + "tx-pause-frames", + "tx-multicast-frames", + "tx-broadcast-frames", + "rx-frames", + "rx-bytes-ok", + "rx-bytes-all", + "rx-multicast-frames", + "rx-broadcast-frames", + "stats-counter-carry", + "stats-counter-mask", + "rx-dropped-frames", +}; + +static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { + "tx-single-collision", + "tx-multiple-collision", + "tx-late-collsion", + "tx-aborted-frames", + "tx-lost-frames", + "tx-carrier-sense-errors", + "tx-frames-ok", + "tx-excessive-differ-frames", + "tx-256-511-frames", + "tx-1024-1518-frames", + "tx-jumbo-frames", +}; + +static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { + "rx-crc-errors", + "rx-alignment-errors", + "rx-in-range-length-errors", + "rx-out-of-range-length-errors", + "rx-too-long-frames", + "rx-runt", + "rx-very-long-event", + "rx-symbol-errors", + "rx-busy-drop-frames", + "reserved", + "reserved", + "rx-mismatch-drop-frames", + "rx-small-than-64", + "rx-256-511-frames", + "rx-512-1023-frames", + "rx-1024-1518-frames", + "rx-jumbo-frames", + "rx-mac-error-loss", + "rx-pause-frames", + "reserved", + "rx-vlan-removed", + "rx-vlan-replaced", + "rx-vlan-inserted", + "rx-ip-checksum-errors", +}; + +#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings) +#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) +#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) + +extern int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, + int tx_flow_control_enable, u16 pause_period, + u16 extension_field, volatile u32 *upsmr_register, + volatile u32 *uempr_register, volatile u32 *maccfg1_register); + +static int +uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct phy_device *phydev = ugeth->phydev; + struct ucc_geth_info *ug_info = ugeth->ug_info; + + if (!phydev) + return -ENODEV; + + ecmd->maxtxpkt = 1; + ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0]; + + return phy_ethtool_gset(phydev, ecmd); +} + +static int +uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct phy_device *phydev = ugeth->phydev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, ecmd); +} + +static void +uec_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + + pause->autoneg = ugeth->phydev->autoneg; + + if (ugeth->ug_info->receiveFlowControl) + pause->rx_pause = 1; + if (ugeth->ug_info->transmitFlowControl) + pause->tx_pause = 1; +} + +static int +uec_set_pauseparam(struct net_device *netdev, + struct 
ethtool_pauseparam *pause) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + int ret = 0; + + ugeth->ug_info->receiveFlowControl = pause->rx_pause; + ugeth->ug_info->transmitFlowControl = pause->tx_pause; + + if (ugeth->phydev->autoneg) { + if (netif_running(netdev)) { + /* FIXME: automatically restart */ + printk(KERN_INFO + "Please re-open the interface.\n"); + } + } else { + struct ucc_geth_info *ug_info = ugeth->ug_info; + + ret = init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + ug_info->transmitFlowControl, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } + + return ret; +} + +static uint32_t +uec_get_msglevel(struct net_device *netdev) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + return ugeth->msg_enable; +} + +static void +uec_set_msglevel(struct net_device *netdev, uint32_t data) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + ugeth->msg_enable = data; +} + +static int +uec_get_regs_len(struct net_device *netdev) +{ + return sizeof(struct ucc_geth); +} + +static void +uec_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + int i; + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs; + u32 *buff = p; + + for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++) + buff[i] = in_be32(&ug_regs[i]); +} + +static void +uec_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + int queue = 0; + + ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + + ring->rx_pending = ug_info->bdRingLenRx[queue]; + ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; + ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; + ring->tx_pending = ug_info->bdRingLenTx[queue]; +} + +static int +uec_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + int queue = 0, ret = 0; + + if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) { + printk("%s: RxBD ring size must be no smaller than %d.\n", + netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN); + return -EINVAL; + } + if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) { + printk("%s: RxBD ring size must be multiple of %d.\n", + netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); + return -EINVAL; + } + if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) { + printk("%s: TxBD ring size must be no smaller than %d.\n", + netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN); + return -EINVAL; + } + + ug_info->bdRingLenRx[queue] = ring->rx_pending; + ug_info->bdRingLenTx[queue] = ring->tx_pending; + + if (netif_running(netdev)) { + /* FIXME: restart automatically */ + printk(KERN_INFO + "Please re-open the interface.\n"); + } + + return ret; +} + +static int uec_get_sset_count(struct net_device *netdev, int sset) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 stats_mode = ugeth->ug_info->statisticsMode; + int len = 0; + + switch (sset) { + case ETH_SS_STATS: + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) + len += UEC_HW_STATS_LEN; + if (stats_mode & 
UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) + len += UEC_TX_FW_STATS_LEN; + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) + len += UEC_RX_FW_STATS_LEN; + + return len; + + default: + return -EOPNOTSUPP; + } +} + +static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 stats_mode = ugeth->ug_info->statisticsMode; + + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { + memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN * + ETH_GSTRING_LEN); + buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN; + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN * + ETH_GSTRING_LEN); + buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) + memcpy(buf, tx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * + ETH_GSTRING_LEN); +} + +static void uec_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 stats_mode = ugeth->ug_info->statisticsMode; + u32 __iomem *base; + int i, j = 0; + + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { + base = (u32 __iomem *)&ugeth->ug_regs->tx64; + for (i = 0; i < UEC_HW_STATS_LEN; i++) + data[j++] = (u64)in_be32(&base[i]); + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; + for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) + data[j++] = (u64)in_be32(&base[i]); + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { + base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; + for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) + data[j++] = (u64)in_be32(&base[i]); + } +} + +static int uec_nway_reset(struct net_device *netdev) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + + return phy_start_aneg(ugeth->phydev); +} + +/* Report driver information */ +static void +uec_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, DRV_NAME, 32); + strncpy(drvinfo->version, DRV_VERSION, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "QUICC ENGINE", 32); + drvinfo->eedump_len = 0; + drvinfo->regdump_len = uec_get_regs_len(netdev); +} + +static const struct ethtool_ops uec_ethtool_ops = { + .get_settings = uec_get_settings, + .set_settings = uec_set_settings, + .get_drvinfo = uec_get_drvinfo, + .get_regs_len = uec_get_regs_len, + .get_regs = uec_get_regs, + .get_msglevel = uec_get_msglevel, + .set_msglevel = uec_set_msglevel, + .nway_reset = uec_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = uec_get_ringparam, + .set_ringparam = uec_set_ringparam, + .get_pauseparam = uec_get_pauseparam, + .set_pauseparam = uec_set_pauseparam, + .set_sg = ethtool_op_set_sg, + .get_sset_count = uec_get_sset_count, + .get_strings = uec_get_strings, + .get_ethtool_stats = uec_get_ethtool_stats, +}; + +void uec_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops); +} diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c index 7bcb82f50cf7..df884f0ad8e5 100644 --- a/drivers/net/ucc_geth_mii.c +++ b/drivers/net/ucc_geth_mii.c @@ -32,7 +32,6 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <asm/ocp.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/phy.h> @@ -54,8 
+53,8 @@ #define vdbg(format, arg...) do {} while(0) #endif -#define DRV_DESC "QE UCC Ethernet Controller MII Bus" -#define DRV_NAME "fsl-uec_mdio" +#define MII_DRV_DESC "QE UCC Ethernet Controller MII Bus" +#define MII_DRV_NAME "fsl-uec_mdio" /* Write value to the PHY for this device to the register at regnum, */ /* waiting until the write is done before it returns. All PHY */ @@ -261,7 +260,7 @@ static struct of_device_id uec_mdio_match[] = { }; static struct of_platform_driver uec_mdio_driver = { - .name = DRV_NAME, + .name = MII_DRV_NAME, .probe = uec_mdio_probe, .remove = uec_mdio_remove, .match_table = uec_mdio_match, @@ -272,7 +271,8 @@ int __init uec_mdio_init(void) return of_register_platform_driver(&uec_mdio_driver); } -void __exit uec_mdio_exit(void) +/* called from __init ucc_geth_init, therefore can not be __exit */ +void uec_mdio_exit(void) { of_unregister_platform_driver(&uec_mdio_driver); } diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 6d95cacd5284..61daa096de66 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -1474,6 +1474,7 @@ static struct usb_driver asix_driver = { .suspend = usbnet_suspend, .resume = usbnet_resume, .disconnect = usbnet_disconnect, + .supports_autosuspend = 1, }; static int __init asix_init(void) diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 16c7a0e87850..a2de32fabc17 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &dm9601_ethtool_ops; dev->net->hard_header_len += DM_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; - dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD; + dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; dev->mii.dev = dev->net; dev->mii.mdio_read = dm9601_mdio_read; diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 524dc5f5e46d..58a53a641754 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1152,8 +1152,6 @@ err_fw: INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); - SET_MODULE_OWNER(netdev); - usb_set_intfdata(intf, kaweth); #if 0 diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index a05fd97e5bc2..d1ed68a11e70 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -768,11 +768,13 @@ done: static void write_bulk_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; - struct net_device *net = pegasus->net; + struct net_device *net; if (!pegasus) return; + net = pegasus->net; + if (!netif_device_present(net) || !netif_running(net)) return; @@ -1295,6 +1297,7 @@ static int pegasus_probe(struct usb_interface *intf, pegasus_t *pegasus; int dev_index = id - pegasus_ids; int res = -ENOMEM; + DECLARE_MAC_BUF(mac); usb_get_dev(dev); net = alloc_etherdev(sizeof(struct pegasus)); @@ -1304,7 +1307,6 @@ static int pegasus_probe(struct usb_interface *intf, } pegasus = netdev_priv(net); - memset(pegasus, 0, sizeof (struct pegasus)); pegasus->dev_index = dev_index; init_waitqueue_head(&pegasus->ctrl_wait); @@ -1320,7 +1322,6 @@ static int pegasus_probe(struct usb_interface *intf, pegasus->intf = intf; pegasus->usb = dev; pegasus->net = net; - SET_MODULE_OWNER(net); net->open = pegasus_open; net->stop = pegasus_close; net->watchdog_timeo = PEGASUS_TX_TIMEOUT; @@ -1367,12 +1368,10 @@ static int pegasus_probe(struct usb_interface *intf, queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, CARRIER_CHECK_DELAY); - 
dev_info(&intf->dev, "%s, %s, %02x:%02x:%02x:%02x:%02x:%02x\n", - net->name, - usb_dev_id[dev_index].name, - net->dev_addr [0], net->dev_addr [1], - net->dev_addr [2], net->dev_addr [3], - net->dev_addr [4], net->dev_addr [5]); + dev_info(&intf->dev, "%s, %s, %s\n", + net->name, + usb_dev_id[dev_index].name, + print_mac(mac, net->dev_addr)); return 0; out3: diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index fa598f0340cf..33cbc306226c 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -905,7 +905,6 @@ static int rtl8150_probe(struct usb_interface *intf, } dev = netdev_priv(netdev); - memset(dev, 0, sizeof(rtl8150_t)); dev->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!dev->intr_buff) { @@ -918,7 +917,6 @@ static int rtl8150_probe(struct usb_interface *intf, dev->udev = udev; dev->netdev = netdev; - SET_MODULE_OWNER(netdev); netdev->open = rtl8150_open; netdev->stop = rtl8150_close; netdev->do_ioctl = rtl8150_ioctl; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 37bf4f2c0a44..acd5f1c0e63a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -590,6 +590,7 @@ static int usbnet_stop (struct net_device *net) dev->flags = 0; del_timer_sync (&dev->delay); tasklet_kill (&dev->bh); + usb_autopm_put_interface(dev->intf); return 0; } @@ -603,9 +604,19 @@ static int usbnet_stop (struct net_device *net) static int usbnet_open (struct net_device *net) { struct usbnet *dev = netdev_priv(net); - int retval = 0; + int retval; struct driver_info *info = dev->driver_info; + if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { + if (netif_msg_ifup (dev)) + devinfo (dev, + "resumption fail (%d) usbnet usb-%s-%s, %s", + retval, + dev->udev->bus->bus_name, dev->udev->devpath, + info->description); + goto done_nopm; + } + // put into "known safe" state if (info->reset && (retval = info->reset (dev)) < 0) { if (netif_msg_ifup (dev)) @@ -659,7 +670,10 @@ static int usbnet_open (struct net_device *net) // delay posting reads until we're fully open tasklet_schedule (&dev->bh); + return retval; done: + usb_autopm_put_interface(dev->intf); +done_nopm: return retval; } @@ -1120,6 +1134,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) struct usb_device *xdev; int status; const char *name; + DECLARE_MAC_BUF(mac); name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; @@ -1143,6 +1158,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev = netdev_priv(net); dev->udev = xdev; + dev->intf = udev; dev->driver_info = info; dev->driver_name = name; dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV @@ -1158,7 +1174,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) init_timer (&dev->delay); mutex_init (&dev->phy_mutex); - SET_MODULE_OWNER (net); dev->net = net; strcpy (net->name, "usb%d"); memcpy (net->dev_addr, node_id, sizeof node_id); @@ -1227,14 +1242,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (status) goto out3; if (netif_msg_probe (dev)) - devinfo (dev, "register '%s' at usb-%s-%s, %s, " - "%02x:%02x:%02x:%02x:%02x:%02x", + devinfo (dev, "register '%s' at usb-%s-%s, %s, %s", udev->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description, - net->dev_addr [0], net->dev_addr [1], - net->dev_addr [2], net->dev_addr [3], - net->dev_addr [4], net->dev_addr [5]); + print_mac(mac, net->dev_addr)); // ok, it's ready to go. 
usb_set_intfdata (udev, dev); @@ -1267,12 +1279,18 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) struct usbnet *dev = usb_get_intfdata(intf); if (!dev->suspend_count++) { - /* accelerate emptying of the rx and queues, to avoid + /* + * accelerate emptying of the rx and queues, to avoid * having everything error out. */ netif_device_detach (dev->net); (void) unlink_urbs (dev, &dev->rxq); (void) unlink_urbs (dev, &dev->txq); + /* + * reattach so runtime management can use and + * wake the device + */ + netif_device_attach (dev->net); } return 0; } @@ -1282,10 +1300,9 @@ int usbnet_resume (struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - if (!--dev->suspend_count) { - netif_device_attach (dev->net); + if (!--dev->suspend_count) tasklet_schedule (&dev->bh); - } + return 0; } EXPORT_SYMBOL_GPL(usbnet_resume); diff --git a/drivers/net/usb/usbnet.h b/drivers/net/usb/usbnet.h index a6c5820767de..1fae4347e831 100644 --- a/drivers/net/usb/usbnet.h +++ b/drivers/net/usb/usbnet.h @@ -28,6 +28,7 @@ struct usbnet { /* housekeeping */ struct usb_device *udev; + struct usb_interface *intf; struct driver_info *driver_info; const char *driver_name; wait_queue_head_t *wait; diff --git a/drivers/net/veth.c b/drivers/net/veth.c new file mode 100644 index 000000000000..fdd1e034569d --- /dev/null +++ b/drivers/net/veth.c @@ -0,0 +1,482 @@ +/* + * drivers/net/veth.c + * + * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc + * + * Author: Pavel Emelianov <xemul@openvz.org> + * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com> + * + */ + +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> + +#include <net/dst.h> +#include <net/xfrm.h> +#include <net/veth.h> + +#define DRV_NAME "veth" +#define DRV_VERSION "1.0" + +struct veth_net_stats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +struct veth_priv { + struct net_device *peer; + struct net_device *dev; + struct list_head list; + struct veth_net_stats *stats; + unsigned ip_summed; +}; + +static LIST_HEAD(veth_list); + +/* + * ethtool interface + */ + +static struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "peer_ifindex" }, +}; + +static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->supported = 0; + cmd->advertising = 0; + cmd->speed = SPEED_10000; + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_TP; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); +} + +static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + } +} + +static int veth_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void veth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + data[0] = priv->peer->ifindex; +} + +static u32 veth_get_rx_csum(struct 
net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + return priv->ip_summed == CHECKSUM_UNNECESSARY; +} + +static int veth_set_rx_csum(struct net_device *dev, u32 data) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; + return 0; +} + +static u32 veth_get_tx_csum(struct net_device *dev) +{ + return (dev->features & NETIF_F_NO_CSUM) != 0; +} + +static int veth_set_tx_csum(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_NO_CSUM; + else + dev->features &= ~NETIF_F_NO_CSUM; + return 0; +} + +static struct ethtool_ops veth_ethtool_ops = { + .get_settings = veth_get_settings, + .get_drvinfo = veth_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_rx_csum = veth_get_rx_csum, + .set_rx_csum = veth_set_rx_csum, + .get_tx_csum = veth_get_tx_csum, + .set_tx_csum = veth_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_strings = veth_get_strings, + .get_sset_count = veth_get_sset_count, + .get_ethtool_stats = veth_get_ethtool_stats, +}; + +/* + * xmit + */ + +static int veth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct net_device *rcv = NULL; + struct veth_priv *priv, *rcv_priv; + struct veth_net_stats *stats; + int length, cpu; + + skb_orphan(skb); + + priv = netdev_priv(dev); + rcv = priv->peer; + rcv_priv = netdev_priv(rcv); + + cpu = smp_processor_id(); + stats = per_cpu_ptr(priv->stats, cpu); + + if (!(rcv->flags & IFF_UP)) + goto outf; + + skb->pkt_type = PACKET_HOST; + skb->protocol = eth_type_trans(skb, rcv); + if (dev->features & NETIF_F_NO_CSUM) + skb->ip_summed = rcv_priv->ip_summed; + + dst_release(skb->dst); + skb->dst = NULL; + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); + + length = skb->len; + + stats->tx_bytes += length; + stats->tx_packets++; + + stats = per_cpu_ptr(rcv_priv->stats, cpu); + stats->rx_bytes += length; + stats->rx_packets++; + + netif_rx(skb); + return 0; + +outf: + kfree_skb(skb); + stats->tx_dropped++; + return 0; +} + +/* + * general routines + */ + +static struct net_device_stats *veth_get_stats(struct net_device *dev) +{ + struct veth_priv *priv; + struct net_device_stats *dev_stats; + int cpu; + struct veth_net_stats *stats; + + priv = netdev_priv(dev); + dev_stats = &dev->stats; + + dev_stats->rx_packets = 0; + dev_stats->tx_packets = 0; + dev_stats->rx_bytes = 0; + dev_stats->tx_bytes = 0; + dev_stats->tx_dropped = 0; + + for_each_online_cpu(cpu) { + stats = per_cpu_ptr(priv->stats, cpu); + + dev_stats->rx_packets += stats->rx_packets; + dev_stats->tx_packets += stats->tx_packets; + dev_stats->rx_bytes += stats->rx_bytes; + dev_stats->tx_bytes += stats->tx_bytes; + dev_stats->tx_dropped += stats->tx_dropped; + } + + return dev_stats; +} + +static int veth_open(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + if (priv->peer == NULL) + return -ENOTCONN; + + if (priv->peer->flags & IFF_UP) { + netif_carrier_on(dev); + netif_carrier_on(priv->peer); + } + return 0; +} + +static int veth_close(struct net_device *dev) +{ + struct veth_priv *priv; + + if (netif_carrier_ok(dev)) { + priv = netdev_priv(dev); + netif_carrier_off(dev); + netif_carrier_off(priv->peer); + } + return 0; +} + +static int veth_dev_init(struct net_device *dev) +{ + struct veth_net_stats *stats; + struct veth_priv *priv; + + stats = alloc_percpu(struct veth_net_stats); + if (stats == NULL) + return -ENOMEM; + + priv = netdev_priv(dev); + priv->stats = stats; + return 0; +} + 
+static void veth_dev_free(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + free_percpu(priv->stats); + free_netdev(dev); +} + +static void veth_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->hard_start_xmit = veth_xmit; + dev->get_stats = veth_get_stats; + dev->open = veth_open; + dev->stop = veth_close; + dev->ethtool_ops = &veth_ethtool_ops; + dev->features |= NETIF_F_LLTX; + dev->init = veth_dev_init; + dev->destructor = veth_dev_free; +} + +/* + * netlink interface + */ + +static int veth_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static struct rtnl_link_ops veth_link_ops; + +static int veth_newlink(struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + int err; + struct net_device *peer; + struct veth_priv *priv; + char ifname[IFNAMSIZ]; + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; + + /* + * create and register peer first + * + * struct ifinfomsg is at the head of VETH_INFO_PEER, but we + * skip it since no info from it is useful yet + */ + + if (data != NULL && data[VETH_INFO_PEER] != NULL) { + struct nlattr *nla_peer; + + nla_peer = data[VETH_INFO_PEER]; + err = nla_parse(peer_tb, IFLA_MAX, + nla_data(nla_peer) + sizeof(struct ifinfomsg), + nla_len(nla_peer) - sizeof(struct ifinfomsg), + ifla_policy); + if (err < 0) + return err; + + err = veth_validate(peer_tb, NULL); + if (err < 0) + return err; + + tbp = peer_tb; + } else + tbp = tb; + + if (tbp[IFLA_IFNAME]) + nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + else + snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); + + peer = rtnl_create_link(dev->nd_net, ifname, &veth_link_ops, tbp); + if (IS_ERR(peer)) + return PTR_ERR(peer); + + if (tbp[IFLA_ADDRESS] == NULL) + random_ether_addr(peer->dev_addr); + + err = register_netdevice(peer); + if (err < 0) + goto err_register_peer; + + netif_carrier_off(peer); + + /* + * register dev last + * + * note, that since we've registered new device the dev's name + * should be re-allocated + */ + + if (tb[IFLA_ADDRESS] == NULL) + random_ether_addr(dev->dev_addr); + + if (tb[IFLA_IFNAME]) + nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); + + if (strchr(dev->name, '%')) { + err = dev_alloc_name(dev, dev->name); + if (err < 0) + goto err_alloc_name; + } + + err = register_netdevice(dev); + if (err < 0) + goto err_register_dev; + + netif_carrier_off(dev); + + /* + * tie the deviced together + */ + + priv = netdev_priv(dev); + priv->dev = dev; + priv->peer = peer; + list_add(&priv->list, &veth_list); + + priv = netdev_priv(peer); + priv->dev = peer; + priv->peer = dev; + INIT_LIST_HEAD(&priv->list); + return 0; + +err_register_dev: + /* nothing to do */ +err_alloc_name: + unregister_netdevice(peer); + return err; + +err_register_peer: + free_netdev(peer); + return err; +} + +static void veth_dellink(struct net_device *dev) +{ + struct veth_priv *priv; + struct net_device *peer; + + priv = netdev_priv(dev); + peer = priv->peer; + + if (!list_empty(&priv->list)) + list_del(&priv->list); + + priv = netdev_priv(peer); + if (!list_empty(&priv->list)) + list_del(&priv->list); + + unregister_netdevice(dev); + unregister_netdevice(peer); +} + +static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; + +static struct rtnl_link_ops veth_link_ops = { + .kind = DRV_NAME, + 
.priv_size = sizeof(struct veth_priv), + .setup = veth_setup, + .validate = veth_validate, + .newlink = veth_newlink, + .dellink = veth_dellink, + .policy = veth_policy, + .maxtype = VETH_INFO_MAX, +}; + +/* + * init/fini + */ + +static __init int veth_init(void) +{ + return rtnl_link_register(&veth_link_ops); +} + +static __exit void veth_exit(void) +{ + struct veth_priv *priv, *next; + + rtnl_lock(); + /* + * cannot trust __rtnl_link_unregister() to unregister all + * devices, as each ->dellink call will remove two devices + * from the list at once. + */ + list_for_each_entry_safe(priv, next, &veth_list, list) + veth_dellink(priv->dev); + + __rtnl_link_unregister(&veth_link_ops); + rtnl_unlock(); +} + +module_init(veth_init); +module_exit(veth_exit); + +MODULE_DESCRIPTION("Virtual Ethernet Tunnel"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index f51c2c138f10..07263cd93f9c 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -42,7 +42,13 @@ static int max_interrupt_work = 20; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1518 effectively disables this feature. */ +#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \ + || defined(CONFIG_SPARC) || defined(__ia64__) \ + || defined(__sh__) || defined(__mips__) +static int rx_copybreak = 1518; +#else static int rx_copybreak; +#endif /* Work-around for broken BIOSes: they are unable to get the chip back out of power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ @@ -329,16 +335,16 @@ enum wol_bits { /* The Rx and Tx buffer descriptors. */ struct rx_desc { - s32 rx_status; - u32 desc_length; /* Chain flag, Buffer/frame length */ - u32 addr; - u32 next_desc; + __le32 rx_status; + __le32 desc_length; /* Chain flag, Buffer/frame length */ + __le32 addr; + __le32 next_desc; }; struct tx_desc { - s32 tx_status; - u32 desc_length; /* Chain flag, Tx Config, Frame length */ - u32 addr; - u32 next_desc; + __le32 tx_status; + __le32 desc_length; /* Chain flag, Tx Config, Frame length */ + __le32 addr; + __le32 next_desc; }; /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ @@ -383,6 +389,8 @@ struct rhine_private { struct pci_dev *pdev; long pioaddr; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; spinlock_t lock; @@ -576,28 +584,25 @@ static void rhine_poll(struct net_device *dev) #endif #ifdef CONFIG_VIA_RHINE_NAPI -static int rhine_napipoll(struct net_device *dev, int *budget) +static int rhine_napipoll(struct napi_struct *napi, int budget) { - struct rhine_private *rp = netdev_priv(dev); + struct rhine_private *rp = container_of(napi, struct rhine_private, napi); + struct net_device *dev = rp->dev; void __iomem *ioaddr = rp->base; - int done, limit = min(dev->quota, *budget); + int work_done; - done = rhine_rx(dev, limit); - *budget -= done; - dev->quota -= done; + work_done = rhine_rx(dev, budget); - if (done < limit) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | IntrTxDone | IntrTxError | IntrTxUnderrun | IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); - return 0; } - else - return 1; + return work_done; } #endif @@ -633,6 +638,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, #else int bar = 0; #endif + DECLARE_MAC_BUF(mac); /* when built into 
the kernel, we only print version if device is found */ #ifndef MODULE @@ -697,10 +703,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, printk(KERN_ERR "alloc_etherdev failed\n"); goto err_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); rp = netdev_priv(dev); + rp->dev = dev; rp->quirks = quirks; rp->pioaddr = pioaddr; rp->pdev = pdev; @@ -779,8 +785,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, dev->poll_controller = rhine_poll; #endif #ifdef CONFIG_VIA_RHINE_NAPI - dev->poll = rhine_napipoll; - dev->weight = 64; + netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); #endif if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; @@ -790,18 +795,14 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, if (rc) goto err_out_unmap; - printk(KERN_INFO "%s: VIA %s at 0x%lx, ", + printk(KERN_INFO "%s: VIA %s at 0x%lx, %s, IRQ %d.\n", dev->name, name, #ifdef USE_MMIO - memaddr + memaddr, #else - (long)ioaddr + (long)ioaddr, #endif - ); - - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq); + print_mac(mac, dev->dev_addr), pdev->irq); pci_set_drvdata(pdev, dev); @@ -1055,7 +1056,9 @@ static void init_registers(struct net_device *dev) rhine_set_rx_mode(dev); - netif_poll_enable(dev); +#ifdef CONFIG_VIA_RHINE_NAPI + napi_enable(&rp->napi); +#endif /* Enable interrupts by setting the interrupt mask. */ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | @@ -1190,6 +1193,10 @@ static void rhine_tx_timeout(struct net_device *dev) /* protect against concurrent rx interrupts */ disable_irq(rp->pdev->irq); +#ifdef CONFIG_VIA_RHINE_NAPI + napi_disable(&rp->napi); +#endif + spin_lock(&rp->lock); /* clear all descriptors */ @@ -1318,7 +1325,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance) IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &rp->napi); #else rhine_rx(dev, RX_RING_SIZE); #endif @@ -1803,9 +1810,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_msglevel = netdev_set_msglevel, .get_wol = rhine_get_wol, .set_wol = rhine_set_wol, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -1832,7 +1836,9 @@ static int rhine_close(struct net_device *dev) spin_lock_irq(&rp->lock); netif_stop_queue(dev); - netif_poll_disable(dev); +#ifdef CONFIG_VIA_RHINE_NAPI + napi_disable(&rp->napi); +#endif if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, " @@ -1931,6 +1937,9 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state) if (!netif_running(dev)) return 0; +#ifdef CONFIG_VIA_RHINE_NAPI + napi_disable(&rp->napi); +#endif netif_device_detach(dev); pci_save_state(pdev); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index f331843d1102..4ae05799ac44 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -72,6 +72,7 @@ #include <linux/mii.h> #include <linux/in.h> #include <linux/if_arp.h> +#include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> @@ -84,6 +85,163 @@ static int velocity_nics = 0; static int msglevel = MSG_LEVEL_INFO; +/** + * mac_get_cam_mask - Read a CAM mask + * @regs: register block for this velocity + * @mask: buffer to store mask + * + * Fetch the mask bits of the selected CAM and store them into 
the + * provided mask buffer. + */ + +static void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + writeb(0, &regs->CAMADDR); + + /* read mask */ + for (i = 0; i < 8; i++) + *mask++ = readb(&(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + +} + + +/** + * mac_set_cam_mask - Set a CAM mask + * @regs: register block for this velocity + * @mask: CAM mask to load + * + * Store a new mask into a CAM + */ + +static void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + writeb(CAMADDR_CAMEN, &regs->CAMADDR); + + for (i = 0; i < 8; i++) { + writeb(*mask++, &(regs->MARCAM[i])); + } + /* disable CAMEN */ + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +static void mac_set_vlan_cam_mask(struct mac_regs __iomem * regs, u8 * mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR); + + for (i = 0; i < 8; i++) { + writeb(*mask++, &(regs->MARCAM[i])); + } + /* disable CAMEN */ + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +/** + * mac_set_cam - set CAM data + * @regs: register block of this velocity + * @idx: Cam index + * @addr: 2 or 6 bytes of CAM data + * + * Load an address or vlan tag into a CAM + */ + +static void mac_set_cam(struct mac_regs __iomem * regs, int idx, const u8 *addr) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR); + + for (i = 0; i < 6; i++) { + writeb(*addr++, &(regs->MARCAM[i])); + } + BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR); + + udelay(10); + + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + +static void mac_set_vlan_cam(struct mac_regs __iomem * regs, int idx, + const u8 *addr) +{ + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR); + writew(*((u16 *) addr), &regs->MARCAM[0]); + + BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR); + + udelay(10); + + writeb(0, &regs->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +} + + +/** + * mac_wol_reset - reset WOL after exiting low power + * @regs: register block of this velocity + * + * Called after we drop out of wake on lan mode in order to + * reset the Wake on lan features. 
This function doesn't restore + * the rest of the logic from the result of sleep/wakeup + */ + +static void mac_wol_reset(struct mac_regs __iomem * regs) +{ + + /* Turn off SWPTAG right after leaving power mode */ + BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW); + /* clear sticky bits */ + BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW); + + BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR); + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); + /* disable force PME-enable */ + writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr); + /* disable power-event config bit */ + writew(0xFFFF, &regs->WOLCRClr); + /* clear power status */ + writew(0xFFFF, &regs->WOLSRClr); +} static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct ethtool_ops velocity_ethtool_ops; @@ -111,15 +269,6 @@ VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); #define TX_DESC_DEF 64 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); -#define VLAN_ID_MIN 0 -#define VLAN_ID_MAX 4095 -#define VLAN_ID_DEF 0 -/* VID_setting[] is used for setting the VID of NIC. - 0: default VID. - 1-4094: other VIDs. -*/ -VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID"); - #define RX_THRESH_MIN 0 #define RX_THRESH_MAX 3 #define RX_THRESH_DEF 0 @@ -147,13 +296,6 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); */ VELOCITY_PARAM(DMA_length, "DMA length"); -#define TAGGING_DEF 0 -/* enable_tagging[] is used for enabling 802.1Q VID tagging. - 0: disable VID seeting(default). - 1: enable VID setting. -*/ -VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging"); - #define IP_ALIG_DEF 0 /* IP_byte_align[] is used for IP header DWORD byte aligned 0: indicate the IP header won't be DWORD byte aligned.(Default) . @@ -324,7 +466,7 @@ MODULE_DEVICE_TABLE(pci, velocity_id_table); * a pointer a static string valid while the driver is loaded. 
*/ -static char __devinit *get_chip_name(enum chip_type chip_id) +static const char __devinit *get_chip_name(enum chip_type chip_id) { int i; for (i = 0; chip_info_table[i].name != NULL; i++) @@ -442,8 +584,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index, velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); - velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname); - velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname); + velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname); velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); @@ -465,6 +606,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index, static void velocity_init_cam_filter(struct velocity_info *vptr) { struct mac_regs __iomem * regs = vptr->mac_regs; + unsigned short vid; /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG); @@ -473,27 +615,52 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) /* Disable all CAMs */ memset(vptr->vCAMmask, 0, sizeof(u8) * 8); memset(vptr->mCAMmask, 0, sizeof(u8) * 8); - mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); - mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); + mac_set_cam_mask(regs, vptr->mCAMmask); /* Enable first VCAM */ - if (vptr->flags & VELOCITY_FLAGS_TAGGING) { - /* If Tagging option is enabled and VLAN ID is not zero, then - turn on MCFG_RTGOPT also */ - if (vptr->options.vid != 0) - WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG); - - mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM); + if (vptr->vlgrp) { + for (vid = 0; vid < VLAN_VID_MASK; vid++) { + if (vlan_group_get_device(vptr->vlgrp, vid)) { + /* If Tagging option is enabled and + VLAN ID is not zero, then + turn on MCFG_RTGOPT also */ + if (vid != 0) + WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG); + + mac_set_vlan_cam(regs, 0, (u8 *) &vid); + } + } vptr->vCAMmask[0] |= 1; - mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); } else { u16 temp = 0; - mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + mac_set_vlan_cam(regs, 0, (u8 *) &temp); temp = 1; - mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + mac_set_vlan_cam_mask(regs, (u8 *) &temp); } } +static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + velocity_init_cam_filter(vptr); + spin_unlock_irq(&vptr->lock); +} + +static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + vlan_group_set_device(vptr->vlgrp, vid, NULL); + velocity_init_cam_filter(vptr); 
+ spin_unlock_irq(&vptr->lock); +} + + /** * velocity_rx_reset - handle a receive reset * @vptr: velocity we are resetting @@ -712,7 +879,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi /* Chain it all together */ - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); vptr = netdev_priv(dev); @@ -791,13 +957,17 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi dev->do_ioctl = velocity_ioctl; dev->ethtool_ops = &velocity_ethtool_ops; dev->change_mtu = velocity_change_mtu; + + dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid; + #ifdef VELOCITY_ZERO_COPY_SUPPORT dev->features |= NETIF_F_SG; #endif + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER; - if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { + if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) dev->features |= NETIF_F_IP_CSUM; - } ret = register_netdev(dev); if (ret < 0) @@ -1071,14 +1241,12 @@ static int velocity_rx_refill(struct velocity_info *vptr) static int velocity_init_rd_ring(struct velocity_info *vptr) { - int ret = -ENOMEM; - unsigned int rsize = sizeof(struct velocity_rd_info) * - vptr->options.numrx; + int ret; - vptr->rd_info = kmalloc(rsize, GFP_KERNEL); - if(vptr->rd_info == NULL) - goto out; - memset(vptr->rd_info, 0, rsize); + vptr->rd_info = kcalloc(vptr->options.numrx, + sizeof(struct velocity_rd_info), GFP_KERNEL); + if (!vptr->rd_info) + return -ENOMEM; vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; @@ -1088,7 +1256,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) "%s: failed to allocate RX buffer.\n", vptr->dev->name); velocity_free_rd_ring(vptr); } -out: + return ret; } @@ -1142,21 +1310,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr) dma_addr_t curr; struct tx_desc *td; struct velocity_td_info *td_info; - unsigned int tsize = sizeof(struct velocity_td_info) * - vptr->options.numtx; /* Init the TD ring entries */ for (j = 0; j < vptr->num_txq; j++) { curr = vptr->td_pool_dma[j]; - vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL); - if(vptr->td_infos[j] == NULL) - { + vptr->td_infos[j] = kcalloc(vptr->options.numtx, + sizeof(struct velocity_td_info), + GFP_KERNEL); + if (!vptr->td_infos[j]) { while(--j >= 0) kfree(vptr->td_infos[j]); return -ENOMEM; } - memset(vptr->td_infos[j], 0, tsize); for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) { td = &(vptr->td_rings[j][i]); @@ -1613,7 +1779,7 @@ static void velocity_error(struct velocity_info *vptr, int status) if (status & ISR_TXSTLI) { struct mac_regs __iomem * regs = vptr->mac_regs; - printk(KERN_ERR "TD structure errror TDindex=%hx\n", readw(&regs->TDIdx[0])); + printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); writew(TRDCSR_RUN, &regs->TDCSRClr); netif_stop_queue(vptr->dev); @@ -1994,8 +2160,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) td_ptr->tdesc1.CMDZ = 2; } - if (vptr->flags & VELOCITY_FLAGS_TAGGING) { - td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); + if (vptr->vlgrp && vlan_tx_tag_present(skb)) { + td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb); td_ptr->tdesc1.pqinf.priority = 0; td_ptr->tdesc1.pqinf.CFI = 0; td_ptr->tdesc1.TCR |= TCR0_VETAG; @@ -2121,14 +2287,14 @@ static void velocity_set_multi(struct net_device *dev) rx_mode = (RCR_AM | RCR_AB); } else { int offset = MCAM_SIZE - vptr->multicast_limit; - mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + 
mac_get_cam_mask(regs, vptr->mCAMmask); for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { - mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM); + mac_set_cam(regs, i + offset, mclist->dmi_addr); vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); } - mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + mac_set_cam_mask(regs, vptr->mCAMmask); rx_mode = (RCR_AM | RCR_AB); } if (dev->mtu > 1500) diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index b9e114d36c0f..aa9179623d90 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -1173,7 +1173,7 @@ enum chip_type { struct velocity_info_tbl { enum chip_type chip_id; - char *name; + const char *name; int txqueue; u32 flags; }; @@ -1194,14 +1194,6 @@ struct velocity_info_tbl { #define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr)) #define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set)) -#define mac_hw_mibs_read(regs, MIBs) {\ - int i;\ - BYTE_REG_BITS_ON(MIBCR_MPTRINI,&((regs)->MIBCR));\ - for (i=0;i<HW_MIB_SIZE;i++) {\ - (MIBs)[i]=readl(&((regs)->MIBData));\ - }\ -} - #define mac_set_dma_length(regs, n) {\ BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\ } @@ -1226,195 +1218,17 @@ struct velocity_info_tbl { writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\ } -#define mac_eeprom_reload(regs) {\ - int i=0;\ - BYTE_REG_BITS_ON(EECSR_RELOAD,&((regs)->EECSR));\ - do {\ - udelay(10);\ - if (i++>0x1000) {\ - break;\ - }\ - }while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&((regs)->EECSR)));\ -} - -enum velocity_cam_type { - VELOCITY_VLAN_ID_CAM = 0, - VELOCITY_MULTICAST_CAM -}; - -/** - * mac_get_cam_mask - Read a CAM mask - * @regs: register block for this velocity - * @mask: buffer to store mask - * @cam_type: CAM to fetch - * - * Fetch the mask bits of the selected CAM and store them into the - * provided mask buffer. 
- */ - -static inline void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask, enum velocity_cam_type cam_type) -{ - int i; - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_VCAMSL, &regs->CAMADDR); - else - writeb(0, &regs->CAMADDR); - - /* read mask */ - for (i = 0; i < 8; i++) - *mask++ = readb(&(regs->MARCAM[i])); - - /* disable CAMEN */ - writeb(0, &regs->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); +static inline void mac_eeprom_reload(struct mac_regs __iomem * regs) { + int i=0; + BYTE_REG_BITS_ON(EECSR_RELOAD,&(regs->EECSR)); + do { + udelay(10); + if (i++>0x1000) + break; + } while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&(regs->EECSR))); } -/** - * mac_set_cam_mask - Set a CAM mask - * @regs: register block for this velocity - * @mask: CAM mask to load - * @cam_type: CAM to store - * - * Store a new mask into a CAM - */ - -static inline void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask, enum velocity_cam_type cam_type) -{ - int i; - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR); - else - writeb(CAMADDR_CAMEN, &regs->CAMADDR); - - for (i = 0; i < 8; i++) { - writeb(*mask++, &(regs->MARCAM[i])); - } - /* disable CAMEN */ - writeb(0, &regs->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); -} - -/** - * mac_set_cam - set CAM data - * @regs: register block of this velocity - * @idx: Cam index - * @addr: 2 or 6 bytes of CAM data - * @cam_type: CAM to load - * - * Load an address or vlan tag into a CAM - */ - -static inline void mac_set_cam(struct mac_regs __iomem * regs, int idx, u8 *addr, enum velocity_cam_type cam_type) -{ - int i; - - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); - - idx &= (64 - 1); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR); - else - writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writew(*((u16 *) addr), &regs->MARCAM[0]); - else { - for (i = 0; i < 6; i++) { - writeb(*addr++, &(regs->MARCAM[i])); - } - } - BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR); - - udelay(10); - - writeb(0, &regs->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); -} - -/** - * mac_get_cam - fetch CAM data - * @regs: register block of this velocity - * @idx: Cam index - * @addr: buffer to hold up to 6 bytes of CAM data - * @cam_type: CAM to load - * - * Load an address or vlan tag from a CAM into the buffer provided by - * the caller. VLAN tags are 2 bytes the address cam entries are 6. 
- */ - -static inline void mac_get_cam(struct mac_regs __iomem * regs, int idx, u8 *addr, enum velocity_cam_type cam_type) -{ - int i; - - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); - - idx &= (64 - 1); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR); - else - writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR); - - BYTE_REG_BITS_ON(CAMCR_CAMRD, &regs->CAMCR); - - udelay(10); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - *((u16 *) addr) = readw(&(regs->MARCAM[0])); - else - for (i = 0; i < 6; i++, addr++) - *((u8 *) addr) = readb(&(regs->MARCAM[i])); - - writeb(0, &regs->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR); -} - -/** - * mac_wol_reset - reset WOL after exiting low power - * @regs: register block of this velocity - * - * Called after we drop out of wake on lan mode in order to - * reset the Wake on lan features. This function doesn't restore - * the rest of the logic from the result of sleep/wakeup - */ - -static inline void mac_wol_reset(struct mac_regs __iomem * regs) -{ - - /* Turn off SWPTAG right after leaving power mode */ - BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW); - /* clear sticky bits */ - BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW); - - BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR); - BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); - /* disable force PME-enable */ - writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr); - /* disable power-event config bit */ - writew(0xFFFF, &regs->WOLCRClr); - /* clear power status */ - writew(0xFFFF, &regs->WOLSRClr); -} - - /* * Header for WOL definitions. Used to compute hashes */ @@ -1701,7 +1515,7 @@ struct velocity_opt { int numrx; /* Number of RX descriptors */ int numtx; /* Number of TX descriptors */ enum speed_opt spd_dpx; /* Media link mode */ - int vid; /* vlan id */ + int DMA_length; /* DMA length */ int rx_thresh; /* RX_THRESH */ int flow_cntl; @@ -1727,6 +1541,7 @@ struct velocity_info { dma_addr_t tx_bufs_dma; u8 *tx_bufs; + struct vlan_group *vlgrp; u8 ip_addr[4]; enum chip_type chip_id; diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 6b63b350cd52..c4c8eab8574f 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -315,12 +315,11 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) return -ENODEV; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "c101: unable to allocate memory\n"); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); card->dev = alloc_hdlcdev(card); if (!card->dev) { @@ -364,7 +363,6 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) hdlc = dev_to_hdlc(dev); spin_lock_init(&card->lock); - SET_MODULE_OWNER(dev); dev->irq = irq; dev->mem_start = winbase; dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 9ef49ce148b2..26058b4f8f36 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -572,13 +572,11 @@ static int cosa_probe(int base, int irq, int dma) sprintf(cosa->name, "cosa%d", cosa->num); /* Initialize the per-channel data */ - cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels, - GFP_KERNEL); + cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); if (!cosa->chan) { err = -ENOMEM; goto err_out3; } - memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels); for (i=0; 
i<cosa->nchannels; i++) { cosa->chan[i].cosa = cosa; cosa->chan[i].num = i; diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c index 6e5f1c898517..a0e8611ad8e8 100644 --- a/drivers/net/wan/cycx_main.c +++ b/drivers/net/wan/cycx_main.c @@ -113,12 +113,10 @@ static int __init cycx_init(void) /* Verify number of cards and allocate adapter data space */ cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS); cycx_ncards = max_t(int, cycx_ncards, 1); - cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards, - GFP_KERNEL); + cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL); if (!cycx_card_array) goto out; - memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards); /* Register adapters with WAN router */ for (cnt = 0; cnt < cycx_ncards; ++cnt) { diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index 016b3ff3ea5e..8a1778cf98d1 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c @@ -131,14 +131,15 @@ static int cycx_wan_update(struct wan_device *wandev), cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev); /* Network device interface */ -static int cycx_netdevice_init(struct net_device *dev), - cycx_netdevice_open(struct net_device *dev), - cycx_netdevice_stop(struct net_device *dev), - cycx_netdevice_hard_header(struct sk_buff *skb, - struct net_device *dev, u16 type, - void *daddr, void *saddr, unsigned len), - cycx_netdevice_rebuild_header(struct sk_buff *skb), - cycx_netdevice_hard_start_xmit(struct sk_buff *skb, +static int cycx_netdevice_init(struct net_device *dev); +static int cycx_netdevice_open(struct net_device *dev); +static int cycx_netdevice_stop(struct net_device *dev); +static int cycx_netdevice_hard_header(struct sk_buff *skb, + struct net_device *dev, u16 type, + const void *daddr, const void *saddr, + unsigned len); +static int cycx_netdevice_rebuild_header(struct sk_buff *skb); +static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); static struct net_device_stats * @@ -376,11 +377,10 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev, } /* allocate and initialize private data */ - chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL); + chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL); if (!chan) return -ENOMEM; - memset(chan, 0, sizeof(*chan)); strcpy(chan->name, conf->name); chan->card = card; chan->link = conf->port; @@ -469,7 +469,14 @@ static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev) return 0; } + /* Network Device Interface */ + +static const struct header_ops cycx_header_ops = { + .create = cycx_netdevice_hard_header, + .rebuild = cycx_netdevice_rebuild_header, +}; + /* Initialize Linux network interface. 
* * This routine is called only once for each interface, during Linux network @@ -484,8 +491,8 @@ static int cycx_netdevice_init(struct net_device *dev) /* Initialize device driver entry points */ dev->open = cycx_netdevice_open; dev->stop = cycx_netdevice_stop; - dev->hard_header = cycx_netdevice_hard_header; - dev->rebuild_header = cycx_netdevice_rebuild_header; + dev->header_ops = &cycx_header_ops; + dev->hard_start_xmit = cycx_netdevice_hard_start_xmit; dev->get_stats = cycx_netdevice_get_stats; @@ -509,7 +516,6 @@ static int cycx_netdevice_init(struct net_device *dev) /* Set transmit buffer queue length */ dev->tx_queue_len = 10; - SET_MODULE_OWNER(dev); /* Initialize socket buffers */ cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); @@ -556,7 +562,8 @@ static int cycx_netdevice_stop(struct net_device *dev) * Return: media header length. */ static int cycx_netdevice_hard_header(struct sk_buff *skb, struct net_device *dev, u16 type, - void *daddr, void *saddr, unsigned len) + const void *daddr, const void *saddr, + unsigned len) { skb->protocol = type; diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 66be20c292b6..96b232446c0b 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -66,8 +66,8 @@ static void dlci_setup(struct net_device *); */ static int dlci_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct frhdr hdr; struct dlci_local *dlp; @@ -361,7 +361,7 @@ static int dlci_add(struct dlci_add *dlci) /* validate slave device */ - slave = dev_get_by_name(dlci->devname); + slave = dev_get_by_name(&init_net, dlci->devname); if (!slave) return -ENODEV; @@ -427,7 +427,7 @@ static int dlci_del(struct dlci_add *dlci) int err; /* validate slave device */ - master = __dev_get_by_name(dlci->devname); + master = __dev_get_by_name(&init_net, dlci->devname); if (!master) return(-ENODEV); @@ -485,6 +485,10 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg) return(err); } +static const struct header_ops dlci_header_ops = { + .create = dlci_header, +}; + static void dlci_setup(struct net_device *dev) { struct dlci_local *dlp = dev->priv; @@ -494,7 +498,7 @@ static void dlci_setup(struct net_device *dev) dev->stop = dlci_close; dev->do_ioctl = dlci_dev_ioctl; dev->hard_start_xmit = dlci_transmit; - dev->hard_header = dlci_header; + dev->header_ops = &dlci_header_ops; dev->get_stats = dlci_get_stats; dev->change_mtu = dlci_change_mtu; dev->destructor = free_netdev; @@ -513,6 +517,9 @@ static int dlci_dev_event(struct notifier_block *unused, { struct net_device *dev = (struct net_device *) ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_UNREGISTER) { struct dlci_local *dlp; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index dca024471455..33dc713b5301 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -890,12 +890,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) struct dscc4_dev_priv *root; int i, ret = -ENOMEM; - root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL); + root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL); if (!root) { printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME); goto err_out; } - memset(root, 0, dev_per_card*sizeof(*root)); for (i = 0; i < dev_per_card; i++) { root[i].dev = alloc_hdlcdev(root + i); @@ -903,12 +902,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) 
goto err_free_dev; } - ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL); + ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL); if (!ppriv) { printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME); goto err_free_dev; } - memset(ppriv, 0, sizeof(struct dscc4_pci_priv)); ppriv->root = root; spin_lock_init(&ppriv->lock); @@ -927,7 +925,6 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) d->do_ioctl = dscc4_ioctl; d->tx_timeout = dscc4_tx_timeout; d->watchdog_timeo = TX_TIMEOUT; - SET_MODULE_OWNER(d); SET_NETDEV_DEV(d, &pdev->dev); dpriv->dev_id = i; diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 58a53b6d9b42..12dae8e24844 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2476,13 +2476,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Allocate driver private data */ - card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL); + card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL); if (card == NULL) { printk_err("FarSync card found but insufficient memory for" " driver storage\n"); return -ENOMEM; } - memset(card, 0, sizeof (struct fst_card_info)); /* Try to enable the device */ if ((err = pci_enable_device(pdev)) != 0) { diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 65ad2e24caf0..d553e6f32851 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -36,6 +36,7 @@ #include <linux/rtnetlink.h> #include <linux/notifier.h> #include <linux/hdlc.h> +#include <net/net_namespace.h> static const char* version = "HDLC support module revision 1.21"; @@ -66,6 +67,12 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { struct hdlc_device_desc *desc = dev_to_desc(dev); + + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return 0; + } + if (desc->netif_rx) return desc->netif_rx(skb); @@ -102,6 +109,9 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event, unsigned long flags; int on; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (dev->get_stats != hdlc_get_stats) return NOTIFY_DONE; /* not an HDLC device */ @@ -222,6 +232,8 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EINVAL; } +static const struct header_ops hdlc_null_ops; + static void hdlc_setup_dev(struct net_device *dev) { /* Re-init all variables changed by HDLC protocol drivers, @@ -233,13 +245,9 @@ static void hdlc_setup_dev(struct net_device *dev) dev->type = ARPHRD_RAWHDLC; dev->hard_header_len = 16; dev->addr_len = 0; - dev->hard_header = NULL; - dev->rebuild_header = NULL; - dev->set_mac_address = NULL; - dev->hard_header_cache = NULL; - dev->header_cache_update = NULL; + dev->header_ops = &hdlc_null_ops; + dev->change_mtu = hdlc_change_mtu; - dev->hard_header_parse = NULL; } static void hdlc_setup(struct net_device *dev) diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 9ec6cf2e510e..038a6e748bbf 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -74,7 +74,7 @@ static inline struct cisco_state * state(hdlc_device *hdlc) static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev, - u16 type, void *daddr, void *saddr, + u16 type, const void *daddr, const void *saddr, unsigned int len) { struct hdlc_header *data; @@ -309,7 +309,6 @@ static void cisco_stop(struct net_device *dev) } - static struct hdlc_proto proto = { .start = cisco_start, .stop = cisco_stop, @@ -317,7 +316,10 @@ static struct hdlc_proto 
proto = { .ioctl = cisco_ioctl, .module = THIS_MODULE, }; - + +static const struct header_ops cisco_header_ops = { + .create = cisco_hard_header, +}; static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) { @@ -365,7 +367,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); dev->hard_start_xmit = hdlc->xmit; - dev->hard_header = cisco_hard_header; + dev->header_ops = &cisco_header_ops; dev->type = ARPHRD_CISCO; netif_dormant_on(dev); return 0; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 15b6e07a4382..071a64cacd5c 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -212,14 +212,13 @@ static pvc_device* add_pvc(struct net_device *dev, u16 dlci) pvc_p = &(*pvc_p)->next; } - pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC); + pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC); #ifdef DEBUG_PVC printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev); #endif if (!pvc) return NULL; - memset(pvc, 0, sizeof(pvc_device)); pvc->dlci = dlci; pvc->frad = dev; pvc->next = *pvc_p; /* Put it in the chain */ diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 4591437dd2f3..3caeb528eace 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -73,7 +73,7 @@ static void ppp_close(struct net_device *dev) sppp_close(dev); sppp_detach(dev); - dev->rebuild_header = NULL; + dev->change_mtu = state(hdlc)->old_change_mtu; dev->mtu = HDLC_MAX_MTU; dev->hard_header_len = 16; diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index 9ba3e4ee6ec7..83dbc924fcb5 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -231,19 +231,16 @@ static struct sv11_device *sv11_init(int iobase, int irq) return NULL; } - sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL); + sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); if(!sv) goto fail3; - memset(sv, 0, sizeof(*sv)); sv->if_ptr=&sv->netdev; sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup); if(!sv->netdev.dev) goto fail2; - SET_MODULE_OWNER(sv->netdev.dev); - dev=&sv->sync; /* diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 6c302e9dbca2..fb37b8095231 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -91,6 +91,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe int len, err; struct lapbethdev *lapbeth; + if (dev->nd_net != &init_net) + goto drop; + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; @@ -213,7 +216,7 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; - dev->hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); } @@ -326,7 +329,6 @@ static void lapbeth_setup(struct net_device *dev) dev->hard_header_len = 3; dev->mtu = 1000; dev->addr_len = 0; - SET_MODULE_OWNER(dev); } /* @@ -391,6 +393,9 @@ static int lapbeth_device_event(struct notifier_block *this, struct lapbethdev *lapbeth; struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (!dev_is_ethdev(dev)) return NOTIFY_DONE; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index ae132c1c5459..5ea877221f46 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -883,7 +883,6 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, 
dev->base_addr = pci_resource_start(pdev, 0); dev->irq = pdev->irq; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 31e1799571ad..426c0678d983 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c @@ -111,7 +111,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ * They set a few basics because they don't use sync_ppp */ dev->flags |= IFF_POINTOPOINT; - dev->hard_header = NULL; + dev->hard_header_len = 0; dev->addr_len = 0; } diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index 5c322dfb79f6..0a566b0daacb 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -351,12 +351,11 @@ static int __init n2_run(unsigned long io, unsigned long irq, return -ENODEV; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "n2: unable to allocate memory\n"); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); @@ -460,7 +459,6 @@ static int __init n2_run(unsigned long io, unsigned long irq, port->log_node = 1; spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->irq = irq; dev->mem_start = winbase; dev->mem_end = winbase + USE_WINDOWSIZE - 1; diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 5d8c78ee2cd9..99fee2f1d019 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -3456,7 +3456,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if ((err = pci_enable_device(pdev)) < 0) return err; - card = kmalloc(sizeof(pc300_t), GFP_KERNEL); + card = kzalloc(sizeof(pc300_t), GFP_KERNEL); if (card == NULL) { printk("PC300 found at RAM 0x%016llx, " "but could not allocate card structure.\n", @@ -3464,7 +3464,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_disable_dev; } - memset(card, 0, sizeof(pc300_t)); err = -ENODEV; diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index dfbd3b00f03b..bf1b01590429 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -334,14 +334,13 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, return i; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pc300: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); pci_set_drvdata(pdev, card); if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 || @@ -469,7 +468,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, port->phy_node = i; spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index 7f720de2e9f0..b595b64e7538 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -312,14 +312,13 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, return i; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); pci_set_drvdata(pdev, card); card->ports[0].dev = 
alloc_hdlcdev(&card->ports[0]); card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); @@ -416,7 +415,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, port->phy_node = i; spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 1cc18e787a65..76db40d200d8 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -54,6 +54,7 @@ #include <linux/init.h> #include <linux/delay.h> +#include <net/net_namespace.h> #include <net/arp.h> #include <asm/io.h> @@ -215,8 +216,6 @@ static void __init sbni_devsetup(struct net_device *dev) dev->get_stats = &sbni_get_stats; dev->set_multicast_list = &set_multicast_list; dev->do_ioctl = &sbni_ioctl; - - SET_MODULE_OWNER( dev ); } int __init sbni_probe(int unit) @@ -1361,7 +1360,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) return -EFAULT; - slave_dev = dev_get_by_name( slave_name ); + slave_dev = dev_get_by_name(&init_net, slave_name ); if( !slave_dev || !(slave_dev->flags & IFF_UP) ) { printk( KERN_ERR "%s: trying to enslave non-active " "device %s\n", dev->name, slave_name ); diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 6a485f0556f4..b39a541b2509 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -1196,10 +1196,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r if (read) { - temp = kmalloc(mem.len, GFP_KERNEL); + temp = kzalloc(mem.len, GFP_KERNEL); if (!temp) return(-ENOMEM); - memset(temp, 0, mem.len); sdla_read(dev, mem.addr, temp, mem.len); if(copy_to_user(mem.data, temp, mem.len)) { @@ -1604,7 +1603,6 @@ static void setup_sdla(struct net_device *dev) netdev_boot_setup_check(dev); - SET_MODULE_OWNER(dev); dev->flags = 0; dev->type = 0xFFFF; dev->hard_header_len = 0; diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 131358108c5a..11276bf3149f 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -270,11 +270,10 @@ static __init struct slvl_board *slvl_init(int iobase, int irq, return NULL; } - b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL); + b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); if(!b) goto fail3; - memset(b, 0, sizeof(*b)); if (!(b->dev[0]= slvl_alloc(iobase, irq))) goto fail2; diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 67fc67cfd452..232ecba5340f 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -51,6 +51,7 @@ #include <linux/spinlock.h> #include <linux/rcupdate.h> +#include <net/net_namespace.h> #include <net/syncppp.h> #include <asm/byteorder.h> @@ -358,8 +359,10 @@ done: * Handle transmit packets. */ -static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 type, - void *daddr, void *saddr, unsigned int len) +static int sppp_hard_header(struct sk_buff *skb, + struct net_device *dev, __u16 type, + const void *daddr, const void *saddr, + unsigned int len) { struct sppp *sp = (struct sppp *)sppp_of(dev); struct ppp_header *h; @@ -391,10 +394,9 @@ static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 t return sizeof(struct ppp_header); } -static int sppp_rebuild_header(struct sk_buff *skb) -{ - return 0; -} +static const struct header_ops sppp_header_ops = { + .create = sppp_hard_header, +}; /* * Send keepalive packets, every 10 seconds. 
@@ -1097,8 +1099,8 @@ void sppp_attach(struct ppp_device *pd) * hard_start_xmit. */ - dev->hard_header = sppp_hard_header; - dev->rebuild_header = sppp_rebuild_header; + dev->header_ops = &sppp_header_ops; + dev->tx_queue_len = 10; dev->type = ARPHRD_HDLC; dev->addr_len = 0; @@ -1114,8 +1116,6 @@ void sppp_attach(struct ppp_device *pd) dev->stop = sppp_close; #endif dev->change_mtu = sppp_change_mtu; - dev->hard_header_cache = NULL; - dev->header_cache_update = NULL; dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP; } @@ -1445,6 +1445,11 @@ static void sppp_print_bytes (u_char *p, u16 len) static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return 0; + } + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; sppp_input(dev,skb); diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index c73601574334..8e320b76ae0e 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -599,7 +599,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, } alloc_size = sizeof(card_t) + ports * sizeof(port_t); - card = kmalloc(alloc_size, GFP_KERNEL); + card = kzalloc(alloc_size, GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "wanXL %s: unable to allocate memory\n", pci_name(pdev)); @@ -607,7 +607,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, pci_disable_device(pdev); return -ENOBUFS; } - memset(card, 0, alloc_size); pci_set_drvdata(pdev, card); card->pdev = pdev; @@ -780,7 +779,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, port->dev = dev; hdlc = dev_to_hdlc(dev); spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->tx_queue_len = 50; dev->do_ioctl = wanxl_ioctl; dev->open = wanxl_open; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 1c9edd97accd..c48b1cc63fd5 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -786,14 +786,12 @@ static int __init init_x25_asy(void) printk(KERN_INFO "X.25 async: version 0.00 ALPHA " "(dynamic channels, max=%d).\n", x25_asy_maxdev ); - x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev, - GFP_KERNEL); + x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL); if (!x25_asy_devs) { printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] " "array! Uaargh! (-> No X.25 available)\n"); return -ENOMEM; } - memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev); return tty_register_ldisc(N_X25, &x25_ldisc); } diff --git a/drivers/net/wd.c b/drivers/net/wd.c index a0326818ff2f..fa14255282af 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c @@ -93,8 +93,6 @@ static int __init do_wd_probe(struct net_device *dev) int mem_start = dev->mem_start; int mem_end = dev->mem_end; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) { /* Check a user specified location. 
*/ r = request_region(base_addr, WD_IO_EXTENT, "wd-probe"); if ( r == NULL) @@ -158,6 +156,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ const char *model_name; static unsigned version_printed; + DECLARE_MAC_BUF(mac); for (i = 0; i < 8; i++) checksum += inb(ioaddr + 8 + i); @@ -176,9 +175,11 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) if (ei_debug && version_printed++ == 0) printk(version); - printk("%s: WD80x3 at %#3x,", dev->name, ioaddr); for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: WD80x3 at %#3x, %s", + dev->name, ioaddr, print_mac(mac, dev->dev_addr)); /* The following PureData probe code was contributed by Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index ae27af0141c0..5a6fdfd0f140 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -63,11 +63,6 @@ config WAVELAN a Radio LAN (wireless Ethernet-like Local Area Network) using the radio frequencies 900 MHz and 2.4 GHz. - This driver support the ISA version of the WaveLAN card. A separate - driver for the PCMCIA (PC-card) hardware is available in David - Hinds' pcmcia-cs package (see the file <file:Documentation/Changes> - for location). - If you want to use an ISA WaveLAN card under Linux, say Y and read the Ethernet-HOWTO, available from <http://www.tldp.org/docs.html#howto>. Some more specific @@ -280,6 +275,13 @@ config LIBERTAS_USB ---help--- A driver for Marvell Libertas 8388 USB devices. +config LIBERTAS_CS + tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards" + depends on LIBERTAS && PCMCIA && EXPERIMENTAL + select FW_LOADER + ---help--- + A driver for Marvell Libertas 8385 CompactFlash devices. + config LIBERTAS_DEBUG bool "Enable full debugging output in the Libertas module." depends on LIBERTAS @@ -379,30 +381,6 @@ config PCI_HERMES common. Some of the built-in wireless adaptors in laptops are of this variety. -config ATMEL - tristate "Atmel at76c50x chipset 802.11b support" - depends on (PCI || PCMCIA) && WLAN_80211 - select WIRELESS_EXT - select FW_LOADER - select CRC32 - ---help--- - A driver 802.11b wireless cards based on the Atmel fast-vnet - chips. This driver supports standard Linux wireless extensions. - - Many cards based on this chipset do not have flash memory - and need their firmware loaded at start-up. If yours is - one of these, you will need to provide a firmware image - to be loaded into the card by the driver. The Atmel - firmware package can be downloaded from - <http://www.thekelleys.org.uk/atmel> - -config PCI_ATMEL - tristate "Atmel at76c506 PCI cards" - depends on ATMEL && PCI - ---help--- - Enable support for PCI and mini-PCI cards containing the - Atmel at76c506 chip. - config PCMCIA_HERMES tristate "Hermes PCMCIA card support" depends on PCMCIA && HERMES @@ -414,12 +392,7 @@ config PCMCIA_HERMES such as the Linksys, D-Link and Farallon Skyline. It should also work on Symbol cards such as the 3Com AirConnect and Ericsson WLAN. - To use your PC-cards, you will need supporting software from David - Hinds' pcmcia-cs package (see the file <file:Documentation/Changes> - for location). You also want to check out the PCMCIA-HOWTO, - available from <http://www.tldp.org/docs.html#howto>. 
- - You will also very likely also need the Wireless Tools in order to + You will very likely need the Wireless Tools in order to configure your card and that /etc/pcmcia/wireless.opts works: <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. @@ -437,6 +410,40 @@ config PCMCIA_SPECTRUM for downloading Symbol firmware are available at <http://sourceforge.net/projects/orinoco/> +config ATMEL + tristate "Atmel at76c50x chipset 802.11b support" + depends on (PCI || PCMCIA) && WLAN_80211 + select WIRELESS_EXT + select FW_LOADER + select CRC32 + ---help--- + A driver 802.11b wireless cards based on the Atmel fast-vnet + chips. This driver supports standard Linux wireless extensions. + + Many cards based on this chipset do not have flash memory + and need their firmware loaded at start-up. If yours is + one of these, you will need to provide a firmware image + to be loaded into the card by the driver. The Atmel + firmware package can be downloaded from + <http://www.thekelleys.org.uk/atmel> + +config PCI_ATMEL + tristate "Atmel at76c506 PCI cards" + depends on ATMEL && PCI + ---help--- + Enable support for PCI and mini-PCI cards containing the + Atmel at76c506 chip. + +config PCMCIA_ATMEL + tristate "Atmel at76c502/at76c504 PCMCIA cards" + depends on ATMEL && PCMCIA + select WIRELESS_EXT + select FW_LOADER + select CRC32 + ---help--- + Enable support for PCMCIA cards containing the + Atmel at76c502 and at76c504 chips. + config AIRO_CS tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211 @@ -457,21 +464,6 @@ config AIRO_CS and Cisco proprietary API, so both the Linux Wireless Tools and the Cisco Linux utilities can be used to configure the card. - To use your PC-cards, you will need supporting software from David - Hinds' pcmcia-cs package (see the file <file:Documentation/Changes> - for location). You also want to check out the PCMCIA-HOWTO, - available from <http://www.tldp.org/docs.html#howto>. - -config PCMCIA_ATMEL - tristate "Atmel at76c502/at76c504 PCMCIA cards" - depends on ATMEL && PCMCIA - select WIRELESS_EXT - select FW_LOADER - select CRC32 - ---help--- - Enable support for PCMCIA cards containing the - Atmel at76c502 and at76c504 chips. - config PCMCIA_WL3501 tristate "Planet WL3501 PCMCIA cards" depends on EXPERIMENTAL && PCMCIA && WLAN_80211 @@ -558,8 +550,52 @@ config RTL8187 Thanks to Realtek for their support! +config ADM8211 + tristate "ADMtek ADM8211 support" + depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL + select CRC32 + select EEPROM_93CX6 + ---help--- + This driver is for ADM8211A, ADM8211B, and ADM8211C based cards. + These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as: + + Xterasys Cardbus XN-2411b + Blitz NetWave Point PC + TrendNet 221pc + Belkin F5D6001 + SMC 2635W + Linksys WPC11 v1 + Fiberline FL-WL-200X + 3com Office Connect (3CRSHPW796) + Corega WLPCIB-11 + SMC 2602W V2 EU + D-Link DWL-520 Revision C + + However, some of these cards have been replaced with other chips + like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or + the Ralink RT2400 (SMC2635W) without a model number change. + + Thanks to Infineon-ADMtek for their support of this driver. 
+ +config P54_COMMON + tristate "Softmac Prism54 support" + depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL + +config P54_USB + tristate "Prism54 USB support" + depends on P54_COMMON && USB + select CRC32 + +config P54_PCI + tristate "Prism54 PCI support" + depends on P54_COMMON && PCI + +source "drivers/net/wireless/iwlwifi/Kconfig" source "drivers/net/wireless/hostap/Kconfig" source "drivers/net/wireless/bcm43xx/Kconfig" +source "drivers/net/wireless/b43/Kconfig" +source "drivers/net/wireless/b43legacy/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" +source "drivers/net/wireless/rt2x00/Kconfig" endmenu diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index ef35bc6c4a22..6f32b53ee128 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -36,6 +36,8 @@ obj-$(CONFIG_PRISM54) += prism54/ obj-$(CONFIG_HOSTAP) += hostap/ obj-$(CONFIG_BCM43XX) += bcm43xx/ +obj-$(CONFIG_B43) += b43/ +obj-$(CONFIG_B43LEGACY) += b43legacy/ obj-$(CONFIG_ZD1211RW) += zd1211rw/ # 16-bit wireless PCMCIA client drivers @@ -43,7 +45,16 @@ obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o obj-$(CONFIG_USB_ZD1201) += zd1201.o -obj-$(CONFIG_LIBERTAS_USB) += libertas/ +obj-$(CONFIG_LIBERTAS) += libertas/ rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o obj-$(CONFIG_RTL8187) += rtl8187.o + +obj-$(CONFIG_ADM8211) += adm8211.o + +obj-$(CONFIG_IWLWIFI) += iwlwifi/ +obj-$(CONFIG_RT2X00) += rt2x00/ + +obj-$(CONFIG_P54_COMMON) += p54common.o +obj-$(CONFIG_P54_USB) += p54usb.o +obj-$(CONFIG_P54_PCI) += p54pci.o diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c new file mode 100644 index 000000000000..5bf7913aadda --- /dev/null +++ b/drivers/net/wireless/adm8211.c @@ -0,0 +1,2053 @@ + +/* + * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP) + * + * Copyright (c) 2003, Jouni Malinen <j@w1.fi> + * Copyright (c) 2004-2007, Michael Wu <flamingice@sourmilk.net> + * Some parts copyright (c) 2003 by David Young <dyoung@pobox.com> + * and used with permission. + * + * Much thanks to Infineon-ADMtek for their support of this driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include <linux/init.h> +#include <linux/if.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/crc32.h> +#include <linux/eeprom_93cx6.h> +#include <net/mac80211.h> + +#include "adm8211.h" + +MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); +MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); +MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); +MODULE_SUPPORTED_DEVICE("ADM8211"); +MODULE_LICENSE("GPL"); + +static unsigned int tx_ring_size __read_mostly = 16; +static unsigned int rx_ring_size __read_mostly = 16; + +module_param(tx_ring_size, uint, 0); +module_param(rx_ring_size, uint, 0); + +static struct pci_device_id adm8211_pci_id_table[] __devinitdata = { + /* ADMtek ADM8211 */ + { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ + { PCI_DEVICE(0x1200, 0x8201) }, /* ? 
*/ + { PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */ + { PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */ + { 0 } +}; + +static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct adm8211_priv *priv = eeprom->data; + u32 reg = ADM8211_CSR_READ(SPR); + + eeprom->reg_data_in = reg & ADM8211_SPR_SDI; + eeprom->reg_data_out = reg & ADM8211_SPR_SDO; + eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK; + eeprom->reg_chip_select = reg & ADM8211_SPR_SCS; +} + +static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct adm8211_priv *priv = eeprom->data; + u32 reg = 0x4000 | ADM8211_SPR_SRS; + + if (eeprom->reg_data_in) + reg |= ADM8211_SPR_SDI; + if (eeprom->reg_data_out) + reg |= ADM8211_SPR_SDO; + if (eeprom->reg_data_clock) + reg |= ADM8211_SPR_SCLK; + if (eeprom->reg_chip_select) + reg |= ADM8211_SPR_SCS; + + ADM8211_CSR_WRITE(SPR, reg); + ADM8211_CSR_READ(SPR); /* eeprom_delay */ +} + +static int adm8211_read_eeprom(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int words, i; + struct ieee80211_chan_range chan_range; + u16 cr49; + struct eeprom_93cx6 eeprom = { + .data = priv, + .register_read = adm8211_eeprom_register_read, + .register_write = adm8211_eeprom_register_write + }; + + if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) { + /* 256 * 16-bit = 512 bytes */ + eeprom.width = PCI_EEPROM_WIDTH_93C66; + words = 256; + } else { + /* 64 * 16-bit = 128 bytes */ + eeprom.width = PCI_EEPROM_WIDTH_93C46; + words = 64; + } + + priv->eeprom_len = words * 2; + priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL); + if (!priv->eeprom) + return -ENOMEM; + + eeprom_93cx6_multiread(&eeprom, 0, (__le16 __force *)priv->eeprom, words); + + cr49 = le16_to_cpu(priv->eeprom->cr49); + priv->rf_type = (cr49 >> 3) & 0x7; + switch (priv->rf_type) { + case ADM8211_TYPE_INTERSIL: + case ADM8211_TYPE_RFMD: + case ADM8211_TYPE_MARVEL: + case ADM8211_TYPE_AIROHA: + case ADM8211_TYPE_ADMTEK: + break; + + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->rf_type = ADM8211_TYPE_RFMD; + else + priv->rf_type = ADM8211_TYPE_AIROHA; + + printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n", + pci_name(priv->pdev), (cr49 >> 3) & 0x7); + } + + priv->bbp_type = cr49 & 0x7; + switch (priv->bbp_type) { + case ADM8211_TYPE_INTERSIL: + case ADM8211_TYPE_RFMD: + case ADM8211_TYPE_MARVEL: + case ADM8211_TYPE_AIROHA: + case ADM8211_TYPE_ADMTEK: + break; + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->bbp_type = ADM8211_TYPE_RFMD; + else + priv->bbp_type = ADM8211_TYPE_ADMTEK; + + printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n", + pci_name(priv->pdev), cr49 >> 3); + } + + if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) { + printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n", + pci_name(priv->pdev), priv->eeprom->country_code); + + chan_range = cranges[2]; + } else + chan_range = cranges[priv->eeprom->country_code]; + + printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n", + pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max); + + priv->modes[0].num_channels = chan_range.max - chan_range.min + 1; + priv->modes[0].channels = priv->channels; + + memcpy(priv->channels, adm8211_channels, sizeof(adm8211_channels)); + + for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++) + if (i >= chan_range.min && i <= chan_range.max) + priv->channels[i - 1].flag = + IEEE80211_CHAN_W_SCAN | + IEEE80211_CHAN_W_ACTIVE_SCAN | + IEEE80211_CHAN_W_IBSS; + + switch (priv->eeprom->specific_bbptype) { + 
case ADM8211_BBP_RFMD3000: + case ADM8211_BBP_RFMD3002: + case ADM8211_BBP_ADM8011: + priv->specific_bbptype = priv->eeprom->specific_bbptype; + break; + + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->specific_bbptype = ADM8211_BBP_RFMD3000; + else + priv->specific_bbptype = ADM8211_BBP_ADM8011; + + printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n", + pci_name(priv->pdev), priv->eeprom->specific_bbptype); + } + + switch (priv->eeprom->specific_rftype) { + case ADM8211_RFMD2948: + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + case ADM8211_MAX2820: + case ADM8211_AL2210L: + priv->transceiver_type = priv->eeprom->specific_rftype; + break; + + default: + if (priv->pdev->revision == ADM8211_REV_BA) + priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER; + else if (priv->pdev->revision == ADM8211_REV_CA) + priv->transceiver_type = ADM8211_AL2210L; + else if (priv->pdev->revision == ADM8211_REV_AB) + priv->transceiver_type = ADM8211_RFMD2948; + + printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n", + pci_name(priv->pdev), priv->eeprom->specific_rftype); + + break; + } + + printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d " + "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type, + priv->bbp_type, priv->specific_bbptype, priv->transceiver_type); + + return 0; +} + +static inline void adm8211_write_sram(struct ieee80211_hw *dev, + u32 addr, u32 data) +{ + struct adm8211_priv *priv = dev->priv; + + ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR | + (priv->pdev->revision < ADM8211_REV_BA ? + 0 : ADM8211_WEPCTL_SEL_WEPTABLE )); + ADM8211_CSR_READ(WEPCTL); + msleep(1); + + ADM8211_CSR_WRITE(WESK, data); + ADM8211_CSR_READ(WESK); + msleep(1); +} + +static void adm8211_write_sram_bytes(struct ieee80211_hw *dev, + unsigned int addr, u8 *buf, + unsigned int len) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg = ADM8211_CSR_READ(WEPCTL); + unsigned int i; + + if (priv->pdev->revision < ADM8211_REV_BA) { + for (i = 0; i < len; i += 2) { + u16 val = buf[i] | (buf[i + 1] << 8); + adm8211_write_sram(dev, addr + i / 2, val); + } + } else { + for (i = 0; i < len; i += 4) { + u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) | + (buf[i + 2] << 16) | (buf[i + 3] << 24); + adm8211_write_sram(dev, addr + i / 4, val); + } + } + + ADM8211_CSR_WRITE(WEPCTL, reg); +} + +static void adm8211_clear_sram(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg = ADM8211_CSR_READ(WEPCTL); + unsigned int addr; + + for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++) + adm8211_write_sram(dev, addr, 0); + + ADM8211_CSR_WRITE(WEPCTL, reg); +} + +static int adm8211_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + struct adm8211_priv *priv = dev->priv; + + memcpy(stats, &priv->stats, sizeof(*stats)); + + return 0; +} + +static int adm8211_get_tx_stats(struct ieee80211_hw *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct adm8211_priv *priv = dev->priv; + struct ieee80211_tx_queue_stats_data *data = &stats->data[0]; + + data->len = priv->cur_tx - priv->dirty_tx; + data->limit = priv->tx_ring_size - 2; + data->count = priv->dirty_tx; + + return 0; +} + +static void adm8211_interrupt_tci(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int dirty_tx; + + spin_lock(&priv->lock); + + for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { + unsigned int entry = dirty_tx % priv->tx_ring_size; + u32 status = 
le32_to_cpu(priv->tx_ring[entry].status); + struct ieee80211_tx_status tx_status; + struct adm8211_tx_ring_info *info; + struct sk_buff *skb; + + if (status & TDES0_CONTROL_OWN || + !(status & TDES0_CONTROL_DONE)) + break; + + info = &priv->tx_buffers[entry]; + skb = info->skb; + + /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */ + + pci_unmap_single(priv->pdev, info->mapping, + info->skb->len, PCI_DMA_TODEVICE); + + memset(&tx_status, 0, sizeof(tx_status)); + skb_pull(skb, sizeof(struct adm8211_tx_hdr)); + memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen); + memcpy(&tx_status.control, &info->tx_control, + sizeof(tx_status.control)); + if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) { + if (status & TDES0_STATUS_ES) + tx_status.excessive_retries = 1; + else + tx_status.flags |= IEEE80211_TX_STATUS_ACK; + } + ieee80211_tx_status_irqsafe(dev, skb, &tx_status); + + info->skb = NULL; + } + + if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2) + ieee80211_wake_queue(dev, 0); + + priv->dirty_tx = dirty_tx; + spin_unlock(&priv->lock); +} + + +static void adm8211_interrupt_rci(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int entry = priv->cur_rx % priv->rx_ring_size; + u32 status; + unsigned int pktlen; + struct sk_buff *skb, *newskb; + unsigned int limit = priv->rx_ring_size; + static const u8 rate_tbl[] = {10, 20, 55, 110, 220}; + u8 rssi, rate; + + while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) { + if (!limit--) + break; + + status = le32_to_cpu(priv->rx_ring[entry].status); + rate = (status & RDES0_STATUS_RXDR) >> 12; + rssi = le32_to_cpu(priv->rx_ring[entry].length) & + RDES1_STATUS_RSSI; + + pktlen = status & RDES0_STATUS_FL; + if (pktlen > RX_PKT_SIZE) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: frame too long (%d)\n", + wiphy_name(dev->wiphy), pktlen); + pktlen = RX_PKT_SIZE; + } + + if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) { + skb = NULL; /* old buffer will be reused */ + /* TODO: update RX error stats */ + /* TODO: check RDES0_STATUS_CRC*E */ + } else if (pktlen < RX_COPY_BREAK) { + skb = dev_alloc_skb(pktlen); + if (skb) { + pci_dma_sync_single_for_cpu( + priv->pdev, + priv->rx_buffers[entry].mapping, + pktlen, PCI_DMA_FROMDEVICE); + memcpy(skb_put(skb, pktlen), + skb_tail_pointer(priv->rx_buffers[entry].skb), + pktlen); + pci_dma_sync_single_for_device( + priv->pdev, + priv->rx_buffers[entry].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + } + } else { + newskb = dev_alloc_skb(RX_PKT_SIZE); + if (newskb) { + skb = priv->rx_buffers[entry].skb; + skb_put(skb, pktlen); + pci_unmap_single( + priv->pdev, + priv->rx_buffers[entry].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + priv->rx_buffers[entry].skb = newskb; + priv->rx_buffers[entry].mapping = + pci_map_single(priv->pdev, + skb_tail_pointer(newskb), + RX_PKT_SIZE, + PCI_DMA_FROMDEVICE); + } else { + skb = NULL; + /* TODO: update rx dropped stats */ + } + + priv->rx_ring[entry].buffer1 = + cpu_to_le32(priv->rx_buffers[entry].mapping); + } + + priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN | + RDES0_STATUS_SQL); + priv->rx_ring[entry].length = + cpu_to_le32(RX_PKT_SIZE | + (entry == priv->rx_ring_size - 1 ? 
+ RDES1_CONTROL_RER : 0)); + + if (skb) { + struct ieee80211_rx_status rx_status = {0}; + + if (priv->pdev->revision < ADM8211_REV_CA) + rx_status.ssi = rssi; + else + rx_status.ssi = 100 - rssi; + + if (rate <= 4) + rx_status.rate = rate_tbl[rate]; + + rx_status.channel = priv->channel; + rx_status.freq = adm8211_channels[priv->channel - 1].freq; + rx_status.phymode = MODE_IEEE80211B; + + ieee80211_rx_irqsafe(dev, skb, &rx_status); + } + + entry = (++priv->cur_rx) % priv->rx_ring_size; + } + + /* TODO: check LPC and update stats? */ +} + + +static irqreturn_t adm8211_interrupt(int irq, void *dev_id) +{ +#define ADM8211_INT(x) \ +do { \ + if (unlikely(stsr & ADM8211_STSR_ ## x)) \ + printk(KERN_DEBUG "%s: " #x "\n", wiphy_name(dev->wiphy)); \ +} while (0) + + struct ieee80211_hw *dev = dev_id; + struct adm8211_priv *priv = dev->priv; + u32 stsr = ADM8211_CSR_READ(STSR); + ADM8211_CSR_WRITE(STSR, stsr); + if (stsr == 0xffffffff) + return IRQ_HANDLED; + + if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS))) + return IRQ_HANDLED; + + if (stsr & ADM8211_STSR_RCI) + adm8211_interrupt_rci(dev); + if (stsr & ADM8211_STSR_TCI) + adm8211_interrupt_tci(dev); + + /*ADM8211_INT(LinkOn);*/ + /*ADM8211_INT(LinkOff);*/ + + ADM8211_INT(PCF); + ADM8211_INT(BCNTC); + ADM8211_INT(GPINT); + ADM8211_INT(ATIMTC); + ADM8211_INT(TSFTF); + ADM8211_INT(TSCZ); + ADM8211_INT(SQL); + ADM8211_INT(WEPTD); + ADM8211_INT(ATIME); + /*ADM8211_INT(TBTT);*/ + ADM8211_INT(TEIS); + ADM8211_INT(FBE); + ADM8211_INT(REIS); + ADM8211_INT(GPTT); + ADM8211_INT(RPS); + ADM8211_INT(RDU); + ADM8211_INT(TUF); + /*ADM8211_INT(TRT);*/ + /*ADM8211_INT(TLT);*/ + /*ADM8211_INT(TDU);*/ + ADM8211_INT(TPS); + + return IRQ_HANDLED; + +#undef ADM8211_INT +} + +#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\ +static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \ + u16 addr, u32 value) { \ + struct adm8211_priv *priv = dev->priv; \ + unsigned int i; \ + u32 reg, bitbuf; \ + \ + value &= v_mask; \ + addr &= a_mask; \ + bitbuf = (value << v_shift) | (addr << a_shift); \ + \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \ + ADM8211_CSR_READ(SYNRF); \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \ + ADM8211_CSR_READ(SYNRF); \ + \ + if (prewrite) { \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + for (i = 0; i <= bits; i++) { \ + if (bitbuf & (1 << (bits - i))) \ + reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \ + else \ + reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \ + \ + ADM8211_CSR_WRITE(SYNRF, reg); \ + ADM8211_CSR_READ(SYNRF); \ + \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \ + ADM8211_CSR_READ(SYNRF); \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + if (postwrite == 1) { \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + if (postwrite == 2) { \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + ADM8211_CSR_WRITE(SYNRF, 0); \ + ADM8211_CSR_READ(SYNRF); \ +} + +WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1) +WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1) +WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1) +WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2) + +#undef WRITE_SYN + +static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int timeout; + u32 reg; + + timeout = 10; + while 
(timeout > 0) { + reg = ADM8211_CSR_READ(BBPCTL); + if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD))) + break; + timeout--; + msleep(2); + } + + if (timeout == 0) { + printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" + " prewrite (reg=0x%08x)\n", + wiphy_name(dev->wiphy), addr, data, reg); + return -ETIMEDOUT; + } + + switch (priv->bbp_type) { + case ADM8211_TYPE_INTERSIL: + reg = ADM8211_BBPCTL_MMISEL; /* three wire interface */ + break; + case ADM8211_TYPE_RFMD: + reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | + (0x01 << 18); + break; + case ADM8211_TYPE_ADMTEK: + reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | + (0x05 << 18); + break; + } + reg |= ADM8211_BBPCTL_WR | (addr << 8) | data; + + ADM8211_CSR_WRITE(BBPCTL, reg); + + timeout = 10; + while (timeout > 0) { + reg = ADM8211_CSR_READ(BBPCTL); + if (!(reg & ADM8211_BBPCTL_WR)) + break; + timeout--; + msleep(2); + } + + if (timeout == 0) { + ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) & + ~ADM8211_BBPCTL_WR); + printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" + " postwrite (reg=0x%08x)\n", + wiphy_name(dev->wiphy), addr, data, reg); + return -ETIMEDOUT; + } + + return 0; +} + +static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) +{ + static const u32 adm8211_rfmd2958_reg5[] = + {0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340, + 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7}; + static const u32 adm8211_rfmd2958_reg6[] = + {0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000, + 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745}; + + struct adm8211_priv *priv = dev->priv; + u8 ant_power = priv->ant_power > 0x3F ? + priv->eeprom->antenna_power[chan - 1] : priv->ant_power; + u8 tx_power = priv->tx_power > 0x3F ? + priv->eeprom->tx_power[chan - 1] : priv->tx_power; + u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ? + priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff; + u8 lnags_thresh = priv->lnags_threshold == 0xFF ? + priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold; + u32 reg; + + ADM8211_IDLE(); + + /* Program synthesizer to new channel */ + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007); + adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033); + + adm8211_rf_write_syn_rfmd2958(dev, 0x05, + adm8211_rfmd2958_reg5[chan - 1]); + adm8211_rf_write_syn_rfmd2958(dev, 0x06, + adm8211_rfmd2958_reg6[chan - 1]); + break; + + case ADM8211_RFMD2948: + adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF, + SI4126_MAIN_XINDIV2); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN, + SI4126_POWERDOWN_PDIB | + SI4126_POWERDOWN_PDRB); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV, + (chan == 14 ? + 2110 : (2033 + (chan * 5)))); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44); + break; + + case ADM8211_MAX2820: + adm8211_rf_write_syn_max2820(dev, 0x3, + (chan == 14 ? 0x054 : (0x7 + (chan * 5)))); + break; + + case ADM8211_AL2210L: + adm8211_rf_write_syn_al2210l(dev, 0x0, + (chan == 14 ? 
0x229B4 : (0x22967 + (chan * 5)))); + break; + + default: + printk(KERN_DEBUG "%s: unsupported transceiver type %d\n", + wiphy_name(dev->wiphy), priv->transceiver_type); + break; + } + + /* write BBP regs */ + if (priv->bbp_type == ADM8211_TYPE_RFMD) { + + /* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */ + /* TODO: remove if SMC 2635W doesn't need this */ + if (priv->transceiver_type == ADM8211_RFMD2948) { + reg = ADM8211_CSR_READ(GPIO); + reg &= 0xfffc0000; + reg |= ADM8211_CSR_GPIO_EN0; + if (chan != 14) + reg |= ADM8211_CSR_GPIO_O0; + ADM8211_CSR_WRITE(GPIO, reg); + } + + if (priv->transceiver_type == ADM8211_RFMD2958) { + /* set PCNT2 */ + adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100); + /* set PCNT1 P_DESIRED/MID_BIAS */ + reg = le16_to_cpu(priv->eeprom->cr49); + reg >>= 13; + reg <<= 15; + reg |= ant_power << 9; + adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg); + /* set TXRX TX_GAIN */ + adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 | + (priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0)); + } else { + reg = ADM8211_CSR_READ(PLCPHD); + reg &= 0xff00ffff; + reg |= tx_power << 18; + ADM8211_CSR_WRITE(PLCPHD, reg); + } + + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | + ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); + ADM8211_CSR_READ(SYNRF); + msleep(30); + + /* RF3000 BBP */ + if (priv->transceiver_type != ADM8211_RFMD2958) + adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, + tx_power<<2); + adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff); + adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh); + adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ? + priv->eeprom->cr28 : 0); + adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); + + ADM8211_CSR_WRITE(SYNRF, 0); + + /* Nothing to do for ADMtek BBP */ + } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) + printk(KERN_DEBUG "%s: unsupported BBP type %d\n", + wiphy_name(dev->wiphy), priv->bbp_type); + + ADM8211_RESTORE(); + + /* update current channel for adhoc (and maybe AP mode) */ + reg = ADM8211_CSR_READ(CAP0); + reg &= ~0xF; + reg |= chan; + ADM8211_CSR_WRITE(CAP0, reg); + + return 0; +} + +static void adm8211_update_mode(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + ADM8211_IDLE(); + + priv->soft_rx_crc = 0; + switch (priv->mode) { + case IEEE80211_IF_TYPE_STA: + priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA); + priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR; + break; + case IEEE80211_IF_TYPE_IBSS: + priv->nar &= ~ADM8211_NAR_PR; + priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR; + + /* don't trust the error bits on rev 0x20 and up in adhoc */ + if (priv->pdev->revision >= ADM8211_REV_BA) + priv->soft_rx_crc = 1; + break; + case IEEE80211_IF_TYPE_MNTR: + priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST); + priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR; + break; + } + + ADM8211_RESTORE(); +} + +static void adm8211_hw_init_syn(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + /* comments taken from ADMtek vendor driver */ + + /* Reset RF2958 after power on */ + adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000); + /* Initialize RF VCO Core Bias to maximum */ + adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F); + /* Initialize IF PLL */ + adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03); + /* Initialize IF PLL Coarse Tuning */ + adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F); + /* Initialize RF PLL */ + 
adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403); + /* Initialize RF PLL Coarse Tuning */ + adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F); + /* Initialize TX gain and filter BW (R9) */ + adm8211_rf_write_syn_rfmd2958(dev, 0x09, + (priv->transceiver_type == ADM8211_RFMD2958 ? + 0x10050 : 0x00050)); + /* Initialize CAL register */ + adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8); + break; + + case ADM8211_MAX2820: + adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E); + adm8211_rf_write_syn_max2820(dev, 0x2, 0x001); + adm8211_rf_write_syn_max2820(dev, 0x3, 0x054); + adm8211_rf_write_syn_max2820(dev, 0x4, 0x310); + adm8211_rf_write_syn_max2820(dev, 0x5, 0x000); + break; + + case ADM8211_AL2210L: + adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C); + adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB); + adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F); + adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9); + adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280); + adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641); + adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130); + adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000); + adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F); + adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C); + adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000); + adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000); + break; + + case ADM8211_RFMD2948: + default: + break; + } +} + +static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + /* write addresses */ + if (priv->bbp_type == ADM8211_TYPE_INTERSIL) { + ADM8211_CSR_WRITE(MMIWA, 0x100E0C0A); + ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E); + ADM8211_CSR_WRITE(MMIRD1, 0x00100000); + } else if (priv->bbp_type == ADM8211_TYPE_RFMD || + priv->bbp_type == ADM8211_TYPE_ADMTEK) { + /* check specific BBP type */ + switch (priv->specific_bbptype) { + case ADM8211_BBP_RFMD3000: + case ADM8211_BBP_RFMD3002: + ADM8211_CSR_WRITE(MMIWA, 0x00009101); + ADM8211_CSR_WRITE(MMIRD0, 0x00000301); + break; + + case ADM8211_BBP_ADM8011: + ADM8211_CSR_WRITE(MMIWA, 0x00008903); + ADM8211_CSR_WRITE(MMIRD0, 0x00001716); + + reg = ADM8211_CSR_READ(BBPCTL); + reg &= ~ADM8211_BBPCTL_TYPE; + reg |= 0x5 << 18; + ADM8211_CSR_WRITE(BBPCTL, reg); + break; + } + + switch (priv->pdev->revision) { + case ADM8211_REV_CA: + if (priv->transceiver_type == ADM8211_RFMD2958 || + priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || + priv->transceiver_type == ADM8211_RFMD2948) + ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22); + else if (priv->transceiver_type == ADM8211_MAX2820 || + priv->transceiver_type == ADM8211_AL2210L) + ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22); + break; + + case ADM8211_REV_BA: + reg = ADM8211_CSR_READ(MMIRD1); + reg &= 0x0000FFFF; + reg |= 0x7e100000; + ADM8211_CSR_WRITE(MMIRD1, reg); + break; + + case ADM8211_REV_AB: + case ADM8211_REV_AF: + default: + ADM8211_CSR_WRITE(MMIRD1, 0x7e100000); + break; + } + + /* For RFMD */ + ADM8211_CSR_WRITE(MACTEST, 0x800); + } + + adm8211_hw_init_syn(dev); + + /* Set RF Power control IF pin to PE1+PHYRST# */ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | + ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); + ADM8211_CSR_READ(SYNRF); + msleep(20); + + /* write BBP regs */ + if (priv->bbp_type == ADM8211_TYPE_RFMD) { + /* RF3000 BBP */ + /* another set: + * 11: c8 + * 14: 14 + * 15: 50 (chan 1..13; chan 14: d0) + * 1c: 00 + * 1d: 84 + */ + adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80); + /* antenna selection: diversity */ + adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80); + 
adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74); + adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38); + adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40); + + if (priv->eeprom->major_version < 2) { + adm8211_write_bbp(dev, 0x1c, 0x00); + adm8211_write_bbp(dev, 0x1d, 0x80); + } else { + if (priv->pdev->revision == ADM8211_REV_BA) + adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28); + else + adm8211_write_bbp(dev, 0x1c, 0x00); + + adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); + } + } else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) { + /* reset baseband */ + adm8211_write_bbp(dev, 0x00, 0xFF); + /* antenna selection: diversity */ + adm8211_write_bbp(dev, 0x07, 0x0A); + + /* TODO: find documentation for this */ + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x09, 0x00); + adm8211_write_bbp(dev, 0x0a, 0x00); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x00); + adm8211_write_bbp(dev, 0x0f, 0xAA); + adm8211_write_bbp(dev, 0x10, 0x8c); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x40); + adm8211_write_bbp(dev, 0x20, 0x23); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x28); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x28, 0x35); + adm8211_write_bbp(dev, 0x2a, 0x8c); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x2d, 0x0A); + adm8211_write_bbp(dev, 0x29, 0x40); + adm8211_write_bbp(dev, 0x60, 0x08); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_MAX2820: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x09, 0x05); + adm8211_write_bbp(dev, 0x0a, 0x02); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x0f); + adm8211_write_bbp(dev, 0x0f, 0x55); + adm8211_write_bbp(dev, 0x10, 0x8d); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x4a); + adm8211_write_bbp(dev, 0x20, 0x20); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x23); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x2a, 0x8c); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x29, 0x4a); + adm8211_write_bbp(dev, 0x60, 0x2b); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_AL2210L: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x07, 0x05); + adm8211_write_bbp(dev, 0x08, 0x03); + adm8211_write_bbp(dev, 0x09, 0x00); + adm8211_write_bbp(dev, 0x0a, 0x00); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x10); + adm8211_write_bbp(dev, 0x0f, 0x55); + adm8211_write_bbp(dev, 0x10, 0x8d); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x4a); + adm8211_write_bbp(dev, 0x20, 0x20); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x23); + adm8211_write_bbp(dev, 0x23, 0x30); + 
adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x2a, 0xaa); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x29, 0xfa); + adm8211_write_bbp(dev, 0x60, 0x2d); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_RFMD2948: + break; + + default: + printk(KERN_DEBUG "%s: unsupported transceiver %d\n", + wiphy_name(dev->wiphy), priv->transceiver_type); + break; + } + } else + printk(KERN_DEBUG "%s: unsupported BBP %d\n", + wiphy_name(dev->wiphy), priv->bbp_type); + + ADM8211_CSR_WRITE(SYNRF, 0); + + /* Set RF CAL control source to MAC control */ + reg = ADM8211_CSR_READ(SYNCTL); + reg |= ADM8211_SYNCTL_SELCAL; + ADM8211_CSR_WRITE(SYNCTL, reg); + + return 0; +} + +/* configures hw beacons/probe responses */ +static int adm8211_set_rate(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + int i = 0; + u8 rate_buf[12] = {0}; + + /* write supported rates */ + if (priv->pdev->revision != ADM8211_REV_BA) { + rate_buf[0] = ARRAY_SIZE(adm8211_rates); + for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++) + rate_buf[i + 1] = (adm8211_rates[i].rate / 5) | 0x80; + } else { + /* workaround for rev BA specific bug */ + rate_buf[0] = 0x04; + rate_buf[1] = 0x82; + rate_buf[2] = 0x04; + rate_buf[3] = 0x0b; + rate_buf[4] = 0x16; + } + + adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf, + ARRAY_SIZE(adm8211_rates) + 1); + + reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */ + reg |= 1 << 15; /* short preamble */ + reg |= 110 << 24; + ADM8211_CSR_WRITE(PLCPHD, reg); + + /* MTMLT = 512 TU (max TX MSDU lifetime) + * BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate) + * SRTYLIM = 224 (short retry limit, TX header value is default) */ + ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0)); + + return 0; +} + +static void adm8211_hw_init(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + u8 cline; + + reg = le32_to_cpu(ADM8211_CSR_READ(PAR)); + reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME; + reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL); + + if (!pci_set_mwi(priv->pdev)) { + reg |= 0x1 << 24; + pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); + + switch (cline) { + case 0x8: reg |= (0x1 << 14); + break; + case 0x16: reg |= (0x2 << 14); + break; + case 0x32: reg |= (0x3 << 14); + break; + default: reg |= (0x0 << 14); + break; + } + } + + ADM8211_CSR_WRITE(PAR, reg); + + reg = ADM8211_CSR_READ(CSR_TEST1); + reg &= ~(0xF << 28); + reg |= (1 << 28) | (1 << 31); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + + /* lose link after 4 lost beacons */ + reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE; + ADM8211_CSR_WRITE(WCSR, reg); + + /* Disable APM, enable receive FIFO threshold, and set drain receive + * threshold to store-and-forward */ + reg = ADM8211_CSR_READ(CMDR); + reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT); + reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF; + ADM8211_CSR_WRITE(CMDR, reg); + + adm8211_set_rate(dev); + + /* 4-bit values: + * PWR1UP = 8 * 2 ms + * PWR0PAPE = 8 us or 5 us + * PWR1PAPE = 1 us or 3 us + * PWR0TRSW = 5 us + * PWR1TRSW = 12 us + * PWR0PE2 = 13 us + * PWR1PE2 = 1 us + * PWR0TXPE = 8 or 6 */ + if (priv->pdev->revision < ADM8211_REV_CA) + ADM8211_CSR_WRITE(TOFS2, 0x8815cd18); + else + ADM8211_CSR_WRITE(TOFS2, 0x8535cd16); + + /* Enable store and forward for transmit */ + priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB; + ADM8211_CSR_WRITE(NAR, priv->nar); + + /* Reset RF */ + 
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO); + ADM8211_CSR_READ(SYNRF); + msleep(10); + ADM8211_CSR_WRITE(SYNRF, 0); + ADM8211_CSR_READ(SYNRF); + msleep(5); + + /* Set CFP Max Duration to 0x10 TU */ + reg = ADM8211_CSR_READ(CFPP); + reg &= ~(0xffff << 8); + reg |= 0x0010 << 8; + ADM8211_CSR_WRITE(CFPP, reg); + + /* USCNT = 0x16 (number of system clocks, 22 MHz, in 1us + * TUCNT = 0x3ff - Tu counter 1024 us */ + ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff); + + /* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us), + * DIFS=50 us, EIFS=100 us */ + if (priv->pdev->revision < ADM8211_REV_CA) + ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) | + (50 << 9) | 100); + else + ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) | + (50 << 9) | 100); + + /* PCNT = 1 (MAC idle time awake/sleep, unit S) + * RMRD = 2346 * 8 + 1 us (max RX duration) */ + ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769); + + /* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */ + ADM8211_CSR_WRITE(RSPT, 0xffffff00); + + /* Initialize BBP (and SYN) */ + adm8211_hw_init_bbp(dev); + + /* make sure interrupts are off */ + ADM8211_CSR_WRITE(IER, 0); + + /* ACK interrupts */ + ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR)); + + /* Setup WEP (turns it off for now) */ + reg = ADM8211_CSR_READ(MACTEST); + reg &= ~(7 << 20); + ADM8211_CSR_WRITE(MACTEST, reg); + + reg = ADM8211_CSR_READ(WEPCTL); + reg &= ~ADM8211_WEPCTL_WEPENABLE; + reg |= ADM8211_WEPCTL_WEPRXBYP; + ADM8211_CSR_WRITE(WEPCTL, reg); + + /* Clear the missed-packet counter. */ + ADM8211_CSR_READ(LPC); +} + +static int adm8211_hw_reset(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg, tmp; + int timeout = 100; + + /* Power-on issue */ + /* TODO: check if this is necessary */ + ADM8211_CSR_WRITE(FRCTL, 0); + + /* Reset the chip */ + tmp = ADM8211_CSR_READ(PAR); + ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR); + + while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--) + msleep(50); + + if (timeout <= 0) + return -ETIMEDOUT; + + ADM8211_CSR_WRITE(PAR, tmp); + + if (priv->pdev->revision == ADM8211_REV_BA && + (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || + priv->transceiver_type == ADM8211_RFMD2958)) { + reg = ADM8211_CSR_READ(CSR_TEST1); + reg |= (1 << 4) | (1 << 5); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + } else if (priv->pdev->revision == ADM8211_REV_CA) { + reg = ADM8211_CSR_READ(CSR_TEST1); + reg &= ~((1 << 4) | (1 << 5)); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + } + + ADM8211_CSR_WRITE(FRCTL, 0); + + reg = ADM8211_CSR_READ(CSR_TEST0); + reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */ + ADM8211_CSR_WRITE(CSR_TEST0, reg); + + adm8211_clear_sram(dev); + + return 0; +} + +static u64 adm8211_get_tsft(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 tsftl; + u64 tsft; + + tsftl = ADM8211_CSR_READ(TSFTL); + tsft = ADM8211_CSR_READ(TSFTH); + tsft <<= 32; + tsft |= tsftl; + + return tsft; +} + +static void adm8211_set_interval(struct ieee80211_hw *dev, + unsigned short bi, unsigned short li) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + /* BP (beacon interval) = data->beacon_interval + * LI (listen interval) = data->listen_interval (in beacon intervals) */ + reg = (bi << 16) | li; + ADM8211_CSR_WRITE(BPLI, reg); +} + +static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid)); + reg = ADM8211_CSR_READ(ABDA1); + reg &= 0x0000ffff; + reg |= (bssid[4] << 16) | (bssid[5] << 
24); + ADM8211_CSR_WRITE(ABDA1, reg); +} + +static int adm8211_set_ssid(struct ieee80211_hw *dev, u8 *ssid, size_t ssid_len) +{ + struct adm8211_priv *priv = dev->priv; + u8 buf[36]; + + if (ssid_len > 32) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + buf[0] = ssid_len; + memcpy(buf + 1, ssid, ssid_len); + adm8211_write_sram_bytes(dev, ADM8211_SRAM_SSID, buf, 33); + /* TODO: configure beacon for adhoc? */ + return 0; +} + +static int adm8211_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + + if (conf->channel != priv->channel) { + priv->channel = conf->channel; + adm8211_rf_set_channel(dev, priv->channel); + } + + return 0; +} + +static int adm8211_config_interface(struct ieee80211_hw *dev, int if_id, + struct ieee80211_if_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + + if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) { + adm8211_set_bssid(dev, conf->bssid); + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + } + + if (conf->ssid_len != priv->ssid_len || + memcmp(conf->ssid, priv->ssid, conf->ssid_len)) { + adm8211_set_ssid(dev, conf->ssid, conf->ssid_len); + priv->ssid_len = conf->ssid_len; + memcpy(priv->ssid, conf->ssid, conf->ssid_len); + } + + return 0; +} + +static void adm8211_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_mc_list *mclist) +{ + static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + struct adm8211_priv *priv = dev->priv; + unsigned int bit_nr, new_flags; + u32 mc_filter[2]; + int i; + + new_flags = 0; + + if (*total_flags & FIF_PROMISC_IN_BSS) { + new_flags |= FIF_PROMISC_IN_BSS; + priv->nar |= ADM8211_NAR_PR; + priv->nar &= ~ADM8211_NAR_MM; + mc_filter[1] = mc_filter[0] = ~0; + } else if ((*total_flags & FIF_ALLMULTI) || (mc_count > 32)) { + new_flags |= FIF_ALLMULTI; + priv->nar &= ~ADM8211_NAR_PR; + priv->nar |= ADM8211_NAR_MM; + mc_filter[1] = mc_filter[0] = ~0; + } else { + priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR); + mc_filter[1] = mc_filter[0] = 0; + for (i = 0; i < mc_count; i++) { + if (!mclist) + break; + bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + + bit_nr &= 0x3F; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + mclist = mclist->next; + } + } + + ADM8211_IDLE_RX(); + + ADM8211_CSR_WRITE(MAR0, mc_filter[0]); + ADM8211_CSR_WRITE(MAR1, mc_filter[1]); + ADM8211_CSR_READ(NAR); + + if (priv->nar & ADM8211_NAR_PR) + dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS; + else + dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; + + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + adm8211_set_bssid(dev, bcast); + else + adm8211_set_bssid(dev, priv->bssid); + + ADM8211_RESTORE(); + + *total_flags = new_flags; +} + +static int adm8211_add_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + if (priv->mode != IEEE80211_IF_TYPE_MNTR) + return -EOPNOTSUPP; + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + priv->mode = conf->type; + break; + default: + return -EOPNOTSUPP; + } + + ADM8211_IDLE(); + + ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr)); + ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); + + adm8211_update_mode(dev); + + ADM8211_RESTORE(); + + return 0; +} + +static void adm8211_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + priv->mode = IEEE80211_IF_TYPE_MNTR; +} + +static int 
adm8211_init_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + struct adm8211_desc *desc = NULL; + struct adm8211_rx_ring_info *rx_info; + struct adm8211_tx_ring_info *tx_info; + unsigned int i; + + for (i = 0; i < priv->rx_ring_size; i++) { + desc = &priv->rx_ring[i]; + desc->status = 0; + desc->length = cpu_to_le32(RX_PKT_SIZE); + priv->rx_buffers[i].skb = NULL; + } + /* Mark the end of RX ring; hw returns to base address after this + * descriptor */ + desc->length |= cpu_to_le32(RDES1_CONTROL_RER); + + for (i = 0; i < priv->rx_ring_size; i++) { + desc = &priv->rx_ring[i]; + rx_info = &priv->rx_buffers[i]; + + rx_info->skb = dev_alloc_skb(RX_PKT_SIZE); + if (rx_info->skb == NULL) + break; + rx_info->mapping = pci_map_single(priv->pdev, + skb_tail_pointer(rx_info->skb), + RX_PKT_SIZE, + PCI_DMA_FROMDEVICE); + desc->buffer1 = cpu_to_le32(rx_info->mapping); + desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); + } + + /* Setup TX ring. TX buffers descriptors will be filled in as needed */ + for (i = 0; i < priv->tx_ring_size; i++) { + desc = &priv->tx_ring[i]; + tx_info = &priv->tx_buffers[i]; + + tx_info->skb = NULL; + tx_info->mapping = 0; + desc->status = 0; + } + desc->length = cpu_to_le32(TDES1_CONTROL_TER); + + priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0; + ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma); + ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma); + + return 0; +} + +static void adm8211_free_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int i; + + for (i = 0; i < priv->rx_ring_size; i++) { + if (!priv->rx_buffers[i].skb) + continue; + + pci_unmap_single( + priv->pdev, + priv->rx_buffers[i].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + + dev_kfree_skb(priv->rx_buffers[i].skb); + } + + for (i = 0; i < priv->tx_ring_size; i++) { + if (!priv->tx_buffers[i].skb) + continue; + + pci_unmap_single(priv->pdev, + priv->tx_buffers[i].mapping, + priv->tx_buffers[i].skb->len, + PCI_DMA_TODEVICE); + + dev_kfree_skb(priv->tx_buffers[i].skb); + } +} + +static int adm8211_start(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + int retval; + + /* Power up MAC and RF chips */ + retval = adm8211_hw_reset(dev); + if (retval) { + printk(KERN_ERR "%s: hardware reset failed\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + retval = adm8211_init_rings(dev); + if (retval) { + printk(KERN_ERR "%s: failed to initialize rings\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + /* Init hardware */ + adm8211_hw_init(dev); + adm8211_rf_set_channel(dev, priv->channel); + + retval = request_irq(priv->pdev->irq, &adm8211_interrupt, + IRQF_SHARED, "adm8211", dev); + if (retval) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | + ADM8211_IER_RCIE | ADM8211_IER_TCIE | + ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); + priv->mode = IEEE80211_IF_TYPE_MNTR; + adm8211_update_mode(dev); + ADM8211_CSR_WRITE(RDR, 0); + + adm8211_set_interval(dev, 100, 10); + return 0; + +fail: + return retval; +} + +static void adm8211_stop(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + priv->mode = IEEE80211_IF_TYPE_INVALID; + priv->nar = 0; + ADM8211_CSR_WRITE(NAR, 0); + ADM8211_CSR_WRITE(IER, 0); + ADM8211_CSR_READ(NAR); + + free_irq(priv->pdev->irq, dev); + + adm8211_free_rings(dev); +} + +static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len, + int plcp_signal, int 
short_preamble) +{ + /* Alternative calculation from NetBSD: */ + +/* IEEE 802.11b durations for DSSS PHY in microseconds */ +#define IEEE80211_DUR_DS_LONG_PREAMBLE 144 +#define IEEE80211_DUR_DS_SHORT_PREAMBLE 72 +#define IEEE80211_DUR_DS_FAST_PLCPHDR 24 +#define IEEE80211_DUR_DS_SLOW_PLCPHDR 48 +#define IEEE80211_DUR_DS_SLOW_ACK 112 +#define IEEE80211_DUR_DS_FAST_ACK 56 +#define IEEE80211_DUR_DS_SLOW_CTS 112 +#define IEEE80211_DUR_DS_FAST_CTS 56 +#define IEEE80211_DUR_DS_SLOT 20 +#define IEEE80211_DUR_DS_SIFS 10 + + int remainder; + + *dur = (80 * (24 + payload_len) + plcp_signal - 1) + / plcp_signal; + + if (plcp_signal <= PLCP_SIGNAL_2M) + /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */ + *dur += 3 * (IEEE80211_DUR_DS_SIFS + + IEEE80211_DUR_DS_SHORT_PREAMBLE + + IEEE80211_DUR_DS_FAST_PLCPHDR) + + IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK; + else + /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */ + *dur += 3 * (IEEE80211_DUR_DS_SIFS + + IEEE80211_DUR_DS_SHORT_PREAMBLE + + IEEE80211_DUR_DS_FAST_PLCPHDR) + + IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK; + + /* lengthen duration if long preamble */ + if (!short_preamble) + *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE - + IEEE80211_DUR_DS_SHORT_PREAMBLE) + + 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR - + IEEE80211_DUR_DS_FAST_PLCPHDR); + + + *plcp = (80 * len) / plcp_signal; + remainder = (80 * len) % plcp_signal; + if (plcp_signal == PLCP_SIGNAL_11M && + remainder <= 30 && remainder > 0) + *plcp = (*plcp | 0x8000) + 1; + else if (remainder) + (*plcp)++; +} + +/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ +static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, + u16 plcp_signal, + struct ieee80211_tx_control *control, + size_t hdrlen) +{ + struct adm8211_priv *priv = dev->priv; + unsigned long flags; + dma_addr_t mapping; + unsigned int entry; + u32 flag; + + mapping = pci_map_single(priv->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2) + flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS; + else + flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS; + + if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2) + ieee80211_stop_queue(dev, 0); + + entry = priv->cur_tx % priv->tx_ring_size; + + priv->tx_buffers[entry].skb = skb; + priv->tx_buffers[entry].mapping = mapping; + memcpy(&priv->tx_buffers[entry].tx_control, control, sizeof(*control)); + priv->tx_buffers[entry].hdrlen = hdrlen; + priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); + + if (entry == priv->tx_ring_size - 1) + flag |= TDES1_CONTROL_TER; + priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len); + + /* Set TX rate (SIGNAL field in PLCP PPDU format) */ + flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? 
*/; + priv->tx_ring[entry].status = cpu_to_le32(flag); + + priv->cur_tx++; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Trigger transmit poll */ + ADM8211_CSR_WRITE(TDR, 0); +} + +/* Put adm8211_tx_hdr on skb and transmit */ +static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct adm8211_tx_hdr *txhdr; + u16 fc; + size_t payload_len, hdrlen; + int plcp, dur, len, plcp_signal, short_preamble; + struct ieee80211_hdr *hdr; + + if (control->tx_rate < 0) { + short_preamble = 1; + plcp_signal = -control->tx_rate; + } else { + short_preamble = 0; + plcp_signal = control->tx_rate; + } + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED; + hdrlen = ieee80211_get_hdrlen(fc); + memcpy(skb->cb, skb->data, hdrlen); + hdr = (struct ieee80211_hdr *)skb->cb; + skb_pull(skb, hdrlen); + payload_len = skb->len; + + txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr)); + memset(txhdr, 0, sizeof(*txhdr)); + memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN); + txhdr->signal = plcp_signal; + txhdr->frame_body_size = cpu_to_le16(payload_len); + txhdr->frame_control = hdr->frame_control; + + len = hdrlen + payload_len + FCS_LEN; + if (fc & IEEE80211_FCTL_PROTECTED) + len += 8; + + txhdr->frag = cpu_to_le16(0x0FFF); + adm8211_calc_durations(&dur, &plcp, payload_len, + len, plcp_signal, short_preamble); + txhdr->plcp_frag_head_len = cpu_to_le16(plcp); + txhdr->plcp_frag_tail_len = cpu_to_le16(plcp); + txhdr->dur_frag_head = cpu_to_le16(dur); + txhdr->dur_frag_tail = cpu_to_le16(dur); + + txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER); + + if (short_preamble) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); + + if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); + + if (fc & IEEE80211_FCTL_PROTECTED) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE); + + txhdr->retry_limit = control->retry_limit; + + adm8211_tx_raw(dev, skb, plcp_signal, control, hdrlen); + + return NETDEV_TX_OK; +} + +static int adm8211_alloc_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int ring_size; + + priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size + + sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL); + if (!priv->rx_buffers) + return -ENOMEM; + + priv->tx_buffers = (void *)priv->rx_buffers + + sizeof(*priv->rx_buffers) * priv->rx_ring_size; + + /* Allocate TX/RX descriptors */ + ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size; + priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size, + &priv->rx_ring_dma); + + if (!priv->rx_ring) { + kfree(priv->rx_buffers); + priv->rx_buffers = NULL; + priv->tx_buffers = NULL; + return -ENOMEM; + } + + priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring + + priv->rx_ring_size); + priv->tx_ring_dma = priv->rx_ring_dma + + sizeof(struct adm8211_desc) * priv->rx_ring_size; + + return 0; +} + +static const struct ieee80211_ops adm8211_ops = { + .tx = adm8211_tx, + .start = adm8211_start, + .stop = adm8211_stop, + .add_interface = adm8211_add_interface, + .remove_interface = adm8211_remove_interface, + .config = adm8211_config, + .config_interface = adm8211_config_interface, + .configure_filter = adm8211_configure_filter, + .get_stats = adm8211_get_stats, + .get_tx_stats = 
adm8211_get_tx_stats, + .get_tsf = adm8211_get_tsft +}; + +static int __devinit adm8211_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct ieee80211_hw *dev; + struct adm8211_priv *priv; + unsigned long mem_addr, mem_len; + unsigned int io_addr, io_len; + int err; + u32 reg; + u8 perm_addr[ETH_ALEN]; + DECLARE_MAC_BUF(mac); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n", + pci_name(pdev)); + return err; + } + + io_addr = pci_resource_start(pdev, 0); + io_len = pci_resource_len(pdev, 0); + mem_addr = pci_resource_start(pdev, 1); + mem_len = pci_resource_len(pdev, 1); + if (io_len < 256 || mem_len < 1024) { + printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", + pci_name(pdev)); + goto err_disable_pdev; + } + + + /* check signature */ + pci_read_config_dword(pdev, 0x80 /* CR32 */, &reg); + if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { + printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", + pci_name(pdev), reg); + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, "adm8211"); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n", + pci_name(pdev)); + return err; /* someone else grabbed it? don't disable it */ + } + + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || + pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; + } + + pci_set_master(pdev); + + dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops); + if (!dev) { + printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + priv = dev->priv; + priv->pdev = pdev; + + spin_lock_init(&priv->lock); + + SET_IEEE80211_DEV(dev, &pdev->dev); + + pci_set_drvdata(pdev, dev); + + priv->map = pci_iomap(pdev, 1, mem_len); + if (!priv->map) + priv->map = pci_iomap(pdev, 0, io_len); + + if (!priv->map) { + printk(KERN_ERR "%s (adm8211): Cannot map device memory\n", + pci_name(pdev)); + goto err_free_dev; + } + + priv->rx_ring_size = rx_ring_size; + priv->tx_ring_size = tx_ring_size; + + if (adm8211_alloc_rings(dev)) { + printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", + pci_name(pdev)); + goto err_iounmap; + } + + *(u32 *)perm_addr = le32_to_cpu((__force __le32)ADM8211_CSR_READ(PAR0)); + *(u16 *)&perm_addr[4] = + le16_to_cpu((__force __le16)ADM8211_CSR_READ(PAR1) & 0xFFFF); + + if (!is_valid_ether_addr(perm_addr)) { + printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n", + pci_name(pdev)); + random_ether_addr(perm_addr); + } + SET_IEEE80211_PERM_ADDR(dev, perm_addr); + + dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); + dev->flags = IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED; + /* IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ + + dev->channel_change_time = 1000; + dev->max_rssi = 100; /* FIXME: find better value */ + + priv->modes[0].mode = MODE_IEEE80211B; + /* channel info filled in by adm8211_read_eeprom */ + memcpy(priv->rates, adm8211_rates, sizeof(adm8211_rates)); + priv->modes[0].num_rates = ARRAY_SIZE(adm8211_rates); + priv->modes[0].rates = priv->rates; + + dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ + + priv->retry_limit = 3; + priv->ant_power = 0x40; + priv->tx_power = 0x40; + priv->lpf_cutoff = 0xFF; + priv->lnags_threshold = 0xFF; + priv->mode = IEEE80211_IF_TYPE_INVALID; + + /* Power-on issue. 
EEPROM won't read correctly without */ + if (pdev->revision >= ADM8211_REV_BA) { + ADM8211_CSR_WRITE(FRCTL, 0); + ADM8211_CSR_READ(FRCTL); + ADM8211_CSR_WRITE(FRCTL, 1); + ADM8211_CSR_READ(FRCTL); + msleep(100); + } + + err = adm8211_read_eeprom(dev); + if (err) { + printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n", + pci_name(pdev)); + goto err_free_desc; + } + + priv->channel = priv->modes[0].channels[0].chan; + + err = ieee80211_register_hwmode(dev, &priv->modes[0]); + if (err) { + printk(KERN_ERR "%s (adm8211): Can't register hwmode\n", + pci_name(pdev)); + goto err_free_desc; + } + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot register device\n", + pci_name(pdev)); + goto err_free_desc; + } + + printk(KERN_INFO "%s: hwaddr %s, Rev 0x%02x\n", + wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), + pdev->revision); + + return 0; + + err_free_desc: + pci_free_consistent(pdev, + sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size, + priv->rx_ring, priv->rx_ring_dma); + kfree(priv->rx_buffers); + + err_iounmap: + pci_iounmap(pdev, priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(dev); + + err_free_reg: + pci_release_regions(pdev); + + err_disable_pdev: + pci_disable_device(pdev); + return err; +} + + +static void __devexit adm8211_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + + priv = dev->priv; + + pci_free_consistent(pdev, + sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size, + priv->rx_ring, priv->rx_ring_dma); + + kfree(priv->rx_buffers); + kfree(priv->eeprom); + pci_iounmap(pdev, priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + ieee80211_free_hw(dev); +} + + +#ifdef CONFIG_PM +static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv = dev->priv; + + if (priv->mode != IEEE80211_IF_TYPE_INVALID) { + ieee80211_stop_queues(dev); + adm8211_stop(dev); + } + + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int adm8211_resume(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv = dev->priv; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (priv->mode != IEEE80211_IF_TYPE_INVALID) { + adm8211_start(dev); + ieee80211_start_queues(dev); + } + + return 0; +} +#endif /* CONFIG_PM */ + + +MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table); + +/* TODO: implement enable_wake */ +static struct pci_driver adm8211_driver = { + .name = "adm8211", + .id_table = adm8211_pci_id_table, + .probe = adm8211_probe, + .remove = __devexit_p(adm8211_remove), +#ifdef CONFIG_PM + .suspend = adm8211_suspend, + .resume = adm8211_resume, +#endif /* CONFIG_PM */ +}; + + + +static int __init adm8211_init(void) +{ + return pci_register_driver(&adm8211_driver); +} + + +static void __exit adm8211_exit(void) +{ + pci_unregister_driver(&adm8211_driver); +} + + +module_init(adm8211_init); +module_exit(adm8211_exit); diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h new file mode 100644 index 000000000000..ef326fed42e4 --- /dev/null +++ b/drivers/net/wireless/adm8211.h @@ -0,0 +1,656 @@ +#ifndef ADM8211_H +#define ADM8211_H + +/* ADM8211 
Registers */ + +/* CR32 (SIG) signature */ +#define ADM8211_SIG1 0x82011317 /* ADM8211A */ +#define ADM8211_SIG2 0x82111317 /* ADM8211B/ADM8211C */ + +#define ADM8211_CSR_READ(r) ioread32(&priv->map->r) +#define ADM8211_CSR_WRITE(r, val) iowrite32((val), &priv->map->r) + +/* CSR (Host Control and Status Registers) */ +struct adm8211_csr { + __le32 PAR; /* 0x00 CSR0 */ + __le32 FRCTL; /* 0x04 CSR0A */ + __le32 TDR; /* 0x08 CSR1 */ + __le32 WTDP; /* 0x0C CSR1A */ + __le32 RDR; /* 0x10 CSR2 */ + __le32 WRDP; /* 0x14 CSR2A */ + __le32 RDB; /* 0x18 CSR3 */ + __le32 TDBH; /* 0x1C CSR3A */ + __le32 TDBD; /* 0x20 CSR4 */ + __le32 TDBP; /* 0x24 CSR4A */ + __le32 STSR; /* 0x28 CSR5 */ + __le32 TDBB; /* 0x2C CSR5A */ + __le32 NAR; /* 0x30 CSR6 */ + __le32 CSR6A; /* reserved */ + __le32 IER; /* 0x38 CSR7 */ + __le32 TKIPSCEP; /* 0x3C CSR7A */ + __le32 LPC; /* 0x40 CSR8 */ + __le32 CSR_TEST1; /* 0x44 CSR8A */ + __le32 SPR; /* 0x48 CSR9 */ + __le32 CSR_TEST0; /* 0x4C CSR9A */ + __le32 WCSR; /* 0x50 CSR10 */ + __le32 WPDR; /* 0x54 CSR10A */ + __le32 GPTMR; /* 0x58 CSR11 */ + __le32 GPIO; /* 0x5C CSR11A */ + __le32 BBPCTL; /* 0x60 CSR12 */ + __le32 SYNCTL; /* 0x64 CSR12A */ + __le32 PLCPHD; /* 0x68 CSR13 */ + __le32 MMIWA; /* 0x6C CSR13A */ + __le32 MMIRD0; /* 0x70 CSR14 */ + __le32 MMIRD1; /* 0x74 CSR14A */ + __le32 TXBR; /* 0x78 CSR15 */ + __le32 SYNDATA; /* 0x7C CSR15A */ + __le32 ALCS; /* 0x80 CSR16 */ + __le32 TOFS2; /* 0x84 CSR17 */ + __le32 CMDR; /* 0x88 CSR18 */ + __le32 PCIC; /* 0x8C CSR19 */ + __le32 PMCSR; /* 0x90 CSR20 */ + __le32 PAR0; /* 0x94 CSR21 */ + __le32 PAR1; /* 0x98 CSR22 */ + __le32 MAR0; /* 0x9C CSR23 */ + __le32 MAR1; /* 0xA0 CSR24 */ + __le32 ATIMDA0; /* 0xA4 CSR25 */ + __le32 ABDA1; /* 0xA8 CSR26 */ + __le32 BSSID0; /* 0xAC CSR27 */ + __le32 TXLMT; /* 0xB0 CSR28 */ + __le32 MIBCNT; /* 0xB4 CSR29 */ + __le32 BCNT; /* 0xB8 CSR30 */ + __le32 TSFTH; /* 0xBC CSR31 */ + __le32 TSC; /* 0xC0 CSR32 */ + __le32 SYNRF; /* 0xC4 CSR33 */ + __le32 BPLI; /* 0xC8 CSR34 */ + __le32 CAP0; /* 0xCC CSR35 */ + __le32 CAP1; /* 0xD0 CSR36 */ + __le32 RMD; /* 0xD4 CSR37 */ + __le32 CFPP; /* 0xD8 CSR38 */ + __le32 TOFS0; /* 0xDC CSR39 */ + __le32 TOFS1; /* 0xE0 CSR40 */ + __le32 IFST; /* 0xE4 CSR41 */ + __le32 RSPT; /* 0xE8 CSR42 */ + __le32 TSFTL; /* 0xEC CSR43 */ + __le32 WEPCTL; /* 0xF0 CSR44 */ + __le32 WESK; /* 0xF4 CSR45 */ + __le32 WEPCNT; /* 0xF8 CSR46 */ + __le32 MACTEST; /* 0xFC CSR47 */ + __le32 FER; /* 0x100 */ + __le32 FEMR; /* 0x104 */ + __le32 FPSR; /* 0x108 */ + __le32 FFER; /* 0x10C */ +} __attribute__ ((packed)); + +/* CSR0 - PAR (PCI Address Register) */ +#define ADM8211_PAR_MWIE (1 << 24) +#define ADM8211_PAR_MRLE (1 << 23) +#define ADM8211_PAR_MRME (1 << 21) +#define ADM8211_PAR_RAP ((1 << 18) | (1 << 17)) +#define ADM8211_PAR_CAL ((1 << 15) | (1 << 14)) +#define ADM8211_PAR_PBL 0x00003f00 +#define ADM8211_PAR_BLE (1 << 7) +#define ADM8211_PAR_DSL 0x0000007c +#define ADM8211_PAR_BAR (1 << 1) +#define ADM8211_PAR_SWR (1 << 0) + +/* CSR1 - FRCTL (Frame Control Register) */ +#define ADM8211_FRCTL_PWRMGT (1 << 31) +#define ADM8211_FRCTL_MAXPSP (1 << 27) +#define ADM8211_FRCTL_DRVPRSP (1 << 26) +#define ADM8211_FRCTL_DRVBCON (1 << 25) +#define ADM8211_FRCTL_AID 0x0000ffff +#define ADM8211_FRCTL_AID_ON 0x0000c000 + +/* CSR5 - STSR (Status Register) */ +#define ADM8211_STSR_PCF (1 << 31) +#define ADM8211_STSR_BCNTC (1 << 30) +#define ADM8211_STSR_GPINT (1 << 29) +#define ADM8211_STSR_LinkOff (1 << 28) +#define ADM8211_STSR_ATIMTC (1 << 27) +#define ADM8211_STSR_TSFTF (1 << 26) +#define 
ADM8211_STSR_TSCZ (1 << 25) +#define ADM8211_STSR_LinkOn (1 << 24) +#define ADM8211_STSR_SQL (1 << 23) +#define ADM8211_STSR_WEPTD (1 << 22) +#define ADM8211_STSR_ATIME (1 << 21) +#define ADM8211_STSR_TBTT (1 << 20) +#define ADM8211_STSR_NISS (1 << 16) +#define ADM8211_STSR_AISS (1 << 15) +#define ADM8211_STSR_TEIS (1 << 14) +#define ADM8211_STSR_FBE (1 << 13) +#define ADM8211_STSR_REIS (1 << 12) +#define ADM8211_STSR_GPTT (1 << 11) +#define ADM8211_STSR_RPS (1 << 8) +#define ADM8211_STSR_RDU (1 << 7) +#define ADM8211_STSR_RCI (1 << 6) +#define ADM8211_STSR_TUF (1 << 5) +#define ADM8211_STSR_TRT (1 << 4) +#define ADM8211_STSR_TLT (1 << 3) +#define ADM8211_STSR_TDU (1 << 2) +#define ADM8211_STSR_TPS (1 << 1) +#define ADM8211_STSR_TCI (1 << 0) + +/* CSR6 - NAR (Network Access Register) */ +#define ADM8211_NAR_TXCF (1 << 31) +#define ADM8211_NAR_HF (1 << 30) +#define ADM8211_NAR_UTR (1 << 29) +#define ADM8211_NAR_SQ (1 << 28) +#define ADM8211_NAR_CFP (1 << 27) +#define ADM8211_NAR_SF (1 << 21) +#define ADM8211_NAR_TR ((1 << 15) | (1 << 14)) +#define ADM8211_NAR_ST (1 << 13) +#define ADM8211_NAR_OM ((1 << 11) | (1 << 10)) +#define ADM8211_NAR_MM (1 << 7) +#define ADM8211_NAR_PR (1 << 6) +#define ADM8211_NAR_EA (1 << 5) +#define ADM8211_NAR_PB (1 << 3) +#define ADM8211_NAR_STPDMA (1 << 2) +#define ADM8211_NAR_SR (1 << 1) +#define ADM8211_NAR_CTX (1 << 0) + +#define ADM8211_IDLE() \ +do { \ + if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) { \ + ADM8211_CSR_WRITE(NAR, priv->nar & \ + ~(ADM8211_NAR_SR | ADM8211_NAR_ST));\ + ADM8211_CSR_READ(NAR); \ + msleep(20); \ + } \ +} while (0) + +#define ADM8211_IDLE_RX() \ +do { \ + if (priv->nar & ADM8211_NAR_SR) { \ + ADM8211_CSR_WRITE(NAR, priv->nar & ~ADM8211_NAR_SR); \ + ADM8211_CSR_READ(NAR); \ + mdelay(20); \ + } \ +} while (0) + +#define ADM8211_RESTORE() \ +do { \ + if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) \ + ADM8211_CSR_WRITE(NAR, priv->nar); \ +} while (0) + +/* CSR7 - IER (Interrupt Enable Register) */ +#define ADM8211_IER_PCFIE (1 << 31) +#define ADM8211_IER_BCNTCIE (1 << 30) +#define ADM8211_IER_GPIE (1 << 29) +#define ADM8211_IER_LinkOffIE (1 << 28) +#define ADM8211_IER_ATIMTCIE (1 << 27) +#define ADM8211_IER_TSFTFIE (1 << 26) +#define ADM8211_IER_TSCZE (1 << 25) +#define ADM8211_IER_LinkOnIE (1 << 24) +#define ADM8211_IER_SQLIE (1 << 23) +#define ADM8211_IER_WEPIE (1 << 22) +#define ADM8211_IER_ATIMEIE (1 << 21) +#define ADM8211_IER_TBTTIE (1 << 20) +#define ADM8211_IER_NIE (1 << 16) +#define ADM8211_IER_AIE (1 << 15) +#define ADM8211_IER_TEIE (1 << 14) +#define ADM8211_IER_FBEIE (1 << 13) +#define ADM8211_IER_REIE (1 << 12) +#define ADM8211_IER_GPTIE (1 << 11) +#define ADM8211_IER_RSIE (1 << 8) +#define ADM8211_IER_RUIE (1 << 7) +#define ADM8211_IER_RCIE (1 << 6) +#define ADM8211_IER_TUIE (1 << 5) +#define ADM8211_IER_TRTIE (1 << 4) +#define ADM8211_IER_TLTTIE (1 << 3) +#define ADM8211_IER_TDUIE (1 << 2) +#define ADM8211_IER_TPSIE (1 << 1) +#define ADM8211_IER_TCIE (1 << 0) + +/* CSR9 - SPR (Serial Port Register) */ +#define ADM8211_SPR_SRS (1 << 11) +#define ADM8211_SPR_SDO (1 << 3) +#define ADM8211_SPR_SDI (1 << 2) +#define ADM8211_SPR_SCLK (1 << 1) +#define ADM8211_SPR_SCS (1 << 0) + +/* CSR9A - CSR_TEST0 */ +#define ADM8211_CSR_TEST0_EPNE (1 << 18) +#define ADM8211_CSR_TEST0_EPSNM (1 << 17) +#define ADM8211_CSR_TEST0_EPTYP (1 << 16) +#define ADM8211_CSR_TEST0_EPRLD (1 << 15) + +/* CSR10 - WCSR (Wake-up Control/Status Register) */ +#define ADM8211_WCSR_CRCT (1 << 30) +#define ADM8211_WCSR_TSFTWE (1 << 20) +#define 
ADM8211_WCSR_TIMWE (1 << 19) +#define ADM8211_WCSR_ATIMWE (1 << 18) +#define ADM8211_WCSR_KEYWE (1 << 17) +#define ADM8211_WCSR_MPRE (1 << 9) +#define ADM8211_WCSR_LSOE (1 << 8) +#define ADM8211_WCSR_KEYUP (1 << 6) +#define ADM8211_WCSR_TSFTW (1 << 5) +#define ADM8211_WCSR_TIMW (1 << 4) +#define ADM8211_WCSR_ATIMW (1 << 3) +#define ADM8211_WCSR_MPR (1 << 1) +#define ADM8211_WCSR_LSO (1 << 0) + +/* CSR11A - GPIO */ +#define ADM8211_CSR_GPIO_EN5 (1 << 17) +#define ADM8211_CSR_GPIO_EN4 (1 << 16) +#define ADM8211_CSR_GPIO_EN3 (1 << 15) +#define ADM8211_CSR_GPIO_EN2 (1 << 14) +#define ADM8211_CSR_GPIO_EN1 (1 << 13) +#define ADM8211_CSR_GPIO_EN0 (1 << 12) +#define ADM8211_CSR_GPIO_O5 (1 << 11) +#define ADM8211_CSR_GPIO_O4 (1 << 10) +#define ADM8211_CSR_GPIO_O3 (1 << 9) +#define ADM8211_CSR_GPIO_O2 (1 << 8) +#define ADM8211_CSR_GPIO_O1 (1 << 7) +#define ADM8211_CSR_GPIO_O0 (1 << 6) +#define ADM8211_CSR_GPIO_IN 0x0000003f + +/* CSR12 - BBPCTL (BBP Control port) */ +#define ADM8211_BBPCTL_MMISEL (1 << 31) +#define ADM8211_BBPCTL_SPICADD (0x7F << 24) +#define ADM8211_BBPCTL_RF3000 (0x20 << 24) +#define ADM8211_BBPCTL_TXCE (1 << 23) +#define ADM8211_BBPCTL_RXCE (1 << 22) +#define ADM8211_BBPCTL_CCAP (1 << 21) +#define ADM8211_BBPCTL_TYPE 0x001c0000 +#define ADM8211_BBPCTL_WR (1 << 17) +#define ADM8211_BBPCTL_RD (1 << 16) +#define ADM8211_BBPCTL_ADDR 0x0000ff00 +#define ADM8211_BBPCTL_DATA 0x000000ff + +/* CSR12A - SYNCTL (Synthesizer Control port) */ +#define ADM8211_SYNCTL_WR (1 << 31) +#define ADM8211_SYNCTL_RD (1 << 30) +#define ADM8211_SYNCTL_CS0 (1 << 29) +#define ADM8211_SYNCTL_CS1 (1 << 28) +#define ADM8211_SYNCTL_CAL (1 << 27) +#define ADM8211_SYNCTL_SELCAL (1 << 26) +#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22)) +#define ADM8211_SYNCTL_RFMD (1 << 22) +#define ADM8211_SYNCTL_GENERAL (0x7 << 22) +/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ + +/* CSR18 - CMDR (Command Register) */ +#define ADM8211_CMDR_PM (1 << 19) +#define ADM8211_CMDR_APM (1 << 18) +#define ADM8211_CMDR_RTE (1 << 4) +#define ADM8211_CMDR_DRT ((1 << 3) | (1 << 2)) +#define ADM8211_CMDR_DRT_8DW (0x0 << 2) +#define ADM8211_CMDR_DRT_16DW (0x1 << 2) +#define ADM8211_CMDR_DRT_SF (0x2 << 2) + +/* CSR33 - SYNRF (SYNRF direct control) */ +#define ADM8211_SYNRF_SELSYN (1 << 31) +#define ADM8211_SYNRF_SELRF (1 << 30) +#define ADM8211_SYNRF_LERF (1 << 29) +#define ADM8211_SYNRF_LEIF (1 << 28) +#define ADM8211_SYNRF_SYNCLK (1 << 27) +#define ADM8211_SYNRF_SYNDATA (1 << 26) +#define ADM8211_SYNRF_PE1 (1 << 25) +#define ADM8211_SYNRF_PE2 (1 << 24) +#define ADM8211_SYNRF_PA_PE (1 << 23) +#define ADM8211_SYNRF_TR_SW (1 << 22) +#define ADM8211_SYNRF_TR_SWN (1 << 21) +#define ADM8211_SYNRF_RADIO (1 << 20) +#define ADM8211_SYNRF_CAL_EN (1 << 19) +#define ADM8211_SYNRF_PHYRST (1 << 18) + +#define ADM8211_SYNRF_IF_SELECT_0 (1 << 31) +#define ADM8211_SYNRF_IF_SELECT_1 ((1 << 31) | (1 << 28)) +#define ADM8211_SYNRF_WRITE_SYNDATA_0 (1 << 31) +#define ADM8211_SYNRF_WRITE_SYNDATA_1 ((1 << 31) | (1 << 26)) +#define ADM8211_SYNRF_WRITE_CLOCK_0 (1 << 31) +#define ADM8211_SYNRF_WRITE_CLOCK_1 ((1 << 31) | (1 << 27)) + +/* CSR44 - WEPCTL (WEP Control) */ +#define ADM8211_WEPCTL_WEPENABLE (1 << 31) +#define ADM8211_WEPCTL_WPAENABLE (1 << 30) +#define ADM8211_WEPCTL_CURRENT_TABLE (1 << 29) +#define ADM8211_WEPCTL_TABLE_WR (1 << 28) +#define ADM8211_WEPCTL_TABLE_RD (1 << 27) +#define ADM8211_WEPCTL_WEPRXBYP (1 << 25) +#define ADM8211_WEPCTL_SEL_WEPTABLE (1 << 23) +#define ADM8211_WEPCTL_ADDR (0x000001ff) + +/* CSR45 - WESK 
(Data Entry for Share/Individual Key) */ +#define ADM8211_WESK_DATA (0x0000ffff) + +/* FER (Function Event Register) */ +#define ADM8211_FER_INTR_EV_ENT (1 << 15) + + +/* Si4126 RF Synthesizer - Control Registers */ +#define SI4126_MAIN_CONF 0 +#define SI4126_PHASE_DET_GAIN 1 +#define SI4126_POWERDOWN 2 +#define SI4126_RF1_N_DIV 3 /* only Si4136 */ +#define SI4126_RF2_N_DIV 4 +#define SI4126_IF_N_DIV 5 +#define SI4126_RF1_R_DIV 6 /* only Si4136 */ +#define SI4126_RF2_R_DIV 7 +#define SI4126_IF_R_DIV 8 + +/* Main Configuration */ +#define SI4126_MAIN_XINDIV2 (1 << 6) +#define SI4126_MAIN_IFDIV ((1 << 11) | (1 << 10)) +/* Powerdown */ +#define SI4126_POWERDOWN_PDIB (1 << 1) +#define SI4126_POWERDOWN_PDRB (1 << 0) + + +/* RF3000 BBP - Control Port Registers */ +/* 0x00 - reserved */ +#define RF3000_MODEM_CTRL__RX_STATUS 0x01 +#define RF3000_CCA_CTRL 0x02 +#define RF3000_DIVERSITY__RSSI 0x03 +#define RF3000_RX_SIGNAL_FIELD 0x04 +#define RF3000_RX_LEN_MSB 0x05 +#define RF3000_RX_LEN_LSB 0x06 +#define RF3000_RX_SERVICE_FIELD 0x07 +#define RF3000_TX_VAR_GAIN__TX_LEN_EXT 0x11 +#define RF3000_TX_LEN_MSB 0x12 +#define RF3000_TX_LEN_LSB 0x13 +#define RF3000_LOW_GAIN_CALIB 0x14 +#define RF3000_HIGH_GAIN_CALIB 0x15 + +/* ADM8211 revisions */ +#define ADM8211_REV_AB 0x11 +#define ADM8211_REV_AF 0x15 +#define ADM8211_REV_BA 0x20 +#define ADM8211_REV_CA 0x30 + +struct adm8211_desc { + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; +}; + +#define RDES0_STATUS_OWN (1 << 31) +#define RDES0_STATUS_ES (1 << 30) +#define RDES0_STATUS_SQL (1 << 29) +#define RDES0_STATUS_DE (1 << 28) +#define RDES0_STATUS_FS (1 << 27) +#define RDES0_STATUS_LS (1 << 26) +#define RDES0_STATUS_PCF (1 << 25) +#define RDES0_STATUS_SFDE (1 << 24) +#define RDES0_STATUS_SIGE (1 << 23) +#define RDES0_STATUS_CRC16E (1 << 22) +#define RDES0_STATUS_RXTOE (1 << 21) +#define RDES0_STATUS_CRC32E (1 << 20) +#define RDES0_STATUS_ICVE (1 << 19) +#define RDES0_STATUS_DA1 (1 << 17) +#define RDES0_STATUS_DA0 (1 << 16) +#define RDES0_STATUS_RXDR ((1 << 15) | (1 << 14) | (1 << 13) | (1 << 12)) +#define RDES0_STATUS_FL (0x00000fff) + +#define RDES1_CONTROL_RER (1 << 25) +#define RDES1_CONTROL_RCH (1 << 24) +#define RDES1_CONTROL_RBS2 (0x00fff000) +#define RDES1_CONTROL_RBS1 (0x00000fff) + +#define RDES1_STATUS_RSSI (0x0000007f) + + +#define TDES0_CONTROL_OWN (1 << 31) +#define TDES0_CONTROL_DONE (1 << 30) +#define TDES0_CONTROL_TXDR (0x0ff00000) + +#define TDES0_STATUS_OWN (1 << 31) +#define TDES0_STATUS_DONE (1 << 30) +#define TDES0_STATUS_ES (1 << 29) +#define TDES0_STATUS_TLT (1 << 28) +#define TDES0_STATUS_TRT (1 << 27) +#define TDES0_STATUS_TUF (1 << 26) +#define TDES0_STATUS_TRO (1 << 25) +#define TDES0_STATUS_SOFBR (1 << 24) +#define TDES0_STATUS_ACR (0x00000fff) + +#define TDES1_CONTROL_IC (1 << 31) +#define TDES1_CONTROL_LS (1 << 30) +#define TDES1_CONTROL_FS (1 << 29) +#define TDES1_CONTROL_TER (1 << 25) +#define TDES1_CONTROL_TCH (1 << 24) +#define TDES1_CONTROL_RBS2 (0x00fff000) +#define TDES1_CONTROL_RBS1 (0x00000fff) + +/* SRAM offsets */ +#define ADM8211_SRAM(x) (priv->pdev->revision < ADM8211_REV_BA ? 
\ + ADM8211_SRAM_A_ ## x : ADM8211_SRAM_B_ ## x) + +#define ADM8211_SRAM_INDIV_KEY 0x0000 +#define ADM8211_SRAM_A_SHARE_KEY 0x0160 +#define ADM8211_SRAM_B_SHARE_KEY 0x00c0 + +#define ADM8211_SRAM_A_SSID 0x0180 +#define ADM8211_SRAM_B_SSID 0x00d4 +#define ADM8211_SRAM_SSID ADM8211_SRAM(SSID) + +#define ADM8211_SRAM_A_SUPP_RATE 0x0191 +#define ADM8211_SRAM_B_SUPP_RATE 0x00dd +#define ADM8211_SRAM_SUPP_RATE ADM8211_SRAM(SUPP_RATE) + +#define ADM8211_SRAM_A_SIZE 0x0200 +#define ADM8211_SRAM_B_SIZE 0x01c0 +#define ADM8211_SRAM_SIZE ADM8211_SRAM(SIZE) + +struct adm8211_rx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +struct adm8211_tx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; + struct ieee80211_tx_control tx_control; + size_t hdrlen; +}; + +#define PLCP_SIGNAL_1M 0x0a +#define PLCP_SIGNAL_2M 0x14 +#define PLCP_SIGNAL_5M5 0x37 +#define PLCP_SIGNAL_11M 0x6e + +struct adm8211_tx_hdr { + u8 da[6]; + u8 signal; /* PLCP signal / TX rate in 100 Kbps */ + u8 service; + __le16 frame_body_size; + __le16 frame_control; + __le16 plcp_frag_tail_len; + __le16 plcp_frag_head_len; + __le16 dur_frag_tail; + __le16 dur_frag_head; + u8 addr4[6]; + +#define ADM8211_TXHDRCTL_SHORT_PREAMBLE (1 << 0) +#define ADM8211_TXHDRCTL_MORE_FRAG (1 << 1) +#define ADM8211_TXHDRCTL_MORE_DATA (1 << 2) +#define ADM8211_TXHDRCTL_FRAG_NO (1 << 3) /* ? */ +#define ADM8211_TXHDRCTL_ENABLE_RTS (1 << 4) +#define ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE (1 << 5) +#define ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER (1 << 15) /* ? */ + __le16 header_control; + __le16 frag; + u8 reserved_0; + u8 retry_limit; + + u32 wep2key0; + u32 wep2key1; + u32 wep2key2; + u32 wep2key3; + + u8 keyid; + u8 entry_control; // huh?? + u16 reserved_1; + u32 reserved_2; +} __attribute__ ((packed)); + + +#define RX_COPY_BREAK 128 +#define RX_PKT_SIZE 2500 + +struct adm8211_eeprom { + __le16 signature; /* 0x00 */ + u8 major_version; /* 0x02 */ + u8 minor_version; /* 0x03 */ + u8 reserved_1[4]; /* 0x04 */ + u8 hwaddr[6]; /* 0x08 */ + u8 reserved_2[8]; /* 0x1E */ + __le16 cr49; /* 0x16 */ + u8 cr03; /* 0x18 */ + u8 cr28; /* 0x19 */ + u8 cr29; /* 0x1A */ + u8 country_code; /* 0x1B */ + +/* specific bbp types */ +#define ADM8211_BBP_RFMD3000 0x00 +#define ADM8211_BBP_RFMD3002 0x01 +#define ADM8211_BBP_ADM8011 0x04 + u8 specific_bbptype; /* 0x1C */ + u8 specific_rftype; /* 0x1D */ + u8 reserved_3[2]; /* 0x1E */ + __le16 device_id; /* 0x20 */ + __le16 vendor_id; /* 0x22 */ + __le16 subsystem_id; /* 0x24 */ + __le16 subsystem_vendor_id; /* 0x26 */ + u8 maxlat; /* 0x28 */ + u8 mingnt; /* 0x29 */ + __le16 cis_pointer_low; /* 0x2A */ + __le16 cis_pointer_high; /* 0x2C */ + __le16 csr18; /* 0x2E */ + u8 reserved_4[16]; /* 0x30 */ + u8 d1_pwrdara; /* 0x40 */ + u8 d0_pwrdara; /* 0x41 */ + u8 d3_pwrdara; /* 0x42 */ + u8 d2_pwrdara; /* 0x43 */ + u8 antenna_power[14]; /* 0x44 */ + __le16 cis_wordcnt; /* 0x52 */ + u8 tx_power[14]; /* 0x54 */ + u8 lpf_cutoff[14]; /* 0x62 */ + u8 lnags_threshold[14]; /* 0x70 */ + __le16 checksum; /* 0x7E */ + u8 cis_data[0]; /* 0x80, 384 bytes */ +} __attribute__ ((packed)); + +static const struct ieee80211_rate adm8211_rates[] = { + { .rate = 10, + .val = 10, + .val2 = -10, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 20, + .val = 20, + .val2 = -20, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 55, + .val = 55, + .val2 = -55, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 110, + .val = 110, + .val2 = -110, + .flags = IEEE80211_RATE_CCK_2 } +}; + +struct ieee80211_chan_range { + u8 min; + u8 max; +}; + +static const struct 
ieee80211_channel adm8211_channels[] = { + { .chan = 1, + .freq = 2412}, + { .chan = 2, + .freq = 2417}, + { .chan = 3, + .freq = 2422}, + { .chan = 4, + .freq = 2427}, + { .chan = 5, + .freq = 2432}, + { .chan = 6, + .freq = 2437}, + { .chan = 7, + .freq = 2442}, + { .chan = 8, + .freq = 2447}, + { .chan = 9, + .freq = 2452}, + { .chan = 10, + .freq = 2457}, + { .chan = 11, + .freq = 2462}, + { .chan = 12, + .freq = 2467}, + { .chan = 13, + .freq = 2472}, + { .chan = 14, + .freq = 2484}, +}; + +struct adm8211_priv { + struct pci_dev *pdev; + spinlock_t lock; + struct adm8211_csr __iomem *map; + struct adm8211_desc *rx_ring; + struct adm8211_desc *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + struct adm8211_rx_ring_info *rx_buffers; + struct adm8211_tx_ring_info *tx_buffers; + unsigned int rx_ring_size, tx_ring_size; + unsigned int cur_tx, dirty_tx, cur_rx; + + struct ieee80211_low_level_stats stats; + struct ieee80211_hw_mode modes[1]; + struct ieee80211_channel channels[ARRAY_SIZE(adm8211_channels)]; + struct ieee80211_rate rates[ARRAY_SIZE(adm8211_rates)]; + int mode; + + int channel; + u8 bssid[ETH_ALEN]; + u8 ssid[32]; + size_t ssid_len; + + u8 soft_rx_crc; + u8 retry_limit; + + u8 ant_power; + u8 tx_power; + u8 lpf_cutoff; + u8 lnags_threshold; + struct adm8211_eeprom *eeprom; + size_t eeprom_len; + + u32 nar; + +#define ADM8211_TYPE_INTERSIL 0x00 +#define ADM8211_TYPE_RFMD 0x01 +#define ADM8211_TYPE_MARVEL 0x02 +#define ADM8211_TYPE_AIROHA 0x03 +#define ADM8211_TYPE_ADMTEK 0x05 + unsigned int rf_type:3; + unsigned int bbp_type:3; + + u8 specific_bbptype; + enum { + ADM8211_RFMD2948 = 0x0, + ADM8211_RFMD2958 = 0x1, + ADM8211_RFMD2958_RF3000_CONTROL_POWER = 0x2, + ADM8211_MAX2820 = 0x8, + ADM8211_AL2210L = 0xC, /* Airoha */ + } transceiver_type; +}; + +static const struct ieee80211_chan_range cranges[] = { + {1, 11}, /* FCC */ + {1, 11}, /* IC */ + {1, 13}, /* ETSI */ + {10, 11}, /* SPAIN */ + {10, 13}, /* FRANCE */ + {14, 14}, /* MMK */ + {1, 14}, /* MMK2 */ +}; + +#endif /* ADM8211_H */ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index ee1cc14db389..074055e18c5c 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -241,8 +241,8 @@ static int proc_perm = 0644; MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \ - cards. Direct support for ISA/PCI/MPI cards and support \ - for PCMCIA when used with airo_cs."); +cards. 
Direct support for ISA/PCI/MPI cards and support \ +for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); module_param_array(io, int, NULL, 0); @@ -2481,7 +2481,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) EXPORT_SYMBOL(stop_airo_card); -static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) +static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr) { memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); return ETH_ALEN; @@ -2696,14 +2696,13 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci) return rc; } +static const struct header_ops airo_header_ops = { + .parse = wll_header_parse, +}; + static void wifi_setup(struct net_device *dev) { - dev->hard_header = NULL; - dev->rebuild_header = NULL; - dev->hard_header_cache = NULL; - dev->header_cache_update= NULL; - - dev->hard_header_parse = wll_header_parse; + dev->header_ops = &airo_header_ops; dev->hard_start_xmit = &airo_start_xmit11; dev->get_stats = &airo_get_stats; dev->set_mac_address = &airo_set_mac_address; @@ -2821,6 +2820,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, struct net_device *dev; struct airo_info *ai; int i, rc; + DECLARE_MAC_BUF(mac); /* Create the network device object. */ dev = alloc_netdev(sizeof(*ai), "", ether_setup); @@ -2870,7 +2870,6 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, dev->base_addr = port; SET_NETDEV_DEV(dev, dmdev); - SET_MODULE_OWNER(dev); reset_card (dev, 1); msleep(400); @@ -2924,9 +2923,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, goto err_out_reg; set_bit(FLAG_REGISTERED,&ai->flags); - airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); + airo_print_info(dev->name, "MAC enabled %s", + print_mac(mac, dev->dev_addr)); /* Allocate the transmit buffers */ if (probe && !test_bit(FLAG_MPI,&ai->flags)) @@ -2983,6 +2981,7 @@ int reset_airo_card( struct net_device *dev ) { int i; struct airo_info *ai = dev->priv; + DECLARE_MAC_BUF(mac); if (reset_card (dev, 1)) return -1; @@ -2991,9 +2990,8 @@ int reset_airo_card( struct net_device *dev ) airo_print_err(dev->name, "MAC could not be enabled"); return -1; } - airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + airo_print_info(dev->name, "MAC enabled %s", + print_mac(mac, dev->dev_addr)); /* Allocate the transmit buffers if needed */ if (!test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) @@ -5427,6 +5425,7 @@ static int proc_APList_open( struct inode *inode, struct file *file ) { int i; char *ptr; APListRid APList_rid; + DECLARE_MAC_BUF(mac); if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; @@ -5450,13 +5449,8 @@ static int proc_APList_open( struct inode *inode, struct file *file ) { // We end when we find a zero MAC if ( !*(int*)APList_rid.ap[i] && !*(int*)&APList_rid.ap[i][2]) break; - ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x\n", - (int)APList_rid.ap[i][0], - (int)APList_rid.ap[i][1], - (int)APList_rid.ap[i][2], - (int)APList_rid.ap[i][3], - (int)APList_rid.ap[i][4], - (int)APList_rid.ap[i][5]); + ptr += sprintf(ptr, "%s\n", + print_mac(mac, APList_rid.ap[i])); } if (i==0) ptr += sprintf(ptr, "Not 
using specific APs\n"); @@ -5475,6 +5469,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { int rc; /* If doLoseSync is not 1, we won't do a Lose Sync */ int doLoseSync = -1; + DECLARE_MAC_BUF(mac); if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; @@ -5511,13 +5506,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { we have to add a spin lock... */ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); while(rc == 0 && BSSList_rid.index != 0xffff) { - ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x %*s rssi = %d", - (int)BSSList_rid.bssid[0], - (int)BSSList_rid.bssid[1], - (int)BSSList_rid.bssid[2], - (int)BSSList_rid.bssid[3], - (int)BSSList_rid.bssid[4], - (int)BSSList_rid.bssid[5], + ptr += sprintf(ptr, "%s %*s rssi = %d", + print_mac(mac, BSSList_rid.bssid), (int)BSSList_rid.ssidLen, BSSList_rid.ssid, (int)BSSList_rid.dBm); @@ -7579,9 +7569,9 @@ static const iw_handler airo_private_handler[] = static const struct iw_handler_def airo_handler_def = { - .num_standard = sizeof(airo_handler)/sizeof(iw_handler), - .num_private = sizeof(airo_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(airo_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(airo_handler), + .num_private = ARRAY_SIZE(airo_private_handler), + .num_private_args = ARRAY_SIZE(airo_private_args), .standard = airo_handler, .private = airo_private_handler, .private_args = airo_private_args, diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c index 7d5b8c2cc614..6f7eb9f59223 100644 --- a/drivers/net/wireless/airport.c +++ b/drivers/net/wireless/airport.c @@ -197,7 +197,6 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match) return -EBUSY; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); macio_set_drvdata(mdev, dev); diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c index 498e8486d125..dbdfc9e39d20 100644 --- a/drivers/net/wireless/arlan-main.c +++ b/drivers/net/wireless/arlan-main.c @@ -1469,10 +1469,10 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short while (dmi) { if (dmi->dmi_addrlen == 6) { + DECLARE_MAC_BUF(mac); if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP) - printk(KERN_ERR "%s mcl %2x:%2x:%2x:%2x:%2x:%2x \n", dev->name, - dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], - dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); + printk(KERN_ERR "%s mcl %s\n", + dev->name, print_mac(mac, dmi->dmi_addr)); for (i = 0; i < 6; i++) if (dmi->dmi_addr[i] != hw_dst_addr[i]) break; @@ -1512,17 +1512,18 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short { char immedDestAddress[6]; char immedSrcAddress[6]; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6); memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6); - printk(KERN_WARNING "%s t %2x:%2x:%2x:%2x:%2x:%2x f %2x:%2x:%2x:%2x:%2x:%2x imd %2x:%2x:%2x:%2x:%2x:%2x ims %2x:%2x:%2x:%2x:%2x:%2x\n", dev->name, - (unsigned char) skbtmp[0], (unsigned char) skbtmp[1], (unsigned char) skbtmp[2], (unsigned char) skbtmp[3], - (unsigned char) skbtmp[4], (unsigned char) skbtmp[5], (unsigned char) skbtmp[6], (unsigned char) skbtmp[7], - (unsigned char) skbtmp[8], (unsigned char) skbtmp[9], (unsigned char) skbtmp[10], (unsigned char) skbtmp[11], - immedDestAddress[0], immedDestAddress[1], 
immedDestAddress[2], - immedDestAddress[3], immedDestAddress[4], immedDestAddress[5], - immedSrcAddress[0], immedSrcAddress[1], immedSrcAddress[2], - immedSrcAddress[3], immedSrcAddress[4], immedSrcAddress[5]); + printk(KERN_WARNING "%s t %s f %s imd %s ims %s\n", + dev->name, print_mac(mac, skbtmp), + print_mac(mac2, &skbtmp[6]), + print_mac(mac3, immedDestAddress), + print_mac(mac4, immedSrcAddress)); } skb->protocol = eth_type_trans(skb, dev); IFDEBUG(ARLAN_DEBUG_HEADER_DUMP) @@ -1792,8 +1793,6 @@ struct net_device * __init arlan_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); - if (unit >= 0) { sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c index 015abd928ab0..c6e70dbc5de8 100644 --- a/drivers/net/wireless/arlan-proc.c +++ b/drivers/net/wireless/arlan-proc.c @@ -435,7 +435,7 @@ static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp, goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { @@ -654,7 +654,7 @@ static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -688,7 +688,7 @@ static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -716,7 +716,7 @@ static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp pos += sprintf(arlan_drive_info + pos, "No device found here \n"); goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -745,7 +745,7 @@ static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp, goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -780,7 +780,7 @@ static int arlan_configure(ctl_table * ctl, int write, struct file *filp, } else if (arlan_device[devnum] != NULL) { - priv = arlan_device[devnum]->priv; + priv = netdev_priv(arlan_device[devnum]); arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_CONF); } @@ -805,7 +805,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp, } else if (arlan_device[devnum] != NULL) { - priv = arlan_device[devnum]->priv; + priv = netdev_priv(arlan_device[devnum]); arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_RESET); } else diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 51a7db53afa5..059ce3f07dba 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1484,6 +1484,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, struct net_device *dev; struct atmel_private *priv; int rc; + DECLARE_MAC_BUF(mac); /* Create the network device object. 
*/ dev = alloc_etherdev(sizeof(*priv)); @@ -1598,12 +1599,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, if (!ent) printk(KERN_WARNING "atmel: unable to create /proc entry.\n"); - printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", - dev->name, DRIVER_MAJOR, DRIVER_MINOR, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); + printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %s\n", + dev->name, DRIVER_MAJOR, DRIVER_MINOR, print_mac(mac, dev->dev_addr)); - SET_MODULE_OWNER(dev); return dev; err_out_res: diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig new file mode 100644 index 000000000000..e3c573e56b63 --- /dev/null +++ b/drivers/net/wireless/b43/Kconfig @@ -0,0 +1,131 @@ +config B43 + tristate "Broadcom 43xx wireless support (mac80211 stack)" + depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 + select SSB + select FW_LOADER + select HW_RANDOM + ---help--- + b43 is a driver for the Broadcom 43xx series wireless devices. + + Check "lspci" for something like + "Broadcom Corporation BCM43XX 802.11 Wireless LAN Controller" + to determine whether you own such a device. + + This driver supports the new BCM43xx IEEE 802.11G devices, but not + the old IEEE 802.11B devices. Old devices are supported by + the b43legacy driver. + Note that this has nothing to do with the standard that your AccessPoint + supports (A, B, G or a combination). + IEEE 802.11G devices can talk to IEEE 802.11B AccessPoints. + + It is safe to include both b43 and b43legacy as the underlying glue + layer will automatically load the correct version for your device. + + This driver uses V4 firmware, which must be installed separately using + b43-fwcutter. + + This driver can be built as a module (recommended) that will be called "b43". + If unsure, say M. + +# Auto-select SSB PCI-HOST support, if possible +config B43_PCI_AUTOSELECT + bool + depends on B43 && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B43_PCICORE_AUTOSELECT + bool + depends on B43 && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B43_PCMCIA + bool "Broadcom 43xx PCMCIA device support (EXPERIMENTAL)" + depends on B43 && SSB_PCMCIAHOST_POSSIBLE && EXPERIMENTAL + select SSB_PCMCIAHOST + ---help--- + Broadcom 43xx PCMCIA device support. + + Support for 16bit PCMCIA devices. + Please note that most PC-CARD devices are _NOT_ 16bit PCMCIA + devices, but 32bit CardBUS devices. CardBUS devices are supported + out of the box by b43. + + With this config option you can drive b43 cards in + CompactFlash formfactor in a PCMCIA adaptor. + CF b43 cards can sometimes be found in handheld PCs. + + It's safe to select Y here, even if you don't have a B43 PCMCIA device. + + If unsure, say N. + +# LED support +config B43_LEDS + bool + depends on B43 && MAC80211_LEDS + default y + +# RFKILL support +config B43_RFKILL + bool + depends on B43 && RFKILL && RFKILL_INPUT && INPUT_POLLDEV + default y + +config B43_DEBUG + bool "Broadcom 43xx debugging" + depends on B43 + ---help--- + Broadcom 43xx debugging messages. + + Say Y, if you want to find out why the driver does not + work for you. 
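The bool options above (B43_PCMCIA, B43_LEDS, B43_RFKILL, B43_DEBUG) only decide which extra objects get linked into b43.o, as the Makefile added further down shows; the core driver builds either way because the usual Linux pattern is for an optional feature's header to fall back to empty inline stubs when its Kconfig symbol is off. A minimal sketch of that stub pattern, assuming hypothetical b43_leds_init()/b43_leds_exit() entry points rather than quoting b43's real leds.h:

/* Sketch only: illustrative names, not the driver's actual LED API. */
struct b43_wldev;

#ifdef CONFIG_B43_LEDS
void b43_leds_init(struct b43_wldev *dev);
void b43_leds_exit(struct b43_wldev *dev);
#else
/* Feature compiled out: empty inline stubs keep callers free of #ifdefs. */
static inline void b43_leds_init(struct b43_wldev *dev) { }
static inline void b43_leds_exit(struct b43_wldev *dev) { }
#endif /* CONFIG_B43_LEDS */

With this layout the main driver code can call the LED hooks unconditionally; when the option is off the calls compile away and leds.o is simply never built.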
+ +config B43_DMA + bool + depends on B43 +config B43_PIO + bool + depends on B43 + +choice + prompt "Broadcom 43xx data transfer mode" + depends on B43 + default B43_DMA_AND_PIO_MODE + +config B43_DMA_AND_PIO_MODE + bool "DMA + PIO" + select B43_DMA + select B43_PIO + ---help--- + Include both, Direct Memory Access (DMA) and Programmed I/O (PIO) + data transfer modes. + The actually used mode is selectable through the module + parameter "pio". If the module parameter is pio=0, DMA is used. + Otherwise PIO is used. DMA is default. + + If unsure, choose this option. + +config B43_DMA_MODE + bool "DMA (Direct Memory Access) only" + select B43_DMA + ---help--- + Only include Direct Memory Access (DMA). + This reduces the size of the driver module, by omitting the PIO code. + +config B43_PIO_MODE + bool "PIO (Programmed I/O) only" + select B43_PIO + ---help--- + Only include Programmed I/O (PIO). + This reduces the size of the driver module, by omitting the DMA code. + Please note that PIO transfers are slow (compared to DMA). + + Also note that not all devices of the 43xx series support PIO. + The 4306 (Apple Airport Extreme and others) supports PIO, while + the 4318 is known to _not_ support PIO. + + Only use PIO, if DMA does not work for you. + +endchoice diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile new file mode 100644 index 000000000000..485e59e2dfab --- /dev/null +++ b/drivers/net/wireless/b43/Makefile @@ -0,0 +1,20 @@ +# b43 core +b43-y += main.o +b43-y += tables.o +b43-y += phy.o +b43-y += sysfs.o +b43-y += xmit.o +b43-y += lo.o +# b43 RFKILL button support +b43-$(CONFIG_B43_RFKILL) += rfkill.o +# b43 LED support +b43-$(CONFIG_B43_LEDS) += leds.o +# b43 PCMCIA support +b43-$(CONFIG_B43_PCMCIA) += pcmcia.o +# b43 debugging +b43-$(CONFIG_B43_DEBUG) += debugfs.o +# b43 DMA and PIO +b43-$(CONFIG_B43_DMA) += dma.o +b43-$(CONFIG_B43_PIO) += pio.o + +obj-$(CONFIG_B43) += b43.o diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h new file mode 100644 index 000000000000..a28ad230d63e --- /dev/null +++ b/drivers/net/wireless/b43/b43.h @@ -0,0 +1,854 @@ +#ifndef B43_H_ +#define B43_H_ + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/hw_random.h> +#include <linux/ssb/ssb.h> +#include <net/mac80211.h> + +#include "debugfs.h" +#include "leds.h" +#include "rfkill.h" +#include "lo.h" +#include "phy.h" + +#ifdef CONFIG_B43_DEBUG +# define B43_DEBUG 1 +#else +# define B43_DEBUG 0 +#endif + +#define B43_RX_MAX_SSI 60 + +/* MMIO offsets */ +#define B43_MMIO_DMA0_REASON 0x20 +#define B43_MMIO_DMA0_IRQ_MASK 0x24 +#define B43_MMIO_DMA1_REASON 0x28 +#define B43_MMIO_DMA1_IRQ_MASK 0x2C +#define B43_MMIO_DMA2_REASON 0x30 +#define B43_MMIO_DMA2_IRQ_MASK 0x34 +#define B43_MMIO_DMA3_REASON 0x38 +#define B43_MMIO_DMA3_IRQ_MASK 0x3C +#define B43_MMIO_DMA4_REASON 0x40 +#define B43_MMIO_DMA4_IRQ_MASK 0x44 +#define B43_MMIO_DMA5_REASON 0x48 +#define B43_MMIO_DMA5_IRQ_MASK 0x4C +#define B43_MMIO_MACCTL 0x120 +#define B43_MMIO_STATUS2_BITFIELD 0x124 +#define B43_MMIO_GEN_IRQ_REASON 0x128 +#define B43_MMIO_GEN_IRQ_MASK 0x12C +#define B43_MMIO_RAM_CONTROL 0x130 +#define B43_MMIO_RAM_DATA 0x134 +#define B43_MMIO_PS_STATUS 0x140 +#define B43_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43_MMIO_SHM_CONTROL 0x160 +#define B43_MMIO_SHM_DATA 0x164 +#define B43_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43_MMIO_XMITSTAT_0 0x170 +#define B43_MMIO_XMITSTAT_1 0x174 +#define B43_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 
only */ +#define B43_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ + +/* 32-bit DMA */ +#define B43_MMIO_DMA32_BASE0 0x200 +#define B43_MMIO_DMA32_BASE1 0x220 +#define B43_MMIO_DMA32_BASE2 0x240 +#define B43_MMIO_DMA32_BASE3 0x260 +#define B43_MMIO_DMA32_BASE4 0x280 +#define B43_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43_MMIO_DMA64_BASE0 0x200 +#define B43_MMIO_DMA64_BASE1 0x240 +#define B43_MMIO_DMA64_BASE2 0x280 +#define B43_MMIO_DMA64_BASE3 0x2C0 +#define B43_MMIO_DMA64_BASE4 0x300 +#define B43_MMIO_DMA64_BASE5 0x340 +/* PIO */ +#define B43_MMIO_PIO1_BASE 0x300 +#define B43_MMIO_PIO2_BASE 0x310 +#define B43_MMIO_PIO3_BASE 0x320 +#define B43_MMIO_PIO4_BASE 0x330 + +#define B43_MMIO_PHY_VER 0x3E0 +#define B43_MMIO_PHY_RADIO 0x3E2 +#define B43_MMIO_PHY0 0x3E6 +#define B43_MMIO_ANTENNA 0x3E8 +#define B43_MMIO_CHANNEL 0x3F0 +#define B43_MMIO_CHANNEL_EXT 0x3F4 +#define B43_MMIO_RADIO_CONTROL 0x3F6 +#define B43_MMIO_RADIO_DATA_HIGH 0x3F8 +#define B43_MMIO_RADIO_DATA_LOW 0x3FA +#define B43_MMIO_PHY_CONTROL 0x3FC +#define B43_MMIO_PHY_DATA 0x3FE +#define B43_MMIO_MACFILTER_CONTROL 0x420 +#define B43_MMIO_MACFILTER_DATA 0x422 +#define B43_MMIO_RCMTA_COUNT 0x43C +#define B43_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43_MMIO_GPIO_CONTROL 0x49C +#define B43_MMIO_GPIO_MASK 0x49E +#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43_MMIO_RNG 0x65A +#define B43_MMIO_POWERUP_DELAY 0x6A8 + +/* SPROM boardflags_lo values */ +#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */ +#define B43_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ +#define B43_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ +#define B43_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ +#define B43_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ +#define B43_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ +#define B43_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ +#define B43_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define B43_BFL_ENETVLAN 0x0100 /* can do vlan */ +#define B43_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ +#define B43_BFL_NOPCI 0x0400 /* leaves PCI floating */ +#define B43_BFL_FEM 0x0800 /* supports the Front End Module */ +#define B43_BFL_EXTLNA 0x1000 /* has an external LNA */ +#define B43_BFL_HGPA 0x2000 /* had high gain PA */ +#define B43_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ +#define B43_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ + +/* GPIO register offset, in both ChipCommon and PCI core. 
*/ +#define B43_GPIO_CONTROL 0x6c + +/* SHM Routing */ +enum { + B43_SHM_UCODE, /* Microcode memory */ + B43_SHM_SHARED, /* Shared memory */ + B43_SHM_SCRATCH, /* Scratch memory */ + B43_SHM_HW, /* Internal hardware register */ + B43_SHM_RCMTA, /* Receive match transmitter address (rev >= 5 only) */ +}; +/* SHM Routing modifiers */ +#define B43_SHM_AUTOINC_R 0x0200 /* Auto-increment address on read */ +#define B43_SHM_AUTOINC_W 0x0100 /* Auto-increment address on write */ +#define B43_SHM_AUTOINC_RW (B43_SHM_AUTOINC_R | \ + B43_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43_SHM_SH_PCTLWDPOS 0x0008 +#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */ +#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ +#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ +#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ +#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ +#define B43_SHM_SH_HOSTFHI 0x0060 /* Hostflags for ucode options (high) */ +#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ +#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ +#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ +#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ +#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ +#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ +#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ +/* SHM_SHARED TX FIFO variables */ +#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */ +#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */ +#define B43_SHM_SH_SIZE45 0x009C /* TX FIFO size for FIFO 4 and 5 */ +#define B43_SHM_SH_SIZE67 0x009E /* TX FIFO size for FIFO 6 and 7 */ +/* SHM_SHARED background noise */ +#define B43_SHM_SH_JSSI0 0x0088 /* Measure JSSI 0 */ +#define B43_SHM_SH_JSSI1 0x008A /* Measure JSSI 1 */ +#define B43_SHM_SH_JSSIAUX 0x008C /* Measure JSSI AUX */ +/* SHM_SHARED crypto engine */ +#define B43_SHM_SH_DEFAULTIV 0x003C /* Default IV location */ +#define B43_SHM_SH_NRRXTRANS 0x003E /* # of soft RX transmitter addresses (max 8) */ +#define B43_SHM_SH_KTP 0x0056 /* Key table pointer */ +#define B43_SHM_SH_TKIPTSCTTAK 0x0318 +#define B43_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block (v4 firmware) */ +#define B43_SHM_SH_PSM 0x05F4 /* PSM transmitter address match block (rev < 5) */ +/* SHM_SHARED WME variables */ +#define B43_SHM_SH_EDCFSTAT 0x000E /* EDCF status */ +#define B43_SHM_SH_TXFCUR 0x0030 /* TXF current index */ +#define B43_SHM_SH_EDCFQ 0x0240 /* EDCF Q info */ +/* SHM_SHARED powersave mode related */ +#define B43_SHM_SH_SLOTT 0x0010 /* Slot time */ +#define B43_SHM_SH_DTIMPER 0x0012 /* DTIM period */ +#define B43_SHM_SH_NOSLPZNATDTIM 0x004C /* NOSLPZNAT DTIM */ +/* SHM_SHARED beacon variables */ +#define B43_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */ +#define B43_SHM_SH_BTL1 0x001A /* Beacon template length 1 */ +#define B43_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */ +#define B43_SHM_SH_TIMBPOS 0x001E /* TIM B position in beacon */ +#define B43_SHM_SH_SFFBLIM 0x0044 /* Short frame fallback retry limit */ +#define B43_SHM_SH_LFFBLIM 0x0046 /* Long frame fallback retry limit */ +#define B43_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word (see PHY TX control) */ +/* SHM_SHARED ACK/CTS control */ +#define B43_SHM_SH_ACKCTSPHYCTL 0x0022 /* 
ACK/CTS PHY control word (see PHY TX control) */ +/* SHM_SHARED probe response variables */ +#define B43_SHM_SH_PRSSID 0x0160 /* Probe Response SSID */ +#define B43_SHM_SH_PRSSIDLEN 0x0048 /* Probe Response SSID length */ +#define B43_SHM_SH_PRTLEN 0x004A /* Probe Response template length */ +#define B43_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +#define B43_SHM_SH_PRPHYCTL 0x0188 /* Probe Response PHY TX control word */ +/* SHM_SHARED rate tables */ +#define B43_SHM_SH_OFDMDIRECT 0x01C0 /* Pointer to OFDM direct map */ +#define B43_SHM_SH_OFDMBASIC 0x01E0 /* Pointer to OFDM basic rate map */ +#define B43_SHM_SH_CCKDIRECT 0x0200 /* Pointer to CCK direct map */ +#define B43_SHM_SH_CCKBASIC 0x0220 /* Pointer to CCK basic rate map */ +/* SHM_SHARED microcode soft registers */ +#define B43_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43_SHM_SH_UCODETIME 0x0006 /* Microcode time */ +#define B43_SHM_SH_UCODESTAT 0x0040 /* Microcode debug status code */ +#define B43_SHM_SH_UCODESTAT_INVALID 0 +#define B43_SHM_SH_UCODESTAT_INIT 1 +#define B43_SHM_SH_UCODESTAT_ACTIVE 2 +#define B43_SHM_SH_UCODESTAT_SUSP 3 /* suspended */ +#define B43_SHM_SH_UCODESTAT_SLEEP 4 /* asleep (PS) */ +#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ +#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ +#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ + +/* SHM_SCRATCH offsets */ +#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ +#define B43_SHM_SC_MAXCONT 0x0004 /* Maximum contention window */ +#define B43_SHM_SC_CURCONT 0x0005 /* Current contention window */ +#define B43_SHM_SC_SRLIMIT 0x0006 /* Short retry count limit */ +#define B43_SHM_SC_LRLIMIT 0x0007 /* Long retry count limit */ +#define B43_SHM_SC_DTIMC 0x0008 /* Current DTIM count */ +#define B43_SHM_SC_BTL0LEN 0x0015 /* Beacon 0 template length */ +#define B43_SHM_SC_BTL1LEN 0x0016 /* Beacon 1 template length */ +#define B43_SHM_SC_SCFB 0x0017 /* Short frame transmit count threshold for rate fallback */ +#define B43_SHM_SC_LCFB 0x0018 /* Long frame transmit count threshold for rate fallback */ + +/* Hardware Radio Enable masks */ +#define B43_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. 
See b43_hf_read/write() */ +#define B43_HF_ANTDIVHELP 0x00000001 /* ucode antenna div helper */ +#define B43_HF_SYMW 0x00000002 /* G-PHY SYM workaround */ +#define B43_HF_RXPULLW 0x00000004 /* RX pullup workaround */ +#define B43_HF_CCKBOOST 0x00000008 /* 4dB CCK power boost (exclusive with OFDM boost) */ +#define B43_HF_BTCOEX 0x00000010 /* Bluetooth coexistance */ +#define B43_HF_GDCW 0x00000020 /* G-PHY DV canceller filter bw workaround */ +#define B43_HF_OFDMPABOOST 0x00000040 /* Enable PA gain boost for OFDM */ +#define B43_HF_ACPR 0x00000080 /* Disable for Japan, channel 14 */ +#define B43_HF_EDCF 0x00000100 /* on if WME and MAC suspended */ +#define B43_HF_TSSIRPSMW 0x00000200 /* TSSI reset PSM ucode workaround */ +#define B43_HF_DSCRQ 0x00000400 /* Disable slow clock request in ucode */ +#define B43_HF_ACIW 0x00000800 /* ACI workaround: shift bits by 2 on PHY CRS */ +#define B43_HF_2060W 0x00001000 /* 2060 radio workaround */ +#define B43_HF_RADARW 0x00002000 /* Radar workaround */ +#define B43_HF_USEDEFKEYS 0x00004000 /* Enable use of default keys */ +#define B43_HF_BT4PRIOCOEX 0x00010000 /* Bluetooth 2-priority coexistance */ +#define B43_HF_FWKUP 0x00020000 /* Fast wake-up ucode */ +#define B43_HF_VCORECALC 0x00040000 /* Force VCO recalculation when powering up synthpu */ +#define B43_HF_PCISCW 0x00080000 /* PCI slow clock workaround */ +#define B43_HF_4318TSSI 0x00200000 /* 4318 TSSI */ +#define B43_HF_FBCMCFIFO 0x00400000 /* Flush bcast/mcast FIFO immediately */ +#define B43_HF_HWPCTL 0x00800000 /* Enable hardwarre power control */ +#define B43_HF_BTCOEXALT 0x01000000 /* Bluetooth coexistance in alternate pins */ +#define B43_HF_TXBTCHECK 0x02000000 /* Bluetooth check during transmission */ +#define B43_HF_SKCFPUP 0x04000000 /* Skip CFP update */ + +/* MacFilter offsets. 
*/ +#define B43_MACFILTER_SELF 0x0000 +#define B43_MACFILTER_BSSID 0x0003 + +/* PowerControl */ +#define B43_PCTL_IN 0xB0 +#define B43_PCTL_OUT 0xB4 +#define B43_PCTL_OUTENABLE 0xB8 +#define B43_PCTL_XTAL_POWERUP 0x40 +#define B43_PCTL_PLL_POWERDOWN 0x80 + +/* PowerControl Clock Modes */ +#define B43_PCTL_CLK_FAST 0x00 +#define B43_PCTL_CLK_SLOW 0x01 +#define B43_PCTL_CLK_DYNAMIC 0x02 + +#define B43_PCTL_FORCE_SLOW 0x0800 +#define B43_PCTL_FORCE_PLL 0x1000 +#define B43_PCTL_DYN_XTAL 0x2000 + +/* PHYVersioning */ +#define B43_PHYTYPE_A 0x00 +#define B43_PHYTYPE_B 0x01 +#define B43_PHYTYPE_G 0x02 + +/* PHYRegisters */ +#define B43_PHY_ILT_A_CTRL 0x0072 +#define B43_PHY_ILT_A_DATA1 0x0073 +#define B43_PHY_ILT_A_DATA2 0x0074 +#define B43_PHY_G_LO_CONTROL 0x0810 +#define B43_PHY_ILT_G_CTRL 0x0472 +#define B43_PHY_ILT_G_DATA1 0x0473 +#define B43_PHY_ILT_G_DATA2 0x0474 +#define B43_PHY_A_PCTL 0x007B +#define B43_PHY_G_PCTL 0x0029 +#define B43_PHY_A_CRS 0x0029 +#define B43_PHY_RADIO_BITFIELD 0x0401 +#define B43_PHY_G_CRS 0x0429 +#define B43_PHY_NRSSILT_CTRL 0x0803 +#define B43_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43_MACCTL_ENABLED 0x00000001 /* MAC Enabled */ +#define B43_MACCTL_PSM_RUN 0x00000002 /* Run Microcode */ +#define B43_MACCTL_PSM_JMP0 0x00000004 /* Microcode jump to 0 */ +#define B43_MACCTL_SHM_ENABLED 0x00000100 /* SHM Enabled */ +#define B43_MACCTL_SHM_UPPER 0x00000200 /* SHM Upper */ +#define B43_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43_MACCTL_PSM_DBG 0x00002000 /* Microcode debugging enabled */ +#define B43_MACCTL_GPOUTSMSK 0x0000C000 /* GPOUT Select Mask */ +#define B43_MACCTL_BE 0x00010000 /* Big Endian mode */ +#define B43_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ +#define B43_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep frames with bad PLCP */ +#define B43_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43_MACCTL_HWPS 0x02000000 /* Hardware Power Saving */ +#define B43_MACCTL_AWAKE 0x04000000 /* Device is awake */ +#define B43_MACCTL_CLOSEDNET 0x08000000 /* Closed net (no SSID bcast) */ +#define B43_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */ +#define B43_MACCTL_DISCTXSTAT 0x20000000 /* Discard TX status */ +#define B43_MACCTL_DISCPMQ 0x40000000 /* Discard Power Management Queue */ +#define B43_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* 802.11 core specific TM State Low flags */ +#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select */ +#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */ +#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High flags */ +#define B43_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available (rev >= 5) */ +#define B43_TMSHIGH_APHY 0x00020000 /* A-PHY available (rev >= 5) */ +#define B43_TMSHIGH_GPHY 0x00010000 /* G-PHY available (rev >= 5) */ + +/* Generic-Interrupt reasons. 
*/ +#define B43_IRQ_MAC_SUSPENDED 0x00000001 +#define B43_IRQ_BEACON 0x00000002 +#define B43_IRQ_TBTT_INDI 0x00000004 +#define B43_IRQ_BEACON_TX_OK 0x00000008 +#define B43_IRQ_BEACON_CANCEL 0x00000010 +#define B43_IRQ_ATIM_END 0x00000020 +#define B43_IRQ_PMQ 0x00000040 +#define B43_IRQ_PIO_WORKAROUND 0x00000100 +#define B43_IRQ_MAC_TXERR 0x00000200 +#define B43_IRQ_PHY_TXERR 0x00000800 +#define B43_IRQ_PMEVENT 0x00001000 +#define B43_IRQ_TIMER0 0x00002000 +#define B43_IRQ_TIMER1 0x00004000 +#define B43_IRQ_DMA 0x00008000 +#define B43_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43_IRQ_UCODE_DEBUG 0x08000000 +#define B43_IRQ_RFKILL 0x10000000 +#define B43_IRQ_TX_OK 0x20000000 +#define B43_IRQ_PHY_G_CHANGED 0x40000000 +#define B43_IRQ_TIMEOUT 0x80000000 + +#define B43_IRQ_ALL 0xFFFFFFFF +#define B43_IRQ_MASKTEMPLATE (B43_IRQ_MAC_SUSPENDED | \ + B43_IRQ_BEACON | \ + B43_IRQ_TBTT_INDI | \ + B43_IRQ_ATIM_END | \ + B43_IRQ_PMQ | \ + B43_IRQ_MAC_TXERR | \ + B43_IRQ_PHY_TXERR | \ + B43_IRQ_DMA | \ + B43_IRQ_TXFIFO_FLUSH_OK | \ + B43_IRQ_NOISESAMPLE_OK | \ + B43_IRQ_UCODE_DEBUG | \ + B43_IRQ_RFKILL | \ + B43_IRQ_TX_OK) + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. */ +#define B43_CCK_RATE_1MB 0x02 +#define B43_CCK_RATE_2MB 0x04 +#define B43_CCK_RATE_5MB 0x0B +#define B43_CCK_RATE_11MB 0x16 +#define B43_OFDM_RATE_6MB 0x0C +#define B43_OFDM_RATE_9MB 0x12 +#define B43_OFDM_RATE_12MB 0x18 +#define B43_OFDM_RATE_18MB 0x24 +#define B43_OFDM_RATE_24MB 0x30 +#define B43_OFDM_RATE_36MB 0x48 +#define B43_OFDM_RATE_48MB 0x60 +#define B43_OFDM_RATE_54MB 0x6C +/* Convert a b43 rate value to a rate in 100kbps */ +#define B43_RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) + +#define B43_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43_DEFAULT_LONG_RETRY_LIMIT 4 + +/* Max size of a security key */ +#define B43_SEC_KEYSIZE 16 +/* Security algorithms. */ +enum { + B43_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. */ + B43_SEC_ALGO_WEP40, + B43_SEC_ALGO_TKIP, + B43_SEC_ALGO_AES, + B43_SEC_ALGO_WEP104, + B43_SEC_ALGO_AES_LEGACY, +}; + +struct b43_dmaring; +struct b43_pioqueue; + +/* The firmware file header */ +#define B43_FW_TYPE_UCODE 'u' +#define B43_FW_TYPE_PCM 'p' +#define B43_FW_TYPE_IV 'i' +struct b43_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43_IV_OFFSET_MASK 0x7FFF +#define B43_IV_32BIT 0x8000 +struct b43_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + + +#define B43_PHYMODE(phytype) (1 << (phytype)) +#define B43_PHYMODE_A B43_PHYMODE(B43_PHYTYPE_A) +#define B43_PHYMODE_B B43_PHYMODE(B43_PHYTYPE_B) +#define B43_PHYMODE_G B43_PHYMODE(B43_PHYTYPE_G) + +struct b43_phy { + /* Possible PHYMODEs on this PHY */ + u8 possible_phymodes; + /* GMODE bit enabled? */ + bool gmode; + /* Possible ieee80211 subsystem hwmodes for this PHY. + * Which mode is selected, depends on thr GMODE enabled bit */ +#define B43_MAX_PHYHWMODES 2 + struct ieee80211_hw_mode hwmodes[B43_MAX_PHYHWMODES]; + + /* Analog Type */ + u8 analog; + /* B43_PHYTYPE_ */ + u8 type; + /* PHY revision number. 
*/ + u8 rev; + + /* Radio versioning */ + u16 radio_manuf; /* Radio manufacturer */ + u16 radio_ver; /* Radio version */ + u8 radio_rev; /* Radio revision */ + + bool locked; /* Only used in b43_phy_{un}lock() */ + bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */ + + /* ACI (adjacent channel interference) flags. */ + bool aci_enable; + bool aci_wlan_automatic; + bool aci_hw_rssi; + + /* Radio switched on/off */ + bool radio_on; + struct { + /* Values saved when turning the radio off. + * They are needed when turning it on again. */ + bool valid; + u16 rfover; + u16 rfoverval; + } radio_off_context; + + u16 minlowsig[2]; + u16 minlowsigpos[2]; + + /* TSSI to dBm table in use */ + const s8 *tssi2dbm; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi; + + /* LocalOscillator control values. */ + struct b43_txpower_lo_control *lo_control; + /* Values from b43_calc_loopback_gain() */ + s16 max_lb_gain; /* Maximum Loopback gain in hdB */ + s16 trsw_rx_gain; /* TRSW RX gain in hdB */ + s16 lna_lod_gain; /* LNA lod */ + s16 lna_gain; /* LNA */ + s16 pga_gain; /* PGA */ + + /* PHY lock for core.rev < 3 + * This lock is only used by b43_phy_{un}lock() + */ + spinlock_t lock; + + /* Desired TX power level (in dBm). + * This is set by the user and adjusted in b43_phy_xmitpower(). */ + u8 power_level; + /* A-PHY TX Power control value. */ + u16 txpwr_offset; + + /* Current TX power level attenuation control values */ + struct b43_bbatt bbatt; + struct b43_rfatt rfatt; + u8 tx_control; /* B43_TXCTL_XXX */ +#ifdef CONFIG_B43_DEBUG + bool manual_txpower_control; /* Manual TX-power control enabled? */ +#endif + /* Hardware Power Control enabled? */ + bool hardware_power_control; + + /* Current Interference Mitigation mode */ + int interfmode; + /* Stack of saved values from the Interference Mitigation code. + * Each value in the stack is layed out as follows: + * bit 0-11: offset + * bit 12-15: register ID + * bit 16-32: value + * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT + */ +#define B43_INTERFSTACK_SIZE 26 + u32 interfstack[B43_INTERFSTACK_SIZE]; //FIXME: use a data structure + + /* Saved values from the NRSSI Slope calculation */ + s16 nrssi[2]; + s32 nrssislope; + /* In memory nrssi lookup table. */ + s8 nrssi_lt[64]; + + /* current channel */ + u8 channel; + + u16 lofcal; + + u16 initval; //FIXME rename? +}; + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43_dma { + struct b43_dmaring *tx_ring0; + struct b43_dmaring *tx_ring1; + struct b43_dmaring *tx_ring2; + struct b43_dmaring *tx_ring3; + struct b43_dmaring *tx_ring4; + struct b43_dmaring *tx_ring5; + + struct b43_dmaring *rx_ring0; + struct b43_dmaring *rx_ring3; /* only available on core.rev < 5 */ +}; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43_pio { + struct b43_pioqueue *queue0; + struct b43_pioqueue *queue1; + struct b43_pioqueue *queue2; + struct b43_pioqueue *queue3; +}; + +/* Context information for a noise calculation (Link Quality). */ +struct b43_noise_calculation { + u8 channel_at_start; + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43_stats { + u8 link_noise; + /* Store the last TX/RX times here for updating the leds. */ + unsigned long last_tx; + unsigned long last_rx; +}; + +struct b43_key { + /* If keyconf is NULL, this key is disabled. + * keyconf is a cookie. Don't derefenrence it outside of the set_key + * path, because b43 doesn't own it. 
*/ + struct ieee80211_key_conf *keyconf; + u8 algorithm; +}; + +struct b43_wldev; + +/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ +struct b43_wl { + /* Pointer to the active wireless device on this chip */ + struct b43_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + spinlock_t irq_lock; + struct mutex mutex; + spinlock_t leds_lock; + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + /* Opaque ID of the operating interface from the ieee80211 + * subsystem. Do not modify. + */ + int if_id; + /* The MAC address of the operating interface. */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID */ + u8 bssid[ETH_ALEN]; + /* Interface type. (IEEE80211_IF_TYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + + struct hwrng rng; + u8 rng_initialized; + char rng_name[30 + 1]; + + /* The RF-kill button */ + struct b43_rfkill rfkill; + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43_firmware { + /* Microcode */ + const struct firmware *ucode; + /* PCM code */ + const struct firmware *pcm; + /* Initial MMIO values for the firmware */ + const struct firmware *initvals; + /* Initial MMIO values for the firmware, band-specific */ + const struct firmware *initvals_band; + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43_STAT_UNINIT = 0, /* Uninitialized. */ + B43_STAT_INITIALIZED = 1, /* Initialized, but not started, yet. */ + B43_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* XXX--- HOW LOCKING WORKS IN B43 ---XXX + * + * You should always acquire both, wl->mutex and wl->irq_lock unless: + * - You don't need to acquire wl->irq_lock, if the interface is stopped. + * - You don't need to acquire wl->mutex in the IRQ handler, IRQ tasklet + * and packet TX path (and _ONLY_ there.) + */ + +/* Data structure for one wireless device (802.11 core) */ +struct b43_wldev { + struct ssb_device *dev; + struct b43_wl *wl; + + /* The device initialization status. + * Use b43_status() to query. */ + atomic_t __init_status; + /* Saved init status for handling suspend. */ + int suspend_init_status; + + bool __using_pio; /* Internal, use b43_using_pio(). */ + bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ + bool reg124_set_0x4; /* Some variable to keep track of IRQ stuff. */ + bool short_preamble; /* TRUE, if short preamble is enabled. */ + bool short_slot; /* TRUE, if short slot timing is enabled. */ + bool radio_hw_enable; /* saved state of radio hardware enabled state */ + + /* PHY/Radio device. */ + struct b43_phy phy; + union { + /* DMA engines. */ + struct b43_dma dma; + /* PIO engines. */ + struct b43_pio pio; + }; + + /* Various statistics about the physical device. */ + struct b43_stats stats; + + /* The device LEDs. 
*/ + struct b43_led led_tx; + struct b43_led led_rx; + struct b43_led led_assoc; + struct b43_led led_radio; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* saved irq enable/disable state bitfield. */ + u32 irq_savedstate; + /* Link Quality calculation context. */ + struct b43_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Interrupt Service Routine tasklet (bottom-half) */ + struct tasklet_struct isr_tasklet; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + u8 max_nr_keys; + struct b43_key key[58]; + + /* Cached beacon template while uploading the template. */ + struct sk_buff *cached_beacon; + + /* Firmware data */ + struct b43_firmware fw; + + /* Devicelist in struct b43_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43_DEBUG + struct b43_dfsentry *dfsentry; +#endif +}; + +static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +/* Helper function, which returns a boolean. + * TRUE, if PIO is used; FALSE, if DMA is used. + */ +#if defined(CONFIG_B43_DMA) && defined(CONFIG_B43_PIO) +static inline int b43_using_pio(struct b43_wldev *dev) +{ + return dev->__using_pio; +} +#elif defined(CONFIG_B43_DMA) +static inline int b43_using_pio(struct b43_wldev *dev) +{ + return 0; +} +#elif defined(CONFIG_B43_PIO) +static inline int b43_using_pio(struct b43_wldev *dev) +{ + return 1; +} +#else +# error "Using neither DMA nor PIO? Confused..." +#endif + +static inline struct b43_wldev *dev_to_b43_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (IEEE80211_IF_TYPE_XXX). */ +static inline int b43_is_mode(struct b43_wl *wl, int type) +{ + return (wl->operating && wl->if_type == type); +} + +static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline u32 b43_read32(struct b43_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +/* Message printing */ +void b43info(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43err(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43warn(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +#if B43_DEBUG +void b43dbg(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +#else /* DEBUG */ +# define b43dbg(wl, fmt...) do { /* nothing */ } while (0) +#endif /* DEBUG */ + +/* A WARN_ON variant that vanishes when b43 debugging is disabled. + * This _also_ evaluates the arg with debugging disabled. 
*/ +#if B43_DEBUG +# define B43_WARN_ON(x) WARN_ON(x) +#else +static inline bool __b43_warn_on_dummy(bool x) { return x; } +# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x))) +#endif + +/** Limit a value between two limits */ +#ifdef limit_value +# undef limit_value +#endif +#define limit_value(value, min, max) \ + ({ \ + typeof(value) __value = (value); \ + typeof(value) __min = (min); \ + typeof(value) __max = (max); \ + if (__value < __min) \ + __value = __min; \ + else if (__value > __max) \ + __value = __max; \ + __value; \ + }) + +/* Convert an integer to a Q5.2 value */ +#define INT_TO_Q52(i) ((i) << 2) +/* Convert a Q5.2 value to an integer (precision loss!) */ +#define Q52_TO_INT(q52) ((q52) >> 2) +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) Q52_TO_INT(q52), ((((q52) & 0x3) * 100) / 4) + +#endif /* B43_H_ */ diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c new file mode 100644 index 000000000000..734e70e1a06d --- /dev/null +++ b/drivers/net/wireless/b43/debugfs.c @@ -0,0 +1,656 @@ +/* + + Broadcom B43 wireless driver + + debugfs driver debugging code + + Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/mutex.h> + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "dma.h" +#include "pio.h" +#include "xmit.h" + + +/* The root directory. */ +static struct dentry *rootdir; + +struct b43_debugfs_fops { + ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43_wldev *dev, const char *buf, size_t count); + struct file_operations fops; + /* Offset of struct b43_dfs_file in struct b43_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? */ + bool take_irqlock; +}; + +static inline +struct b43_dfs_file * fops_to_dfs_file(struct b43_wldev *dev, + const struct b43_debugfs_fops *dfops) +{ + void *p; + + p = dev->dfsentry; + p += dfops->file_struct_offset; + + return p; +} + + +#define fappend(fmt, x...) 
\ + do { \ + if (bufsize - count) \ + count += snprintf(buf + count, \ + bufsize - count, \ + fmt , ##x); \ + else \ + printk(KERN_ERR "b43: fappend overflow\n"); \ + } while (0) + + +/* wl->irq_lock is locked */ +static ssize_t tsf_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + u64 tsf; + + b43_tsf_read(dev, &tsf); + fappend("0x%08x%08x\n", + (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(tsf & 0xFFFFFFFFULL)); + + return count; +} + +/* wl->irq_lock is locked */ +static int tsf_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + u64 tsf; + + if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1) + return -EINVAL; + b43_tsf_write(dev, tsf); + + return 0; +} + +/* wl->irq_lock is locked */ +static ssize_t ucode_regs_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + + for (i = 0; i < 64; i++) { + fappend("r%d = 0x%04x\n", i, + b43_shm_read16(dev, B43_SHM_SCRATCH, i)); + } + + return count; +} + +/* wl->irq_lock is locked */ +static ssize_t shm_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + u16 tmp; + __le16 *le16buf = (__le16 *)buf; + + for (i = 0; i < 0x1000; i++) { + if (bufsize <= 0) + break; + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 2 * i); + le16buf[i] = cpu_to_le16(tmp); + count += sizeof(tmp); + bufsize -= sizeof(tmp); + } + + return count; +} + +static ssize_t txstat_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + struct b43_txstatus_log *log = &dev->dfsentry->txstatlog; + ssize_t count = 0; + unsigned long flags; + int i, idx; + struct b43_txstatus *stat; + + spin_lock_irqsave(&log->lock, flags); + if (log->end < 0) { + fappend("Nothing transmitted, yet\n"); + goto out_unlock; + } + fappend("b43 TX status reports:\n\n" + "index | cookie | seq | phy_stat | frame_count | " + "rts_count | supp_reason | pm_indicated | " + "intermediate | for_ampdu | acked\n" "---\n"); + i = log->end + 1; + idx = 0; + while (1) { + if (i == B43_NR_LOGGED_TXSTATUS) + i = 0; + stat = &(log->log[i]); + if (stat->cookie) { + fappend("%03d | " + "0x%04X | 0x%04X | 0x%02X | " + "0x%X | 0x%X | " + "%u | %u | " + "%u | %u | %u\n", + idx, + stat->cookie, stat->seq, stat->phy_stat, + stat->frame_count, stat->rts_count, + stat->supp_reason, stat->pm_indicated, + stat->intermediate, stat->for_ampdu, + stat->acked); + idx++; + } + if (i == log->end) + break; + i++; + } +out_unlock: + spin_unlock_irqrestore(&log->lock, flags); + + return count; +} + +static ssize_t txpower_g_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + + if (dev->phy.type != B43_PHYTYPE_G) { + fappend("Device is not a G-PHY\n"); + goto out; + } + fappend("Control: %s\n", dev->phy.manual_txpower_control ? + "MANUAL" : "AUTOMATIC"); + fappend("Baseband attenuation: %u\n", dev->phy.bbatt.att); + fappend("Radio attenuation: %u\n", dev->phy.rfatt.att); + fappend("TX Mixer Gain: %s\n", + (dev->phy.tx_control & B43_TXCTL_TXMIX) ? "ON" : "OFF"); + fappend("PA Gain 2dB: %s\n", + (dev->phy.tx_control & B43_TXCTL_PA2DB) ? "ON" : "OFF"); + fappend("PA Gain 3dB: %s\n", + (dev->phy.tx_control & B43_TXCTL_PA3DB) ? 
"ON" : "OFF"); + fappend("\n\n"); + fappend("You can write to this file:\n"); + fappend("Writing \"auto\" enables automatic txpower control.\n"); + fappend + ("Writing the attenuation values as \"bbatt rfatt txmix pa2db pa3db\" " + "enables manual txpower control.\n"); + fappend("Example: 5 4 0 0 1\n"); + fappend("Enables manual control with Baseband attenuation 5, " + "Radio attenuation 4, No TX Mixer Gain, " + "No PA Gain 2dB, With PA Gain 3dB.\n"); +out: + return count; +} + +static int txpower_g_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned long phy_flags; + + if (dev->phy.type != B43_PHYTYPE_G) + return -ENODEV; + if ((count >= 4) && (memcmp(buf, "auto", 4) == 0)) { + /* Automatic control */ + dev->phy.manual_txpower_control = 0; + b43_phy_xmitpower(dev); + } else { + int bbatt = 0, rfatt = 0, txmix = 0, pa2db = 0, pa3db = 0; + /* Manual control */ + if (sscanf(buf, "%d %d %d %d %d", &bbatt, &rfatt, + &txmix, &pa2db, &pa3db) != 5) + return -EINVAL; + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + dev->phy.manual_txpower_control = 1; + dev->phy.bbatt.att = bbatt; + dev->phy.rfatt.att = rfatt; + dev->phy.tx_control = 0; + if (txmix) + dev->phy.tx_control |= B43_TXCTL_TXMIX; + if (pa2db) + dev->phy.tx_control |= B43_TXCTL_PA2DB; + if (pa3db) + dev->phy.tx_control |= B43_TXCTL_PA3DB; + b43_phy_lock(dev, phy_flags); + b43_radio_lock(dev); + b43_set_txpower_g(dev, &dev->phy.bbatt, + &dev->phy.rfatt, dev->phy.tx_control); + b43_radio_unlock(dev); + b43_phy_unlock(dev, phy_flags); + } + + return 0; +} + +/* wl->irq_lock is locked */ +static int restart_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + int err = 0; + + if (count > 0 && buf[0] == '1') { + b43_controller_restart(dev, "manually restarted"); + } else + err = -EINVAL; + + return err; +} + +static ssize_t append_lo_table(ssize_t count, char *buf, const size_t bufsize, + struct b43_loctl table[B43_NR_BB][B43_NR_RF]) +{ + unsigned int i, j; + struct b43_loctl *ctl; + + for (i = 0; i < B43_NR_BB; i++) { + for (j = 0; j < B43_NR_RF; j++) { + ctl = &(table[i][j]); + fappend("(bbatt %2u, rfatt %2u) -> " + "(I %+3d, Q %+3d, Used: %d, Calibrated: %d)\n", + i, j, ctl->i, ctl->q, + ctl->used, + b43_loctl_is_calibrated(ctl)); + } + } + + return count; +} + +static ssize_t loctls_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + struct b43_txpower_lo_control *lo; + int i, err = 0; + + if (dev->phy.type != B43_PHYTYPE_G) { + fappend("Device is not a G-PHY\n"); + err = -ENODEV; + goto out; + } + lo = dev->phy.lo_control; + fappend("-- Local Oscillator calibration data --\n\n"); + fappend("Measured: %d, Rebuild: %d, HW-power-control: %d\n", + lo->lo_measured, + lo->rebuild, + dev->phy.hardware_power_control); + fappend("TX Bias: 0x%02X, TX Magn: 0x%02X\n", + lo->tx_bias, lo->tx_magn); + fappend("Power Vector: 0x%08X%08X\n", + (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL)); + fappend("\nControl table WITH PADMIX:\n"); + count = append_lo_table(count, buf, bufsize, lo->with_padmix); + fappend("\nControl table WITHOUT PADMIX:\n"); + count = append_lo_table(count, buf, bufsize, lo->no_padmix); + fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n"); + for (i = 0; i < lo->rfatt_list.len; i++) { + fappend("%u(%d), ", + lo->rfatt_list.list[i].att, + lo->rfatt_list.list[i].with_padmix); + } + fappend("\n"); + fappend("\nUsed Baseband attenuation values:\n"); + for 
(i = 0; i < lo->bbatt_list.len; i++) { + fappend("%u, ", + lo->bbatt_list.list[i].att); + } + fappend("\n"); + +out: + return err ? err : count; +} + +#undef fappend + +static int b43_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43_wldev *dev; + struct b43_debugfs_fops *dfops; + struct b43_dfs_file *dfile; + ssize_t ret; + char *buf; + const size_t bufsize = 1024 * 128; + const size_t buforder = get_order(bufsize); + int err = 0; + + if (!count) + return 0; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; + } + dfile = fops_to_dfs_file(dev, dfops); + + if (!dfile->buffer) { + buf = (char *)__get_free_pages(GFP_KERNEL, buforder); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + /* Sparse warns about the following memset, because it has a big + * size value. That warning is bogus, so I will ignore it. --mb */ + memset(buf, 0, bufsize); + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + ret = dfops->read(dev, buf, bufsize); + spin_unlock_irq(&dev->wl->irq_lock); + } else + ret = dfops->read(dev, buf, bufsize); + if (ret <= 0) { + free_pages((unsigned long)buf, buforder); + err = ret; + goto out_unlock; + } + dfile->data_len = ret; + dfile->buffer = buf; + } + + ret = simple_read_from_buffer(userbuf, count, ppos, + dfile->buffer, + dfile->data_len); + if (*ppos >= dfile->data_len) { + free_pages((unsigned long)dfile->buffer, buforder); + dfile->buffer = NULL; + dfile->data_len = 0; + } +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? err : ret; +} + +static ssize_t b43_debugfs_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43_wldev *dev; + struct b43_debugfs_fops *dfops; + char *buf; + int err = 0; + + if (!count) + return 0; + if (count > PAGE_SIZE) + return -E2BIG; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; + } + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, userbuf, count)) { + err = -EFAULT; + goto out_freepage; + } + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + err = dfops->write(dev, buf, count); + spin_unlock_irq(&dev->wl->irq_lock); + } else + err = dfops->write(dev, buf, count); + if (err) + goto out_freepage; + +out_freepage: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? 
err : count; +} + + +#define B43_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ + static struct b43_debugfs_fops fops_##name = { \ + .read = _read, \ + .write = _write, \ + .fops = { \ + .open = b43_debugfs_open, \ + .read = b43_debugfs_read, \ + .write = b43_debugfs_write, \ + }, \ + .file_struct_offset = offsetof(struct b43_dfsentry, \ + file_##name), \ + .take_irqlock = _take_irqlock, \ + } + +B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); +B43_DEBUGFS_FOPS(ucode_regs, ucode_regs_read_file, NULL, 1); +B43_DEBUGFS_FOPS(shm, shm_read_file, NULL, 1); +B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); +B43_DEBUGFS_FOPS(txpower_g, txpower_g_read_file, txpower_g_write_file, 0); +B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); +B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0); + + +int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) +{ + return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); +} + +static void b43_remove_dynamic_debug(struct b43_wldev *dev) +{ + struct b43_dfsentry *e = dev->dfsentry; + int i; + + for (i = 0; i < __B43_NR_DYNDBG; i++) + debugfs_remove(e->dyn_debug_dentries[i]); +} + +static void b43_add_dynamic_debug(struct b43_wldev *dev) +{ + struct b43_dfsentry *e = dev->dfsentry; + struct dentry *d; + +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + d = debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + if (!IS_ERR(d)) \ + e->dyn_debug_dentries[id] = d; \ + } while (0) + + add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, 0); + add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, 0); + add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0); + add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0); + add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); + +#undef add_dyn_dbg +} + +void b43_debugfs_add_device(struct b43_wldev *dev) +{ + struct b43_dfsentry *e; + struct b43_txstatus_log *log; + char devdir[16]; + + B43_WARN_ON(!dev); + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + b43err(dev->wl, "debugfs: add device OOM\n"); + return; + } + e->dev = dev; + log = &e->txstatlog; + log->log = kcalloc(B43_NR_LOGGED_TXSTATUS, + sizeof(struct b43_txstatus), GFP_KERNEL); + if (!log->log) { + b43err(dev->wl, "debugfs: add device txstatus OOM\n"); + kfree(e); + return; + } + log->end = -1; + spin_lock_init(&log->lock); + + dev->dfsentry = e; + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); + e->subdir = debugfs_create_dir(devdir, rootdir); + if (!e->subdir || IS_ERR(e->subdir)) { + if (e->subdir == ERR_PTR(-ENODEV)) { + b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + } else { + b43err(dev->wl, "debugfs: cannot create %s directory\n", + devdir); + } + dev->dfsentry = NULL; + kfree(log->log); + kfree(e); + return; + } + +#define ADD_FILE(name, mode) \ + do { \ + struct dentry *d; \ + d = debugfs_create_file(__stringify(name), \ + mode, e->subdir, dev, \ + &fops_##name.fops); \ + e->file_##name.dentry = NULL; \ + if (!IS_ERR(d)) \ + e->file_##name.dentry = d; \ + } while (0) + + + ADD_FILE(tsf, 0600); + ADD_FILE(ucode_regs, 0400); + ADD_FILE(shm, 0400); + ADD_FILE(txstat, 0400); + ADD_FILE(txpower_g, 0600); + ADD_FILE(restart, 0200); + ADD_FILE(loctls, 0400); + +#undef ADD_FILE + + b43_add_dynamic_debug(dev); +} + +void b43_debugfs_remove_device(struct b43_wldev *dev) +{ + struct b43_dfsentry *e; + + if (!dev) + return; + e = dev->dfsentry; + if (!e) + return; + b43_remove_dynamic_debug(dev); + + 
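	/*
	 * Illustration only, not part of the patch: assuming debugfs is
	 * mounted at /sys/kernel/debug, the per-device entries removed
	 * here (and created in b43_debugfs_add_device() above) live in
	 * /sys/kernel/debug/b43/<wiphy-name>/, i.e. the KBUILD_MODNAME
	 * directory plus the wiphy name used for e->subdir.  For example
	 *
	 *	echo 1 > /sys/kernel/debug/b43/phy0/debug_xmitpower
	 *
	 * flips the B43_DBG_XMITPOWER switch ("phy0" merely stands in
	 * for whatever wiphy name the device registered with).
	 */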
debugfs_remove(e->file_tsf.dentry); + debugfs_remove(e->file_ucode_regs.dentry); + debugfs_remove(e->file_shm.dentry); + debugfs_remove(e->file_txstat.dentry); + debugfs_remove(e->file_txpower_g.dentry); + debugfs_remove(e->file_restart.dentry); + debugfs_remove(e->file_loctls.dentry); + + debugfs_remove(e->subdir); + kfree(e->txstatlog.log); + kfree(e); +} + +void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + struct b43_dfsentry *e = dev->dfsentry; + struct b43_txstatus_log *log; + struct b43_txstatus *cur; + int i; + + if (!e) + return; + log = &e->txstatlog; + B43_WARN_ON(!irqs_disabled()); + spin_lock(&log->lock); + i = log->end + 1; + if (i == B43_NR_LOGGED_TXSTATUS) + i = 0; + log->end = i; + cur = &(log->log[i]); + memcpy(cur, status, sizeof(*cur)); + spin_unlock(&log->lock); +} + +void b43_debugfs_init(void) +{ + rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(rootdir)) + rootdir = NULL; +} + +void b43_debugfs_exit(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h new file mode 100644 index 000000000000..6eebe858db5a --- /dev/null +++ b/drivers/net/wireless/b43/debugfs.h @@ -0,0 +1,89 @@ +#ifndef B43_DEBUGFS_H_ +#define B43_DEBUGFS_H_ + +struct b43_wldev; +struct b43_txstatus; + +enum b43_dyndbg { /* Dynamic debugging features */ + B43_DBG_XMITPOWER, + B43_DBG_DMAOVERFLOW, + B43_DBG_DMAVERBOSE, + B43_DBG_PWORK_FAST, + B43_DBG_PWORK_STOP, + __B43_NR_DYNDBG, +}; + +#ifdef CONFIG_B43_DEBUG + +struct dentry; + +#define B43_NR_LOGGED_TXSTATUS 100 + +struct b43_txstatus_log { + struct b43_txstatus *log; + int end; + spinlock_t lock; +}; + +struct b43_dfs_file { + struct dentry *dentry; + char *buffer; + size_t data_len; +}; + +struct b43_dfsentry { + struct b43_wldev *dev; + struct dentry *subdir; + + struct b43_dfs_file file_tsf; + struct b43_dfs_file file_ucode_regs; + struct b43_dfs_file file_shm; + struct b43_dfs_file file_txstat; + struct b43_dfs_file file_txpower_g; + struct b43_dfs_file file_restart; + struct b43_dfs_file file_loctls; + + struct b43_txstatus_log txstatlog; + + /* Enabled/Disabled list for the dynamic debugging features. */ + u32 dyn_debug[__B43_NR_DYNDBG]; + /* Dentries for the dynamic debugging entries. 
*/ + struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG]; +}; + +int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature); + +void b43_debugfs_init(void); +void b43_debugfs_exit(void); +void b43_debugfs_add_device(struct b43_wldev *dev); +void b43_debugfs_remove_device(struct b43_wldev *dev); +void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status); + +#else /* CONFIG_B43_DEBUG */ + +static inline int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) +{ + return 0; +} + +static inline void b43_debugfs_init(void) +{ +} +static inline void b43_debugfs_exit(void) +{ +} +static inline void b43_debugfs_add_device(struct b43_wldev *dev) +{ +} +static inline void b43_debugfs_remove_device(struct b43_wldev *dev) +{ +} +static inline void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} + +#endif /* CONFIG_B43_DEBUG */ + +#endif /* B43_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c new file mode 100644 index 000000000000..5e8f8ac0f1dd --- /dev/null +++ b/drivers/net/wireless/b43/dma.c @@ -0,0 +1,1494 @@ +/* + + Broadcom B43 wireless driver + + DMA ringbuffer and descriptor allocation/management + + Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> + + Some code in this file is derived from the b44.c driver + Copyright (C) 2002 David S. Miller + Copyright (C) Pekka Pietikainen + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "dma.h" +#include "main.h" +#include "debugfs.h" +#include "xmit.h" + +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/skbuff.h> + +/* 32bit DMA ops. 
*/ +static +struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, + int slot, + struct b43_dmadesc_meta **meta) +{ + struct b43_dmadesc32 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43_dmadesc_generic *)desc; +} + +static void op32_fill_descriptor(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43_dmadesc32 *descbase = ring->descbase; + int slot; + u32 ctl; + u32 addr; + u32 addrext; + + slot = (int)(&(desc->dma32) - descbase); + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); + addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addr |= ssb_dma_translation(ring->dev->dev); + ctl = (bufsize - ring->frameoffset) + & B43_DMA32_DCTL_BYTECNT; + if (slot == ring->nr_slots - 1) + ctl |= B43_DMA32_DCTL_DTABLEEND; + if (start) + ctl |= B43_DMA32_DCTL_FRAMESTART; + if (end) + ctl |= B43_DMA32_DCTL_FRAMEEND; + if (irq) + ctl |= B43_DMA32_DCTL_IRQ; + ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT) + & B43_DMA32_DCTL_ADDREXT_MASK; + + desc->dma32.control = cpu_to_le32(ctl); + desc->dma32.address = cpu_to_le32(addr); +} + +static void op32_poke_tx(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA32_TXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc32))); +} + +static void op32_tx_suspend(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) + | B43_DMA32_TXSUSPEND); +} + +static void op32_tx_resume(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) + & ~B43_DMA32_TXSUSPEND); +} + +static int op32_get_current_rxslot(struct b43_dmaring *ring) +{ + u32 val; + + val = b43_dma_read(ring, B43_DMA32_RXSTATUS); + val &= B43_DMA32_RXDPTR; + + return (val / sizeof(struct b43_dmadesc32)); +} + +static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA32_RXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc32))); +} + +static const struct b43_dma_ops dma32_ops = { + .idx2desc = op32_idx2desc, + .fill_descriptor = op32_fill_descriptor, + .poke_tx = op32_poke_tx, + .tx_suspend = op32_tx_suspend, + .tx_resume = op32_tx_resume, + .get_current_rxslot = op32_get_current_rxslot, + .set_current_rxslot = op32_set_current_rxslot, +}; + +/* 64bit DMA ops. 
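 *
 * These mirror the 32-bit table above; both implement the same
 * b43_dma_ops interface, and the rest of the driver only ever calls
 * through the ring's ops pointer.  A condensed sketch of the dispatch
 * (the same pattern appears in b43_setup_dmaring() and
 * dma_tx_fragment() below):
 *
 *	ring->ops = dma64 ? &dma64_ops : &dma32_ops;
 *	desc = ring->ops->idx2desc(ring, slot, &meta);
 *	ring->ops->fill_descriptor(ring, desc, dmaaddr, len, 1, 0, 0);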
*/ +static +struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring, + int slot, + struct b43_dmadesc_meta **meta) +{ + struct b43_dmadesc64 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43_dmadesc_generic *)desc; +} + +static void op64_fill_descriptor(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43_dmadesc64 *descbase = ring->descbase; + int slot; + u32 ctl0 = 0, ctl1 = 0; + u32 addrlo, addrhi; + u32 addrext; + + slot = (int)(&(desc->dma64) - descbase); + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addrlo = (u32) (dmaaddr & 0xFFFFFFFF); + addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); + addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addrhi |= ssb_dma_translation(ring->dev->dev); + if (slot == ring->nr_slots - 1) + ctl0 |= B43_DMA64_DCTL0_DTABLEEND; + if (start) + ctl0 |= B43_DMA64_DCTL0_FRAMESTART; + if (end) + ctl0 |= B43_DMA64_DCTL0_FRAMEEND; + if (irq) + ctl0 |= B43_DMA64_DCTL0_IRQ; + ctl1 |= (bufsize - ring->frameoffset) + & B43_DMA64_DCTL1_BYTECNT; + ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) + & B43_DMA64_DCTL1_ADDREXT_MASK; + + desc->dma64.control0 = cpu_to_le32(ctl0); + desc->dma64.control1 = cpu_to_le32(ctl1); + desc->dma64.address_low = cpu_to_le32(addrlo); + desc->dma64.address_high = cpu_to_le32(addrhi); +} + +static void op64_poke_tx(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA64_TXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc64))); +} + +static void op64_tx_suspend(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) + | B43_DMA64_TXSUSPEND); +} + +static void op64_tx_resume(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) + & ~B43_DMA64_TXSUSPEND); +} + +static int op64_get_current_rxslot(struct b43_dmaring *ring) +{ + u32 val; + + val = b43_dma_read(ring, B43_DMA64_RXSTATUS); + val &= B43_DMA64_RXSTATDPTR; + + return (val / sizeof(struct b43_dmadesc64)); +} + +static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA64_RXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc64))); +} + +static const struct b43_dma_ops dma64_ops = { + .idx2desc = op64_idx2desc, + .fill_descriptor = op64_fill_descriptor, + .poke_tx = op64_poke_tx, + .tx_suspend = op64_tx_suspend, + .tx_resume = op64_tx_resume, + .get_current_rxslot = op64_get_current_rxslot, + .set_current_rxslot = op64_set_current_rxslot, +}; + +static inline int free_slots(struct b43_dmaring *ring) +{ + return (ring->nr_slots - ring->used_slots); +} + +static inline int next_slot(struct b43_dmaring *ring, int slot) +{ + B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); + if (slot == ring->nr_slots - 1) + return 0; + return slot + 1; +} + +static inline int prev_slot(struct b43_dmaring *ring, int slot) +{ + B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); + if (slot == 0) + return ring->nr_slots - 1; + return slot - 1; +} + +#ifdef CONFIG_B43_DEBUG +static void update_max_used_slots(struct b43_dmaring *ring, + int current_used_slots) +{ + if (current_used_slots <= ring->max_used_slots) + return; + ring->max_used_slots = current_used_slots; + if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) { + b43dbg(ring->dev->wl, + "max_used_slots increased to %d on %s ring %d\n", + ring->max_used_slots, + 
ring->tx ? "TX" : "RX", ring->index); + } +} +#else +static inline + void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) +{ +} +#endif /* DEBUG */ + +/* Request a slot for usage. */ +static inline int request_slot(struct b43_dmaring *ring) +{ + int slot; + + B43_WARN_ON(!ring->tx); + B43_WARN_ON(ring->stopped); + B43_WARN_ON(free_slots(ring) == 0); + + slot = next_slot(ring, ring->current_slot); + ring->current_slot = slot; + ring->used_slots++; + + update_max_used_slots(ring, ring->used_slots); + + return slot; +} + +/* Mac80211-queue to b43-ring mapping */ +static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev, + int queue_priority) +{ + struct b43_dmaring *ring; + +/*FIXME: For now we always run on TX-ring-1 */ + return dev->dma.tx_ring1; + + /* 0 = highest priority */ + switch (queue_priority) { + default: + B43_WARN_ON(1); + /* fallthrough */ + case 0: + ring = dev->dma.tx_ring3; + break; + case 1: + ring = dev->dma.tx_ring2; + break; + case 2: + ring = dev->dma.tx_ring1; + break; + case 3: + ring = dev->dma.tx_ring0; + break; + case 4: + ring = dev->dma.tx_ring4; + break; + case 5: + ring = dev->dma.tx_ring5; + break; + } + + return ring; +} + +/* Bcm43xx-ring to mac80211-queue mapping */ +static inline int txring_to_priority(struct b43_dmaring *ring) +{ + static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, }; + +/*FIXME: have only one queue, for now */ + return 0; + + return idx_to_prio[ring->index]; +} + +u16 b43_dmacontroller_base(int dma64bit, int controller_idx) +{ + static const u16 map64[] = { + B43_MMIO_DMA64_BASE0, + B43_MMIO_DMA64_BASE1, + B43_MMIO_DMA64_BASE2, + B43_MMIO_DMA64_BASE3, + B43_MMIO_DMA64_BASE4, + B43_MMIO_DMA64_BASE5, + }; + static const u16 map32[] = { + B43_MMIO_DMA32_BASE0, + B43_MMIO_DMA32_BASE1, + B43_MMIO_DMA32_BASE2, + B43_MMIO_DMA32_BASE3, + B43_MMIO_DMA32_BASE4, + B43_MMIO_DMA32_BASE5, + }; + + if (dma64bit) { + B43_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map64))); + return map64[controller_idx]; + } + B43_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map32))); + return map32[controller_idx]; +} + +static inline + dma_addr_t map_descbuffer(struct b43_dmaring *ring, + unsigned char *buf, size_t len, int tx) +{ + dma_addr_t dmaaddr; + + if (tx) { + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, DMA_TO_DEVICE); + } else { + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, DMA_FROM_DEVICE); + } + + return dmaaddr; +} + +static inline + void unmap_descbuffer(struct b43_dmaring *ring, + dma_addr_t addr, size_t len, int tx) +{ + if (tx) { + dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE); + } else { + dma_unmap_single(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); + } +} + +static inline + void sync_descbuffer_for_cpu(struct b43_dmaring *ring, + dma_addr_t addr, size_t len) +{ + B43_WARN_ON(ring->tx); + dma_sync_single_for_cpu(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline + void sync_descbuffer_for_device(struct b43_dmaring *ring, + dma_addr_t addr, size_t len) +{ + B43_WARN_ON(ring->tx); + dma_sync_single_for_device(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline + void free_descriptor_buffer(struct b43_dmaring *ring, + struct b43_dmadesc_meta *meta) +{ + if (meta->skb) { + dev_kfree_skb_any(meta->skb); + meta->skb = NULL; + } +} + +static int alloc_ringmemory(struct b43_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + ring->descbase = dma_alloc_coherent(dev, 
B43_DMA_RINGMEMSIZE, + &(ring->dmabase), GFP_KERNEL); + if (!ring->descbase) { + b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); + return -ENOMEM; + } + memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); + + return 0; +} + +static void free_ringmemory(struct b43_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + dma_free_coherent(dev, B43_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase); +} + +/* Reset the RX DMA channel */ +int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; + b43_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43_DMA64_RXSTAT; + if (value == B43_DMA64_RXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43_DMA32_RXSTATE; + if (value == B43_DMA32_RXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43err(dev->wl, "DMA RX reset timed out\n"); + return -ENODEV; + } + + return 0; +} + +/* Reset the RX DMA channel */ +int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + for (i = 0; i < 10; i++) { + offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43_DMA64_TXSTAT; + if (value == B43_DMA64_TXSTAT_DISABLED || + value == B43_DMA64_TXSTAT_IDLEWAIT || + value == B43_DMA64_TXSTAT_STOPPED) + break; + } else { + value &= B43_DMA32_TXSTATE; + if (value == B43_DMA32_TXSTAT_DISABLED || + value == B43_DMA32_TXSTAT_IDLEWAIT || + value == B43_DMA32_TXSTAT_STOPPED) + break; + } + msleep(1); + } + offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; + b43_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43_DMA64_TXSTAT; + if (value == B43_DMA64_TXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43_DMA32_TXSTATE; + if (value == B43_DMA32_TXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43err(dev->wl, "DMA TX reset timed out\n"); + return -ENODEV; + } + /* ensure the reset is completed. */ + msleep(1); + + return 0; +} + +static int setup_rx_descbuffer(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + struct b43_dmadesc_meta *meta, gfp_t gfp_flags) +{ + struct b43_rxhdr_fw4 *rxhdr; + struct b43_hwtxstatus *txstat; + dma_addr_t dmaaddr; + struct sk_buff *skb; + + B43_WARN_ON(ring->tx); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); + if (dma_mapping_error(dmaaddr)) { + /* ugh. 
try to realloc in zone_dma */ + gfp_flags |= GFP_DMA; + + dev_kfree_skb_any(skb); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + } + + if (dma_mapping_error(dmaaddr)) { + dev_kfree_skb_any(skb); + return -EIO; + } + + meta->skb = skb; + meta->dmaaddr = dmaaddr; + ring->ops->fill_descriptor(ring, desc, dmaaddr, + ring->rx_buffersize, 0, 0, 0); + + rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); + rxhdr->frame_len = 0; + txstat = (struct b43_hwtxstatus *)(skb->data); + txstat->cookie = 0; + + return 0; +} + +/* Allocate the initial descbuffers. + * This is used for an RX ring only. + */ +static int alloc_initial_descbuffers(struct b43_dmaring *ring) +{ + int i, err = -ENOMEM; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); + if (err) { + b43err(ring->dev->wl, + "Failed to allocate initial descbuffers\n"); + goto err_unwind; + } + } + mb(); + ring->used_slots = ring->nr_slots; + err = 0; + out: + return err; + + err_unwind: + for (i--; i >= 0; i--) { + desc = ring->ops->idx2desc(ring, i, &meta); + + unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); + dev_kfree_skb(meta->skb); + } + goto out; +} + +/* Do initial setup of the DMA controller. + * Reset the controller, write the ring busaddress + * and switch the "enable" bit on. + */ +static int dmacontroller_setup(struct b43_dmaring *ring) +{ + int err = 0; + u32 value; + u32 addrext; + u32 trans = ssb_dma_translation(ring->dev->dev); + + if (ring->tx) { + if (ring->dma64) { + u64 ringbase = (u64) (ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43_DMA64_TXENABLE; + value |= (addrext << B43_DMA64_TXADDREXT_SHIFT) + & B43_DMA64_TXADDREXT_MASK; + b43_dma_write(ring, B43_DMA64_TXCTL, value); + b43_dma_write(ring, B43_DMA64_TXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43_dma_write(ring, B43_DMA64_TXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + } else { + u32 ringbase = (u32) (ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43_DMA32_TXENABLE; + value |= (addrext << B43_DMA32_TXADDREXT_SHIFT) + & B43_DMA32_TXADDREXT_MASK; + b43_dma_write(ring, B43_DMA32_TXCTL, value); + b43_dma_write(ring, B43_DMA32_TXRING, + (ringbase & ~SSB_DMA_TRANSLATION_MASK) + | trans); + } + } else { + err = alloc_initial_descbuffers(ring); + if (err) + goto out; + if (ring->dma64) { + u64 ringbase = (u64) (ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT); + value |= B43_DMA64_RXENABLE; + value |= (addrext << B43_DMA64_RXADDREXT_SHIFT) + & B43_DMA64_RXADDREXT_MASK; + b43_dma_write(ring, B43_DMA64_RXCTL, value); + b43_dma_write(ring, B43_DMA64_RXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43_dma_write(ring, B43_DMA64_RXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43_dma_write(ring, B43_DMA64_RXINDEX, 200); + } else { + u32 ringbase = (u32) (ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT); + value |= B43_DMA32_RXENABLE; + value |= (addrext << B43_DMA32_RXADDREXT_SHIFT) + & 
B43_DMA32_RXADDREXT_MASK; + b43_dma_write(ring, B43_DMA32_RXCTL, value); + b43_dma_write(ring, B43_DMA32_RXRING, + (ringbase & ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43_dma_write(ring, B43_DMA32_RXINDEX, 200); + } + } + + out: + return err; +} + +/* Shutdown the DMA controller. */ +static void dmacontroller_cleanup(struct b43_dmaring *ring) +{ + if (ring->tx) { + b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); + b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); + } else + b43_dma_write(ring, B43_DMA32_TXRING, 0); + } else { + b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); + b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); + } else + b43_dma_write(ring, B43_DMA32_RXRING, 0); + } +} + +static void free_all_descbuffers(struct b43_dmaring *ring) +{ + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + int i; + + if (!ring->used_slots) + return; + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + if (!meta->skb) { + B43_WARN_ON(!ring->tx); + continue; + } + if (ring->tx) { + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + } else { + unmap_descbuffer(ring, meta->dmaaddr, + ring->rx_buffersize, 0); + } + free_descriptor_buffer(ring, meta); + } +} + +static u64 supported_dma_mask(struct b43_wldev *dev) +{ + u32 tmp; + u16 mmio_base; + + tmp = b43_read32(dev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_64BIT_MASK; + mmio_base = b43_dmacontroller_base(0, 0); + b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); + tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); + if (tmp & B43_DMA32_TXADDREXT_MASK) + return DMA_32BIT_MASK; + + return DMA_30BIT_MASK; +} + +/* Main initialization function. 
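 *
 * Before any ring is set up, the DMA mask probed by
 * supported_dma_mask() above decides between the 32-bit and 64-bit
 * engine.  A condensed sketch of the caller side (this mirrors
 * b43_dma_init() further below; error handling omitted):
 *
 *	dmamask = supported_dma_mask(dev);
 *	dma64 = (dmamask == DMA_64BIT_MASK);
 *	ssb_dma_set_mask(dev->dev, dmamask);
 *	ring = b43_setup_dmaring(dev, 0, 1, dma64);
 *
 * If ssb_dma_set_mask() fails, b43_dma_init() gives up on DMA and,
 * when PIO support is compiled in, falls back to PIO instead.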
*/ +static +struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, + int controller_index, + int for_tx, int dma64) +{ + struct b43_dmaring *ring; + int err; + int nr_slots; + dma_addr_t dma_test; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto out; + + nr_slots = B43_RXRING_SLOTS; + if (for_tx) + nr_slots = B43_TXRING_SLOTS; + + ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta), + GFP_KERNEL); + if (!ring->meta) + goto err_kfree_ring; + if (for_tx) { + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43_txhdr_fw4), + GFP_KERNEL); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + /* test for ability to dma to txhdr_cache */ + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43_txhdr_fw4), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) { + /* ugh realloc */ + kfree(ring->txhdr_cache); + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct + b43_txhdr_fw4), + GFP_KERNEL | GFP_DMA); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43_txhdr_fw4), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) + goto err_kfree_txhdr_cache; + } + + dma_unmap_single(dev->dev->dev, + dma_test, sizeof(struct b43_txhdr_fw4), + DMA_TO_DEVICE); + } + + ring->dev = dev; + ring->nr_slots = nr_slots; + ring->mmio_base = b43_dmacontroller_base(dma64, controller_index); + ring->index = controller_index; + ring->dma64 = !!dma64; + if (dma64) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; + if (for_tx) { + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; + } else if (ring->index == 3) { + ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET; + } else + B43_WARN_ON(1); + } + spin_lock_init(&ring->lock); +#ifdef CONFIG_B43_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + err = alloc_ringmemory(ring); + if (err) + goto err_kfree_txhdr_cache; + err = dmacontroller_setup(ring); + if (err) + goto err_free_ringmemory; + + out: + return ring; + + err_free_ringmemory: + free_ringmemory(ring); + err_kfree_txhdr_cache: + kfree(ring->txhdr_cache); + err_kfree_meta: + kfree(ring->meta); + err_kfree_ring: + kfree(ring); + ring = NULL; + goto out; +} + +/* Main cleanup function. */ +static void b43_destroy_dmaring(struct b43_dmaring *ring) +{ + if (!ring) + return; + + b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n", + (ring->dma64) ? "64" : "32", + ring->mmio_base, + (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); + /* Device IRQs are disabled prior entering this function, + * so no need to take care of concurrency with rx handler stuff. 
+ */ + dmacontroller_cleanup(ring); + free_all_descbuffers(ring); + free_ringmemory(ring); + + kfree(ring->txhdr_cache); + kfree(ring->meta); + kfree(ring); +} + +void b43_dma_free(struct b43_wldev *dev) +{ + struct b43_dma *dma; + + if (b43_using_pio(dev)) + return; + dma = &dev->dma; + + b43_destroy_dmaring(dma->rx_ring3); + dma->rx_ring3 = NULL; + b43_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + + b43_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + b43_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + b43_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + b43_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + b43_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + b43_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; +} + +int b43_dma_init(struct b43_wldev *dev) +{ + struct b43_dma *dma = &dev->dma; + struct b43_dmaring *ring; + int err; + u64 dmamask; + int dma64 = 0; + + dmamask = supported_dma_mask(dev); + if (dmamask == DMA_64BIT_MASK) + dma64 = 1; + + err = ssb_dma_set_mask(dev->dev, dmamask); + if (err) { +#ifdef B43_PIO + b43warn(dev->wl, "DMA for this device not supported. " + "Falling back to PIO\n"); + dev->__using_pio = 1; + return -EAGAIN; +#else + b43err(dev->wl, "DMA for this device not supported and " + "no PIO support compiled in\n"); + return -EOPNOTSUPP; +#endif + } + + err = -ENOMEM; + /* setup TX DMA channels. */ + ring = b43_setup_dmaring(dev, 0, 1, dma64); + if (!ring) + goto out; + dma->tx_ring0 = ring; + + ring = b43_setup_dmaring(dev, 1, 1, dma64); + if (!ring) + goto err_destroy_tx0; + dma->tx_ring1 = ring; + + ring = b43_setup_dmaring(dev, 2, 1, dma64); + if (!ring) + goto err_destroy_tx1; + dma->tx_ring2 = ring; + + ring = b43_setup_dmaring(dev, 3, 1, dma64); + if (!ring) + goto err_destroy_tx2; + dma->tx_ring3 = ring; + + ring = b43_setup_dmaring(dev, 4, 1, dma64); + if (!ring) + goto err_destroy_tx3; + dma->tx_ring4 = ring; + + ring = b43_setup_dmaring(dev, 5, 1, dma64); + if (!ring) + goto err_destroy_tx4; + dma->tx_ring5 = ring; + + /* setup RX DMA channels. */ + ring = b43_setup_dmaring(dev, 0, 0, dma64); + if (!ring) + goto err_destroy_tx5; + dma->rx_ring0 = ring; + + if (dev->dev->id.revision < 5) { + ring = b43_setup_dmaring(dev, 3, 0, dma64); + if (!ring) + goto err_destroy_rx0; + dma->rx_ring3 = ring; + } + + b43dbg(dev->wl, "%d-bit DMA initialized\n", + (dmamask == DMA_64BIT_MASK) ? 64 : + (dmamask == DMA_32BIT_MASK) ? 32 : 30); + err = 0; + out: + return err; + + err_destroy_rx0: + b43_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + err_destroy_tx5: + b43_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + err_destroy_tx4: + b43_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + err_destroy_tx3: + b43_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + err_destroy_tx2: + b43_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + err_destroy_tx1: + b43_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + err_destroy_tx0: + b43_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; + goto out; +} + +/* Generate a cookie for the TX header. */ +static u16 generate_cookie(struct b43_dmaring *ring, int slot) +{ + u16 cookie = 0x1000; + + /* Use the upper 4 bits of the cookie as + * DMA controller ID and store the slot number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. 
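 *
 * Worked example (values are illustrative): for tx_ring2 (index 2)
 * and slot 5, generate_cookie() returns 0xC005; parse_cookie() masks
 * with 0xF000 to find tx_ring2 again and with 0x0FFF to recover slot
 * 5.  Since the six ring indices map onto 0xA000..0xF000, a valid TX
 * cookie can indeed never be 0:
 *
 *	cookie = generate_cookie(dev->dma.tx_ring2, 5);   yields 0xC005
 *	ring = parse_cookie(dev, 0xC005, &slot);          tx_ring2, slot 5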
+ */ + switch (ring->index) { + case 0: + cookie = 0xA000; + break; + case 1: + cookie = 0xB000; + break; + case 2: + cookie = 0xC000; + break; + case 3: + cookie = 0xD000; + break; + case 4: + cookie = 0xE000; + break; + case 5: + cookie = 0xF000; + break; + } + B43_WARN_ON(slot & ~0x0FFF); + cookie |= (u16) slot; + + return cookie; +} + +/* Inspect a cookie and find out to which controller/slot it belongs. */ +static +struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) +{ + struct b43_dma *dma = &dev->dma; + struct b43_dmaring *ring = NULL; + + switch (cookie & 0xF000) { + case 0xA000: + ring = dma->tx_ring0; + break; + case 0xB000: + ring = dma->tx_ring1; + break; + case 0xC000: + ring = dma->tx_ring2; + break; + case 0xD000: + ring = dma->tx_ring3; + break; + case 0xE000: + ring = dma->tx_ring4; + break; + case 0xF000: + ring = dma->tx_ring5; + break; + default: + B43_WARN_ON(1); + } + *slot = (cookie & 0x0FFF); + B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); + + return ring; +} + +static int dma_tx_fragment(struct b43_dmaring *ring, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + const struct b43_dma_ops *ops = ring->ops; + u8 *header; + int slot; + int err; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + struct b43_dmadesc_meta *meta_hdr; + struct sk_buff *bounce_skb; + +#define SLOTS_PER_PACKET 2 + B43_WARN_ON(skb_shinfo(skb)->nr_frags); + + /* Get a slot for the header. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta_hdr); + memset(meta_hdr, 0, sizeof(*meta_hdr)); + + header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]); + b43_generate_txhdr(ring->dev, header, + skb->data, skb->len, ctl, + generate_cookie(ring, slot)); + + meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, + sizeof(struct b43_txhdr_fw4), 1); + if (dma_mapping_error(meta_hdr->dmaaddr)) + return -EIO; + ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, + sizeof(struct b43_txhdr_fw4), 1, 0, 0); + + /* Get a slot for the payload. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta); + memset(meta, 0, sizeof(*meta)); + + memcpy(&meta->txstat.control, ctl, sizeof(*ctl)); + meta->skb = skb; + meta->is_last_fragment = 1; + + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + /* create a bounce buffer in zone_dma on mapping failure. */ + if (dma_mapping_error(meta->dmaaddr)) { + bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) { + err = -ENOMEM; + goto out_unmap_hdr; + } + + memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + meta->skb = skb; + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + if (dma_mapping_error(meta->dmaaddr)) { + err = -EIO; + goto out_free_bounce; + } + } + + ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); + + /* Now transfer the whole frame. */ + wmb(); + ops->poke_tx(ring, next_slot(ring, slot)); + return 0; + + out_free_bounce: + dev_kfree_skb_any(skb); + out_unmap_hdr: + unmap_descbuffer(ring, meta_hdr->dmaaddr, + sizeof(struct b43_txhdr_fw4), 1); + return err; +} + +static inline int should_inject_overflow(struct b43_dmaring *ring) +{ +#ifdef CONFIG_B43_DEBUG + if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) { + /* Check if we should inject another ringbuffer overflow + * to test handling of this situation in the stack. 
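 *
 * The injection is rate limited via last_injected_overflow + HZ, i.e.
 * at most one artificial overflow per second, and only while the
 * debug_dmaoverflow switch (B43_DBG_DMAOVERFLOW) is enabled in
 * debugfs.  b43_dma_tx() then stops the mac80211 queue exactly as if
 * the ring really had fewer than SLOTS_PER_PACKET free slots.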
*/ + unsigned long next_overflow; + + next_overflow = ring->last_injected_overflow + HZ; + if (time_after(jiffies, next_overflow)) { + ring->last_injected_overflow = jiffies; + b43dbg(ring->dev->wl, + "Injecting TX ring overflow on " + "DMA controller %d\n", ring->index); + return 1; + } + } +#endif /* CONFIG_B43_DEBUG */ + return 0; +} + +int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct b43_dmaring *ring; + int err = 0; + unsigned long flags; + + ring = priority_to_txring(dev, ctl->queue); + spin_lock_irqsave(&ring->lock, flags); + B43_WARN_ON(!ring->tx); + if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { + b43warn(dev->wl, "DMA queue overflow\n"); + err = -ENOSPC; + goto out_unlock; + } + /* Check if the queue was stopped in mac80211, + * but we got called nevertheless. + * That would be a mac80211 bug. */ + B43_WARN_ON(ring->stopped); + + err = dma_tx_fragment(ring, skb, ctl); + if (unlikely(err)) { + b43err(dev->wl, "DMA tx mapping failure\n"); + goto out_unlock; + } + ring->nr_tx_packets++; + if ((free_slots(ring) < SLOTS_PER_PACKET) || + should_inject_overflow(ring)) { + /* This TX ring is full. */ + ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 1; + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { + b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); + } + } + out_unlock: + spin_unlock_irqrestore(&ring->lock, flags); + + return err; +} + +void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + const struct b43_dma_ops *ops; + struct b43_dmaring *ring; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + int slot; + + ring = parse_cookie(dev, status->cookie, &slot); + if (unlikely(!ring)) + return; + B43_WARN_ON(!irqs_disabled()); + spin_lock(&ring->lock); + + B43_WARN_ON(!ring->tx); + ops = ring->ops; + while (1) { + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + desc = ops->idx2desc(ring, slot, &meta); + + if (meta->skb) + unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, + 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + sizeof(struct b43_txhdr_fw4), 1); + + if (meta->is_last_fragment) { + B43_WARN_ON(!meta->skb); + /* Call back to inform the ieee80211 subsystem about the + * status of the transmission. + * Some fields of txstat are already filled in dma_tx(). + */ + if (status->acked) { + meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; + } else { + if (!(meta->txstat.control.flags + & IEEE80211_TXCTL_NO_ACK)) + meta->txstat.excessive_retries = 1; + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. */ + meta->txstat.retry_count = 0; + } else + meta->txstat.retry_count = status->frame_count - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, + &(meta->txstat)); + /* skb is freed by ieee80211_tx_status_irqsafe() */ + meta->skb = NULL; + } else { + /* No need to call free_descriptor_buffer here, as + * this is only the txhdr, which is not allocated. + */ + B43_WARN_ON(meta->skb); + } + + /* Everything unmapped and free'd. So it's not used anymore. 
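 *
 * Note that each frame occupies two slots (see SLOTS_PER_PACKET in
 * dma_tx_fragment()): the cookie points at the txhdr slot, which has
 * no skb attached, and the walk then advances to the payload slot,
 * the one flagged is_last_fragment, before leaving the loop.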
*/ + ring->used_slots--; + + if (meta->is_last_fragment) + break; + slot = next_slot(ring, slot); + } + dev->stats.last_tx = jiffies; + if (ring->stopped) { + B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); + ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 0; + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { + b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); + } + } + + spin_unlock(&ring->lock); +} + +void b43_dma_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + const int nr_queues = dev->wl->hw->queues; + struct b43_dmaring *ring; + struct ieee80211_tx_queue_stats_data *data; + unsigned long flags; + int i; + + for (i = 0; i < nr_queues; i++) { + data = &(stats->data[i]); + ring = priority_to_txring(dev, i); + + spin_lock_irqsave(&ring->lock, flags); + data->len = ring->used_slots / SLOTS_PER_PACKET; + data->limit = ring->nr_slots / SLOTS_PER_PACKET; + data->count = ring->nr_tx_packets; + spin_unlock_irqrestore(&ring->lock, flags); + } +} + +static void dma_rx(struct b43_dmaring *ring, int *slot) +{ + const struct b43_dma_ops *ops = ring->ops; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + struct b43_rxhdr_fw4 *rxhdr; + struct sk_buff *skb; + u16 len; + int err; + dma_addr_t dmaaddr; + + desc = ops->idx2desc(ring, *slot, &meta); + + sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); + skb = meta->skb; + + if (ring->index == 3) { + /* We received an xmit status. */ + struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data; + int i = 0; + + while (hw->cookie == 0) { + if (i > 100) + break; + i++; + udelay(2); + barrier(); + } + b43_handle_hwtxstatus(ring->dev, hw); + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + + return; + } + rxhdr = (struct b43_rxhdr_fw4 *)skb->data; + len = le16_to_cpu(rxhdr->frame_len); + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rxhdr->frame_len); + } while (len == 0 && i++ < 5); + if (unlikely(len == 0)) { + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + goto drop; + } + } + if (unlikely(len > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. + * This should never happen, as we try to allocate buffers + * big enough. So simply ignore this packet. + */ + int cnt = 0; + s32 tmp = len; + + while (1) { + desc = ops->idx2desc(ring, *slot, &meta); + /* recycle the descriptor buffer. 
*/ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + *slot = next_slot(ring, *slot); + cnt++; + tmp -= ring->rx_buffersize; + if (tmp <= 0) + break; + } + b43err(ring->dev->wl, "DMA RX buffer too small " + "(len: %u, buffer: %u, nr-dropped: %d)\n", + len, ring->rx_buffersize, cnt); + goto drop; + } + + dmaaddr = meta->dmaaddr; + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); + if (unlikely(err)) { + b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); + sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); + goto drop; + } + + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); + skb_put(skb, len + ring->frameoffset); + skb_pull(skb, ring->frameoffset); + + b43_rx(ring->dev, skb, rxhdr); + drop: + return; +} + +void b43_dma_rx(struct b43_dmaring *ring) +{ + const struct b43_dma_ops *ops = ring->ops; + int slot, current_slot; + int used_slots = 0; + + B43_WARN_ON(ring->tx); + current_slot = ops->get_current_rxslot(ring); + B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots)); + + slot = ring->current_slot; + for (; slot != current_slot; slot = next_slot(ring, slot)) { + dma_rx(ring, &slot); + update_max_used_slots(ring, ++used_slots); + } + ops->set_current_rxslot(ring, slot); + ring->current_slot = slot; +} + +static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43_WARN_ON(!ring->tx); + ring->ops->tx_suspend(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43_WARN_ON(!ring->tx); + ring->ops->tx_resume(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +void b43_dma_tx_suspend(struct b43_wldev *dev) +{ + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_dma_tx_suspend_ring(dev->dma.tx_ring0); + b43_dma_tx_suspend_ring(dev->dma.tx_ring1); + b43_dma_tx_suspend_ring(dev->dma.tx_ring2); + b43_dma_tx_suspend_ring(dev->dma.tx_ring3); + b43_dma_tx_suspend_ring(dev->dma.tx_ring4); + b43_dma_tx_suspend_ring(dev->dma.tx_ring5); +} + +void b43_dma_tx_resume(struct b43_wldev *dev) +{ + b43_dma_tx_resume_ring(dev->dma.tx_ring5); + b43_dma_tx_resume_ring(dev->dma.tx_ring4); + b43_dma_tx_resume_ring(dev->dma.tx_ring3); + b43_dma_tx_resume_ring(dev->dma.tx_ring2); + b43_dma_tx_resume_ring(dev->dma.tx_ring1); + b43_dma_tx_resume_ring(dev->dma.tx_ring0); + b43_power_saving_ctl_bits(dev, 0); +} diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h new file mode 100644 index 000000000000..3eed185be725 --- /dev/null +++ b/drivers/net/wireless/b43/dma.h @@ -0,0 +1,337 @@ +#ifndef B43_DMA_H_ +#define B43_DMA_H_ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/linkage.h> +#include <asm/atomic.h> + +#include "b43.h" + +/* DMA-Interrupt reasons. */ +#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ + | (1 << 14) | (1 << 15)) +#define B43_DMAIRQ_NONFATALMASK (1 << 13) +#define B43_DMAIRQ_RX_DONE (1 << 16) + +/*** 32-bit DMA Engine. ***/ + +/* 32-bit DMA controller registers. 
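 *
 * These are offsets within one controller's register window, not
 * absolute core registers: b43_dmacontroller_base() selects the
 * window (B43_MMIO_DMA32_BASE0..5) and accesses combine the two, as
 * the reset helpers in dma.c above do.  For instance, writing the TX
 * control register of 32-bit controller 1 amounts to roughly
 *
 *	b43_write32(dev, B43_MMIO_DMA32_BASE1 + B43_DMA32_TXCTL, value);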
*/ +#define B43_DMA32_TXCTL 0x00 +#define B43_DMA32_TXENABLE 0x00000001 +#define B43_DMA32_TXSUSPEND 0x00000002 +#define B43_DMA32_TXLOOPBACK 0x00000004 +#define B43_DMA32_TXFLUSH 0x00000010 +#define B43_DMA32_TXADDREXT_MASK 0x00030000 +#define B43_DMA32_TXADDREXT_SHIFT 16 +#define B43_DMA32_TXRING 0x04 +#define B43_DMA32_TXINDEX 0x08 +#define B43_DMA32_TXSTATUS 0x0C +#define B43_DMA32_TXDPTR 0x00000FFF +#define B43_DMA32_TXSTATE 0x0000F000 +#define B43_DMA32_TXSTAT_DISABLED 0x00000000 +#define B43_DMA32_TXSTAT_ACTIVE 0x00001000 +#define B43_DMA32_TXSTAT_IDLEWAIT 0x00002000 +#define B43_DMA32_TXSTAT_STOPPED 0x00003000 +#define B43_DMA32_TXSTAT_SUSP 0x00004000 +#define B43_DMA32_TXERROR 0x000F0000 +#define B43_DMA32_TXERR_NOERR 0x00000000 +#define B43_DMA32_TXERR_PROT 0x00010000 +#define B43_DMA32_TXERR_UNDERRUN 0x00020000 +#define B43_DMA32_TXERR_BUFREAD 0x00030000 +#define B43_DMA32_TXERR_DESCREAD 0x00040000 +#define B43_DMA32_TXACTIVE 0xFFF00000 +#define B43_DMA32_RXCTL 0x10 +#define B43_DMA32_RXENABLE 0x00000001 +#define B43_DMA32_RXFROFF_MASK 0x000000FE +#define B43_DMA32_RXFROFF_SHIFT 1 +#define B43_DMA32_RXDIRECTFIFO 0x00000100 +#define B43_DMA32_RXADDREXT_MASK 0x00030000 +#define B43_DMA32_RXADDREXT_SHIFT 16 +#define B43_DMA32_RXRING 0x14 +#define B43_DMA32_RXINDEX 0x18 +#define B43_DMA32_RXSTATUS 0x1C +#define B43_DMA32_RXDPTR 0x00000FFF +#define B43_DMA32_RXSTATE 0x0000F000 +#define B43_DMA32_RXSTAT_DISABLED 0x00000000 +#define B43_DMA32_RXSTAT_ACTIVE 0x00001000 +#define B43_DMA32_RXSTAT_IDLEWAIT 0x00002000 +#define B43_DMA32_RXSTAT_STOPPED 0x00003000 +#define B43_DMA32_RXERROR 0x000F0000 +#define B43_DMA32_RXERR_NOERR 0x00000000 +#define B43_DMA32_RXERR_PROT 0x00010000 +#define B43_DMA32_RXERR_OVERFLOW 0x00020000 +#define B43_DMA32_RXERR_BUFWRITE 0x00030000 +#define B43_DMA32_RXERR_DESCREAD 0x00040000 +#define B43_DMA32_RXACTIVE 0xFFF00000 + +/* 32-bit DMA descriptor. */ +struct b43_dmadesc32 { + __le32 control; + __le32 address; +} __attribute__ ((__packed__)); +#define B43_DMA32_DCTL_BYTECNT 0x00001FFF +#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 +#define B43_DMA32_DCTL_ADDREXT_SHIFT 16 +#define B43_DMA32_DCTL_DTABLEEND 0x10000000 +#define B43_DMA32_DCTL_IRQ 0x20000000 +#define B43_DMA32_DCTL_FRAMEEND 0x40000000 +#define B43_DMA32_DCTL_FRAMESTART 0x80000000 + +/*** 64-bit DMA Engine. ***/ + +/* 64-bit DMA controller registers. 
*/ +#define B43_DMA64_TXCTL 0x00 +#define B43_DMA64_TXENABLE 0x00000001 +#define B43_DMA64_TXSUSPEND 0x00000002 +#define B43_DMA64_TXLOOPBACK 0x00000004 +#define B43_DMA64_TXFLUSH 0x00000010 +#define B43_DMA64_TXADDREXT_MASK 0x00030000 +#define B43_DMA64_TXADDREXT_SHIFT 16 +#define B43_DMA64_TXINDEX 0x04 +#define B43_DMA64_TXRINGLO 0x08 +#define B43_DMA64_TXRINGHI 0x0C +#define B43_DMA64_TXSTATUS 0x10 +#define B43_DMA64_TXSTATDPTR 0x00001FFF +#define B43_DMA64_TXSTAT 0xF0000000 +#define B43_DMA64_TXSTAT_DISABLED 0x00000000 +#define B43_DMA64_TXSTAT_ACTIVE 0x10000000 +#define B43_DMA64_TXSTAT_IDLEWAIT 0x20000000 +#define B43_DMA64_TXSTAT_STOPPED 0x30000000 +#define B43_DMA64_TXSTAT_SUSP 0x40000000 +#define B43_DMA64_TXERROR 0x14 +#define B43_DMA64_TXERRDPTR 0x0001FFFF +#define B43_DMA64_TXERR 0xF0000000 +#define B43_DMA64_TXERR_NOERR 0x00000000 +#define B43_DMA64_TXERR_PROT 0x10000000 +#define B43_DMA64_TXERR_UNDERRUN 0x20000000 +#define B43_DMA64_TXERR_TRANSFER 0x30000000 +#define B43_DMA64_TXERR_DESCREAD 0x40000000 +#define B43_DMA64_TXERR_CORE 0x50000000 +#define B43_DMA64_RXCTL 0x20 +#define B43_DMA64_RXENABLE 0x00000001 +#define B43_DMA64_RXFROFF_MASK 0x000000FE +#define B43_DMA64_RXFROFF_SHIFT 1 +#define B43_DMA64_RXDIRECTFIFO 0x00000100 +#define B43_DMA64_RXADDREXT_MASK 0x00030000 +#define B43_DMA64_RXADDREXT_SHIFT 16 +#define B43_DMA64_RXINDEX 0x24 +#define B43_DMA64_RXRINGLO 0x28 +#define B43_DMA64_RXRINGHI 0x2C +#define B43_DMA64_RXSTATUS 0x30 +#define B43_DMA64_RXSTATDPTR 0x00001FFF +#define B43_DMA64_RXSTAT 0xF0000000 +#define B43_DMA64_RXSTAT_DISABLED 0x00000000 +#define B43_DMA64_RXSTAT_ACTIVE 0x10000000 +#define B43_DMA64_RXSTAT_IDLEWAIT 0x20000000 +#define B43_DMA64_RXSTAT_STOPPED 0x30000000 +#define B43_DMA64_RXSTAT_SUSP 0x40000000 +#define B43_DMA64_RXERROR 0x34 +#define B43_DMA64_RXERRDPTR 0x0001FFFF +#define B43_DMA64_RXERR 0xF0000000 +#define B43_DMA64_RXERR_NOERR 0x00000000 +#define B43_DMA64_RXERR_PROT 0x10000000 +#define B43_DMA64_RXERR_UNDERRUN 0x20000000 +#define B43_DMA64_RXERR_TRANSFER 0x30000000 +#define B43_DMA64_RXERR_DESCREAD 0x40000000 +#define B43_DMA64_RXERR_CORE 0x50000000 + +/* 64-bit DMA descriptor. */ +struct b43_dmadesc64 { + __le32 control0; + __le32 control1; + __le32 address_low; + __le32 address_high; +} __attribute__ ((__packed__)); +#define B43_DMA64_DCTL0_DTABLEEND 0x10000000 +#define B43_DMA64_DCTL0_IRQ 0x20000000 +#define B43_DMA64_DCTL0_FRAMEEND 0x40000000 +#define B43_DMA64_DCTL0_FRAMESTART 0x80000000 +#define B43_DMA64_DCTL1_BYTECNT 0x00001FFF +#define B43_DMA64_DCTL1_ADDREXT_MASK 0x00030000 +#define B43_DMA64_DCTL1_ADDREXT_SHIFT 16 + +struct b43_dmadesc_generic { + union { + struct b43_dmadesc32 dma32; + struct b43_dmadesc64 dma64; + } __attribute__ ((__packed__)); +} __attribute__ ((__packed__)); + +/* Misc DMA constants */ +#define B43_DMA_RINGMEMSIZE PAGE_SIZE +#define B43_DMA0_RX_FRAMEOFFSET 30 +#define B43_DMA3_RX_FRAMEOFFSET 0 + +/* DMA engine tuning knobs */ +#define B43_TXRING_SLOTS 128 +#define B43_RXRING_SLOTS 64 +#define B43_DMA0_RX_BUFFERSIZE (2304 + 100) +#define B43_DMA3_RX_BUFFERSIZE 16 + +#ifdef CONFIG_B43_DMA + +struct sk_buff; +struct b43_private; +struct b43_txstatus; + +struct b43_dmadesc_meta { + /* The kernel DMA-able buffer. */ + struct sk_buff *skb; + /* DMA base bus-address of the descriptor buffer. */ + dma_addr_t dmaaddr; + /* ieee80211 TX status. Only used once per 802.11 frag. 
*/ + bool is_last_fragment; + struct ieee80211_tx_status txstat; +}; + +struct b43_dmaring; + +/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */ +struct b43_dma_ops { + struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring, + int slot, + struct b43_dmadesc_meta ** + meta); + void (*fill_descriptor) (struct b43_dmaring * ring, + struct b43_dmadesc_generic * desc, + dma_addr_t dmaaddr, u16 bufsize, int start, + int end, int irq); + void (*poke_tx) (struct b43_dmaring * ring, int slot); + void (*tx_suspend) (struct b43_dmaring * ring); + void (*tx_resume) (struct b43_dmaring * ring); + int (*get_current_rxslot) (struct b43_dmaring * ring); + void (*set_current_rxslot) (struct b43_dmaring * ring, int slot); +}; + +struct b43_dmaring { + /* Lowlevel DMA ops. */ + const struct b43_dma_ops *ops; + /* Kernel virtual base address of the ring memory. */ + void *descbase; + /* Meta data about all descriptors. */ + struct b43_dmadesc_meta *meta; + /* Cache of TX headers for each slot. + * This is to avoid an allocation on each TX. + * This is NULL for an RX ring. + */ + u8 *txhdr_cache; + /* (Unadjusted) DMA base bus-address of the ring memory. */ + dma_addr_t dmabase; + /* Number of descriptor slots in the ring. */ + int nr_slots; + /* Number of used descriptor slots. */ + int used_slots; + /* Currently used slot in the ring. */ + int current_slot; + /* Total number of packets sent. Statistics only. */ + unsigned int nr_tx_packets; + /* Frameoffset in octets. */ + u32 frameoffset; + /* Descriptor buffer size. */ + u16 rx_buffersize; + /* The MMIO base register of the DMA controller. */ + u16 mmio_base; + /* DMA controller index number (0-5). */ + int index; + /* Boolean. Is this a TX ring? */ + bool tx; + /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ + bool dma64; + /* Boolean. Is this ring stopped at ieee80211 level? */ + bool stopped; + /* Lock, only used for TX. */ + spinlock_t lock; + struct b43_wldev *dev; +#ifdef CONFIG_B43_DEBUG + /* Maximum number of used slots. */ + int max_used_slots; + /* Last time we injected a ring overflow. 
*/ + unsigned long last_injected_overflow; +#endif /* CONFIG_B43_DEBUG */ +}; + +static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset) +{ + return b43_read32(ring->dev, ring->mmio_base + offset); +} + +static inline + void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) +{ + b43_write32(ring->dev, ring->mmio_base + offset, value); +} + +int b43_dma_init(struct b43_wldev *dev); +void b43_dma_free(struct b43_wldev *dev); + +int b43_dmacontroller_rx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64); +int b43_dmacontroller_tx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64); + +u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx); + +void b43_dma_tx_suspend(struct b43_wldev *dev); +void b43_dma_tx_resume(struct b43_wldev *dev); + +void b43_dma_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats); + +int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl); +void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); + +void b43_dma_rx(struct b43_dmaring *ring); + +#else /* CONFIG_B43_DMA */ + +static inline int b43_dma_init(struct b43_wldev *dev) +{ + return 0; +} +static inline void b43_dma_free(struct b43_wldev *dev) +{ +} +static inline + int b43_dmacontroller_rx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64) +{ + return 0; +} +static inline + int b43_dmacontroller_tx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64) +{ + return 0; +} +static inline + void b43_dma_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline + int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline + void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} +static inline void b43_dma_rx(struct b43_dmaring *ring) +{ +} +static inline void b43_dma_tx_suspend(struct b43_wldev *dev) +{ +} +static inline void b43_dma_tx_resume(struct b43_wldev *dev) +{ +} + +#endif /* CONFIG_B43_DMA */ +#endif /* B43_DMA_H_ */ diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c new file mode 100644 index 000000000000..19e588582c7c --- /dev/null +++ b/drivers/net/wireless/b43/leds.c @@ -0,0 +1,235 @@ +/* + + Broadcom B43 wireless driver + LED control + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Copyright (c) 2005 Stefano Brivio <st3@riseup.net> + Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de> + Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> + Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "leds.h" + + +static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index, + bool activelow) +{ + struct b43_wl *wl = dev->wl; + unsigned long flags; + u16 ctl; + + spin_lock_irqsave(&wl->leds_lock, flags); + ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); + if (activelow) + ctl &= ~(1 << led_index); + else + ctl |= (1 << led_index); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); + spin_unlock_irqrestore(&wl->leds_lock, flags); +} + +static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index, + bool activelow) +{ + struct b43_wl *wl = dev->wl; + unsigned long flags; + u16 ctl; + + spin_lock_irqsave(&wl->leds_lock, flags); + ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); + if (activelow) + ctl |= (1 << led_index); + else + ctl &= ~(1 << led_index); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); + spin_unlock_irqrestore(&wl->leds_lock, flags); +} + +/* Callback from the LED subsystem. */ +static void b43_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct b43_led *led = container_of(led_dev, struct b43_led, led_dev); + struct b43_wldev *dev = led->dev; + bool radio_enabled; + + /* Checking the radio-enabled status here is slightly racy, + * but we want to avoid the locking overhead and we don't care + * whether the LED has the wrong state for a second. */ + radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable); + + if (brightness == LED_OFF || !radio_enabled) + b43_led_turn_off(dev, led->index, led->activelow); + else + b43_led_turn_on(dev, led->index, led->activelow); +} + +static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, + const char *name, char *default_trigger, + u8 led_index, bool activelow) +{ + int err; + + b43_led_turn_off(dev, led_index, activelow); + if (led->dev) + return -EEXIST; + if (!default_trigger) + return -EINVAL; + led->dev = dev; + led->index = led_index; + led->activelow = activelow; + strncpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; + led->led_dev.brightness_set = b43_led_brightness_set; + + err = led_classdev_register(dev->dev->dev, &led->led_dev); + if (err) { + b43warn(dev->wl, "LEDs: Failed to register %s\n", name); + led->dev = NULL; + return err; + } + return 0; +} + +static void b43_unregister_led(struct b43_led *led) +{ + if (!led->dev) + return; + led_classdev_unregister(&led->led_dev); + b43_led_turn_off(led->dev, led->index, led->activelow); + led->dev = NULL; +} + +static void b43_map_led(struct b43_wldev *dev, + u8 led_index, + enum b43_led_behaviour behaviour, + bool activelow) +{ + struct ieee80211_hw *hw = dev->wl->hw; + char name[B43_LED_MAX_NAME_LEN + 1]; + + /* Map the b43 specific LED behaviour value to the + * generic LED triggers. 
*/ + switch (behaviour) { + case B43_LED_INACTIVE: + break; + case B43_LED_OFF: + b43_led_turn_off(dev, led_index, activelow); + break; + case B43_LED_ON: + b43_led_turn_on(dev, led_index, activelow); + break; + case B43_LED_ACTIVITY: + case B43_LED_TRANSFER: + case B43_LED_APTRANSFER: + snprintf(name, sizeof(name), + "b43-%s:tx", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_tx, name, + ieee80211_get_tx_led_name(hw), + led_index, activelow); + snprintf(name, sizeof(name), + "b43-%s:rx", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_rx, name, + ieee80211_get_rx_led_name(hw), + led_index, activelow); + break; + case B43_LED_RADIO_ALL: + case B43_LED_RADIO_A: + case B43_LED_RADIO_B: + case B43_LED_MODE_BG: + snprintf(name, sizeof(name), + "b43-%s:radio", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_radio, name, + b43_rfkill_led_name(dev), + led_index, activelow); + break; + case B43_LED_WEIRD: + case B43_LED_ASSOC: + snprintf(name, sizeof(name), + "b43-%s:assoc", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_assoc, name, + ieee80211_get_assoc_led_name(hw), + led_index, activelow); + break; + default: + b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n", + behaviour); + break; + } +} + +void b43_leds_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + u8 sprom[4]; + int i; + enum b43_led_behaviour behaviour; + bool activelow; + + sprom[0] = bus->sprom.r1.gpio0; + sprom[1] = bus->sprom.r1.gpio1; + sprom[2] = bus->sprom.r1.gpio2; + sprom[3] = bus->sprom.r1.gpio3; + + for (i = 0; i < 4; i++) { + if (sprom[i] == 0xFF) { + /* There is no LED information in the SPROM + * for this LED. Hardcode it here. */ + activelow = 0; + switch (i) { + case 0: + behaviour = B43_LED_ACTIVITY; + activelow = 1; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) + behaviour = B43_LED_RADIO_ALL; + break; + case 1: + behaviour = B43_LED_RADIO_B; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) + behaviour = B43_LED_ASSOC; + break; + case 2: + behaviour = B43_LED_RADIO_A; + break; + case 3: + behaviour = B43_LED_OFF; + break; + default: + B43_WARN_ON(1); + return; + } + } else { + behaviour = sprom[i] & B43_LED_BEHAVIOUR; + activelow = !!(sprom[i] & B43_LED_ACTIVELOW); + } + b43_map_led(dev, i, behaviour, activelow); + } +} + +void b43_leds_exit(struct b43_wldev *dev) +{ + b43_unregister_led(&dev->led_tx); + b43_unregister_led(&dev->led_rx); + b43_unregister_led(&dev->led_assoc); +} diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h new file mode 100644 index 000000000000..b8b1dd521243 --- /dev/null +++ b/drivers/net/wireless/b43/leds.h @@ -0,0 +1,64 @@ +#ifndef B43_LEDS_H_ +#define B43_LEDS_H_ + +struct b43_wldev; + +#ifdef CONFIG_B43_LEDS + +#include <linux/types.h> +#include <linux/leds.h> + + +#define B43_LED_MAX_NAME_LEN 31 + +struct b43_led { + struct b43_wldev *dev; + /* The LED class device */ + struct led_classdev led_dev; + /* The index number of the LED. */ + u8 index; + /* If activelow is true, the LED is ON if the + * bit is switched off. */ + bool activelow; + /* The unique name string for this LED device. 
*/ + char name[B43_LED_MAX_NAME_LEN + 1]; +}; + +#define B43_LED_BEHAVIOUR 0x7F +#define B43_LED_ACTIVELOW 0x80 +/* LED behaviour values */ +enum b43_led_behaviour { + B43_LED_OFF, + B43_LED_ON, + B43_LED_ACTIVITY, + B43_LED_RADIO_ALL, + B43_LED_RADIO_A, + B43_LED_RADIO_B, + B43_LED_MODE_BG, + B43_LED_TRANSFER, + B43_LED_APTRANSFER, + B43_LED_WEIRD, //FIXME + B43_LED_ASSOC, + B43_LED_INACTIVE, +}; + +void b43_leds_init(struct b43_wldev *dev); +void b43_leds_exit(struct b43_wldev *dev); + + +#else /* CONFIG_B43_LEDS */ +/* LED support disabled */ + +struct b43_led { + /* empty */ +}; + +static inline void b43_leds_init(struct b43_wldev *dev) +{ +} +static inline void b43_leds_exit(struct b43_wldev *dev) +{ +} +#endif /* CONFIG_B43_LEDS */ + +#endif /* B43_LEDS_H_ */ diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c new file mode 100644 index 000000000000..b14a1753a0d7 --- /dev/null +++ b/drivers/net/wireless/b43/lo.c @@ -0,0 +1,1261 @@ +/* + + Broadcom B43 wireless driver + + G PHY LO (LocalOscillator) Measuring and Control routines + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Copyright (c) 2005, 2006 Stefano Brivio <st3@riseup.net> + Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de> + Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> + Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "lo.h" +#include "phy.h" +#include "main.h" + +#include <linux/delay.h> +#include <linux/sched.h> + + +/* Define to 1 to always calibrate all possible LO control pairs. + * This is a workaround until we fix the partial LO calibration optimization. */ +#define B43_CALIB_ALL_LOCTLS 1 + + +/* Write the LocalOscillator Control (adjust) value-pair. */ +static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control) +{ + struct b43_phy *phy = &dev->phy; + u16 value; + u16 reg; + + if (B43_DEBUG) { + if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) { + b43dbg(dev->wl, "Invalid LO control pair " + "(I: %d, Q: %d)\n", control->i, control->q); + dump_stack(); + return; + } + } + + value = (u8) (control->q); + value |= ((u8) (control->i)) << 8; + + reg = (phy->type == B43_PHYTYPE_B) ? 0x002F : B43_PHY_LO_CTL; + b43_phy_write(dev, reg, value); +} + +static int assert_rfatt_and_bbatt(const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt, + struct b43_wldev *dev) +{ + int err = 0; + + /* Check the attenuation values against the LO control array sizes. 
*/ + if (unlikely(rfatt->att >= B43_NR_RF)) { + b43err(dev->wl, "rfatt(%u) >= size of LO array\n", rfatt->att); + err = -EINVAL; + } + if (unlikely(bbatt->att >= B43_NR_BB)) { + b43err(dev->wl, "bbatt(%u) >= size of LO array\n", bbatt->att); + err = -EINVAL; + } + + return err; +} + +#if !B43_CALIB_ALL_LOCTLS +static +struct b43_loctl *b43_get_lo_g_ctl_nopadmix(struct b43_wldev *dev, + const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + if (assert_rfatt_and_bbatt(rfatt, bbatt, dev)) + return &(lo->no_padmix[0][0]); /* Just prevent a crash */ + return &(lo->no_padmix[bbatt->att][rfatt->att]); +} +#endif /* !B43_CALIB_ALL_LOCTLS */ + +struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev, + const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + if (assert_rfatt_and_bbatt(rfatt, bbatt, dev)) + return &(lo->no_padmix[0][0]); /* Just prevent a crash */ + if (rfatt->with_padmix) + return &(lo->with_padmix[bbatt->att][rfatt->att]); + return &(lo->no_padmix[bbatt->att][rfatt->att]); +} + +/* Call a function for every possible LO control value-pair. */ +static void b43_call_for_each_loctl(struct b43_wldev *dev, + void (*func) (struct b43_wldev *, + struct b43_loctl *)) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *ctl = phy->lo_control; + int i, j; + + for (i = 0; i < B43_NR_BB; i++) { + for (j = 0; j < B43_NR_RF; j++) + func(dev, &(ctl->with_padmix[i][j])); + } + for (i = 0; i < B43_NR_BB; i++) { + for (j = 0; j < B43_NR_RF; j++) + func(dev, &(ctl->no_padmix[i][j])); + } +} + +static u16 lo_b_r15_loop(struct b43_wldev *dev) +{ + int i; + u16 ret = 0; + + for (i = 0; i < 10; i++) { + b43_phy_write(dev, 0x0015, 0xAFA0); + udelay(1); + b43_phy_write(dev, 0x0015, 0xEFA0); + udelay(10); + b43_phy_write(dev, 0x0015, 0xFFA0); + udelay(40); + ret += b43_phy_read(dev, 0x002C); + } + + return ret; +} + +void b43_lo_b_measure(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 regstack[12] = { 0 }; + u16 mls; + u16 fval; + int i, j; + + regstack[0] = b43_phy_read(dev, 0x0015); + regstack[1] = b43_radio_read16(dev, 0x0052) & 0xFFF0; + + if (phy->radio_ver == 0x2053) { + regstack[2] = b43_phy_read(dev, 0x000A); + regstack[3] = b43_phy_read(dev, 0x002A); + regstack[4] = b43_phy_read(dev, 0x0035); + regstack[5] = b43_phy_read(dev, 0x0003); + regstack[6] = b43_phy_read(dev, 0x0001); + regstack[7] = b43_phy_read(dev, 0x0030); + + regstack[8] = b43_radio_read16(dev, 0x0043); + regstack[9] = b43_radio_read16(dev, 0x007A); + regstack[10] = b43_read16(dev, 0x03EC); + regstack[11] = b43_radio_read16(dev, 0x0052) & 0x00F0; + + b43_phy_write(dev, 0x0030, 0x00FF); + b43_write16(dev, 0x03EC, 0x3F3F); + b43_phy_write(dev, 0x0035, regstack[4] & 0xFF7F); + b43_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0); + } + b43_phy_write(dev, 0x0015, 0xB000); + b43_phy_write(dev, 0x002B, 0x0004); + + if (phy->radio_ver == 0x2053) { + b43_phy_write(dev, 0x002B, 0x0203); + b43_phy_write(dev, 0x002A, 0x08A3); + } + + phy->minlowsig[0] = 0xFFFF; + + for (i = 0; i < 4; i++) { + b43_radio_write16(dev, 0x0052, regstack[1] | i); + lo_b_r15_loop(dev); + } + for (i = 0; i < 10; i++) { + b43_radio_write16(dev, 0x0052, regstack[1] | i); + mls = lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[0]) { + phy->minlowsig[0] = mls; + phy->minlowsigpos[0] = i; + } + } + b43_radio_write16(dev, 
0x0052, regstack[1] | phy->minlowsigpos[0]); + + phy->minlowsig[1] = 0xFFFF; + + for (i = -4; i < 5; i += 2) { + for (j = -4; j < 5; j += 2) { + if (j < 0) + fval = (0x0100 * i) + j + 0x0100; + else + fval = (0x0100 * i) + j; + b43_phy_write(dev, 0x002F, fval); + mls = lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[1]) { + phy->minlowsig[1] = mls; + phy->minlowsigpos[1] = fval; + } + } + } + phy->minlowsigpos[1] += 0x0101; + + b43_phy_write(dev, 0x002F, phy->minlowsigpos[1]); + if (phy->radio_ver == 0x2053) { + b43_phy_write(dev, 0x000A, regstack[2]); + b43_phy_write(dev, 0x002A, regstack[3]); + b43_phy_write(dev, 0x0035, regstack[4]); + b43_phy_write(dev, 0x0003, regstack[5]); + b43_phy_write(dev, 0x0001, regstack[6]); + b43_phy_write(dev, 0x0030, regstack[7]); + + b43_radio_write16(dev, 0x0043, regstack[8]); + b43_radio_write16(dev, 0x007A, regstack[9]); + + b43_radio_write16(dev, 0x0052, + (b43_radio_read16(dev, 0x0052) & 0x000F) + | regstack[11]); + + b43_write16(dev, 0x03EC, regstack[10]); + } + b43_phy_write(dev, 0x0015, regstack[0]); +} + +static u16 lo_measure_feedthrough(struct b43_wldev *dev, + u16 lna, u16 pga, u16 trsw_rx) +{ + struct b43_phy *phy = &dev->phy; + u16 rfover; + u16 feedthrough; + + if (phy->gmode) { + lna <<= B43_PHY_RFOVERVAL_LNA_SHIFT; + pga <<= B43_PHY_RFOVERVAL_PGA_SHIFT; + + B43_WARN_ON(lna & ~B43_PHY_RFOVERVAL_LNA); + B43_WARN_ON(pga & ~B43_PHY_RFOVERVAL_PGA); +/*FIXME This assertion fails B43_WARN_ON(trsw_rx & ~(B43_PHY_RFOVERVAL_TRSWRX | + B43_PHY_RFOVERVAL_BW)); +*/ + trsw_rx &= (B43_PHY_RFOVERVAL_TRSWRX | B43_PHY_RFOVERVAL_BW); + + /* Construct the RF Override Value */ + rfover = B43_PHY_RFOVERVAL_UNK; + rfover |= pga; + rfover |= lna; + rfover |= trsw_rx; + if ((dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_EXTLNA) && + phy->rev > 6) + rfover |= B43_PHY_RFOVERVAL_EXTLNA; + + b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + rfover |= B43_PHY_RFOVERVAL_BW_LBW; + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + rfover |= B43_PHY_RFOVERVAL_BW_LPF; + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + b43_phy_write(dev, B43_PHY_PGACTL, 0xF300); + } else { + pga |= B43_PHY_PGACTL_UNKNOWN; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + udelay(10); + pga |= B43_PHY_PGACTL_LOWBANDW; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + udelay(10); + pga |= B43_PHY_PGACTL_LPF; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + } + udelay(21); + feedthrough = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + + /* This is a good place to check if we need to relax a bit, + * as this is the main function called regularly + * in the LO calibration. */ + cond_resched(); + + return feedthrough; +} + +/* TXCTL Register and Value Table. + * Returns the "TXCTL Register". + * "value" is the "TXCTL Value". + * "pad_mix_gain" is the PAD Mixer Gain. 
+ */ +static u16 lo_txctl_register_table(struct b43_wldev *dev, + u16 * value, u16 * pad_mix_gain) +{ + struct b43_phy *phy = &dev->phy; + u16 reg, v, padmix; + + if (phy->type == B43_PHYTYPE_B) { + v = 0x30; + if (phy->radio_rev <= 5) { + reg = 0x43; + padmix = 0; + } else { + reg = 0x52; + padmix = 5; + } + } else { + if (phy->rev >= 2 && phy->radio_rev == 8) { + reg = 0x43; + v = 0x10; + padmix = 2; + } else { + reg = 0x52; + v = 0x30; + padmix = 5; + } + } + if (value) + *value = v; + if (pad_mix_gain) + *pad_mix_gain = padmix; + + return reg; +} + +static void lo_measure_txctl_values(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 reg, mask; + u16 trsw_rx, pga; + u16 radio_pctl_reg; + + static const u8 tx_bias_values[] = { + 0x09, 0x08, 0x0A, 0x01, 0x00, + 0x02, 0x05, 0x04, 0x06, + }; + static const u8 tx_magn_values[] = { + 0x70, 0x40, + }; + + if (!has_loopback_gain(phy)) { + radio_pctl_reg = 6; + trsw_rx = 2; + pga = 0; + } else { + int lb_gain; /* Loopback gain (in dB) */ + + trsw_rx = 0; + lb_gain = phy->max_lb_gain / 2; + if (lb_gain > 10) { + radio_pctl_reg = 0; + pga = abs(10 - lb_gain) / 6; + pga = limit_value(pga, 0, 15); + } else { + int cmp_val; + int tmp; + + pga = 0; + cmp_val = 0x24; + if ((phy->rev >= 2) && + (phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) + cmp_val = 0x3C; + tmp = lb_gain; + if ((10 - lb_gain) < cmp_val) + tmp = (10 - lb_gain); + if (tmp < 0) + tmp += 6; + else + tmp += 3; + cmp_val /= 4; + tmp /= 4; + if (tmp >= cmp_val) + radio_pctl_reg = cmp_val; + else + radio_pctl_reg = tmp; + } + } + b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | radio_pctl_reg); + b43_phy_set_baseband_attenuation(dev, 2); + + reg = lo_txctl_register_table(dev, &mask, NULL); + mask = ~mask; + b43_radio_write16(dev, reg, b43_radio_read16(dev, reg) + & mask); + + if (has_tx_magnification(phy)) { + int i, j; + int feedthrough; + int min_feedth = 0xFFFF; + u8 tx_magn, tx_bias; + + for (i = 0; i < ARRAY_SIZE(tx_magn_values); i++) { + tx_magn = tx_magn_values[i]; + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFF0F) | tx_magn); + for (j = 0; j < ARRAY_SIZE(tx_bias_values); j++) { + tx_bias = tx_bias_values[j]; + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFFF0) | tx_bias); + feedthrough = + lo_measure_feedthrough(dev, 0, pga, + trsw_rx); + if (feedthrough < min_feedth) { + lo->tx_bias = tx_bias; + lo->tx_magn = tx_magn; + min_feedth = feedthrough; + } + if (lo->tx_bias == 0) + break; + } + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFF00) | lo->tx_bias | lo-> + tx_magn); + } + } else { + lo->tx_magn = 0; + lo->tx_bias = 0; + b43_radio_write16(dev, 0x52, b43_radio_read16(dev, 0x52) + & 0xFFF0); /* TX bias == 0 */ + } +} + +static void lo_read_power_vector(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 i; + u64 tmp; + u64 power_vector = 0; + int rf_offset, bb_offset; + struct b43_loctl *loctl; + + for (i = 0; i < 8; i += 2) { + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i); + /* Clear the top byte. We get holes in the bitmap... */ + tmp &= 0xFF; + power_vector |= (tmp << (i * 8)); + /* Clear the vector on the device. 
*/ + b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0); + } + + if (power_vector) + lo->power_vector = power_vector; + power_vector = lo->power_vector; + + for (i = 0; i < 64; i++) { + if (power_vector & ((u64) 1ULL << i)) { + /* Now figure out which b43_loctl corresponds + * to this bit. + */ + rf_offset = i / lo->rfatt_list.len; + bb_offset = i % lo->rfatt_list.len; //FIXME? + loctl = + b43_get_lo_g_ctl(dev, + &lo->rfatt_list.list[rf_offset], + &lo->bbatt_list.list[bb_offset]); + /* And mark it as "used", as the device told us + * through the bitmap it is using it. + */ + loctl->used = 1; + } + } +} + +/* 802.11/LO/GPHY/MeasuringGains */ +static void lo_measure_gain_values(struct b43_wldev *dev, + s16 max_rx_gain, int use_trsw_rx) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + + if (max_rx_gain < 0) + max_rx_gain = 0; + + if (has_loopback_gain(phy)) { + int trsw_rx = 0; + int trsw_rx_gain; + + if (use_trsw_rx) { + trsw_rx_gain = phy->trsw_rx_gain / 2; + if (max_rx_gain >= trsw_rx_gain) { + trsw_rx_gain = max_rx_gain - trsw_rx_gain; + trsw_rx = 0x20; + } + } else + trsw_rx_gain = max_rx_gain; + if (trsw_rx_gain < 9) { + phy->lna_lod_gain = 0; + } else { + phy->lna_lod_gain = 1; + trsw_rx_gain -= 8; + } + trsw_rx_gain = limit_value(trsw_rx_gain, 0, 0x2D); + phy->pga_gain = trsw_rx_gain / 3; + if (phy->pga_gain >= 5) { + phy->pga_gain -= 5; + phy->lna_gain = 2; + } else + phy->lna_gain = 0; + } else { + phy->lna_gain = 0; + phy->trsw_rx_gain = 0x20; + if (max_rx_gain >= 0x14) { + phy->lna_lod_gain = 1; + phy->pga_gain = 2; + } else if (max_rx_gain >= 0x12) { + phy->lna_lod_gain = 1; + phy->pga_gain = 1; + } else if (max_rx_gain >= 0xF) { + phy->lna_lod_gain = 1; + phy->pga_gain = 0; + } else { + phy->lna_lod_gain = 0; + phy->pga_gain = 0; + } + } + + tmp = b43_radio_read16(dev, 0x7A); + if (phy->lna_lod_gain == 0) + tmp &= ~0x0008; + else + tmp |= 0x0008; + b43_radio_write16(dev, 0x7A, tmp); +} + +struct lo_g_saved_values { + u8 old_channel; + + /* Core registers */ + u16 reg_3F4; + u16 reg_3E2; + + /* PHY registers */ + u16 phy_lo_mask; + u16 phy_extg_01; + u16 phy_dacctl_hwpctl; + u16 phy_dacctl; + u16 phy_base_14; + u16 phy_hpwr_tssictl; + u16 phy_analogover; + u16 phy_analogoverval; + u16 phy_rfover; + u16 phy_rfoverval; + u16 phy_classctl; + u16 phy_base_3E; + u16 phy_crs0; + u16 phy_pgactl; + u16 phy_base_2A; + u16 phy_syncctl; + u16 phy_base_30; + u16 phy_base_06; + + /* Radio registers */ + u16 radio_43; + u16 radio_7A; + u16 radio_52; +}; + +static void lo_measure_setup(struct b43_wldev *dev, + struct lo_g_saved_values *sav) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 tmp; + + if (b43_has_hardware_pctl(phy)) { + sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); + sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01)); + sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL); + sav->phy_base_14 = b43_phy_read(dev, B43_PHY_BASE(0x14)); + sav->phy_hpwr_tssictl = b43_phy_read(dev, B43_PHY_HPWR_TSSICTL); + + b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, + b43_phy_read(dev, B43_PHY_HPWR_TSSICTL) + | 0x100); + b43_phy_write(dev, B43_PHY_EXTG(0x01), + b43_phy_read(dev, B43_PHY_EXTG(0x01)) + | 0x40); + b43_phy_write(dev, B43_PHY_DACCTL, + b43_phy_read(dev, B43_PHY_DACCTL) + | 0x40); + b43_phy_write(dev, B43_PHY_BASE(0x14), + b43_phy_read(dev, B43_PHY_BASE(0x14)) + | 0x200); + } + if (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev < 6) { + 
b43_phy_write(dev, B43_PHY_BASE(0x16), 0x410); + b43_phy_write(dev, B43_PHY_BASE(0x17), 0x820); + } + if (!lo->rebuild && b43_has_hardware_pctl(phy)) + lo_read_power_vector(dev); + if (phy->rev >= 2) { + sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); + sav->phy_analogoverval = + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + sav->phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); + sav->phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + sav->phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); + sav->phy_base_3E = b43_phy_read(dev, B43_PHY_BASE(0x3E)); + sav->phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); + + b43_phy_write(dev, B43_PHY_CLASSCTL, + b43_phy_read(dev, B43_PHY_CLASSCTL) + & 0xFFFC); + b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) + | 0x0003); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL) + & 0xFFFC); + if (phy->type == B43_PHYTYPE_G) { + if ((phy->rev >= 7) && + (sprom->r1.boardflags_lo & B43_BFL_EXTLNA)) { + b43_phy_write(dev, B43_PHY_RFOVER, 0x933); + } else { + b43_phy_write(dev, B43_PHY_RFOVER, 0x133); + } + } else { + b43_phy_write(dev, B43_PHY_RFOVER, 0); + } + b43_phy_write(dev, B43_PHY_BASE(0x3E), 0); + } + sav->reg_3F4 = b43_read16(dev, 0x3F4); + sav->reg_3E2 = b43_read16(dev, 0x3E2); + sav->radio_43 = b43_radio_read16(dev, 0x43); + sav->radio_7A = b43_radio_read16(dev, 0x7A); + sav->phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); + sav->phy_base_2A = b43_phy_read(dev, B43_PHY_BASE(0x2A)); + sav->phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); + sav->phy_dacctl = b43_phy_read(dev, B43_PHY_DACCTL); + + if (!has_tx_magnification(phy)) { + sav->radio_52 = b43_radio_read16(dev, 0x52); + sav->radio_52 &= 0x00F0; + } + if (phy->type == B43_PHYTYPE_B) { + sav->phy_base_30 = b43_phy_read(dev, B43_PHY_BASE(0x30)); + sav->phy_base_06 = b43_phy_read(dev, B43_PHY_BASE(0x06)); + b43_phy_write(dev, B43_PHY_BASE(0x30), 0x00FF); + b43_phy_write(dev, B43_PHY_BASE(0x06), 0x3F3F); + } else { + b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) + | 0x8000); + } + b43_write16(dev, 0x3F4, b43_read16(dev, 0x3F4) + & 0xF000); + + tmp = + (phy->type == B43_PHYTYPE_G) ? 
B43_PHY_LO_MASK : B43_PHY_BASE(0x2E); + b43_phy_write(dev, tmp, 0x007F); + + tmp = sav->phy_syncctl; + b43_phy_write(dev, B43_PHY_SYNCCTL, tmp & 0xFF7F); + tmp = sav->radio_7A; + b43_radio_write16(dev, 0x007A, tmp & 0xFFF0); + + b43_phy_write(dev, B43_PHY_BASE(0x2A), 0x8A3); + if (phy->type == B43_PHYTYPE_G || + (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev >= 6)) { + b43_phy_write(dev, B43_PHY_BASE(0x2B), 0x1003); + } else + b43_phy_write(dev, B43_PHY_BASE(0x2B), 0x0802); + if (phy->rev >= 2) + b43_dummy_transmission(dev); + b43_radio_selectchannel(dev, 6, 0); + b43_radio_read16(dev, 0x51); /* dummy read */ + if (phy->type == B43_PHYTYPE_G) + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0); + if (lo->rebuild) + lo_measure_txctl_values(dev); + if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) { + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078); + } else { + if (phy->type == B43_PHYTYPE_B) + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x8078); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); + } +} + +static void lo_measure_restore(struct b43_wldev *dev, + struct lo_g_saved_values *sav) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 tmp; + + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); + tmp = (phy->pga_gain << 8); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0); + udelay(5); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2); + udelay(2); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3); + } else { + tmp = (phy->pga_gain | 0xEFA0); + b43_phy_write(dev, B43_PHY_PGACTL, tmp); + } + if (b43_has_hardware_pctl(phy)) { + b43_gphy_dc_lt_init(dev); + } else { + if (lo->rebuild) + b43_lo_g_adjust_to(dev, 3, 2, 0); + else + b43_lo_g_adjust(dev); + } + if (phy->type == B43_PHYTYPE_G) { + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0xC078); + else + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x8078); + if (phy->rev >= 2) + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x0202); + else + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x0101); + } + b43_write16(dev, 0x3F4, sav->reg_3F4); + b43_phy_write(dev, B43_PHY_PGACTL, sav->phy_pgactl); + b43_phy_write(dev, B43_PHY_BASE(0x2A), sav->phy_base_2A); + b43_phy_write(dev, B43_PHY_SYNCCTL, sav->phy_syncctl); + b43_phy_write(dev, B43_PHY_DACCTL, sav->phy_dacctl); + b43_radio_write16(dev, 0x43, sav->radio_43); + b43_radio_write16(dev, 0x7A, sav->radio_7A); + if (!has_tx_magnification(phy)) { + tmp = sav->radio_52; + b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) + & 0xFF0F) | tmp); + } + b43_write16(dev, 0x3E2, sav->reg_3E2); + if (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev <= 5) { + b43_phy_write(dev, B43_PHY_BASE(0x30), sav->phy_base_30); + b43_phy_write(dev, B43_PHY_BASE(0x06), sav->phy_base_06); + } + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_ANALOGOVER, sav->phy_analogover); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + sav->phy_analogoverval); + b43_phy_write(dev, B43_PHY_CLASSCTL, sav->phy_classctl); + b43_phy_write(dev, B43_PHY_RFOVER, sav->phy_rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, sav->phy_rfoverval); + b43_phy_write(dev, B43_PHY_BASE(0x3E), sav->phy_base_3E); + b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0); + } + if (b43_has_hardware_pctl(phy)) { + tmp = (sav->phy_lo_mask & 0xBFFF); + b43_phy_write(dev, B43_PHY_LO_MASK, tmp); + b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01); + b43_phy_write(dev, B43_PHY_DACCTL, sav->phy_dacctl_hwpctl); + b43_phy_write(dev, B43_PHY_BASE(0x14), 
sav->phy_base_14); + b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); + } + b43_radio_selectchannel(dev, sav->old_channel, 1); +} + +struct b43_lo_g_statemachine { + int current_state; + int nr_measured; + int state_val_multiplier; + u16 lowest_feedth; + struct b43_loctl min_loctl; +}; + +/* Loop over each possible value in this state. */ +static int lo_probe_possible_loctls(struct b43_wldev *dev, + struct b43_loctl *probe_loctl, + struct b43_lo_g_statemachine *d) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_loctl test_loctl; + struct b43_loctl orig_loctl; + struct b43_loctl prev_loctl = { + .i = -100, + .q = -100, + }; + int i; + int begin, end; + int found_lower = 0; + u16 feedth; + + static const struct b43_loctl modifiers[] = { + {.i = 1,.q = 1,}, + {.i = 1,.q = 0,}, + {.i = 1,.q = -1,}, + {.i = 0,.q = -1,}, + {.i = -1,.q = -1,}, + {.i = -1,.q = 0,}, + {.i = -1,.q = 1,}, + {.i = 0,.q = 1,}, + }; + + if (d->current_state == 0) { + begin = 1; + end = 8; + } else if (d->current_state % 2 == 0) { + begin = d->current_state - 1; + end = d->current_state + 1; + } else { + begin = d->current_state - 2; + end = d->current_state + 2; + } + if (begin < 1) + begin += 8; + if (end > 8) + end -= 8; + + memcpy(&orig_loctl, probe_loctl, sizeof(struct b43_loctl)); + i = begin; + d->current_state = i; + while (1) { + B43_WARN_ON(!(i >= 1 && i <= 8)); + memcpy(&test_loctl, &orig_loctl, sizeof(struct b43_loctl)); + test_loctl.i += modifiers[i - 1].i * d->state_val_multiplier; + test_loctl.q += modifiers[i - 1].q * d->state_val_multiplier; + if ((test_loctl.i != prev_loctl.i || + test_loctl.q != prev_loctl.q) && + (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) { + b43_lo_write(dev, &test_loctl); + feedth = lo_measure_feedthrough(dev, phy->lna_gain, + phy->pga_gain, + phy->trsw_rx_gain); + if (feedth < d->lowest_feedth) { + memcpy(probe_loctl, &test_loctl, + sizeof(struct b43_loctl)); + found_lower = 1; + d->lowest_feedth = feedth; + if ((d->nr_measured < 2) && + (!has_loopback_gain(phy) || lo->rebuild)) + break; + } + } + memcpy(&prev_loctl, &test_loctl, sizeof(prev_loctl)); + if (i == end) + break; + if (i == 8) + i = 1; + else + i++; + d->current_state = i; + } + + return found_lower; +} + +static void lo_probe_loctls_statemachine(struct b43_wldev *dev, + struct b43_loctl *loctl, + int *max_rx_gain) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_lo_g_statemachine d; + u16 feedth; + int found_lower; + struct b43_loctl probe_loctl; + int max_repeat = 1, repeat_cnt = 0; + + d.nr_measured = 0; + d.state_val_multiplier = 1; + if (has_loopback_gain(phy) && !lo->rebuild) + d.state_val_multiplier = 3; + + memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl)); + if (has_loopback_gain(phy) && lo->rebuild) + max_repeat = 4; + do { + b43_lo_write(dev, &d.min_loctl); + feedth = lo_measure_feedthrough(dev, phy->lna_gain, + phy->pga_gain, + phy->trsw_rx_gain); + if (!lo->rebuild && feedth < 0x258) { + if (feedth >= 0x12C) + *max_rx_gain += 6; + else + *max_rx_gain += 3; + feedth = lo_measure_feedthrough(dev, phy->lna_gain, + phy->pga_gain, + phy->trsw_rx_gain); + } + d.lowest_feedth = feedth; + + d.current_state = 0; + do { + B43_WARN_ON(! 
+ (d.current_state >= 0 + && d.current_state <= 8)); + memcpy(&probe_loctl, &d.min_loctl, + sizeof(struct b43_loctl)); + found_lower = + lo_probe_possible_loctls(dev, &probe_loctl, &d); + if (!found_lower) + break; + if ((probe_loctl.i == d.min_loctl.i) && + (probe_loctl.q == d.min_loctl.q)) + break; + memcpy(&d.min_loctl, &probe_loctl, + sizeof(struct b43_loctl)); + d.nr_measured++; + } while (d.nr_measured < 24); + memcpy(loctl, &d.min_loctl, sizeof(struct b43_loctl)); + + if (has_loopback_gain(phy)) { + if (d.lowest_feedth > 0x1194) + *max_rx_gain -= 6; + else if (d.lowest_feedth < 0x5DC) + *max_rx_gain += 3; + if (repeat_cnt == 0) { + if (d.lowest_feedth <= 0x5DC) { + d.state_val_multiplier = 1; + repeat_cnt++; + } else + d.state_val_multiplier = 2; + } else if (repeat_cnt == 2) + d.state_val_multiplier = 1; + } + lo_measure_gain_values(dev, *max_rx_gain, + has_loopback_gain(phy)); + } while (++repeat_cnt < max_repeat); +} + +#if B43_CALIB_ALL_LOCTLS +static const struct b43_rfatt b43_full_rfatt_list_items[] = { + { .att = 0, .with_padmix = 0, }, + { .att = 1, .with_padmix = 0, }, + { .att = 2, .with_padmix = 0, }, + { .att = 3, .with_padmix = 0, }, + { .att = 4, .with_padmix = 0, }, + { .att = 5, .with_padmix = 0, }, + { .att = 6, .with_padmix = 0, }, + { .att = 7, .with_padmix = 0, }, + { .att = 8, .with_padmix = 0, }, + { .att = 9, .with_padmix = 0, }, + { .att = 10, .with_padmix = 0, }, + { .att = 11, .with_padmix = 0, }, + { .att = 12, .with_padmix = 0, }, + { .att = 13, .with_padmix = 0, }, + { .att = 14, .with_padmix = 0, }, + { .att = 15, .with_padmix = 0, }, + { .att = 0, .with_padmix = 1, }, + { .att = 1, .with_padmix = 1, }, + { .att = 2, .with_padmix = 1, }, + { .att = 3, .with_padmix = 1, }, + { .att = 4, .with_padmix = 1, }, + { .att = 5, .with_padmix = 1, }, + { .att = 6, .with_padmix = 1, }, + { .att = 7, .with_padmix = 1, }, + { .att = 8, .with_padmix = 1, }, + { .att = 9, .with_padmix = 1, }, + { .att = 10, .with_padmix = 1, }, + { .att = 11, .with_padmix = 1, }, + { .att = 12, .with_padmix = 1, }, + { .att = 13, .with_padmix = 1, }, + { .att = 14, .with_padmix = 1, }, + { .att = 15, .with_padmix = 1, }, +}; +static const struct b43_rfatt_list b43_full_rfatt_list = { + .list = b43_full_rfatt_list_items, + .len = ARRAY_SIZE(b43_full_rfatt_list_items), +}; + +static const struct b43_bbatt b43_full_bbatt_list_items[] = { + { .att = 0, }, + { .att = 1, }, + { .att = 2, }, + { .att = 3, }, + { .att = 4, }, + { .att = 5, }, + { .att = 6, }, + { .att = 7, }, + { .att = 8, }, + { .att = 9, }, + { .att = 10, }, + { .att = 11, }, +}; +static const struct b43_bbatt_list b43_full_bbatt_list = { + .list = b43_full_bbatt_list_items, + .len = ARRAY_SIZE(b43_full_bbatt_list_items), +}; +#endif /* B43_CALIB_ALL_LOCTLS */ + +static void lo_measure(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_loctl loctl = { + .i = 0, + .q = 0, + }; + struct b43_loctl *ploctl; + int max_rx_gain; + int rfidx, bbidx; + const struct b43_bbatt_list *bbatt_list; + const struct b43_rfatt_list *rfatt_list; + + /* Values from the "TXCTL Register and Value Table" */ + u16 txctl_reg; + u16 txctl_value; + u16 pad_mix_gain; + + bbatt_list = &lo->bbatt_list; + rfatt_list = &lo->rfatt_list; +#if B43_CALIB_ALL_LOCTLS + bbatt_list = &b43_full_bbatt_list; + rfatt_list = &b43_full_rfatt_list; +#endif + + txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain); + + for (rfidx = 0; rfidx < rfatt_list->len; rfidx++) { + + 
b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | + rfatt_list->list[rfidx].att); + b43_radio_write16(dev, txctl_reg, + (b43_radio_read16(dev, txctl_reg) + & ~txctl_value) + | (rfatt_list->list[rfidx].with_padmix ? + txctl_value : 0)); + + for (bbidx = 0; bbidx < bbatt_list->len; bbidx++) { + if (lo->rebuild) { +#if B43_CALIB_ALL_LOCTLS + ploctl = b43_get_lo_g_ctl(dev, + &rfatt_list->list[rfidx], + &bbatt_list->list[bbidx]); +#else + ploctl = b43_get_lo_g_ctl_nopadmix(dev, + &rfatt_list-> + list[rfidx], + &bbatt_list-> + list[bbidx]); +#endif + } else { + ploctl = b43_get_lo_g_ctl(dev, + &rfatt_list->list[rfidx], + &bbatt_list->list[bbidx]); + if (!ploctl->used) + continue; + } + memcpy(&loctl, ploctl, sizeof(loctl)); + loctl.i = 0; + loctl.q = 0; + + max_rx_gain = rfatt_list->list[rfidx].att * 2; + max_rx_gain += bbatt_list->list[bbidx].att / 2; + if (rfatt_list->list[rfidx].with_padmix) + max_rx_gain -= pad_mix_gain; + if (has_loopback_gain(phy)) + max_rx_gain += phy->max_lb_gain; + lo_measure_gain_values(dev, max_rx_gain, + has_loopback_gain(phy)); + + b43_phy_set_baseband_attenuation(dev, + bbatt_list->list[bbidx].att); + lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain); + if (phy->type == B43_PHYTYPE_B) { + loctl.i++; + loctl.q++; + } + b43_loctl_set_calibrated(&loctl, 1); + memcpy(ploctl, &loctl, sizeof(loctl)); + } + } +} + +#if B43_DEBUG +static void do_validate_loctl(struct b43_wldev *dev, struct b43_loctl *control) +{ + const int is_initializing = (b43_status(dev) == B43_STAT_UNINIT); + int i = control->i; + int q = control->q; + + if (b43_loctl_is_calibrated(control)) { + if ((abs(i) > 16) || (abs(q) > 16)) + goto error; + } else { + if (control->used) + goto error; + if (dev->phy.lo_control->rebuild) { + control->i = 0; + control->q = 0; + if ((i != B43_LOCTL_POISON) || + (q != B43_LOCTL_POISON)) + goto error; + } + } + if (is_initializing && control->used) + goto error; + + return; +error: + b43err(dev->wl, "LO control pair validation failed " + "(I: %d, Q: %d, used %u, calib: %u, initing: %d)\n", + i, q, control->used, + b43_loctl_is_calibrated(control), + is_initializing); +} + +static void validate_all_loctls(struct b43_wldev *dev) +{ + b43_call_for_each_loctl(dev, do_validate_loctl); +} + +static void do_reset_calib(struct b43_wldev *dev, struct b43_loctl *control) +{ + if (dev->phy.lo_control->rebuild || + control->used) { + b43_loctl_set_calibrated(control, 0); + control->i = B43_LOCTL_POISON; + control->q = B43_LOCTL_POISON; + } +} + +static void reset_all_loctl_calibration_states(struct b43_wldev *dev) +{ + b43_call_for_each_loctl(dev, do_reset_calib); +} + +#else /* B43_DEBUG */ +static inline void validate_all_loctls(struct b43_wldev *dev) { } +static inline void reset_all_loctl_calibration_states(struct b43_wldev *dev) { } +#endif /* B43_DEBUG */ + +void b43_lo_g_measure(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct lo_g_saved_values uninitialized_var(sav); + + B43_WARN_ON((phy->type != B43_PHYTYPE_B) && + (phy->type != B43_PHYTYPE_G)); + + sav.old_channel = phy->channel; + lo_measure_setup(dev, &sav); + reset_all_loctl_calibration_states(dev); + lo_measure(dev); + lo_measure_restore(dev, &sav); + + validate_all_loctls(dev); + + phy->lo_control->lo_measured = 1; + phy->lo_control->rebuild = 0; +} + +#if B43_DEBUG +static void validate_loctl_calibration(struct b43_wldev *dev, + struct b43_loctl *loctl, + struct b43_rfatt *rfatt, + struct b43_bbatt *bbatt) +{ + if (b43_loctl_is_calibrated(loctl)) + return; + if 
(!dev->phy.lo_control->lo_measured) { + /* On init we set the attenuation values before we + * calibrated the LO. I guess that's OK. */ + return; + } + b43err(dev->wl, "Adjusting Local Oscillator to an uncalibrated " + "control pair: rfatt=%u,%spadmix bbatt=%u\n", + rfatt->att, + (rfatt->with_padmix) ? "" : "no-", + bbatt->att); +} +#else +static inline void validate_loctl_calibration(struct b43_wldev *dev, + struct b43_loctl *loctl, + struct b43_rfatt *rfatt, + struct b43_bbatt *bbatt) +{ +} +#endif + +static inline void fixup_rfatt_for_txcontrol(struct b43_rfatt *rf, + u8 tx_control) +{ + if (tx_control & B43_TXCTL_TXMIX) { + if (rf->att < 5) + rf->att = 4; + } +} + +void b43_lo_g_adjust(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_rfatt rf; + struct b43_loctl *loctl; + + memcpy(&rf, &phy->rfatt, sizeof(rf)); + fixup_rfatt_for_txcontrol(&rf, phy->tx_control); + + loctl = b43_get_lo_g_ctl(dev, &rf, &phy->bbatt); + validate_loctl_calibration(dev, loctl, &rf, &phy->bbatt); + b43_lo_write(dev, loctl); +} + +void b43_lo_g_adjust_to(struct b43_wldev *dev, + u16 rfatt, u16 bbatt, u16 tx_control) +{ + struct b43_rfatt rf; + struct b43_bbatt bb; + struct b43_loctl *loctl; + + memset(&rf, 0, sizeof(rf)); + memset(&bb, 0, sizeof(bb)); + rf.att = rfatt; + bb.att = bbatt; + fixup_rfatt_for_txcontrol(&rf, tx_control); + loctl = b43_get_lo_g_ctl(dev, &rf, &bb); + validate_loctl_calibration(dev, loctl, &rf, &bb); + b43_lo_write(dev, loctl); +} + +static void do_mark_unused(struct b43_wldev *dev, struct b43_loctl *control) +{ + control->used = 0; +} + +void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + b43_call_for_each_loctl(dev, do_mark_unused); + lo->rebuild = 1; +} + +void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_rfatt rf; + + memcpy(&rf, &phy->rfatt, sizeof(rf)); + fixup_rfatt_for_txcontrol(&rf, phy->tx_control); + + b43_get_lo_g_ctl(dev, &rf, &phy->bbatt)->used = 1; +} diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h new file mode 100644 index 000000000000..455615d1f8c6 --- /dev/null +++ b/drivers/net/wireless/b43/lo.h @@ -0,0 +1,112 @@ +#ifndef B43_LO_H_ +#define B43_LO_H_ + +#include "phy.h" + +struct b43_wldev; + +/* Local Oscillator control value-pair. */ +struct b43_loctl { + /* Control values. */ + s8 i; + s8 q; + /* "Used by hardware" flag. */ + bool used; +#ifdef CONFIG_B43_DEBUG + /* Is this lo-control-array entry calibrated? */ + bool calibrated; +#endif +}; + +/* Debugging: Poison value for i and q values. */ +#define B43_LOCTL_POISON 111 + +/* loctl->calibrated debugging mechanism */ +#ifdef CONFIG_B43_DEBUG +static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, + bool calibrated) +{ + loctl->calibrated = calibrated; +} +static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) +{ + return loctl->calibrated; +} +#else +static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, + bool calibrated) +{ +} +static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) +{ + return 1; +} +#endif + +/* TX Power LO Control Array. + * Value-pairs to adjust the LocalOscillator are stored + * in this structure. + * There are two different set of values. One for "Flag is Set" + * and one for "Flag is Unset". + * By "Flag" the flag in struct b43_rfatt is meant. + * The Value arrays are two-dimensional. 
The first index + * is the baseband attenuation and the second index + * is the radio attenuation. + * Use b43_get_lo_g_ctl() to retrieve a value from the lists. + */ +struct b43_txpower_lo_control { +#define B43_NR_BB 12 +#define B43_NR_RF 16 + /* LO Control values, with PAD Mixer */ + struct b43_loctl with_padmix[B43_NR_BB][B43_NR_RF]; + /* LO Control values, without PAD Mixer */ + struct b43_loctl no_padmix[B43_NR_BB][B43_NR_RF]; + + /* Flag to indicate a complete rebuild of the two tables above + * to the LO measuring code. */ + bool rebuild; + + /* Lists of valid RF and BB attenuation values for this device. */ + struct b43_rfatt_list rfatt_list; + struct b43_bbatt_list bbatt_list; + + /* Current TX Bias value */ + u8 tx_bias; + /* Current TX Magnification Value (if used by the device) */ + u8 tx_magn; + + /* GPHY LO is measured. */ + bool lo_measured; + + /* Saved device PowerVector */ + u64 power_vector; +}; + +/* Measure the BPHY Local Oscillator. */ +void b43_lo_b_measure(struct b43_wldev *dev); +/* Measure the BPHY/GPHY Local Oscillator. */ +void b43_lo_g_measure(struct b43_wldev *dev); + +/* Adjust the Local Oscillator to the saved attenuation + * and txctl values. + */ +void b43_lo_g_adjust(struct b43_wldev *dev); +/* Adjust to specific values. */ +void b43_lo_g_adjust_to(struct b43_wldev *dev, + u16 rfatt, u16 bbatt, u16 tx_control); + +/* Mark all possible b43_lo_g_ctl as "unused" */ +void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev); +/* Mark the b43_lo_g_ctl corresponding to the current + * attenuation values as used. + */ +void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev); + +/* Get a reference to a LO Control value pair in the + * TX Power LO Control Array. + */ +struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev, + const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt); + +#endif /* B43_LO_H_ */ diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c new file mode 100644 index 000000000000..c141a264ac45 --- /dev/null +++ b/drivers/net/wireless/b43/main.c @@ -0,0 +1,4070 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de> + Copyright (c) 2005 Stefano Brivio <st3@riseup.net> + Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> + Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> + Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/version.h> +#include <linux/firmware.h> +#include <linux/wireless.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <asm/unaligned.h> + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "sysfs.h" +#include "lo.h" +#include "pcmcia.h" + +MODULE_DESCRIPTION("Broadcom B43 wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_LICENSE("GPL"); + +extern char *nvram_get(char *name); + +#if defined(CONFIG_B43_DMA) && defined(CONFIG_B43_PIO) +static int modparam_pio; +module_param_named(pio, modparam_pio, int, 0444); +MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode"); +#elif defined(CONFIG_B43_DMA) +# define modparam_pio 0 +#elif defined(CONFIG_B43_PIO) +# define modparam_pio 1 +#endif + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, + "enable(1) / disable(0) Bad Frames Preemption"); + +static int modparam_short_retry = B43_DEFAULT_SHORT_RETRY_LIMIT; +module_param_named(short_retry, modparam_short_retry, int, 0444); +MODULE_PARM_DESC(short_retry, "Short-Retry-Limit (0 - 15)"); + +static int modparam_long_retry = B43_DEFAULT_LONG_RETRY_LIMIT; +module_param_named(long_retry, modparam_long_retry, int, 0444); +MODULE_PARM_DESC(long_retry, "Long-Retry-Limit (0 - 15)"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the .fw files to load."); + +static int modparam_hwpctl; +module_param_named(hwpctl, modparam_hwpctl, int, 0444); +MODULE_PARM_DESC(hwpctl, "Enable hardware-side power control (default off)"); + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static const struct ssb_device_id b43_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 7), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), + SSB_DEVTABLE_END +}; + +MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl); + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. 
This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .rate = B43_RATE_TO_BASE100KBPS(_rateid), \ + .val = (_rateid), \ + .val2 = (_rateid), \ + .flags = (_flags), \ + } +static struct ieee80211_rate __b43_ratetable[] = { + RATETAB_ENT(B43_CCK_RATE_1MB, IEEE80211_RATE_CCK), + RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43_OFDM_RATE_6MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_9MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_12MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_18MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_24MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_36MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_48MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_54MB, IEEE80211_RATE_OFDM), +}; + +#define b43_a_ratetable (__b43_ratetable + 4) +#define b43_a_ratetable_size 8 +#define b43_b_ratetable (__b43_ratetable + 0) +#define b43_b_ratetable_size 4 +#define b43_g_ratetable (__b43_ratetable + 0) +#define b43_g_ratetable_size 12 + +#define CHANTAB_ENT(_chanid, _freq) \ + { \ + .chan = (_chanid), \ + .freq = (_freq), \ + .val = (_chanid), \ + .flag = IEEE80211_CHAN_W_SCAN | \ + IEEE80211_CHAN_W_ACTIVE_SCAN | \ + IEEE80211_CHAN_W_IBSS, \ + .power_level = 0xFF, \ + .antenna_max = 0xFF, \ + } +static struct ieee80211_channel b43_bg_chantable[] = { + CHANTAB_ENT(1, 2412), + CHANTAB_ENT(2, 2417), + CHANTAB_ENT(3, 2422), + CHANTAB_ENT(4, 2427), + CHANTAB_ENT(5, 2432), + CHANTAB_ENT(6, 2437), + CHANTAB_ENT(7, 2442), + CHANTAB_ENT(8, 2447), + CHANTAB_ENT(9, 2452), + CHANTAB_ENT(10, 2457), + CHANTAB_ENT(11, 2462), + CHANTAB_ENT(12, 2467), + CHANTAB_ENT(13, 2472), + CHANTAB_ENT(14, 2484), +}; + +#define b43_bg_chantable_size ARRAY_SIZE(b43_bg_chantable) +static struct ieee80211_channel b43_a_chantable[] = { + CHANTAB_ENT(36, 5180), + CHANTAB_ENT(40, 5200), + CHANTAB_ENT(44, 5220), + CHANTAB_ENT(48, 5240), + CHANTAB_ENT(52, 5260), + CHANTAB_ENT(56, 5280), + CHANTAB_ENT(60, 5300), + CHANTAB_ENT(64, 5320), + CHANTAB_ENT(149, 5745), + CHANTAB_ENT(153, 5765), + CHANTAB_ENT(157, 5785), + CHANTAB_ENT(161, 5805), + CHANTAB_ENT(165, 5825), +}; + +#define b43_a_chantable_size ARRAY_SIZE(b43_a_chantable) + +static void b43_wireless_core_exit(struct b43_wldev *dev); +static int b43_wireless_core_init(struct b43_wldev *dev); +static void b43_wireless_core_stop(struct b43_wldev *dev); +static int b43_wireless_core_start(struct b43_wldev *dev); + +static int b43_ratelimit(struct b43_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43_status(wl->current_dev) < B43_STAT_STARTED) + return 1; + /* We are up and running. + * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43info(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43err(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43warn(struct b43_wl *wl, const char *fmt, ...) 
+{ + va_list args; + + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43-%s warning: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +#if B43_DEBUG +void b43dbg(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + printk(KERN_DEBUG "b43-%s debug: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} +#endif /* DEBUG */ + +static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) +{ + u32 macctl; + + B43_WARN_ON(offset % 4 != 0); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (macctl & B43_MACCTL_BE) + val = swab32(val); + + b43_write32(dev, B43_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43_write32(dev, B43_MMIO_RAM_DATA, val); +} + +static inline + void b43_shm_control_word(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. */ + + control = routing; + control <<= 16; + control |= offset; + b43_write32(dev, B43_MMIO_SHM_CONTROL, control); +} + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + ret <<= 16; + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + ret |= b43_read16(dev, B43_MMIO_SHM_DATA); + + return ret; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read32(dev, B43_MMIO_SHM_DATA); + + return ret; +} + +u16 b43_shm_read16(struct b43_wldev * dev, u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + + return ret; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read16(dev, B43_MMIO_SHM_DATA); + + return ret; +} + +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, + (value >> 16) & 0xffff); + mmiowb(); + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA, value & 0xffff); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + mmiowb(); + b43_write32(dev, B43_MMIO_SHM_DATA, value); +} + +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, value); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u32 b43_hf_read(struct b43_wldev * dev) +{ + u32 ret; + + ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); + ret <<= 16; + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); + + return ret; +} + +/* Write HostFlags */ +void b43_hf_write(struct b43_wldev *dev, u32 value) +{ + b43_shm_write16(dev, 
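/*
 * Editor's sketch (not part of the patch): the SHM addressing scheme used by
 * b43_shm_read32()/b43_shm_write32() above.  SHM_SHARED offsets are byte
 * offsets, while the control register takes a routing code in its upper half
 * and a 32-bit-word index in its lower half; an offset that is 2-byte- but
 * not 4-byte-aligned is serviced as two 16-bit halves which the driver
 * stitches back together.  The routing value, offsets and data below are
 * examples only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t shm_control_word(uint16_t routing, uint16_t word_index)
{
	/* Same packing as b43_shm_control_word(): routing | word offset. */
	return ((uint32_t)routing << 16) | word_index;
}

int main(void)
{
	uint16_t routing = 0x0001;		/* e.g. a shared-memory routing code */
	uint16_t byte_offset = 0x023A;		/* 2-byte aligned, not 4-byte aligned */
	uint16_t hi = 0x1234, lo = 0x5678;	/* pretend SHM_DATA_UNALIGNED / SHM_DATA reads */

	printf("control word: 0x%08X (word index %u)\n",
	       shm_control_word(routing, byte_offset >> 2), byte_offset >> 2);
	if (byte_offset & 0x0003)		/* unaligned case */
		printf("reassembled 32-bit value: 0x%08X\n",
		       ((uint32_t)hi << 16) | lo);	/* upper half, then word+1 */
	return 0;
}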
B43_SHM_SHARED, + B43_SHM_SH_HOSTFLO, (value & 0x0000FFFF)); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_HOSTFHI, ((value & 0xFFFF0000) >> 16)); +} + +void b43_tsf_read(struct b43_wldev *dev, u64 * tsf) +{ + /* We need to be careful. As we read the TSF from multiple + * registers, we should take care of register overflows. + * In theory, the whole tsf read process should be atomic. + * We try to be atomic here, by restaring the read process, + * if any of the high registers changed (overflew). + */ + if (dev->dev->id.revision >= 3) { + u32 low, high, high2; + + do { + high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); + low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW); + high2 = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); + } while (unlikely(high != high2)); + + *tsf = high; + *tsf <<= 32; + *tsf |= low; + } else { + u64 tmp; + u16 v0, v1, v2, v3; + u16 test1, test2, test3; + + do { + v3 = b43_read16(dev, B43_MMIO_TSF_3); + v2 = b43_read16(dev, B43_MMIO_TSF_2); + v1 = b43_read16(dev, B43_MMIO_TSF_1); + v0 = b43_read16(dev, B43_MMIO_TSF_0); + + test3 = b43_read16(dev, B43_MMIO_TSF_3); + test2 = b43_read16(dev, B43_MMIO_TSF_2); + test1 = b43_read16(dev, B43_MMIO_TSF_1); + } while (v3 != test3 || v2 != test2 || v1 != test1); + + *tsf = v3; + *tsf <<= 48; + tmp = v2; + tmp <<= 32; + *tsf |= tmp; + tmp = v1; + tmp <<= 16; + *tsf |= tmp; + *tsf |= v0; + } +} + +static void b43_time_lock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_time_unlock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) +{ + /* Be careful with the in-progress timer. + * First zero out the low register, so we have a full + * register-overflow duration to complete the operation. 
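/*
 * Editor's sketch (not part of the patch): the torn-read avoidance used by
 * b43_tsf_read() above.  The 64-bit TSF is exposed as two 32-bit registers,
 * so the high half is sampled before and after the low half and the read is
 * retried whenever the high half changed (i.e. the low word overflowed in
 * between).  read_tsf_high()/read_tsf_low() stand in for the MMIO reads.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tsf = 0x00000001FFFFFFFEull;	/* pretend hardware counter */

static uint32_t read_tsf_low(void)
{
	uint32_t lo = (uint32_t)fake_tsf;

	fake_tsf += 3;		/* the counter keeps ticking under us */
	return lo;
}

static uint32_t read_tsf_high(void)
{
	return (uint32_t)(fake_tsf >> 32);
}

int main(void)
{
	uint32_t high, low, high2;

	do {
		high = read_tsf_high();
		low = read_tsf_low();
		high2 = read_tsf_high();
	} while (high != high2);	/* retry if the high word rolled over */

	printf("consistent TSF: 0x%016llx\n",
	       (unsigned long long)(((uint64_t)high << 32) | low));
	return 0;
}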
+ */ + if (dev->dev->id.revision >= 3) { + u32 lo = (tsf & 0x00000000FFFFFFFFULL); + u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32; + + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, 0); + mmiowb(); + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, hi); + mmiowb(); + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, lo); + } else { + u16 v0 = (tsf & 0x000000000000FFFFULL); + u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16; + u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32; + u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48; + + b43_write16(dev, B43_MMIO_TSF_0, 0); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_3, v3); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_2, v2); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_1, v1); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_0, v0); + } +} + +void b43_tsf_write(struct b43_wldev *dev, u64 tsf) +{ + b43_time_lock(dev); + b43_tsf_write_locked(dev, tsf); + b43_time_unlock(dev); +} + +static +void b43_macfilter_set(struct b43_wldev *dev, u16 offset, const u8 * mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43_write16(dev, B43_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); +} + +static void b43_write_mac_bssid_templates(struct b43_wldev *dev) +{ + const u8 *mac; + const u8 *bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + bssid = dev->wl->bssid; + mac = dev->wl->mac_addr; + + b43_macfilter_set(dev, B43_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32) (mac_bssid[i + 0]); + tmp |= (u32) (mac_bssid[i + 1]) << 8; + tmp |= (u32) (mac_bssid[i + 2]) << 16; + tmp |= (u32) (mac_bssid[i + 3]) << 24; + b43_ram_write(dev, 0x20 + i, tmp); + } +} + +static void b43_upload_card_macaddress(struct b43_wldev *dev) +{ + b43_write_mac_bssid_templates(dev); + b43_macfilter_set(dev, B43_MACFILTER_SELF, dev->wl->mac_addr); +} + +static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) +{ + /* slot_time is in usec. */ + if (dev->phy.type != B43_PHYTYPE_G) + return; + b43_write16(dev, 0x684, 510 + slot_time); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); +} + +static void b43_short_slot_timing_enable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 9); + dev->short_slot = 1; +} + +static void b43_short_slot_timing_disable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 20); + dev->short_slot = 0; +} + +/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable. + * Returns the _previously_ enabled IRQ mask. + */ +static inline u32 b43_interrupt_enable(struct b43_wldev *dev, u32 mask) +{ + u32 old_mask; + + old_mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, old_mask | mask); + + return old_mask; +} + +/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable. + * Returns the _previously_ enabled IRQ mask. + */ +static inline u32 b43_interrupt_disable(struct b43_wldev *dev, u32 mask) +{ + u32 old_mask; + + old_mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, old_mask & ~mask); + + return old_mask; +} + +/* Synchronize IRQ top- and bottom-half. 
+ * IRQs must be masked before calling this. + * This must not be called with the irq_lock held. + */ +static void b43_synchronize_irq(struct b43_wldev *dev) +{ + synchronize_irq(dev->dev->irq); + tasklet_kill(&dev->isr_tasklet); +} + +/* DummyTransmission function, as documented on + * http://bcm-specs.sipsolutions.net/DummyTransmission + */ +void b43_dummy_transmission(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + unsigned int i, max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + switch (phy->type) { + case B43_PHYTYPE_A: + max_loop = 0x1E; + buffer[0] = 0x000201CC; + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + max_loop = 0xFA; + buffer[0] = 0x000B846E; + break; + default: + B43_WARN_ON(1); + return; + } + + for (i = 0; i < 5; i++) + b43_ram_write(dev, i * 4, buffer[i]); + + /* Commit writes */ + b43_read32(dev, B43_MMIO_MACCTL); + + b43_write16(dev, 0x0568, 0x0000); + b43_write16(dev, 0x07C0, 0x0000); + value = ((phy->type == B43_PHYTYPE_A) ? 1 : 0); + b43_write16(dev, 0x050C, value); + b43_write16(dev, 0x0508, 0x0000); + b43_write16(dev, 0x050A, 0x0000); + b43_write16(dev, 0x054C, 0x0000); + b43_write16(dev, 0x056A, 0x0014); + b43_write16(dev, 0x0568, 0x0826); + b43_write16(dev, 0x0500, 0x0000); + b43_write16(dev, 0x0502, 0x0030); + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0037); +} + +static void key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, const u8 * key) +{ + unsigned int i; + u32 offset; + u16 value; + u16 kidx; + + /* Key index/algo block */ + kidx = b43_kidx_to_fw(dev, index); + value = ((kidx << 4) | algorithm); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_KEYIDXBLOCK + (kidx * 2), value); + + /* Write the key to the Key Table Pointer offset */ + offset = dev->ktp + (index * B43_SEC_KEYSIZE); + for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { + value = key[i]; + value |= (u16) (key[i + 1]) << 8; + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, value); + } +} + +static void keymac_write(struct b43_wldev *dev, u8 index, const u8 * addr) +{ + u32 addrtmp[2] = { 0, 0, }; + u8 per_sta_keys_start = 8; + + if (b43_new_kidx_api(dev)) + per_sta_keys_start = 4; + + B43_WARN_ON(index < per_sta_keys_start); + /* We have two default TX keys and possibly two default RX keys. + * Physical mac 0 is mapped to physical key 4 or 8, depending + * on the firmware version. + * So we must adjust the index here. 
+ */ + index -= per_sta_keys_start; + + if (addr) { + addrtmp[0] = addr[0]; + addrtmp[0] |= ((u32) (addr[1]) << 8); + addrtmp[0] |= ((u32) (addr[2]) << 16); + addrtmp[0] |= ((u32) (addr[3]) << 24); + addrtmp[1] = addr[4]; + addrtmp[1] |= ((u32) (addr[5]) << 8); + } + + if (dev->dev->id.revision >= 5) { + /* Receive match transmitter address mechanism */ + b43_shm_write32(dev, B43_SHM_RCMTA, + (index * 2) + 0, addrtmp[0]); + b43_shm_write16(dev, B43_SHM_RCMTA, + (index * 2) + 1, addrtmp[1]); + } else { + /* RXE (Receive Engine) and + * PSM (Programmable State Machine) mechanism + */ + if (index < 8) { + /* TODO write to RCM 16, 19, 22 and 25 */ + } else { + b43_shm_write32(dev, B43_SHM_SHARED, + B43_SHM_SH_PSM + (index * 6) + 0, + addrtmp[0]); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_PSM + (index * 6) + 4, + addrtmp[1]); + } + } +} + +static void do_key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, + const u8 * key, size_t key_len, const u8 * mac_addr) +{ + u8 buf[B43_SEC_KEYSIZE] = { 0, }; + u8 per_sta_keys_start = 8; + + if (b43_new_kidx_api(dev)) + per_sta_keys_start = 4; + + B43_WARN_ON(index >= dev->max_nr_keys); + B43_WARN_ON(key_len > B43_SEC_KEYSIZE); + + if (index >= per_sta_keys_start) + keymac_write(dev, index, NULL); /* First zero out mac. */ + if (key) + memcpy(buf, key, key_len); + key_write(dev, index, algorithm, buf); + if (index >= per_sta_keys_start) + keymac_write(dev, index, mac_addr); + + dev->key[index].algorithm = algorithm; +} + +static int b43_key_write(struct b43_wldev *dev, + int index, u8 algorithm, + const u8 * key, size_t key_len, + const u8 * mac_addr, + struct ieee80211_key_conf *keyconf) +{ + int i; + int sta_keys_start; + + if (key_len > B43_SEC_KEYSIZE) + return -EINVAL; + for (i = 0; i < dev->max_nr_keys; i++) { + /* Check that we don't already have this key. */ + B43_WARN_ON(dev->key[i].keyconf == keyconf); + } + if (index < 0) { + /* Either pairwise key or address is 00:00:00:00:00:00 + * for transmit-only keys. Search the index. 
*/ + if (b43_new_kidx_api(dev)) + sta_keys_start = 4; + else + sta_keys_start = 8; + for (i = sta_keys_start; i < dev->max_nr_keys; i++) { + if (!dev->key[i].keyconf) { + /* found empty */ + index = i; + break; + } + } + if (index < 0) { + b43err(dev->wl, "Out of hardware key memory\n"); + return -ENOSPC; + } + } else + B43_WARN_ON(index > 3); + + do_key_write(dev, index, algorithm, key, key_len, mac_addr); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + /* Default RX key */ + B43_WARN_ON(mac_addr); + do_key_write(dev, index + 4, algorithm, key, key_len, NULL); + } + keyconf->hw_key_idx = index; + dev->key[index].keyconf = keyconf; + + return 0; +} + +static int b43_key_clear(struct b43_wldev *dev, int index) +{ + if (B43_WARN_ON((index < 0) || (index >= dev->max_nr_keys))) + return -EINVAL; + do_key_write(dev, index, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + do_key_write(dev, index + 4, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + } + dev->key[index].keyconf = NULL; + + return 0; +} + +static void b43_clear_keys(struct b43_wldev *dev) +{ + int i; + + for (i = 0; i < dev->max_nr_keys; i++) + b43_key_clear(dev, i); +} + +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) +{ + u32 macctl; + u16 ucstat; + bool hwps; + bool awake; + int i; + + B43_WARN_ON((ps_flags & B43_PS_ENABLED) && + (ps_flags & B43_PS_DISABLED)); + B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); + + if (ps_flags & B43_PS_ENABLED) { + hwps = 1; + } else if (ps_flags & B43_PS_DISABLED) { + hwps = 0; + } else { + //TODO: If powersave is not off and FIXME is not set and we are not in adhoc + // and thus is not an AP and we are associated, set bit 25 + } + if (ps_flags & B43_PS_AWAKE) { + awake = 1; + } else if (ps_flags & B43_PS_ASLEEP) { + awake = 0; + } else { + //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, + // or we are associated, or FIXME, or the latest PS-Poll packet sent was + // successful, set bit26 + } + +/* FIXME: For now we force awake-on and hwps-off */ + hwps = 0; + awake = 1; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (hwps) + macctl |= B43_MACCTL_HWPS; + else + macctl &= ~B43_MACCTL_HWPS; + if (awake) + macctl |= B43_MACCTL_AWAKE; + else + macctl &= ~B43_MACCTL_AWAKE; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit write */ + b43_read32(dev, B43_MMIO_MACCTL); + if (awake && dev->dev->id.revision >= 5) { + /* Wait for the microcode to wake up. */ + for (i = 0; i < 100; i++) { + ucstat = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_UCODESTAT); + if (ucstat != B43_SHM_SH_UCODESTAT_SLEEP) + break; + udelay(10); + } + } +} + +/* Turn the Analog ON/OFF */ +static void b43_switch_analog(struct b43_wldev *dev, int on) +{ + b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4); +} + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43_TMSLOW_PHYCLKEN; + flags |= B43_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. 
*/ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON */ + b43_switch_analog(dev, 1); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_GMODE; + if (flags & B43_TMSLOW_GMODE) + macctl |= B43_MACCTL_GMODE; + macctl |= B43_MACCTL_IHR_ENABLED; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43_wldev *dev) +{ + u32 v0, v1; + u16 tmp; + struct b43_txstatus stat; + + while (1) { + v0 = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43_read32(dev, B43_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_1); + } +} + +static u32 b43_jssi_read(struct b43_wldev *dev) +{ + u32 val = 0; + + val = b43_shm_read16(dev, B43_SHM_SHARED, 0x08A); + val <<= 16; + val |= b43_shm_read16(dev, B43_SHM_SHARED, 0x088); + + return val; +} + +static void b43_jssi_write(struct b43_wldev *dev, u32 jssi) +{ + b43_shm_write16(dev, B43_SHM_SHARED, 0x088, (jssi & 0x0000FFFF)); + b43_shm_write16(dev, B43_SHM_SHARED, 0x08A, (jssi & 0xFFFF0000) >> 16); +} + +static void b43_generate_noise_sample(struct b43_wldev *dev) +{ + b43_jssi_write(dev, 0x7F7F7F7F); + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, + b43_read32(dev, B43_MMIO_STATUS2_BITFIELD) + | (1 << 4)); + B43_WARN_ON(dev->noisecalc.channel_at_start != dev->phy.channel); +} + +static void b43_calculate_link_quality(struct b43_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.channel_at_start = dev->phy.channel; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + u8 noise[4]; + u8 i, j; + s32 average; + + /* Bottom half of Link Quality calculation. */ + + B43_WARN_ON(!dev->noisecalc.calculation_running); + if (dev->noisecalc.channel_at_start != phy->channel) + goto drop_calculation; + *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. 
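/*
 * Editor's sketch (not part of the patch): the bit layout that
 * handle_irq_transmit_status() above extracts from the two XMITSTAT words.
 * Field names mirror struct b43_txstatus; the sample register values are
 * made up purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct txstat_fields {
	uint16_t cookie;	/* v0[31:16] */
	uint16_t seq;		/* v1[15:0]  */
	uint8_t phy_stat;	/* v1[23:16] */
	uint8_t frame_count;	/* v0[15:12] */
	uint8_t rts_count;	/* v0[11:8]  */
	uint8_t supp_reason;	/* v0[4:2]   */
	int acked;		/* v0[1]     */
};

static struct txstat_fields decode_txstat(uint32_t v0, uint32_t v1)
{
	struct txstat_fields s;
	uint16_t tmp = v0 & 0xFFFF;

	s.cookie = v0 >> 16;
	s.seq = v1 & 0xFFFF;
	s.phy_stat = (v1 & 0x00FF0000) >> 16;
	s.frame_count = (tmp & 0xF000) >> 12;
	s.rts_count = (tmp & 0x0F00) >> 8;
	s.supp_reason = (tmp & 0x001C) >> 2;
	s.acked = !!(tmp & 0x0002);
	return s;
}

int main(void)
{
	struct txstat_fields s = decode_txstat(0x002A1003, 0x003400C8);

	printf("cookie=0x%04x seq=%u frames=%u acked=%d\n",
	       s.cookie, s.seq, s.frame_count, s.acked);
	return 0;
}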
*/ + B43_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; + drop_calculation: + dev->noisecalc.calculation_running = 0; + return; + } + generate_new: + b43_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43_wldev *dev) +{ + if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { + ///TODO: PS TBTT + } else { + if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) + b43_power_saving_ctl_bits(dev, 0); + } + dev->reg124_set_0x4 = 0; + if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) + dev->reg124_set_0x4 = 1; +} + +static void handle_irq_atim_end(struct b43_wldev *dev) +{ + if (!dev->reg124_set_0x4 /*FIXME rename this variable */ ) + return; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, + b43_read32(dev, B43_MMIO_STATUS2_BITFIELD) + | 0x4); +} + +static void handle_irq_pmq(struct b43_wldev *dev) +{ + u32 tmp; + + //TODO: AP mode. + + while (1) { + tmp = b43_read32(dev, B43_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43_write16(dev, B43_MMIO_PS_STATUS, 0x0002); +} + +static void b43_write_template_common(struct b43_wldev *dev, + const u8 * data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i, tmp; + struct b43_plcp_hdr4 plcp; + + plcp.data = 0; + b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. 
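/*
 * Editor's sketch (not part of the patch): the fixed-point scaling used by
 * handle_irq_noise() above.  The 8 x 4 nrssi samples are averaged and then
 * multiplied by 125/128 (with +64 as a rounding term), i.e. scaled by
 * roughly 0.977, before the firmware-derived correction terms are applied.
 * The sample values here are arbitrary.
 */
#include <stdio.h>

int main(void)
{
	int samples[8][4];
	int i, j, average = 0;

	for (i = 0; i < 8; i++)
		for (j = 0; j < 4; j++)
			samples[i][j] = -68;	/* pretend nrssi lookup results */

	for (i = 0; i < 8; i++)
		for (j = 0; j < 4; j++)
			average += samples[i][j];
	average /= (8 * 4);

	average *= 125;
	average += 64;
	average /= 128;		/* -68 scales to -65 here */

	printf("scaled link noise: %d\n", average);
	return 0;
}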
+ */ + tmp = (u32) (data[0]) << 16; + tmp |= (u32) (data[1]) << 24; + b43_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32) (data[i + 0]); + if (i + 1 < size) + tmp |= (u32) (data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32) (data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32) (data[i + 3]) << 24; + b43_ram_write(dev, ram_offset + i - 2, tmp); + } + b43_shm_write16(dev, B43_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43_plcp_hdr6)); +} + +static void b43_write_beacon_template(struct b43_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + int len; + const u8 *data; + + B43_WARN_ON(!dev->cached_beacon); + len = min((size_t) dev->cached_beacon->len, + 0x200 - sizeof(struct b43_plcp_hdr6)); + data = (const u8 *)(dev->cached_beacon->data); + b43_write_template_common(dev, data, + len, ram_offset, shm_size_offset, rate); +} + +static void b43_write_probe_resp_plcp(struct b43_wldev *dev, + u16 shm_offset, u16 size, u8 rate) +{ + struct b43_plcp_hdr4 plcp; + u32 tmp; + __le16 dur; + + plcp.data = 0; + b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, size, + B43_RATE_TO_BASE100KBPS(rate)); + /* Write PLCP in two parts and timing for packet transfer */ + tmp = le32_to_cpu(plcp.data); + b43_shm_write16(dev, B43_SHM_SHARED, shm_offset, tmp & 0xFFFF); + b43_shm_write16(dev, B43_SHM_SHARED, shm_offset + 2, tmp >> 16); + b43_shm_write16(dev, B43_SHM_SHARED, shm_offset + 6, le16_to_cpu(dur)); +} + +/* Instead of using custom probe response template, this function + * just patches custom beacon template by: + * 1) Changing packet type + * 2) Patching duration field + * 3) Stripping TIM + */ +static u8 *b43_generate_probe_resp(struct b43_wldev *dev, + u16 * dest_size, u8 rate) +{ + const u8 *src_data; + u8 *dest_data; + u16 src_size, elem_size, src_pos, dest_pos; + __le16 dur; + struct ieee80211_hdr *hdr; + + B43_WARN_ON(!dev->cached_beacon); + src_size = dev->cached_beacon->len; + src_data = (const u8 *)dev->cached_beacon->data; + + if (unlikely(src_size < 0x24)) { + b43dbg(dev->wl, "b43_generate_probe_resp: " "invalid beacon\n"); + return NULL; + } + + dest_data = kmalloc(src_size, GFP_ATOMIC); + if (unlikely(!dest_data)) + return NULL; + + /* 0x24 is offset of first variable-len Information-Element + * in beacon frame. + */ + memcpy(dest_data, src_data, 0x24); + src_pos = dest_pos = 0x24; + for (; src_pos < src_size - 2; src_pos += elem_size) { + elem_size = src_data[src_pos + 1] + 2; + if (src_data[src_pos] != 0x05) { /* TIM */ + memcpy(dest_data + dest_pos, src_data + src_pos, + elem_size); + dest_pos += elem_size; + } + } + *dest_size = dest_pos; + hdr = (struct ieee80211_hdr *)dest_data; + + /* Set the frame control. 
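/*
 * Editor's sketch (not part of the patch): the information-element walk that
 * b43_generate_probe_resp() above performs on a cached beacon after its fixed
 * 0x24-byte header.  Each IE is a (type, length, value) triple, so
 * "length + 2" steps to the next element; dropping element ID 5 (the TIM)
 * while copying everything else turns a beacon body into a probe-response
 * body.  The byte array below is fabricated.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t copy_ies_drop_tim(const uint8_t *src, size_t src_len, uint8_t *dst)
{
	size_t src_pos = 0, dst_pos = 0;

	while (src_pos + 2 <= src_len) {
		size_t elem_size = src[src_pos + 1] + 2;

		if (src_pos + elem_size > src_len)
			break;			/* truncated element */
		if (src[src_pos] != 0x05) {	/* keep everything but the TIM */
			memcpy(dst + dst_pos, src + src_pos, elem_size);
			dst_pos += elem_size;
		}
		src_pos += elem_size;
	}
	return dst_pos;
}

int main(void)
{
	/* SSID "ab" (ID 0), TIM (ID 5, 4 bytes), one basic rate (ID 1) */
	const uint8_t ies[] = { 0x00, 0x02, 'a', 'b',
				0x05, 0x04, 0x00, 0x01, 0x00, 0x00,
				0x01, 0x01, 0x82 };
	uint8_t out[sizeof(ies)];
	size_t n = copy_ies_drop_tim(ies, sizeof(ies), out);

	printf("%zu of %zu IE bytes kept\n", n, sizeof(ies));
	return 0;
}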
*/ + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, *dest_size, + B43_RATE_TO_BASE100KBPS(rate)); + hdr->duration_id = dur; + + return dest_data; +} + +static void b43_write_probe_resp_template(struct b43_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u8 *probe_resp_data; + u16 size; + + B43_WARN_ON(!dev->cached_beacon); + size = dev->cached_beacon->len; + probe_resp_data = b43_generate_probe_resp(dev, &size, rate); + if (unlikely(!probe_resp_data)) + return; + + /* Looks like PLCP headers plus packet timings are stored for + * all possible basic rates + */ + b43_write_probe_resp_plcp(dev, 0x31A, size, B43_CCK_RATE_1MB); + b43_write_probe_resp_plcp(dev, 0x32C, size, B43_CCK_RATE_2MB); + b43_write_probe_resp_plcp(dev, 0x33E, size, B43_CCK_RATE_5MB); + b43_write_probe_resp_plcp(dev, 0x350, size, B43_CCK_RATE_11MB); + + size = min((size_t) size, 0x200 - sizeof(struct b43_plcp_hdr6)); + b43_write_template_common(dev, probe_resp_data, + size, ram_offset, shm_size_offset, rate); + kfree(probe_resp_data); +} + +static int b43_refresh_cached_beacon(struct b43_wldev *dev, + struct sk_buff *beacon) +{ + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = beacon; + + return 0; +} + +static void b43_update_templates(struct b43_wldev *dev) +{ + u32 status; + + B43_WARN_ON(!dev->cached_beacon); + + b43_write_beacon_template(dev, 0x68, 0x18, B43_CCK_RATE_1MB); + b43_write_beacon_template(dev, 0x468, 0x1A, B43_CCK_RATE_1MB); + b43_write_probe_resp_template(dev, 0x268, 0x4A, B43_CCK_RATE_11MB); + + status = b43_read32(dev, B43_MMIO_STATUS2_BITFIELD); + status |= 0x03; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, status); +} + +static void b43_refresh_templates(struct b43_wldev *dev, struct sk_buff *beacon) +{ + int err; + + err = b43_refresh_cached_beacon(dev, beacon); + if (unlikely(err)) + return; + b43_update_templates(dev); +} + +static void b43_set_ssid(struct b43_wldev *dev, const u8 * ssid, u8 ssid_len) +{ + u32 tmp; + u16 i, len; + + len = min((u16) ssid_len, (u16) 0x100); + for (i = 0; i < len; i += sizeof(u32)) { + tmp = (u32) (ssid[i + 0]); + if (i + 1 < len) + tmp |= (u32) (ssid[i + 1]) << 8; + if (i + 2 < len) + tmp |= (u32) (ssid[i + 2]) << 16; + if (i + 3 < len) + tmp |= (u32) (ssid[i + 3]) << 24; + b43_shm_write32(dev, B43_SHM_SHARED, 0x380 + i, tmp); + } + b43_shm_write16(dev, B43_SHM_SHARED, 0x48, len); +} + +static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) +{ + b43_time_lock(dev); + if (dev->dev->id.revision >= 3) { + b43_write32(dev, 0x188, (beacon_int << 16)); + } else { + b43_write16(dev, 0x606, (beacon_int >> 6)); + b43_write16(dev, 0x610, beacon_int); + } + b43_time_unlock(dev); +} + +static void handle_irq_beacon(struct b43_wldev *dev) +{ + u32 status; + + if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + return; + + dev->irq_savedstate &= ~B43_IRQ_BEACON; + status = b43_read32(dev, B43_MMIO_STATUS2_BITFIELD); + + if (!dev->cached_beacon || ((status & 0x1) && (status & 0x2))) { + /* ACK beacon IRQ. 
*/ + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON); + dev->irq_savedstate |= B43_IRQ_BEACON; + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = NULL; + return; + } + if (!(status & 0x1)) { + b43_write_beacon_template(dev, 0x68, 0x18, B43_CCK_RATE_1MB); + status |= 0x1; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, status); + } + if (!(status & 0x2)) { + b43_write_beacon_template(dev, 0x468, 0x1A, B43_CCK_RATE_1MB); + status |= 0x2; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, status); + } +} + +static void handle_irq_ucode_debug(struct b43_wldev *dev) +{ + //TODO +} + +/* Interrupt handler bottom-half */ +static void b43_interrupt_tasklet(struct b43_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + B43_WARN_ON(b43_status(dev) != B43_STAT_STARTED); + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43_IRQ_MAC_TXERR)) + b43err(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43_IRQ_PHY_TXERR)) + b43err(dev->wl, "PHY transmission error\n"); + + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | + B43_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { + b43err(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43_controller_restart(dev, "DMA error"); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + return; + } + if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { + b43err(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + } + + if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43_IRQ_TXFIFO_FLUSH_OK) + ;/* TODO */ + if (reason & B43_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. 
*/ + if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { + if (b43_using_pio(dev)) + b43_pio_rx(dev->pio.queue0); + else + b43_dma_rx(dev->dma.rx_ring0); + } + B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); + if (dma_reason[3] & B43_DMAIRQ_RX_DONE) { + if (b43_using_pio(dev)) + b43_pio_rx(dev->pio.queue3); + else + b43_dma_rx(dev->dma.rx_ring3); + } + B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE); + + if (reason & B43_IRQ_TX_OK) + handle_irq_transmit_status(dev); + + b43_interrupt_enable(dev, dev->irq_savedstate); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void pio_irq_workaround(struct b43_wldev *dev, u16 base, int queueidx) +{ + u16 rxctl; + + rxctl = b43_read16(dev, base + B43_PIO_RXCTL); + if (rxctl & B43_PIO_RXCTL_DATAAVAILABLE) + dev->dma_reason[queueidx] |= B43_DMAIRQ_RX_DONE; + else + dev->dma_reason[queueidx] &= ~B43_DMAIRQ_RX_DONE; +} + +static void b43_interrupt_ack(struct b43_wldev *dev, u32 reason) +{ + if (b43_using_pio(dev) && + (dev->dev->id.revision < 3) && + (!(reason & B43_IRQ_PIO_WORKAROUND))) { + /* Apply a PIO specific workaround to the dma_reasons */ + pio_irq_workaround(dev, B43_MMIO_PIO1_BASE, 0); + pio_irq_workaround(dev, B43_MMIO_PIO2_BASE, 1); + pio_irq_workaround(dev, B43_MMIO_PIO3_BASE, 2); + pio_irq_workaround(dev, B43_MMIO_PIO4_BASE, 3); + } + + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason); + + b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]); + b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]); + b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]); + b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]); + b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]); + b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]); +} + +/* Interrupt handler top-half */ +static irqreturn_t b43_interrupt_handler(int irq, void *dev_id) +{ + irqreturn_t ret = IRQ_NONE; + struct b43_wldev *dev = dev_id; + u32 reason; + + if (!dev) + return IRQ_NONE; + + spin_lock(&dev->wl->irq_lock); + + if (b43_status(dev) < B43_STAT_STARTED) + goto out; + reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + goto out; + ret = IRQ_HANDLED; + reason &= b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + if (!reason) + goto out; + + dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43_read32(dev, B43_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43_read32(dev, B43_MMIO_DMA4_REASON) + & 0x0000DC00; + dev->dma_reason[5] = b43_read32(dev, B43_MMIO_DMA5_REASON) + & 0x0000DC00; + + b43_interrupt_ack(dev, reason); + /* disable all IRQs. They are enabled again in the bottom half. */ + dev->irq_savedstate = b43_interrupt_disable(dev, B43_IRQ_ALL); + /* save the reason code and call our bottom half. 
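/*
 * Editor's sketch (not part of the patch): the mask save/restore handshake
 * between b43_interrupt_handler() (top half) and b43_interrupt_tasklet()
 * (bottom half) above.  The top half disables all interrupt sources and keeps
 * the previously enabled mask; the bottom half restores it once it is done.
 * irq_mask stands in for the B43_MMIO_GEN_IRQ_MASK register.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t irq_mask = 0x0000F00F;	/* pretend: currently enabled IRQs */

static uint32_t irq_disable(uint32_t bits)
{
	uint32_t old = irq_mask;

	irq_mask &= ~bits;
	return old;		/* caller saves this for the later restore */
}

static void irq_enable(uint32_t bits)
{
	irq_mask |= bits;
}

int main(void)
{
	uint32_t saved;

	saved = irq_disable(0xFFFFFFFF);	/* top half: mask everything */
	printf("masked, saved state 0x%08x\n", saved);
	/* ... bottom half runs with device IRQs off ... */
	irq_enable(saved);			/* bottom half: restore */
	printf("restored mask 0x%08x\n", irq_mask);
	return 0;
}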
*/ + dev->irq_reason = reason; + tasklet_schedule(&dev->isr_tasklet); + out: + mmiowb(); + spin_unlock(&dev->wl->irq_lock); + + return ret; +} + +static void b43_release_firmware(struct b43_wldev *dev) +{ + release_firmware(dev->fw.ucode); + dev->fw.ucode = NULL; + release_firmware(dev->fw.pcm); + dev->fw.pcm = NULL; + release_firmware(dev->fw.initvals); + dev->fw.initvals = NULL; + release_firmware(dev->fw.initvals_band); + dev->fw.initvals_band = NULL; +} + +static void b43_print_fw_helptext(struct b43_wl *wl) +{ + b43err(wl, "You must go to " + "http://linuxwireless.org/en/users/Drivers/bcm43xx#devicefirmware " + "and download the correct firmware (version 4).\n"); +} + +static int do_request_fw(struct b43_wldev *dev, + const char *name, + const struct firmware **fw) +{ + char path[sizeof(modparam_fwpostfix) + 32]; + struct b43_fw_header *hdr; + u32 size; + int err; + + if (!name) + return 0; + + snprintf(path, ARRAY_SIZE(path), + "b43%s/%s.fw", + modparam_fwpostfix, name); + err = request_firmware(fw, path, dev->dev->dev); + if (err) { + b43err(dev->wl, "Firmware file \"%s\" not found " + "or load failed.\n", path); + return err; + } + if ((*fw)->size < sizeof(struct b43_fw_header)) + goto err_format; + hdr = (struct b43_fw_header *)((*fw)->data); + switch (hdr->type) { + case B43_FW_TYPE_UCODE: + case B43_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != (*fw)->size - sizeof(struct b43_fw_header)) + goto err_format; + /* fallthrough */ + case B43_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + return err; + +err_format: + b43err(dev->wl, "Firmware file \"%s\" format error.\n", path); + return -EPROTO; +} + +static int b43_request_firmware(struct b43_wldev *dev) +{ + struct b43_firmware *fw = &dev->fw; + const u8 rev = dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if (!fw->ucode) { + if ((rev >= 5) && (rev <= 10)) + filename = "ucode5"; + else if ((rev >= 11) && (rev <= 12)) + filename = "ucode11"; + else if (rev >= 13) + filename = "ucode13"; + else + goto err_no_ucode; + err = do_request_fw(dev, filename, &fw->ucode); + if (err) + goto err_load; + } + if (!fw->pcm) { + if ((rev >= 5) && (rev <= 10)) + filename = "pcm5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_pcm; + err = do_request_fw(dev, filename, &fw->pcm); + if (err) + goto err_load; + } + if (!fw->initvals) { + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_GPHY) + filename = "a0g1initvals5"; + else + filename = "a0g0initvals5"; + } else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev >= 13) + filename = "lp0initvals13"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals); + if (err) + goto err_load; + } + if (!fw->initvals_band) { + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_GPHY) + filename = "a0g1bsinitvals5"; + else + filename = "a0g0bsinitvals5"; + } else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, 
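/*
 * Editor's sketch (not part of the patch): how do_request_fw() above builds
 * the firmware path handed to request_firmware().  The "fwpostfix" module
 * parameter is spliced into the directory name, so e.g. fwpostfix="-test"
 * makes the driver look for b43-test/ucode5.fw.  The values below are
 * examples.
 */
#include <stdio.h>

int main(void)
{
	char path[48];
	const char *postfix = "-test";	/* modparam_fwpostfix */
	const char *name = "ucode5";	/* chosen from the core revision */

	snprintf(path, sizeof(path), "b43%s/%s.fw", postfix, name);
	printf("requesting \"%s\"\n", path);	/* b43-test/ucode5.fw */
	return 0;
}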
&fw->initvals_band); + if (err) + goto err_load; + } + + return 0; + +err_load: + b43_print_fw_helptext(dev->wl); + goto error; + +err_no_ucode: + err = -ENODEV; + b43err(dev->wl, "No microcode available for core rev %u\n", rev); + goto error; + +err_no_pcm: + err = -ENODEV; + b43err(dev->wl, "No PCM available for core rev %u\n", rev); + goto error; + +err_no_initvals: + err = -ENODEV; + b43err(dev->wl, "No Initial Values firmware file for PHY %u, " + "core rev %u\n", dev->phy.type, rev); + goto error; + +error: + b43_release_firmware(dev); + return err; +} + +static int b43_upload_microcode(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const __be32 *data; + unsigned int i, len; + u16 fwrev, fwpatch, fwdate, fwtime; + u32 tmp; + int err = 0; + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode->data + hdr_len); + len = (dev->fw.ucode->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_UCODE | B43_SHM_AUTOINC_W, 0x0000); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm->data + hdr_len); + len = (dev->fw.pcm->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_HW, 0x01EA); + b43_write32(dev, B43_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43_shm_control_word(dev, B43_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + } + + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_ALL); + b43_write32(dev, B43_MMIO_MACCTL, + B43_MACCTL_PSM_RUN | + B43_MACCTL_IHR_ENABLED | B43_MACCTL_INFRA); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp == B43_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= 50) { + b43err(dev->wl, "Microcode not responding\n"); + b43_print_fw_helptext(dev->wl); + err = -ENODEV; + goto out; + } + udelay(10); + } + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); /* dummy read */ + + /* Get and check the revisions. */ + fwrev = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEREV); + fwpatch = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEPATCH); + fwdate = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEDATE); + fwtime = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODETIME); + + if (fwrev <= 0x128) { + b43err(dev->wl, "YOUR FIRMWARE IS TOO OLD. Firmware from " + "binary drivers older than version 4.x is unsupported. 
" + "You must upgrade your firmware files.\n"); + b43_print_fw_helptext(dev->wl); + b43_write32(dev, B43_MMIO_MACCTL, 0); + err = -EOPNOTSUPP; + goto out; + } + b43dbg(dev->wl, "Loading firmware version %u.%u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", + fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); + + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + + out: + return err; +} + +static int b43_write_initvals(struct b43_wldev *dev, + const struct b43_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43_IV_32BIT); + offset &= B43_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = be32_to_cpu(get_unaligned(&iv->data.d32)); + b43_write32(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43_write16(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43err(dev->wl, "Initial Values Firmware file-format error.\n"); + b43_print_fw_helptext(dev->wl); + + return -EPROTO; +} + +static int b43_upload_initvals(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const struct b43_fw_header *hdr; + struct b43_firmware *fw = &dev->fw; + const struct b43_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43_fw_header *)(fw->initvals->data); + ivals = (const struct b43_iv *)(fw->initvals->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + fw->initvals->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band) { + hdr = (const struct b43_fw_header *)(fw->initvals_band->data); + ivals = (const struct b43_iv *)(fw->initvals_band->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + fw->initvals_band->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43_gpio_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask, set; + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_GPOUTSMSK); + + b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if (0 /* FIXME: conditional unknown */ ) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0100); + mask |= 0x0180; + set |= 0x0180; + } + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_PACTRL) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 
0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43_GPIO_CONTROL, + (ssb_read32(gpiodev, B43_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43_gpio_cleanup(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43_mac_enable(struct b43_wldev *dev) +{ + dev->mac_suspended--; + B43_WARN_ON(dev->mac_suspended < 0); + B43_WARN_ON(irqs_disabled()); + if (dev->mac_suspended == 0) { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_ENABLED); + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, + B43_IRQ_MAC_SUSPENDED); + /* Commit writes */ + b43_read32(dev, B43_MMIO_MACCTL); + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + b43_power_saving_ctl_bits(dev, 0); + + /* Re-enable IRQs. */ + spin_lock_irq(&dev->wl->irq_lock); + b43_interrupt_enable(dev, dev->irq_savedstate); + spin_unlock_irq(&dev->wl->irq_lock); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43_mac_suspend(struct b43_wldev *dev) +{ + int i; + u32 tmp; + + might_sleep(); + B43_WARN_ON(irqs_disabled()); + B43_WARN_ON(dev->mac_suspended < 0); + + if (dev->mac_suspended == 0) { + /* Mask IRQs before suspending MAC. Otherwise + * the MAC stays busy and won't suspend. */ + spin_lock_irq(&dev->wl->irq_lock); + tmp = b43_interrupt_disable(dev, B43_IRQ_ALL); + spin_unlock_irq(&dev->wl->irq_lock); + b43_synchronize_irq(dev); + dev->irq_savedstate = tmp; + + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_ENABLED); + /* force pci to flush the write */ + b43_read32(dev, B43_MMIO_MACCTL); + for (i = 40; i; i--) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp & B43_IRQ_MAC_SUSPENDED) + goto out; + msleep(1); + } + b43err(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43_adjust_opmode(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43_read32(dev, B43_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. */ + ctl &= ~B43_MACCTL_AP; + ctl &= ~B43_MACCTL_KEEP_CTL; + ctl &= ~B43_MACCTL_KEEP_BADPLCP; + ctl &= ~B43_MACCTL_KEEP_BAD; + ctl &= ~B43_MACCTL_PROMISC; + ctl &= ~B43_MACCTL_BEACPROMISC; + ctl |= B43_MACCTL_INFRA; + + if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) + ctl |= B43_MACCTL_AP; + else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) + ctl &= ~B43_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc in filter + * it in software. 
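/*
 * Editor's sketch (not part of the patch): the nesting discipline behind
 * b43_mac_suspend()/b43_mac_enable() above.  The hardware is only touched on
 * the 0 -> 1 and 1 -> 0 transitions of a suspend depth counter, so paired
 * calls may nest freely.  hw_suspend()/hw_resume() stand in for the MMIO
 * sequences in the patch.
 */
#include <stdio.h>

static int mac_suspended;	/* dev->mac_suspended */

static void hw_suspend(void) { printf("  MAC halted\n"); }
static void hw_resume(void)  { printf("  MAC running\n"); }

static void mac_suspend(void)
{
	if (mac_suspended == 0)
		hw_suspend();
	mac_suspended++;
}

static void mac_enable(void)
{
	mac_suspended--;
	if (mac_suspended == 0)
		hw_resume();
}

int main(void)
{
	mac_suspend();	/* outer caller, e.g. periodic work */
	mac_suspend();	/* nested caller: no extra hardware access */
	mac_enable();
	mac_enable();	/* last one actually restarts the MAC */
	return 0;
}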
*/ + if (dev->dev->id.revision <= 4) + ctl |= B43_MACCTL_PROMISC; + + b43_write32(dev, B43_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43_write16(dev, 0x612, cfp_pretbtt); +} + +static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43_shm_write16(dev, B43_SHM_SHARED, offset + 0x20, + b43_shm_read16(dev, B43_SHM_SHARED, offset)); +} + +static void b43_rate_memory_init(struct b43_wldev *dev) +{ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + case B43_PHYTYPE_G: + b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_24MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); + if (dev->phy.type == B43_PHYTYPE_A) + break; + /* fallthrough */ + case B43_PHYTYPE_B: + b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_5MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_11MB, 0); + break; + default: + B43_WARN_ON(1); + } +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna) +{ + u16 ant = 0; + u16 tmp; + + switch (antenna) { + case B43_ANTENNA0: + ant |= B43_TX4_PHY_ANT0; + break; + case B43_ANTENNA1: + ant |= B43_TX4_PHY_ANT1; + break; + case B43_ANTENNA_AUTO: + ant |= B43_TX4_PHY_ANTLAST; + break; + default: + B43_WARN_ON(1); + } + + /* FIXME We also need to set the other flags of the PHY control field somewhere. 
*/ + + /* For Beacons */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); + tmp = (tmp & ~B43_TX4_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, tmp); + /* For ACK/CTS */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43_TX4_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Resposes */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43_TX4_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, tmp); +} + +/* This is the opposite of b43_chip_init() */ +static void b43_chip_exit(struct b43_wldev *dev) +{ + b43_radio_turn_off(dev, 1); + b43_leds_exit(dev); + b43_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43_chip_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err, tmp; + u32 value32; + u16 value16; + + b43_write32(dev, B43_MMIO_MACCTL, + B43_MACCTL_PSM_JMP0 | B43_MACCTL_IHR_ENABLED); + + err = b43_request_firmware(dev); + if (err) + goto out; + err = b43_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + b43_leds_init(dev); + + err = b43_upload_initvals(dev); + if (err) + goto err_leds_exit; + b43_radio_turn_on(dev); + + b43_write16(dev, 0x03E6, 0x0000); + err = b43_phy_init(dev); + if (err) + goto err_radio_off; + + /* Select initial Interference Mitigation. */ + tmp = phy->interfmode; + phy->interfmode = B43_INTERFMODE_NONE; + b43_radio_set_interference_mitigation(dev, tmp); + + b43_set_rx_antenna(dev, B43_ANTENNA_DEFAULT); + b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT); + + if (phy->type == B43_PHYTYPE_B) { + value16 = b43_read16(dev, 0x005E); + value16 |= 0x0004; + b43_write16(dev, 0x005E, value16); + } + b43_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43_write32(dev, 0x010C, 0x01000000); + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_INFRA); + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_INFRA); + + if (b43_using_pio(dev)) { + b43_write32(dev, 0x0210, 0x00000100); + b43_write32(dev, 0x0230, 0x00000100); + b43_write32(dev, 0x0250, 0x00000100); + b43_write32(dev, 0x0270, 0x00000100); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0034, 0x0000); + } + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43_shm_write16(dev, B43_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. 
*/ + b43_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43_write16(dev, 0x060E, 0x0000); + b43_write16(dev, 0x0610, 0x8000); + b43_write16(dev, 0x0604, 0x0000); + b43_write16(dev, 0x0606, 0x0200); + } else { + b43_write32(dev, 0x0188, 0x80000000); + b43_write32(dev, 0x018C, 0x02000000); + } + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43_write16(dev, B43_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + err = 0; + b43dbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_radio_off: + b43_radio_turn_off(dev, 1); +err_leds_exit: + b43_leds_exit(dev); + b43_gpio_cleanup(dev); + return err; +} + +static void b43_periodic_every120sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->type != B43_PHYTYPE_G || phy->rev < 2) + return; + + b43_mac_suspend(dev); + b43_lo_g_measure(dev); + b43_mac_enable(dev); + if (b43_has_hardware_pctl(phy)) + b43_lo_g_ctl_mark_all_unused(dev); +} + +static void b43_periodic_every60sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (!b43_has_hardware_pctl(phy)) + b43_lo_g_ctl_mark_all_unused(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI) { + b43_mac_suspend(dev); + b43_calc_nrssi_slope(dev); + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) { + u8 old_chan = phy->channel; + + /* VCO Calibration */ + if (old_chan >= 8) + b43_radio_selectchannel(dev, 1, 0); + else + b43_radio_selectchannel(dev, 13, 0); + b43_radio_selectchannel(dev, old_chan, 0); + } + b43_mac_enable(dev); + } +} + +static void b43_periodic_every30sec(struct b43_wldev *dev) +{ + /* Update device statistics. */ + b43_calculate_link_quality(dev); +} + +static void b43_periodic_every15sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->type == B43_PHYTYPE_G) { + //TODO: update_aci_moving_average + if (phy->aci_enable && phy->aci_wlan_automatic) { + b43_mac_suspend(dev); + if (!phy->aci_enable && 1 /*TODO: not scanning? */ ) { + if (0 /*TODO: bunch of conditions */ ) { + b43_radio_set_interference_mitigation + (dev, B43_INTERFMODE_MANUALWLAN); + } + } else if (1 /*TODO*/) { + /* + if ((aci_average > 1000) && !(b43_radio_aci_scan(dev))) { + b43_radio_set_interference_mitigation(dev, + B43_INTERFMODE_NONE); + } + */ + } + b43_mac_enable(dev); + } else if (phy->interfmode == B43_INTERFMODE_NONWLAN && + phy->rev == 1) { + //TODO: implement rev1 workaround + } + } + b43_phy_xmitpower(dev); //FIXME: unless scanning? + //TODO for APHY (temperature?) +} + +static void do_periodic_work(struct b43_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 8 == 0) + b43_periodic_every120sec(dev); + if (state % 4 == 0) + b43_periodic_every60sec(dev); + if (state % 2 == 0) + b43_periodic_every30sec(dev); + b43_periodic_every15sec(dev); +} + +/* Periodic work locking policy: + * The whole periodic work handler is protected by + * wl->mutex. If another lock is needed somewhere in the + * pwork callchain, it's aquired in-place, where it's needed. 
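/*
 * Editor's sketch (not part of the patch): the cadence implemented by
 * do_periodic_work() above.  The work handler runs every 15 seconds and bumps
 * a state counter; the 30/60/120 second jobs simply piggy-back on multiples
 * of that counter.
 */
#include <stdio.h>

int main(void)
{
	unsigned int state;

	for (state = 0; state < 8; state++) {
		printf("t=%3us:", state * 15);
		if (state % 8 == 0)
			printf(" every120sec");
		if (state % 4 == 0)
			printf(" every60sec");
		if (state % 2 == 0)
			printf(" every30sec");
		printf(" every15sec\n");
	}
	return 0;
}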
+ */ +static void b43_periodic_work_handler(struct work_struct *work) +{ + struct b43_wldev *dev = container_of(work, struct b43_wldev, + periodic_work.work); + struct b43_wl *wl = dev->wl; + unsigned long delay; + + mutex_lock(&wl->mutex); + + if (unlikely(b43_status(dev) != B43_STAT_STARTED)) + goto out; + if (b43_debug(dev, B43_DBG_PWORK_STOP)) + goto out_requeue; + + do_periodic_work(dev); + + dev->periodic_state++; +out_requeue: + if (b43_debug(dev, B43_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies(HZ * 15); + queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay); +out: + mutex_unlock(&wl->mutex); +} + +static void b43_periodic_tasks_setup(struct b43_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43_periodic_work_handler); + queue_delayed_work(dev->wl->hw->workqueue, work, 0); +} + +/* Validate access to the chip (SHM) */ +static int b43_validate_chipaccess(struct b43_wldev *dev) +{ + u32 value; + u32 shm_backup; + + shm_backup = b43_shm_read32(dev, B43_SHM_SHARED, 0); + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0xAA5555AA); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0xAA5555AA) + goto error; + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0x55AAAA55); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0x55AAAA55) + goto error; + b43_shm_write32(dev, B43_SHM_SHARED, 0, shm_backup); + + value = b43_read32(dev, B43_MMIO_MACCTL); + if ((value | B43_MACCTL_GMODE) != + (B43_MACCTL_GMODE | B43_MACCTL_IHR_ENABLED)) + goto error; + + value = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (value) + goto error; + + return 0; + error: + b43err(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43_security_init(struct b43_wldev *dev) +{ + dev->max_nr_keys = (dev->dev->id.revision >= 5) ? 58 : 20; + B43_WARN_ON(dev->max_nr_keys > ARRAY_SIZE(dev->key)); + dev->ktp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_KTP); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + if (dev->dev->id.revision >= 5) { + /* Number of RCMTA address slots */ + b43_write16(dev, B43_MMIO_RCMTA_COUNT, dev->max_nr_keys - 8); + } + b43_clear_keys(dev); +} + +static int b43_rng_read(struct hwrng *rng, u32 * data) +{ + struct b43_wl *wl = (struct b43_wl *)rng->priv; + unsigned long flags; + + /* Don't take wl->mutex here, as it could deadlock with + * hwrng internal locking. It's not needed to take + * wl->mutex here, anyway. 
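b43_validate_chipaccess() above probes shared memory by writing two complementary test patterns to offset 0, reading each back, and restoring the original word. The same idea as a self-contained sketch, with the SHM accessors stubbed by a plain array (the names here are illustrative only):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_shm[1];    /* stands in for the device's shared memory */

static uint32_t shm_read32(unsigned int offset)
{
        return fake_shm[offset];
}

static void shm_write32(unsigned int offset, uint32_t value)
{
        fake_shm[offset] = value;
}

static int validate_chipaccess(void)
{
        uint32_t backup = shm_read32(0);

        shm_write32(0, 0xAA5555AA);
        if (shm_read32(0) != 0xAA5555AA)
                return -1;
        shm_write32(0, 0x55AAAA55);
        if (shm_read32(0) != 0x55AAAA55)
                return -1;
        shm_write32(0, backup); /* put back whatever the firmware left there */
        return 0;
}

int main(void)
{
        printf("chipaccess: %s\n", validate_chipaccess() ? "bad" : "ok");
        return 0;
}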
*/ + + spin_lock_irqsave(&wl->irq_lock, flags); + *data = b43_read16(wl->current_dev, B43_MMIO_RNG); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return (sizeof(u16)); +} + +static void b43_rng_exit(struct b43_wl *wl) +{ + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +} + +static int b43_rng_init(struct b43_wl *wl) +{ + int err; + + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43err(wl, "Failed to register the random " + "number generator (%d)\n", err); + } + + return err; +} + +static int b43_tx(struct ieee80211_hw *hw, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + int err = -ENODEV; + unsigned long flags; + + if (unlikely(!dev)) + goto out; + if (unlikely(b43_status(dev) < B43_STAT_STARTED)) + goto out; + /* DMA-TX is done without a global lock. */ + if (b43_using_pio(dev)) { + spin_lock_irqsave(&wl->irq_lock, flags); + err = b43_pio_tx(dev, skb, ctl); + spin_unlock_irqrestore(&wl->irq_lock, flags); + } else + err = b43_dma_tx(dev, skb, ctl); + out: + if (unlikely(err)) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +static int b43_conf_tx(struct ieee80211_hw *hw, + int queue, + const struct ieee80211_tx_queue_params *params) +{ + return 0; +} + +static int b43_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + int err = -ENODEV; + + if (!dev) + goto out; + spin_lock_irqsave(&wl->irq_lock, flags); + if (likely(b43_status(dev) >= B43_STAT_STARTED)) { + if (b43_using_pio(dev)) + b43_pio_get_tx_stats(dev, stats); + else + b43_dma_get_tx_stats(dev, stats); + err = 0; + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + out: + return err; +} + +static int b43_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + unsigned long flags; + + spin_lock_irqsave(&wl->irq_lock, flags); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return 0; +} + +static const char *phymode_to_string(unsigned int phymode) +{ + switch (phymode) { + case B43_PHYMODE_A: + return "A"; + case B43_PHYMODE_B: + return "B"; + case B43_PHYMODE_G: + return "G"; + default: + B43_WARN_ON(1); + } + return ""; +} + +static int find_wldev_for_phymode(struct b43_wl *wl, + unsigned int phymode, + struct b43_wldev **dev, bool * gmode) +{ + struct b43_wldev *d; + + list_for_each_entry(d, &wl->devlist, list) { + if (d->phy.possible_phymodes & phymode) { + /* Ok, this device supports the PHY-mode. + * Now figure out how the gmode bit has to be + * set to support it. 
*/ + if (phymode == B43_PHYMODE_A) + *gmode = 0; + else + *gmode = 1; + *dev = d; + + return 0; + } + } + + return -ESRCH; +} + +static void b43_put_phy_into_reset(struct b43_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43_TMSLOW_GMODE; + tmslow |= B43_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +/* Expects wl->mutex locked */ +static int b43_switch_phymode(struct b43_wl *wl, unsigned int new_mode) +{ + struct b43_wldev *up_dev; + struct b43_wldev *down_dev; + int err; + bool gmode = 0; + int prev_status; + + err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); + if (err) { + b43err(wl, "Could not find a device for %s-PHY mode\n", + phymode_to_string(new_mode)); + return err; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) { + /* This device is already running. */ + return 0; + } + b43dbg(wl, "Reconfiguring PHYmode to %s-PHY\n", + phymode_to_string(new_mode)); + down_dev = wl->current_dev; + + prev_status = b43_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43_STAT_STARTED) + b43_wireless_core_stop(down_dev); + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(down_dev); + + if (down_dev != up_dev) { + /* We switch to a different core, so we put PHY into + * RESET on the old core. */ + b43_put_phy_into_reset(down_dev); + } + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(up_dev); + if (err) { + b43err(wl, "Fatal: Could not initialize device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + goto init_failure; + } + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(up_dev); + if (err) { + b43err(wl, "Fatal: Coult not start device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + b43_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43_WARN_ON(b43_status(up_dev) != prev_status); + + wl->current_dev = up_dev; + + return 0; + init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +static int b43_antenna_from_ieee80211(u8 antenna) +{ + switch (antenna) { + case 0: /* default/diversity */ + return B43_ANTENNA_DEFAULT; + case 1: /* Antenna 0 */ + return B43_ANTENNA0; + case 2: /* Antenna 1 */ + return B43_ANTENNA1; + default: + return B43_ANTENNA_DEFAULT; + } +} + +static int b43_dev_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + struct b43_phy *phy; + unsigned long flags; + unsigned int new_phymode = 0xFFFF; + int antenna_tx; + int antenna_rx; + int err = 0; + u32 savedirqs; + + antenna_tx = b43_antenna_from_ieee80211(conf->antenna_sel_tx); + antenna_rx = b43_antenna_from_ieee80211(conf->antenna_sel_rx); + + mutex_lock(&wl->mutex); + + /* Switch the PHY mode (if necessary). 
*/ + switch (conf->phymode) { + case MODE_IEEE80211A: + new_phymode = B43_PHYMODE_A; + break; + case MODE_IEEE80211B: + new_phymode = B43_PHYMODE_B; + break; + case MODE_IEEE80211G: + new_phymode = B43_PHYMODE_G; + break; + default: + B43_WARN_ON(1); + } + err = b43_switch_phymode(wl, new_phymode); + if (err) + goto out_unlock_mutex; + dev = wl->current_dev; + phy = &dev->phy; + + /* Disable IRQs while reconfiguring the device. + * This makes it possible to drop the spinlock throughout + * the reconfiguration process. */ + spin_lock_irqsave(&wl->irq_lock, flags); + if (b43_status(dev) < B43_STAT_STARTED) { + spin_unlock_irqrestore(&wl->irq_lock, flags); + goto out_unlock_mutex; + } + savedirqs = b43_interrupt_disable(dev, B43_IRQ_ALL); + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43_synchronize_irq(dev); + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel_val != phy->channel) + b43_radio_selectchannel(dev, conf->channel_val, 0); + + /* Enable/Disable ShortSlot timing. */ + if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) != + dev->short_slot) { + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) + b43_short_slot_timing_enable(dev); + else + b43_short_slot_timing_disable(dev); + } + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->power_level) { + phy->power_level = conf->power_level; + b43_phy_xmitpower(dev); + } + } + + /* Antennas for RX and management frame TX. */ + b43_mgmtframe_txantenna(dev, antenna_tx); + b43_set_rx_antenna(dev, antenna_rx); + + /* Update templates for AP mode. */ + if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) + b43_set_beacon_int(dev, conf->beacon_int); + + if (!!conf->radio_enabled != phy->radio_on) { + if (conf->radio_enabled) { + b43_radio_turn_on(dev); + b43info(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) { + b43info(dev->wl, "The hardware RF-kill button " + "still turns the radio physically off. " + "Press the button to turn it on.\n"); + } + } else { + b43_radio_turn_off(dev, 0); + b43info(dev->wl, "Radio turned off by software\n"); + } + } + + spin_lock_irqsave(&wl->irq_lock, flags); + b43_interrupt_enable(dev, savedirqs); + mmiowb(); + spin_unlock_irqrestore(&wl->irq_lock, flags); + out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static int b43_dev_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + u8 algorithm; + u8 index; + int err = -EINVAL; + DECLARE_MAC_BUF(mac); + + if (modparam_nohwcrypt) + return -ENOSPC; /* User disabled HW-crypto */ + + if (!dev) + return -ENODEV; + switch (key->alg) { + case ALG_WEP: + if (key->keylen == 5) + algorithm = B43_SEC_ALGO_WEP40; + else + algorithm = B43_SEC_ALGO_WEP104; + break; + case ALG_TKIP: + algorithm = B43_SEC_ALGO_TKIP; + break; + case ALG_CCMP: + algorithm = B43_SEC_ALGO_AES; + break; + default: + B43_WARN_ON(1); + goto out; + } + + index = (u8) (key->keyidx); + if (index > 3) + goto out; + + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + switch (cmd) { + case SET_KEY: + if (algorithm == B43_SEC_ALGO_TKIP) { + /* FIXME: No TKIP hardware encryption for now. 
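The key-length check in b43_dev_set_key() above selects WEP-40 for a 5-byte key and WEP-104 otherwise (13 bytes in practice). A quick illustration of the naming; the extra 24 bits in the common marketing names are the per-frame IV:

#include <stdio.h>

int main(void)
{
        unsigned int keylens[] = { 5, 13 };
        unsigned int i;

        for (i = 0; i < 2; i++) {
                unsigned int bits = keylens[i] * 8;

                /* WEP-40 and WEP-104 are usually sold as 64-bit and
                 * 128-bit WEP because the 24-bit IV is counted in. */
                printf("%2u-byte key: WEP-%u (a.k.a. %u-bit WEP)\n",
                       keylens[i], bits, bits + 24);
        }
        return 0;
}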
*/ + err = -EOPNOTSUPP; + goto out_unlock; + } + + if (is_broadcast_ether_addr(addr)) { + /* addr is FF:FF:FF:FF:FF:FF for default keys */ + err = b43_key_write(dev, index, algorithm, + key->key, key->keylen, NULL, key); + } else { + /* + * either pairwise key or address is 00:00:00:00:00:00 + * for transmit-only keys + */ + err = b43_key_write(dev, -1, algorithm, + key->key, key->keylen, addr, key); + } + if (err) + goto out_unlock; + + if (algorithm == B43_SEC_ALGO_WEP40 || + algorithm == B43_SEC_ALGO_WEP104) { + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_USEDEFKEYS); + } else { + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_USEDEFKEYS); + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + case DISABLE_KEY: { + err = b43_key_clear(dev, key->hw_key_idx); + if (err) + goto out_unlock; + break; + } + default: + B43_WARN_ON(1); + } +out_unlock: + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); +out: + if (!err) { + b43dbg(wl, "%s hardware based encryption for keyidx: %d, " + "mac: %s\n", + cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, + print_mac(mac, addr)); + } + return err; +} + +static void b43_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, unsigned int *fflags, + int mc_count, struct dev_addr_list *mc_list) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) { + *fflags = 0; + return; + } + + spin_lock_irqsave(&wl->irq_lock, flags); + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43_status(dev) >= B43_STAT_INITIALIZED) + b43_adjust_opmode(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); +} + +static int b43_config_interface(struct ieee80211_hw *hw, + int if_id, struct ieee80211_if_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) + return -ENODEV; + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + B43_WARN_ON(wl->if_id != if_id); + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + if (b43_status(dev) >= B43_STAT_INITIALIZED) { + if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) { + B43_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); + b43_set_ssid(dev, conf->ssid, conf->ssid_len); + if (conf->beacon) + b43_refresh_templates(dev, conf->beacon); + } + b43_write_mac_bssid_templates(dev); + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); + + return 0; +} + +/* Locking: wl->mutex */ +static void b43_wireless_core_stop(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + unsigned long flags; + + if (b43_status(dev) < B43_STAT_STARTED) + return; + b43_set_status(dev, B43_STAT_INITIALIZED); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel the possibly running self-rearming periodic work. */ + cancel_delayed_work_sync(&dev->periodic_work); + mutex_lock(&wl->mutex); + + ieee80211_stop_queues(wl->hw); //FIXME this could cause a deadlock, as mac80211 seems buggy. + + /* Disable and sync interrupts. 
*/ + spin_lock_irqsave(&wl->irq_lock, flags); + dev->irq_savedstate = b43_interrupt_disable(dev, B43_IRQ_ALL); + b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* flush */ + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43_synchronize_irq(dev); + + b43_mac_suspend(dev); + free_irq(dev->dev->irq, dev); + b43dbg(wl, "Wireless interface stopped\n"); +} + +/* Locking: wl->mutex */ +static int b43_wireless_core_start(struct b43_wldev *dev) +{ + int err; + + B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + err = request_irq(dev->dev->irq, b43_interrupt_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); + goto out; + } + + /* We are ready to run. */ + b43_set_status(dev, B43_STAT_STARTED); + + /* Start data flow (TX/RX). */ + b43_mac_enable(dev); + b43_interrupt_enable(dev, dev->irq_savedstate); + ieee80211_start_queues(dev->wl->hw); + + /* Start maintainance work */ + b43_periodic_tasks_setup(dev); + + b43dbg(dev->wl, "Wireless interface started\n"); + out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43_phy_versioning(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43_read16(dev, B43_MMIO_PHY_VER); + analog_type = (tmp & B43_PHYVER_ANALOG) >> B43_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43_PHYVER_TYPE) >> B43_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43_PHYVER_VERSION); + switch (phy_type) { + case B43_PHYTYPE_A: + if (phy_rev >= 4) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6 + && phy_rev != 7) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (phy_rev > 8) + unsupported = 1; + break; + default: + unsupported = 1; + }; + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH); + tmp <<= 16; + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp |= b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + switch (phy_type) { + case B43_PHYTYPE_A: + if (radio_ver != 0x2060) + unsupported = 1; + if (radio_rev != 1) + unsupported = 1; + if (radio_manuf != 0x17F) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + default: + B43_WARN_ON(1); + } + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X, Revision %u\n", + radio_manuf, radio_ver, radio_rev); + + phy->radio_manuf = radio_manuf; + phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + 
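The radio ID word read above carries three packed fields: manufacturer in bits 0-11, version in bits 12-27 and revision in bits 28-31. A worked decode of the hard-coded value used for the 0x4317 rev-0 chip, as a stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t id = 0x3205017F;       /* value substituted for chip 0x4317, chip_rev 0 */
        unsigned int manuf = id & 0x00000FFF;
        unsigned int ver = (id & 0x0FFFF000) >> 12;
        unsigned int rev = (id & 0xF0000000) >> 28;

        /* prints: Manuf 0x17F, Version 0x2050, Revision 3 */
        printf("Manuf 0x%X, Version 0x%X, Revision %u\n", manuf, ver, rev);
        return 0;
}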
phy->analog = analog_type; + phy->type = phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43_wldev *dev, + struct b43_phy *phy) +{ + struct b43_txpower_lo_control *lo; + int i; + + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->locked = 0; + + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + phy->radio_off_context.valid = 0; + + lo = phy->lo_control; + if (lo) { + memset(lo, 0, sizeof(*(phy->lo_control))); + lo->rebuild = 1; + lo->tx_bias = 0xFF; + } + phy->max_lb_gain = 0; + phy->trsw_rx_gain = 0; + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + spin_lock_init(&phy->lock); + phy->interfmode = B43_INTERFMODE_NONE; + phy->channel = 0xFF; + + phy->hardware_power_control = !!modparam_hwpctl; +} + +static void setup_struct_wldev_for_init(struct b43_wldev *dev) +{ + /* Flags */ + dev->reg124_set_0x4 = 0; + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_savedstate = B43_IRQ_MASKTEMPLATE; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43_bluetooth_coext_enable(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + u32 hf; + + if (!(sprom->r1.boardflags_lo & B43_BFL_BTCOEXIST)) + return; + if (dev->phy.type != B43_PHYTYPE_B && !dev->phy.gmode) + return; + + hf = b43_hf_read(dev); + if (sprom->r1.boardflags_lo & B43_BFL_BTCMOD) + hf |= B43_HF_BTCOEXALT; + else + hf |= B43_HF_BTCOEX; + b43_hf_write(dev, hf); + //TODO +} + +static void b43_bluetooth_coext_disable(struct b43_wldev *dev) +{ //TODO +} + +static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. 
*/ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp |= 0x53; + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43_wireless_core_exit(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED); + if (b43_status(dev) != B43_STAT_INITIALIZED) + return; + b43_set_status(dev, B43_STAT_UNINIT); + + mutex_unlock(&dev->wl->mutex); + b43_rfkill_exit(dev); + mutex_lock(&dev->wl->mutex); + + b43_rng_exit(dev->wl); + b43_pio_free(dev); + b43_dma_free(dev); + b43_chip_exit(dev); + b43_radio_turn_off(dev, 1); + b43_switch_analog(dev, 0); + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +/* Initialize a wireless core */ +static int b43_wireless_core_init(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct ssb_sprom *sprom = &bus->sprom; + struct b43_phy *phy = &dev->phy; + int err; + u32 hf, tmp; + + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + } + + if ((phy->type == B43_PHYTYPE_B) || (phy->type == B43_PHYTYPE_G)) { + phy->lo_control = + kzalloc(sizeof(*(phy->lo_control)), GFP_KERNEL); + if (!phy->lo_control) { + err = -ENOMEM; + goto err_busdown; + } + } + setup_struct_wldev_for_init(dev); + + err = b43_phy_init_tssi2dbm_table(dev); + if (err) + goto err_kfree_lo_control; + + /* Enable IRQ routing to this device. */ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43_imcfglo_timeouts_workaround(dev); + b43_bluetooth_coext_disable(dev); + b43_phy_early_init(dev); + err = b43_chip_init(dev); + if (err) + goto err_kfree_tssitbl; + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_WLCOREREV, dev->dev->id.revision); + hf = b43_hf_read(dev); + if (phy->type == B43_PHYTYPE_G) { + hf |= B43_HF_SYMW; + if (phy->rev == 1) + hf |= B43_HF_GDCW; + if (sprom->r1.boardflags_lo & B43_BFL_PACTRL) + hf |= B43_HF_OFDMPABOOST; + } else if (phy->type == B43_PHYTYPE_B) { + hf |= B43_HF_SYMW; + if (phy->rev >= 2 && phy->radio_ver == 0x2050) + hf &= ~B43_HF_GDCW; + } + b43_hf_write(dev, hf); + + /* Short/Long Retry Limit. + * The retry-limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter. + */ + tmp = limit_value(modparam_short_retry, 0, 0xF); + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT, tmp); + tmp = limit_value(modparam_long_retry, 0, 0xF); + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT, tmp); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SFFBLIM, 3); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_LFFBLIM, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. 
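The retry limits in b43_wireless_core_init() above are clamped because the chip-internal retry counters are only four bits wide. limit_value() is defined elsewhere in the driver; it presumably behaves like the clamp sketched here:

#include <stdio.h>

static unsigned int clamp_u(unsigned int val, unsigned int min,
                            unsigned int max)
{
        if (val < min)
                return min;
        if (val > max)
                return max;
        return val;
}

int main(void)
{
        /* prints: 7 15 0 */
        printf("%u %u %u\n", clamp_u(7, 0, 0xF), clamp_u(99, 0, 0xF),
               clamp_u(0, 0, 0xF));
        return 0;
}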
*/ + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1); + + b43_rate_memory_init(dev); + + /* Minimum Contention Window */ + if (phy->type == B43_PHYTYPE_B) { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); + } else { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); + } + /* Maximum Contention Window */ + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); + + do { + if (b43_using_pio(dev)) { + err = b43_pio_init(dev); + } else { + err = b43_dma_init(dev); + if (!err) + b43_qos_init(dev); + } + } while (err == -EAGAIN); + if (err) + goto err_chip_exit; + +//FIXME +#if 1 + b43_write16(dev, 0x0612, 0x0050); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0416, 0x0050); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0414, 0x01F4); +#endif + + b43_bluetooth_coext_enable(dev); + + ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + b43_upload_card_macaddress(dev); + b43_security_init(dev); + b43_rfkill_init(dev); + b43_rng_init(wl); + + b43_set_status(dev, B43_STAT_INITIALIZED); + + out: + return err; + + err_chip_exit: + b43_chip_exit(dev); + err_kfree_tssitbl: + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + err_kfree_lo_control: + kfree(phy->lo_control); + phy->lo_control = NULL; + err_busdown: + ssb_bus_may_powerdown(bus); + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + return err; +} + +static int b43_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + unsigned long flags; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (conf->type != IEEE80211_IF_TYPE_AP && + conf->type != IEEE80211_IF_TYPE_STA && + conf->type != IEEE80211_IF_TYPE_WDS && + conf->type != IEEE80211_IF_TYPE_IBSS) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43dbg(wl, "Adding Interface type %d\n", conf->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->if_id = conf->if_id; + wl->if_type = conf->type; + memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); + + spin_lock_irqsave(&wl->irq_lock, flags); + b43_adjust_opmode(dev); + b43_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + err = 0; + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + + b43dbg(wl, "Removing Interface type %d\n", conf->type); + + mutex_lock(&wl->mutex); + + B43_WARN_ON(!wl->operating); + B43_WARN_ON(wl->if_id != conf->if_id); + + wl->operating = 0; + + spin_lock_irqsave(&wl->irq_lock, flags); + b43_adjust_opmode(dev); + memset(wl->mac_addr, 0, ETH_ALEN); + b43_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + mutex_unlock(&wl->mutex); +} + +static int b43_start(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + int did_init = 0; + int err; + + mutex_lock(&wl->mutex); + + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43_status(dev) < B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + if (did_init) + b43_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + + 
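The contention-window values programmed in b43_wireless_core_init() above match the usual 802.11 defaults: CWmin 15 (31 for the B PHY) and CWmax 1023. Assuming the standard binary exponential backoff, the window grows per retry as in this stand-alone sketch:

#include <stdio.h>

int main(void)
{
        unsigned int cw, cwmin = 0xF, cwmax = 0x3FF;
        int retry;

        /* CW grows as (CW + 1) * 2 - 1 per retry until it hits CWmax. */
        for (retry = 0, cw = cwmin; retry < 8; retry++) {
                printf("retry %d: CW = %4u slots\n", retry, cw);
                cw = ((cw + 1) << 1) - 1;
                if (cw > cwmax)
                        cw = cwmax;
        }
        return 0;
}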
out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +void b43_stop(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + + mutex_lock(&wl->mutex); + if (b43_status(dev) >= B43_STAT_STARTED) + b43_wireless_core_stop(dev); + b43_wireless_core_exit(dev); + mutex_unlock(&wl->mutex); +} + +static const struct ieee80211_ops b43_hw_ops = { + .tx = b43_tx, + .conf_tx = b43_conf_tx, + .add_interface = b43_add_interface, + .remove_interface = b43_remove_interface, + .config = b43_dev_config, + .config_interface = b43_config_interface, + .configure_filter = b43_configure_filter, + .set_key = b43_dev_set_key, + .get_stats = b43_get_stats, + .get_tx_stats = b43_get_tx_stats, + .start = b43_start, + .stop = b43_stop, +}; + +/* Hard-reset the chip. Do not call this directly. + * Use b43_controller_restart() + */ +static void b43_chip_reset(struct work_struct *work) +{ + struct b43_wldev *dev = + container_of(work, struct b43_wldev, restart_work); + struct b43_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43_status(dev); + /* Bring the device down... */ + if (prev_status >= B43_STAT_STARTED) + b43_wireless_core_stop(dev); + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(dev); + + /* ...and up again. */ + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + b43_wireless_core_exit(dev); + goto out; + } + } + out: + mutex_unlock(&wl->mutex); + if (err) + b43err(wl, "Controller restart FAILED\n"); + else + b43info(wl, "Controller restarted\n"); +} + +static int b43_setup_modes(struct b43_wldev *dev, + int have_aphy, int have_bphy, int have_gphy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + struct ieee80211_hw_mode *mode; + struct b43_phy *phy = &dev->phy; + int cnt = 0; + int err; + +/*FIXME: Don't tell ieee80211 about an A-PHY, because we currently don't support A-PHY. 
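b43_chip_reset() above tears the device down only as far as it was brought up and then rebuilds it to the same level, so a controller restart is transparent to whatever state mac80211 had reached. A stand-alone sketch of that restore-to-previous-level pattern (the enum and helper are illustrative):

#include <stdio.h>

enum level { UNINIT, INITIALIZED, STARTED };

static void restart(enum level prev)
{
        if (prev >= STARTED)
                printf("  stop\n");
        if (prev >= INITIALIZED)
                printf("  exit\n");
        if (prev >= INITIALIZED)
                printf("  init\n");
        if (prev >= STARTED)
                printf("  start\n");
}

int main(void)
{
        printf("restart while STARTED:\n");
        restart(STARTED);
        printf("restart while INITIALIZED:\n");
        restart(INITIALIZED);
        printf("restart while UNINIT:\n");
        restart(UNINIT);        /* nothing to do */
        return 0;
}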
*/ + have_aphy = 0; + + phy->possible_phymodes = 0; + for (; 1; cnt++) { + if (have_aphy) { + B43_WARN_ON(cnt >= B43_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211A; + mode->num_channels = b43_a_chantable_size; + mode->channels = b43_a_chantable; + mode->num_rates = b43_a_ratetable_size; + mode->rates = b43_a_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43_PHYMODE_A; + have_aphy = 0; + continue; + } + if (have_bphy) { + B43_WARN_ON(cnt >= B43_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211B; + mode->num_channels = b43_bg_chantable_size; + mode->channels = b43_bg_chantable; + mode->num_rates = b43_b_ratetable_size; + mode->rates = b43_b_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43_PHYMODE_B; + have_bphy = 0; + continue; + } + if (have_gphy) { + B43_WARN_ON(cnt >= B43_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211G; + mode->num_channels = b43_bg_chantable_size; + mode->channels = b43_bg_chantable; + mode->num_rates = b43_g_ratetable_size; + mode->rates = b43_g_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43_PHYMODE_G; + have_gphy = 0; + continue; + } + break; + } + + return 0; +} + +static void b43_wireless_core_detach(struct b43_wldev *dev) +{ + b43_rfkill_free(dev); + /* We release firmware that late to not be required to re-request + * is all the time when we reinit the core. */ + b43_release_firmware(dev); +} + +static int b43_wireless_core_attach(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = bus->host_pci; + int err; + int have_aphy = 0, have_bphy = 0, have_gphy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to have + * that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43err(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_aphy = !!(tmshigh & B43_TMSHIGH_APHY); + have_gphy = !!(tmshigh & B43_TMSHIGH_GPHY); + if (!have_aphy && !have_gphy) + have_bphy = 1; + } else if (dev->dev->id.revision == 4) { + have_gphy = 1; + have_aphy = 1; + } else + have_bphy = 1; + + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_phy_versioning(dev); + if (err) + goto err_powerdown; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && pdev->device != 0x4324)) { + /* No multiband support. */ + have_aphy = 0; + have_bphy = 0; + have_gphy = 0; + switch (dev->phy.type) { + case B43_PHYTYPE_A: + have_aphy = 1; + break; + case B43_PHYTYPE_B: + have_bphy = 1; + break; + case B43_PHYTYPE_G: + have_gphy = 1; + break; + default: + B43_WARN_ON(1); + } + } + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? 
B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_validate_chipaccess(dev); + if (err) + goto err_powerdown; + err = b43_setup_modes(dev, have_aphy, have_bphy, have_gphy); + if (err) + goto err_powerdown; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43_chip_reset); + b43_rfkill_alloc(dev); + + b43_radio_turn_off(dev, 1); + b43_switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43_one_core_detach(struct ssb_device *dev) +{ + struct b43_wldev *wldev; + struct b43_wl *wl; + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + cancel_work_sync(&wldev->restart_work); + b43_debugfs_remove_device(wldev); + b43_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) +{ + struct b43_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = dev->bus->host_pci; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. + */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && (pdev->device != 0x431A))) { + b43dbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->dev = dev; + wldev->wl = wl; + b43_set_status(wldev, B43_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + tasklet_init(&wldev->isr_tasklet, + (void (*)(unsigned long))b43_interrupt_tasklet, + (unsigned long)wldev); + if (modparam_pio) + wldev->__using_pio = 1; + INIT_LIST_HEAD(&wldev->list); + + err = b43_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43_debugfs_add_device(wldev); + + out: + return err; + + err_kfree_wldev: + kfree(wldev); + return err; +} + +static void b43_sprom_fixup(struct ssb_bus *bus) +{ + /* boardflags workarounds */ + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL && + bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74) + bus->sprom.r1.boardflags_lo |= B43_BFL_BTCOEXIST; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40) + bus->sprom.r1.boardflags_lo |= B43_BFL_PACTRL; + + /* Handle case when gain is not set in sprom */ + if (bus->sprom.r1.antenna_gain_a == 0xFF) + bus->sprom.r1.antenna_gain_a = 2; + if (bus->sprom.r1.antenna_gain_bg == 0xFF) + bus->sprom.r1.antenna_gain_bg = 2; + + /* Convert Antennagain values to Q5.2 */ + bus->sprom.r1.antenna_gain_a <<= 2; + bus->sprom.r1.antenna_gain_bg <<= 2; +} + +static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43_wl *wl; + int err = -ENOMEM; + + b43_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); + if (!hw) { + b43err(NULL, "Could not allocate ieee80211 
device\n"); + goto out; + } + + /* fill hw info */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; + hw->max_signal = 100; + hw->max_rssi = -110; + hw->max_noise = -110; + hw->queues = 1; /* FIXME: hardware has more queues */ + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->r1.et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.il0mac); + + /* Get and initialize struct b43_wl */ + wl = hw_to_b43_wl(hw); + memset(wl, 0, sizeof(*wl)); + wl->hw = hw; + spin_lock_init(&wl->irq_lock); + spin_lock_init(&wl->leds_lock); + mutex_init(&wl->mutex); + INIT_LIST_HEAD(&wl->devlist); + + ssb_set_devtypedata(dev, wl); + b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); + err = 0; + out: + return err; +} + +static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id) +{ + struct b43_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core. Must setup common struct b43_wl */ + first = 1; + err = b43_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43_WARN_ON(!wl); + } + err = b43_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + } + + out: + return err; + + err_one_core_detach: + b43_one_core_detach(dev); + err_wireless_exit: + if (first) + b43_wireless_exit(dev, wl); + return err; +} + +static void b43_remove(struct ssb_device *dev) +{ + struct b43_wl *wl = ssb_get_devtypedata(dev); + struct b43_wldev *wldev = ssb_get_drvdata(dev); + + B43_WARN_ON(!wl); + if (wl->current_dev == wldev) + ieee80211_unregister_hw(wl->hw); + + b43_one_core_detach(dev); + + if (list_empty(&wl->devlist)) { + /* Last core on the chip unregistered. + * We can destroy common struct b43_wl. + */ + b43_wireless_exit(dev, wl); + } +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43_controller_restart(struct b43_wldev *dev, const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. 
*/ + if (b43_status(dev) < B43_STAT_INITIALIZED) + return; + b43info(dev->wl, "Controller RESET (%s) ...\n", reason); + queue_work(dev->wl->hw->workqueue, &dev->restart_work); +} + +#ifdef CONFIG_PM + +static int b43_suspend(struct ssb_device *dev, pm_message_t state) +{ + struct b43_wldev *wldev = ssb_get_drvdata(dev); + struct b43_wl *wl = wldev->wl; + + b43dbg(wl, "Suspending...\n"); + + mutex_lock(&wl->mutex); + wldev->suspend_init_status = b43_status(wldev); + if (wldev->suspend_init_status >= B43_STAT_STARTED) + b43_wireless_core_stop(wldev); + if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(wldev); + mutex_unlock(&wl->mutex); + + b43dbg(wl, "Device suspended.\n"); + + return 0; +} + +static int b43_resume(struct ssb_device *dev) +{ + struct b43_wldev *wldev = ssb_get_drvdata(dev); + struct b43_wl *wl = wldev->wl; + int err = 0; + + b43dbg(wl, "Resuming...\n"); + + mutex_lock(&wl->mutex); + if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(wldev); + if (err) { + b43err(wl, "Resume failed at core init\n"); + goto out; + } + } + if (wldev->suspend_init_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(wldev); + if (err) { + b43_wireless_core_exit(wldev); + b43err(wl, "Resume failed at core start\n"); + goto out; + } + } + mutex_unlock(&wl->mutex); + + b43dbg(wl, "Device resumed.\n"); + out: + return err; +} + +#else /* CONFIG_PM */ +# define b43_suspend NULL +# define b43_resume NULL +#endif /* CONFIG_PM */ + +static struct ssb_driver b43_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43_ssb_tbl, + .probe = b43_probe, + .remove = b43_remove, + .suspend = b43_suspend, + .resume = b43_resume, +}; + +static int __init b43_init(void) +{ + int err; + + b43_debugfs_init(); + err = b43_pcmcia_init(); + if (err) + goto err_dfs_exit; + err = ssb_driver_register(&b43_ssb_driver); + if (err) + goto err_pcmcia_exit; + + return err; + +err_pcmcia_exit: + b43_pcmcia_exit(); +err_dfs_exit: + b43_debugfs_exit(); + return err; +} + +static void __exit b43_exit(void) +{ + ssb_driver_unregister(&b43_ssb_driver); + b43_pcmcia_exit(); + b43_debugfs_exit(); +} + +module_init(b43_init) +module_exit(b43_exit) diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h new file mode 100644 index 000000000000..284d17da17d1 --- /dev/null +++ b/drivers/net/wireless/b43/main.h @@ -0,0 +1,125 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mb@bu3sch.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#ifndef B43_MAIN_H_ +#define B43_MAIN_H_ + +#include "b43.h" + +#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] +#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) +/* Magic helper macro to pad structures. Ignore those above. It's magic. */ +#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes)) + +/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ +static inline u8 b43_freq_to_channel_a(int freq) +{ + return ((freq - 5000) / 5); +} +static inline u8 b43_freq_to_channel_bg(int freq) +{ + u8 channel; + + if (freq == 2484) + channel = 14; + else + channel = (freq - 2407) / 5; + + return channel; +} +static inline u8 b43_freq_to_channel(struct b43_wldev *dev, int freq) +{ + if (dev->phy.type == B43_PHYTYPE_A) + return b43_freq_to_channel_a(freq); + return b43_freq_to_channel_bg(freq); +} + +/* Lightweight function to convert a channel number to a frequency (in Mhz). */ +static inline int b43_channel_to_freq_a(u8 channel) +{ + return (5000 + (5 * channel)); +} +static inline int b43_channel_to_freq_bg(u8 channel) +{ + int freq; + + if (channel == 14) + freq = 2484; + else + freq = 2407 + (5 * channel); + + return freq; +} +static inline int b43_channel_to_freq(struct b43_wldev *dev, u8 channel) +{ + if (dev->phy.type == B43_PHYTYPE_A) + return b43_channel_to_freq_a(channel); + return b43_channel_to_freq_bg(channel); +} + +static inline int b43_is_cck_rate(int rate) +{ + return (rate == B43_CCK_RATE_1MB || + rate == B43_CCK_RATE_2MB || + rate == B43_CCK_RATE_5MB || rate == B43_CCK_RATE_11MB); +} + +static inline int b43_is_ofdm_rate(int rate) +{ + return !b43_is_cck_rate(rate); +} + +void b43_tsf_read(struct b43_wldev *dev, u64 * tsf); +void b43_tsf_write(struct b43_wldev *dev, u64 tsf); + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset); +u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset); +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value); +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value); + +u32 b43_hf_read(struct b43_wldev *dev); +void b43_hf_write(struct b43_wldev *dev, u32 value); + +void b43_dummy_transmission(struct b43_wldev *dev); + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags); + +void b43_mac_suspend(struct b43_wldev *dev); +void b43_mac_enable(struct b43_wldev *dev); + +void b43_controller_restart(struct b43_wldev *dev, const char *reason); + +#define B43_PS_ENABLED (1 << 0) /* Force enable hardware power saving */ +#define B43_PS_DISABLED (1 << 1) /* Force disable hardware power saving */ +#define B43_PS_AWAKE (1 << 2) /* Force device awake */ +#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */ +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags); + +#endif /* B43_MAIN_H_ */ diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c new file mode 100644 index 000000000000..b242a9a90dd2 --- /dev/null +++ b/drivers/net/wireless/b43/pcmcia.c @@ -0,0 +1,160 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2007 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
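The channel helpers in main.h above use the standard 2.4 GHz spacing: channel 14 sits at 2484 MHz and every other channel at 2407 + 5 * channel MHz. A quick round-trip check as a stand-alone program:

#include <stdio.h>

static int channel_to_freq_bg(unsigned int channel)
{
        return (channel == 14) ? 2484 : 2407 + 5 * (int)channel;
}

static unsigned int freq_to_channel_bg(int freq)
{
        return (freq == 2484) ? 14 : (unsigned int)((freq - 2407) / 5);
}

int main(void)
{
        unsigned int ch;

        for (ch = 1; ch <= 14; ch++) {
                int freq = channel_to_freq_bg(ch);

                printf("channel %2u <-> %d MHz (back to %u)\n",
                       ch, freq, freq_to_channel_bg(freq));
        }
        return 0;
}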
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "pcmcia.h" + +#include <linux/ssb/ssb.h> + +#include <pcmcia/cs_types.h> +#include <pcmcia/cs.h> +#include <pcmcia/cistpl.h> +#include <pcmcia/ciscode.h> +#include <pcmcia/ds.h> +#include <pcmcia/cisreg.h> + + +static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = { + PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), + PCMCIA_DEVICE_NULL, +}; + +MODULE_DEVICE_TABLE(pcmcia, b43_pcmcia_tbl); + +#ifdef CONFIG_PM +static int b43_pcmcia_suspend(struct pcmcia_device *dev) +{ + //TODO + return 0; +} + +static int b43_pcmcia_resume(struct pcmcia_device *dev) +{ + //TODO + return 0; +} +#else /* CONFIG_PM */ +# define b43_pcmcia_suspend NULL +# define b43_pcmcia_resume NULL +#endif /* CONFIG_PM */ + +static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb; + win_req_t win; + memreq_t mem; + tuple_t tuple; + cisparse_t parse; + int err = -ENOMEM; + int res; + unsigned char buf[64]; + + ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); + if (!ssb) + goto out; + + err = -ENODEV; + tuple.DesiredTuple = CISTPL_CONFIG; + tuple.Attributes = 0; + tuple.TupleData = buf; + tuple.TupleDataMax = sizeof(buf); + tuple.TupleOffset = 0; + + res = pcmcia_get_first_tuple(dev, &tuple); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + res = pcmcia_get_tuple_data(dev, &tuple); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + res = pcmcia_parse_tuple(dev, &tuple, &parse); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + + dev->conf.ConfigBase = parse.config.base; + dev->conf.Present = parse.config.rmask[0]; + + dev->io.BasePort2 = 0; + dev->io.NumPorts2 = 0; + dev->io.Attributes2 = 0; + + win.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; + win.Base = 0; + win.Size = SSB_CORE_SIZE; + win.AccessSpeed = 1000; + res = pcmcia_request_window(&dev, &win, &dev->win); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + + mem.CardOffset = 0; + mem.Page = 0; + res = pcmcia_map_mem_page(dev->win, &mem); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + + res = pcmcia_request_configuration(dev, &dev->conf); + if (res != CS_SUCCESS) + goto err_disable; + + err = ssb_bus_pcmciabus_register(ssb, dev, win.Base); + dev->priv = ssb; + + out: + return err; + err_disable: + pcmcia_disable_device(dev); + err_kfree_ssb: + kfree(ssb); + return err; +} + +static void __devexit b43_pcmcia_remove(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb = dev->priv; + + ssb_bus_unregister(ssb); + pcmcia_release_window(dev->win); + pcmcia_disable_device(dev); + kfree(ssb); + dev->priv = NULL; +} + +static struct pcmcia_driver b43_pcmcia_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "b43-pcmcia", + }, + .id_table = b43_pcmcia_tbl, + .probe = b43_pcmcia_probe, + .remove = b43_pcmcia_remove, + .suspend = b43_pcmcia_suspend, + .resume = b43_pcmcia_resume, +}; + +int b43_pcmcia_init(void) +{ + return pcmcia_register_driver(&b43_pcmcia_driver); +} + +void b43_pcmcia_exit(void) +{ + pcmcia_unregister_driver(&b43_pcmcia_driver); +} diff --git a/drivers/net/wireless/b43/pcmcia.h b/drivers/net/wireless/b43/pcmcia.h new file mode 
100644 index 000000000000..85f120a67cbe --- /dev/null +++ b/drivers/net/wireless/b43/pcmcia.h @@ -0,0 +1,20 @@ +#ifndef B43_PCMCIA_H_ +#define B43_PCMCIA_H_ + +#ifdef CONFIG_B43_PCMCIA + +int b43_pcmcia_init(void); +void b43_pcmcia_exit(void); + +#else /* CONFIG_B43_PCMCIA */ + +static inline int b43_pcmcia_init(void) +{ + return 0; +} +static inline void b43_pcmcia_exit(void) +{ +} + +#endif /* CONFIG_B43_PCMCIA */ +#endif /* B43_PCMCIA_H_ */ diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c new file mode 100644 index 000000000000..5f7ffa0a76c0 --- /dev/null +++ b/drivers/net/wireless/b43/phy.c @@ -0,0 +1,4380 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Copyright (c) 2005, 2006 Stefano Brivio <st3@riseup.net> + Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> + Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> + Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <linux/delay.h> +#include <linux/types.h> + +#include "b43.h" +#include "phy.h" +#include "main.h" +#include "tables.h" +#include "lo.h" + +static const s8 b43_tssi2dbm_b_table[] = { + 0x4D, 0x4C, 0x4B, 0x4A, + 0x4A, 0x49, 0x48, 0x47, + 0x47, 0x46, 0x45, 0x45, + 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3F, 0x3E, + 0x3D, 0x3C, 0x3B, 0x3A, + 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x32, 0x31, + 0x30, 0x2F, 0x2D, 0x2C, + 0x2B, 0x29, 0x28, 0x26, + 0x25, 0x23, 0x21, 0x1F, + 0x1D, 0x1A, 0x17, 0x14, + 0x10, 0x0C, 0x06, 0x00, + -7, -7, -7, -7, + -7, -7, -7, -7, + -7, -7, -7, -7, +}; + +static const s8 b43_tssi2dbm_g_table[] = { + 77, 77, 77, 76, + 76, 76, 75, 75, + 74, 74, 73, 73, + 73, 72, 72, 71, + 71, 70, 70, 69, + 68, 68, 67, 67, + 66, 65, 65, 64, + 63, 63, 62, 61, + 60, 59, 58, 57, + 56, 55, 54, 53, + 52, 50, 49, 47, + 45, 43, 40, 37, + 33, 28, 22, 14, + 5, -7, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, +}; + +const u8 b43_radio_channel_codes_bg[] = { + 12, 17, 22, 27, + 32, 37, 42, 47, + 52, 57, 62, 67, + 72, 84, +}; + +static void b43_phy_initg(struct b43_wldev *dev); + +/* Reverse the bits of a 4bit value. 
+ * Example: 1101 is flipped 1011 + */ +static u16 flip_4bit(u16 value) +{ + u16 flipped = 0x0000; + + B43_WARN_ON(value & ~0x000F); + + flipped |= (value & 0x0001) << 3; + flipped |= (value & 0x0002) << 1; + flipped |= (value & 0x0004) >> 1; + flipped |= (value & 0x0008) >> 3; + + return flipped; +} + +static void generate_rfatt_list(struct b43_wldev *dev, + struct b43_rfatt_list *list) +{ + struct b43_phy *phy = &dev->phy; + + /* APHY.rev < 5 || GPHY.rev < 6 */ + static const struct b43_rfatt rfatt_0[] = { + {.att = 3,.with_padmix = 0,}, + {.att = 1,.with_padmix = 0,}, + {.att = 5,.with_padmix = 0,}, + {.att = 7,.with_padmix = 0,}, + {.att = 9,.with_padmix = 0,}, + {.att = 2,.with_padmix = 0,}, + {.att = 0,.with_padmix = 0,}, + {.att = 4,.with_padmix = 0,}, + {.att = 6,.with_padmix = 0,}, + {.att = 8,.with_padmix = 0,}, + {.att = 1,.with_padmix = 1,}, + {.att = 2,.with_padmix = 1,}, + {.att = 3,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + }; + /* Radio.rev == 8 && Radio.version == 0x2050 */ + static const struct b43_rfatt rfatt_1[] = { + {.att = 2,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + {.att = 6,.with_padmix = 1,}, + {.att = 8,.with_padmix = 1,}, + {.att = 10,.with_padmix = 1,}, + {.att = 12,.with_padmix = 1,}, + {.att = 14,.with_padmix = 1,}, + }; + /* Otherwise */ + static const struct b43_rfatt rfatt_2[] = { + {.att = 0,.with_padmix = 1,}, + {.att = 2,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + {.att = 6,.with_padmix = 1,}, + {.att = 8,.with_padmix = 1,}, + {.att = 9,.with_padmix = 1,}, + {.att = 9,.with_padmix = 1,}, + }; + + if ((phy->type == B43_PHYTYPE_A && phy->rev < 5) || + (phy->type == B43_PHYTYPE_G && phy->rev < 6)) { + /* Software pctl */ + list->list = rfatt_0; + list->len = ARRAY_SIZE(rfatt_0); + list->min_val = 0; + list->max_val = 9; + return; + } + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + /* Hardware pctl */ + list->list = rfatt_1; + list->len = ARRAY_SIZE(rfatt_1); + list->min_val = 2; + list->max_val = 14; + return; + } + /* Hardware pctl */ + list->list = rfatt_2; + list->len = ARRAY_SIZE(rfatt_2); + list->min_val = 0; + list->max_val = 9; +} + +static void generate_bbatt_list(struct b43_wldev *dev, + struct b43_bbatt_list *list) +{ + static const struct b43_bbatt bbatt_0[] = { + {.att = 0,}, + {.att = 1,}, + {.att = 2,}, + {.att = 3,}, + {.att = 4,}, + {.att = 5,}, + {.att = 6,}, + {.att = 7,}, + {.att = 8,}, + }; + + list->list = bbatt_0; + list->len = ARRAY_SIZE(bbatt_0); + list->min_val = 0; + list->max_val = 8; +} + +bool b43_has_hardware_pctl(struct b43_phy *phy) +{ + if (!phy->hardware_power_control) + return 0; + switch (phy->type) { + case B43_PHYTYPE_A: + if (phy->rev >= 5) + return 1; + break; + case B43_PHYTYPE_G: + if (phy->rev >= 6) + return 1; + break; + default: + B43_WARN_ON(1); + } + return 0; +} + +static void b43_shm_clear_tssi(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + switch (phy->type) { + case B43_PHYTYPE_A: + b43_shm_write16(dev, B43_SHM_SHARED, 0x0068, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x006a, 0x7F7F); + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F); + break; + } +} + +void b43_raw_phy_lock(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + B43_WARN_ON(!irqs_disabled()); + + /* We had a check for MACCTL==0 here, but I 
think that doesn't + * make sense, as MACCTL is never 0 when this is called. + * --mb */ + B43_WARN_ON(b43_read32(dev, B43_MMIO_MACCTL) == 0); + + if (dev->dev->id.revision < 3) { + b43_mac_suspend(dev); + spin_lock(&phy->lock); + } else { + if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + } + phy->locked = 1; +} + +void b43_raw_phy_unlock(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + B43_WARN_ON(!irqs_disabled()); + if (dev->dev->id.revision < 3) { + if (phy->locked) { + spin_unlock(&phy->lock); + b43_mac_enable(dev); + } + } else { + if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43_power_saving_ctl_bits(dev, 0); + } + phy->locked = 0; +} + +/* Different PHYs require different register routing flags. + * This adjusts (and does sanity checks on) the routing flags. + */ +static inline u16 adjust_phyreg_for_phytype(struct b43_phy *phy, + u16 offset, struct b43_wldev *dev) +{ + if (phy->type == B43_PHYTYPE_A) { + /* OFDM registers are base-registers for the A-PHY. */ + offset &= ~B43_PHYROUTE_OFDM_GPHY; + } + if (offset & B43_PHYROUTE_EXT_GPHY) { + /* Ext-G registers are only available on G-PHYs */ + if (phy->type != B43_PHYTYPE_G) { + b43dbg(dev->wl, "EXT-G PHY access at " + "0x%04X on %u type PHY\n", offset, phy->type); + } + } + + return offset; +} + +u16 b43_phy_read(struct b43_wldev * dev, u16 offset) +{ + struct b43_phy *phy = &dev->phy; + + offset = adjust_phyreg_for_phytype(phy, offset, dev); + b43_write16(dev, B43_MMIO_PHY_CONTROL, offset); + return b43_read16(dev, B43_MMIO_PHY_DATA); +} + +void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val) +{ + struct b43_phy *phy = &dev->phy; + + offset = adjust_phyreg_for_phytype(phy, offset, dev); + b43_write16(dev, B43_MMIO_PHY_CONTROL, offset); + mmiowb(); + b43_write16(dev, B43_MMIO_PHY_DATA, val); +} + +static void b43_radio_set_txpower_a(struct b43_wldev *dev, u16 txpower); + +/* Adjust the transmission power output (G-PHY) */ +void b43_set_txpower_g(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt, u8 tx_control) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 bb, rf; + u16 tx_bias, tx_magn; + + bb = bbatt->att; + rf = rfatt->att; + tx_bias = lo->tx_bias; + tx_magn = lo->tx_magn; + if (unlikely(tx_bias == 0xFF)) + tx_bias = 0; + + /* Save the values for later */ + phy->tx_control = tx_control; + memcpy(&phy->rfatt, rfatt, sizeof(*rfatt)); + memcpy(&phy->bbatt, bbatt, sizeof(*bbatt)); + + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), " + "rfatt(%u), tx_control(0x%02X), " + "tx_bias(0x%02X), tx_magn(0x%02X)\n", + bb, rf, tx_control, tx_bias, tx_magn); + } + + b43_phy_set_baseband_attenuation(dev, bb); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, + (rf & 0x000F) | (tx_control & 0x0070)); + } else { + b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | (rf & 0x000F)); + b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) + & ~0x0070) | (tx_control & + 0x0070)); + } + if (has_tx_magnification(phy)) { + b43_radio_write16(dev, 0x52, tx_magn | tx_bias); + } else { + b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) + & 0xFFF0) | (tx_bias & 0x000F)); + } + if (phy->type == B43_PHYTYPE_G) + b43_lo_g_adjust(dev); +} + +static void default_baseband_attenuation(struct b43_wldev *dev, + struct 
b43_bbatt *bb) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) + bb->att = 0; + else + bb->att = 2; +} + +static void default_radio_attenuation(struct b43_wldev *dev, + struct b43_rfatt *rf) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + rf->with_padmix = 0; + + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM && + bus->boardinfo.type == SSB_BOARD_BCM4309G) { + if (bus->boardinfo.rev < 0x43) { + rf->att = 2; + return; + } else if (bus->boardinfo.rev < 0x51) { + rf->att = 3; + return; + } + } + + if (phy->type == B43_PHYTYPE_A) { + rf->att = 0x60; + return; + } + + switch (phy->radio_ver) { + case 0x2053: + switch (phy->radio_rev) { + case 1: + rf->att = 6; + return; + } + break; + case 0x2050: + switch (phy->radio_rev) { + case 0: + rf->att = 5; + return; + case 1: + if (phy->type == B43_PHYTYPE_G) { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 3; + else if (bus->boardinfo.vendor == + SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == + SSB_BOARD_BU4306) + rf->att = 3; + else + rf->att = 1; + } else { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 7; + else + rf->att = 6; + } + return; + case 2: + if (phy->type == B43_PHYTYPE_G) { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 3; + else if (bus->boardinfo.vendor == + SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == + SSB_BOARD_BU4306) + rf->att = 5; + else if (bus->chip_id == 0x4320) + rf->att = 4; + else + rf->att = 3; + } else + rf->att = 6; + return; + case 3: + rf->att = 5; + return; + case 4: + case 5: + rf->att = 1; + return; + case 6: + case 7: + rf->att = 5; + return; + case 8: + rf->att = 0xA; + rf->with_padmix = 1; + return; + case 9: + default: + rf->att = 5; + return; + } + } + rf->att = 5; +} + +static u16 default_tx_control(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->radio_ver != 0x2050) + return 0; + if (phy->radio_rev == 1) + return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX; + if (phy->radio_rev < 6) + return B43_TXCTL_PA2DB; + if (phy->radio_rev == 8) + return B43_TXCTL_TXMIX; + return 0; +} + +/* This func is called "PHY calibrate" in the specs... 
*/
+void b43_phy_early_init(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+    struct b43_txpower_lo_control *lo = phy->lo_control;
+
+    default_baseband_attenuation(dev, &phy->bbatt);
+    default_radio_attenuation(dev, &phy->rfatt);
+    phy->tx_control = (default_tx_control(dev) << 4);
+
+    /* Commit previous writes */
+    b43_read32(dev, B43_MMIO_MACCTL);
+
+    if (phy->type == B43_PHYTYPE_B || phy->type == B43_PHYTYPE_G) {
+        generate_rfatt_list(dev, &lo->rfatt_list);
+        generate_bbatt_list(dev, &lo->bbatt_list);
+    }
+    if (phy->type == B43_PHYTYPE_G && phy->rev == 1) {
+        /* Workaround: Temporarily disable gmode through the early init
+         * phase, as the gmode stuff is not needed for phy rev 1 */
+        phy->gmode = 0;
+        b43_wireless_core_reset(dev, 0);
+        b43_phy_initg(dev);
+        phy->gmode = 1;
+        b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
+    }
+}
+
+/* GPHY_TSSI_Power_Lookup_Table_Init */
+static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+    int i;
+    u16 value;
+
+    for (i = 0; i < 32; i++)
+        b43_ofdmtab_write16(dev, 0x3C20, i, phy->tssi2dbm[i]);
+    for (i = 32; i < 64; i++)
+        b43_ofdmtab_write16(dev, 0x3C00, i - 32, phy->tssi2dbm[i]);
+    for (i = 0; i < 64; i += 2) {
+        value = (u16) phy->tssi2dbm[i];
+        value |= ((u16) phy->tssi2dbm[i + 1]) << 8;
+        b43_phy_write(dev, 0x380 + (i / 2), value);
+    }
+}
+
+/* GPHY_Gain_Lookup_Table_Init */
+static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+    struct b43_txpower_lo_control *lo = phy->lo_control;
+    u16 nr_written = 0;
+    u16 tmp;
+    u8 rf, bb;
+
+    if (!lo->lo_measured) {
+        b43_phy_write(dev, 0x3FF, 0);
+        return;
+    }
+
+    for (rf = 0; rf < lo->rfatt_list.len; rf++) {
+        for (bb = 0; bb < lo->bbatt_list.len; bb++) {
+            if (nr_written >= 0x40)
+                return;
+            tmp = lo->bbatt_list.list[bb].att;
+            tmp <<= 8;
+            if (phy->radio_rev == 8)
+                tmp |= 0x50;
+            else
+                tmp |= 0x40;
+            tmp |= lo->rfatt_list.list[rf].att;
+            b43_phy_write(dev, 0x3C0 + nr_written, tmp);
+            nr_written++;
+        }
+    }
+}
+
+/* GPHY_DC_Lookup_Table */
+void b43_gphy_dc_lt_init(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+    struct b43_txpower_lo_control *lo = phy->lo_control;
+    struct b43_loctl *loctl0;
+    struct b43_loctl *loctl1;
+    int i;
+    int rf_offset, bb_offset;
+    u16 tmp;
+
+    for (i = 0; i < lo->rfatt_list.len + lo->bbatt_list.len; i += 2) {
+        rf_offset = i / lo->rfatt_list.len;
+        bb_offset = i % lo->rfatt_list.len;
+
+        loctl0 = b43_get_lo_g_ctl(dev, &lo->rfatt_list.list[rf_offset],
+                                  &lo->bbatt_list.list[bb_offset]);
+        if (i + 1 < lo->rfatt_list.len * lo->bbatt_list.len) {
+            rf_offset = (i + 1) / lo->rfatt_list.len;
+            bb_offset = (i + 1) % lo->rfatt_list.len;
+
+            loctl1 = b43_get_lo_g_ctl(dev,
+                                      &lo->rfatt_list.list[rf_offset],
+                                      &lo->bbatt_list.list[bb_offset]);
+        } else
+            loctl1 = loctl0;
+
+        tmp = ((u16) loctl0->q & 0xF);
+        tmp |= ((u16) loctl0->i & 0xF) << 4;
+        tmp |= ((u16) loctl1->q & 0xF) << 8;
+        tmp |= ((u16) loctl1->i & 0xF) << 12;    //FIXME?
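+        /* Each 16-bit word of the DC lookup table packs two LO control
+         * entries: loctl0 occupies the low byte (Q in bits 0-3, I in
+         * bits 4-7) and loctl1 the high byte (Q in bits 8-11, I in
+         * bits 12-15), so the single register write below covers two
+         * table indices at once. */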
+        b43_phy_write(dev, 0x3A0 + (i / 2), tmp);
+    }
+}
+
+static void hardware_pctl_init_aphy(struct b43_wldev *dev)
+{
+    //TODO
+}
+
+static void hardware_pctl_init_gphy(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+
+    b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
+                  | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
+    b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
+                  | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
+    b43_gphy_tssi_power_lt_init(dev);
+    b43_gphy_gain_lt_init(dev);
+    b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
+    b43_phy_write(dev, 0x0014, 0x0000);
+
+    B43_WARN_ON(phy->rev < 6);
+    b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
+                  | 0x0800);
+    b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
+                  & 0xFEFF);
+    b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
+                  & 0xFFBF);
+
+    b43_gphy_dc_lt_init(dev);
+}
+
+/* HardwarePowerControl init for A and G PHY */
+static void b43_hardware_pctl_init(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+
+    if (!b43_has_hardware_pctl(phy)) {
+        /* No hardware power control */
+        b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
+        return;
+    }
+    /* Init the hwpctl related hardware */
+    switch (phy->type) {
+    case B43_PHYTYPE_A:
+        hardware_pctl_init_aphy(dev);
+        break;
+    case B43_PHYTYPE_G:
+        hardware_pctl_init_gphy(dev);
+        break;
+    default:
+        B43_WARN_ON(1);
+    }
+    /* Enable hardware pctl in firmware. */
+    b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
+}
+
+static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+
+    if (!b43_has_hardware_pctl(phy)) {
+        b43_phy_write(dev, 0x047A, 0xC111);
+        return;
+    }
+
+    b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
+    b43_phy_write(dev, 0x002F, 0x0202);
+    b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
+    b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
+    if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
+        b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
+                      & 0xFF0F) | 0x0010);
+        b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
+                      | 0x8000);
+        b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
+                      & 0xFFC0) | 0x0010);
+        b43_phy_write(dev, 0x002E, 0xC07F);
+        b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
+                      | 0x0400);
+    } else {
+        b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
+                      | 0x0200);
+        b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
+                      | 0x0400);
+        b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
+                      & 0x7FFF);
+        b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
+                      & 0xFFFE);
+        b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
+                      & 0xFFC0) | 0x0010);
+        b43_phy_write(dev, 0x002E, 0xC07F);
+        b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
+                      & 0xFF0F) | 0x0010);
+    }
+}
+
+/* Initialize B/G PHY power control
+ * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
+ */
+static void b43_phy_init_pctl(struct b43_wldev *dev)
+{
+    struct ssb_bus *bus = dev->dev->bus;
+    struct b43_phy *phy = &dev->phy;
+    struct b43_rfatt old_rfatt;
+    struct b43_bbatt old_bbatt;
+    u8 old_tx_control = 0;
+
+    if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
+        (bus->boardinfo.type == SSB_BOARD_BU4306))
+        return;
+
+    b43_phy_write(dev, 0x0028, 0x8018);
+
+    /* This does something with the Analog...
*/ + b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0) + & 0xFFDF); + + if (phy->type == B43_PHYTYPE_G && !phy->gmode) + return; + b43_hardware_pctl_early_init(dev); + if (phy->cur_idle_tssi == 0) { + if (phy->radio_ver == 0x2050 && phy->analog == 0) { + b43_radio_write16(dev, 0x0076, + (b43_radio_read16(dev, 0x0076) + & 0x00F7) | 0x0084); + } else { + struct b43_rfatt rfatt; + struct b43_bbatt bbatt; + + memcpy(&old_rfatt, &phy->rfatt, sizeof(old_rfatt)); + memcpy(&old_bbatt, &phy->bbatt, sizeof(old_bbatt)); + old_tx_control = phy->tx_control; + + bbatt.att = 11; + if (phy->radio_rev == 8) { + rfatt.att = 15; + rfatt.with_padmix = 1; + } else { + rfatt.att = 9; + rfatt.with_padmix = 0; + } + b43_set_txpower_g(dev, &bbatt, &rfatt, 0); + } + b43_dummy_transmission(dev); + phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI); + if (B43_DEBUG) { + /* Current-Idle-TSSI sanity check. */ + if (abs(phy->cur_idle_tssi - phy->tgt_idle_tssi) >= 20) { + b43dbg(dev->wl, + "!WARNING! Idle-TSSI phy->cur_idle_tssi " + "measuring failed. (cur=%d, tgt=%d). Disabling TX power " + "adjustment.\n", phy->cur_idle_tssi, + phy->tgt_idle_tssi); + phy->cur_idle_tssi = 0; + } + } + if (phy->radio_ver == 0x2050 && phy->analog == 0) { + b43_radio_write16(dev, 0x0076, + b43_radio_read16(dev, 0x0076) + & 0xFF7B); + } else { + b43_set_txpower_g(dev, &old_bbatt, + &old_rfatt, old_tx_control); + } + } + b43_hardware_pctl_init(dev); + b43_shm_clear_tssi(dev); +} + +static void b43_phy_agcsetup(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset = 0x0000; + + if (phy->rev == 1) + offset = 0x4C00; + + b43_ofdmtab_write16(dev, offset, 0, 0x00FE); + b43_ofdmtab_write16(dev, offset, 1, 0x000D); + b43_ofdmtab_write16(dev, offset, 2, 0x0013); + b43_ofdmtab_write16(dev, offset, 3, 0x0019); + + if (phy->rev == 1) { + b43_ofdmtab_write16(dev, 0x1800, 0, 0x2710); + b43_ofdmtab_write16(dev, 0x1801, 0, 0x9B83); + b43_ofdmtab_write16(dev, 0x1802, 0, 0x9B83); + b43_ofdmtab_write16(dev, 0x1803, 0, 0x0F8D); + b43_phy_write(dev, 0x0455, 0x0004); + } + + b43_phy_write(dev, 0x04A5, (b43_phy_read(dev, 0x04A5) + & 0x00FF) | 0x5700); + b43_phy_write(dev, 0x041A, (b43_phy_read(dev, 0x041A) + & 0xFF80) | 0x000F); + b43_phy_write(dev, 0x041A, (b43_phy_read(dev, 0x041A) + & 0xC07F) | 0x2B80); + b43_phy_write(dev, 0x048C, (b43_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0300); + + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) + | 0x0008); + + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xFFF0) | 0x0008); + b43_phy_write(dev, 0x04A1, (b43_phy_read(dev, 0x04A1) + & 0xF0FF) | 0x0600); + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0700); + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0100); + + if (phy->rev == 1) { + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x0007); + } + + b43_phy_write(dev, 0x0488, (b43_phy_read(dev, 0x0488) + & 0xFF00) | 0x001C); + b43_phy_write(dev, 0x0488, (b43_phy_read(dev, 0x0488) + & 0xC0FF) | 0x0200); + b43_phy_write(dev, 0x0496, (b43_phy_read(dev, 0x0496) + & 0xFF00) | 0x001C); + b43_phy_write(dev, 0x0489, (b43_phy_read(dev, 0x0489) + & 0xFF00) | 0x0020); + b43_phy_write(dev, 0x0489, (b43_phy_read(dev, 0x0489) + & 0xC0FF) | 0x0200); + b43_phy_write(dev, 0x0482, (b43_phy_read(dev, 0x0482) + & 0xFF00) | 0x002E); + b43_phy_write(dev, 0x0496, (b43_phy_read(dev, 0x0496) + & 0x00FF) | 0x1A00); + b43_phy_write(dev, 0x0481, (b43_phy_read(dev, 0x0481) + & 0xFF00) | 0x0028); + b43_phy_write(dev, 0x0481, 
(b43_phy_read(dev, 0x0481) + & 0x00FF) | 0x2C00); + + if (phy->rev == 1) { + b43_phy_write(dev, 0x0430, 0x092B); + b43_phy_write(dev, 0x041B, (b43_phy_read(dev, 0x041B) + & 0xFFE1) | 0x0002); + } else { + b43_phy_write(dev, 0x041B, b43_phy_read(dev, 0x041B) + & 0xFFE1); + b43_phy_write(dev, 0x041F, 0x287A); + b43_phy_write(dev, 0x0420, (b43_phy_read(dev, 0x0420) + & 0xFFF0) | 0x0004); + } + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0422, 0x287A); + b43_phy_write(dev, 0x0420, (b43_phy_read(dev, 0x0420) + & 0x0FFF) | 0x3000); + } + + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0x8080) | 0x7874); + b43_phy_write(dev, 0x048E, 0x1C00); + + offset = 0x0800; + if (phy->rev == 1) { + offset = 0x5400; + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0600); + b43_phy_write(dev, 0x048B, 0x005E); + b43_phy_write(dev, 0x048C, (b43_phy_read(dev, 0x048C) + & 0xFF00) | 0x001E); + b43_phy_write(dev, 0x048D, 0x0002); + } + b43_ofdmtab_write16(dev, offset, 0, 0x00); + b43_ofdmtab_write16(dev, offset, 1, 0x07); + b43_ofdmtab_write16(dev, offset, 2, 0x10); + b43_ofdmtab_write16(dev, offset, 3, 0x1C); + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0426, b43_phy_read(dev, 0x0426) + & 0xFFFC); + b43_phy_write(dev, 0x0426, b43_phy_read(dev, 0x0426) + & 0xEFFF); + } +} + +static void b43_phy_setupg(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + u16 i; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + if (phy->rev == 1) { + b43_phy_write(dev, 0x0406, 0x4F19); + b43_phy_write(dev, B43_PHY_G_CRS, + (b43_phy_read(dev, B43_PHY_G_CRS) & 0xFC3F) | + 0x0340); + b43_phy_write(dev, 0x042C, 0x005A); + b43_phy_write(dev, 0x0427, 0x001A); + + for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5800, i, + b43_tab_finefreqg[i]); + for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noiseg1[i]); + for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) + b43_ofdmtab_write16(dev, 0x2000, i, b43_tab_rotor[i]); + } else { + /* nrssi values are signed 6-bit values. Not sure why we write 0x7654 here... 
*/ + b43_nrssi_hw_write(dev, 0xBA98, (s16) 0x7654); + + if (phy->rev == 2) { + b43_phy_write(dev, 0x04C0, 0x1861); + b43_phy_write(dev, 0x04C1, 0x0271); + } else if (phy->rev > 2) { + b43_phy_write(dev, 0x04C0, 0x0098); + b43_phy_write(dev, 0x04C1, 0x0070); + b43_phy_write(dev, 0x04C9, 0x0080); + } + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x800); + + for (i = 0; i < 64; i++) + b43_ofdmtab_write16(dev, 0x4000, i, i); + for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noiseg2[i]); + } + + if (phy->rev <= 2) + for (i = 0; i < B43_TAB_NOISESCALEG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1400, i, + b43_tab_noisescaleg1[i]); + else if ((phy->rev >= 7) && (b43_phy_read(dev, 0x0449) & 0x0200)) + for (i = 0; i < B43_TAB_NOISESCALEG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1400, i, + b43_tab_noisescaleg3[i]); + else + for (i = 0; i < B43_TAB_NOISESCALEG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1400, i, + b43_tab_noisescaleg2[i]); + + if (phy->rev == 2) + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5000, i, + b43_tab_sigmasqr1[i]); + else if ((phy->rev > 2) && (phy->rev <= 8)) + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5000, i, + b43_tab_sigmasqr2[i]); + + if (phy->rev == 1) { + for (i = 0; i < B43_TAB_RETARD_SIZE; i++) + b43_ofdmtab_write32(dev, 0x2400, i, b43_tab_retard[i]); + for (i = 4; i < 20; i++) + b43_ofdmtab_write16(dev, 0x5400, i, 0x0020); + b43_phy_agcsetup(dev); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306) && + (bus->boardinfo.rev == 0x17)) + return; + + b43_ofdmtab_write16(dev, 0x5001, 0, 0x0002); + b43_ofdmtab_write16(dev, 0x5002, 0, 0x0001); + } else { + for (i = 0; i < 0x20; i++) + b43_ofdmtab_write16(dev, 0x1000, i, 0x0820); + b43_phy_agcsetup(dev); + b43_phy_read(dev, 0x0400); /* dummy read */ + b43_phy_write(dev, 0x0403, 0x1000); + b43_ofdmtab_write16(dev, 0x3C02, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x3C03, 0, 0x0014); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306) && + (bus->boardinfo.rev == 0x17)) + return; + + b43_ofdmtab_write16(dev, 0x0401, 0, 0x0002); + b43_ofdmtab_write16(dev, 0x0402, 0, 0x0001); + } +} + +/* Initialize the noisescaletable for APHY */ +static void b43_phy_init_noisescaletbl(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int i; + + for (i = 0; i < 12; i++) { + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x6767); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x2323); + } + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x6700); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x2300); + for (i = 0; i < 11; i++) { + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x6767); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x2323); + } + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x0067); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x0023); +} + +static void b43_phy_setupa(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 i; + + B43_WARN_ON(phy->type != B43_PHYTYPE_A); + switch (phy->rev) { + case 2: + b43_phy_write(dev, 0x008E, 0x3800); + b43_phy_write(dev, 0x0035, 0x03FF); + b43_phy_write(dev, 0x0036, 0x0400); + + b43_ofdmtab_write16(dev, 0x3807, 0, 0x0051); + + b43_phy_write(dev, 0x001C, 0x0FF9); + b43_phy_write(dev, 0x0020, b43_phy_read(dev, 0x0020) & 0xFF0F); + b43_ofdmtab_write16(dev, 0x3C0C, 0, 0x07BF); + b43_radio_write16(dev, 0x0002, 0x07BF); + + 
b43_phy_write(dev, 0x0024, 0x4680); + b43_phy_write(dev, 0x0020, 0x0003); + b43_phy_write(dev, 0x001D, 0x0F40); + b43_phy_write(dev, 0x001F, 0x1C00); + + b43_phy_write(dev, 0x002A, (b43_phy_read(dev, 0x002A) + & 0x00FF) | 0x0400); + b43_phy_write(dev, 0x002B, b43_phy_read(dev, 0x002B) + & 0xFBFF); + b43_phy_write(dev, 0x008E, 0x58C1); + + b43_ofdmtab_write16(dev, 0x0803, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x0804, 0, 0x001F); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x002A); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x0030); + b43_ofdmtab_write16(dev, 0x0807, 0, 0x003A); + + b43_ofdmtab_write16(dev, 0x0000, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 1, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 2, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 3, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 4, 0x0015); + b43_ofdmtab_write16(dev, 0x0000, 5, 0x0015); + b43_ofdmtab_write16(dev, 0x0000, 6, 0x0019); + + b43_ofdmtab_write16(dev, 0x0404, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0405, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0406, 0, 0x0007); + + for (i = 0; i < 16; i++) + b43_ofdmtab_write16(dev, 0x4000, i, (0x8 + i) & 0x000F); + + b43_ofdmtab_write16(dev, 0x3003, 0, 0x1044); + b43_ofdmtab_write16(dev, 0x3004, 0, 0x7201); + b43_ofdmtab_write16(dev, 0x3006, 0, 0x0040); + b43_ofdmtab_write16(dev, 0x3001, 0, + (b43_ofdmtab_read16(dev, 0x3001, 0) & + 0x0010) | 0x0008); + + for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5800, i, + b43_tab_finefreqa[i]); + for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noisea2[i]); + for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) + b43_ofdmtab_write32(dev, 0x2000, i, b43_tab_rotor[i]); + b43_phy_init_noisescaletbl(dev); + for (i = 0; i < B43_TAB_RETARD_SIZE; i++) + b43_ofdmtab_write32(dev, 0x2400, i, b43_tab_retard[i]); + break; + case 3: + for (i = 0; i < 64; i++) + b43_ofdmtab_write16(dev, 0x4000, i, i); + + b43_ofdmtab_write16(dev, 0x3807, 0, 0x0051); + + b43_phy_write(dev, 0x001C, 0x0FF9); + b43_phy_write(dev, 0x0020, b43_phy_read(dev, 0x0020) & 0xFF0F); + b43_radio_write16(dev, 0x0002, 0x07BF); + + b43_phy_write(dev, 0x0024, 0x4680); + b43_phy_write(dev, 0x0020, 0x0003); + b43_phy_write(dev, 0x001D, 0x0F40); + b43_phy_write(dev, 0x001F, 0x1C00); + b43_phy_write(dev, 0x002A, (b43_phy_read(dev, 0x002A) + & 0x00FF) | 0x0400); + + b43_ofdmtab_write16(dev, 0x3000, 1, + (b43_ofdmtab_read16(dev, 0x3000, 1) + & 0x0010) | 0x0008); + for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) { + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noisea3[i]); + } + b43_phy_init_noisescaletbl(dev); + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) { + b43_ofdmtab_write16(dev, 0x5000, i, + b43_tab_sigmasqr1[i]); + } + + b43_phy_write(dev, 0x0003, 0x1808); + + b43_ofdmtab_write16(dev, 0x0803, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x0804, 0, 0x001F); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x002A); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x0030); + b43_ofdmtab_write16(dev, 0x0807, 0, 0x003A); + + b43_ofdmtab_write16(dev, 0x0000, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0001, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0002, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0003, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0004, 0, 0x0015); + b43_ofdmtab_write16(dev, 0x0005, 0, 0x0015); + b43_ofdmtab_write16(dev, 0x0006, 0, 0x0019); + + b43_ofdmtab_write16(dev, 0x0404, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0405, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0406, 0, 0x0007); + + b43_ofdmtab_write16(dev, 0x3C02, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x3C03, 0, 
0x0014); + break; + default: + B43_WARN_ON(1); + } +} + +/* Initialize APHY. This is also called for the GPHY in some cases. */ +static void b43_phy_inita(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + u16 tval; + + might_sleep(); + + if (phy->type == B43_PHYTYPE_A) { + b43_phy_setupa(dev); + } else { + b43_phy_setupg(dev); + if (phy->gmode && + (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_PACTRL)) + b43_phy_write(dev, 0x046E, 0x03CF); + return; + } + + b43_phy_write(dev, B43_PHY_A_CRS, + (b43_phy_read(dev, B43_PHY_A_CRS) & 0xF83C) | 0x0340); + b43_phy_write(dev, 0x0034, 0x0001); + + //TODO: RSSI AGC + b43_phy_write(dev, B43_PHY_A_CRS, + b43_phy_read(dev, B43_PHY_A_CRS) | (1 << 14)); + b43_radio_init2060(dev); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + ((bus->boardinfo.type == SSB_BOARD_BU4306) || + (bus->boardinfo.type == SSB_BOARD_BU4309))) { + if (phy->lofcal == 0xFFFF) { + //TODO: LOF Cal + b43_radio_set_tx_iq(dev); + } else + b43_radio_write16(dev, 0x001E, phy->lofcal); + } + + b43_phy_write(dev, 0x007A, 0xF111); + + if (phy->cur_idle_tssi == 0) { + b43_radio_write16(dev, 0x0019, 0x0000); + b43_radio_write16(dev, 0x0017, 0x0020); + + tval = b43_ofdmtab_read16(dev, 0x3001, 0); + if (phy->rev == 1) { + b43_ofdmtab_write16(dev, 0x3001, 0, + (b43_ofdmtab_read16(dev, 0x3001, 0) + & 0xFF87) + | 0x0058); + } else { + b43_ofdmtab_write16(dev, 0x3001, 0, + (b43_ofdmtab_read16(dev, 0x3001, 0) + & 0xFFC3) + | 0x002C); + } + b43_dummy_transmission(dev); + phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_A_PCTL); + b43_ofdmtab_write16(dev, 0x3001, 0, tval); + + b43_radio_set_txpower_a(dev, 0x0018); + } + b43_shm_clear_tssi(dev); +} + +static void b43_phy_initb2(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset, val; + + b43_write16(dev, 0x03EC, 0x3F22); + b43_phy_write(dev, 0x0020, 0x301C); + b43_phy_write(dev, 0x0026, 0x0000); + b43_phy_write(dev, 0x0030, 0x00C6); + b43_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + b43_phy_write(dev, 0x03E4, 0x3000); + b43_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + b43_radio_write16(dev, 0x007A, 0x000F); + b43_phy_write(dev, 0x0038, 0x0677); + b43_radio_init2050(dev); + } + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + b43_phy_write(dev, 0x0032, 0x00CC); + b43_phy_write(dev, 0x0035, 0x07C2); + b43_lo_b_measure(dev); + b43_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver != 0x2050) + b43_phy_write(dev, 0x0026, 0xCE00); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1000); + b43_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver != 0x2050) + b43_phy_write(dev, 0x002A, 0x88C2); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + b43_phy_init_pctl(dev); +} + +static void b43_phy_initb4(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset, val; + + b43_write16(dev, 0x03EC, 0x3F22); + b43_phy_write(dev, 0x0020, 0x301C); + b43_phy_write(dev, 0x0026, 0x0000); + b43_phy_write(dev, 0x0030, 
0x00C6); + b43_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + b43_phy_write(dev, 0x03E4, 0x3000); + b43_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + b43_radio_write16(dev, 0x007A, 0x000F); + b43_phy_write(dev, 0x0038, 0x0677); + b43_radio_init2050(dev); + } + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0032, 0x00E0); + b43_phy_write(dev, 0x0035, 0x07C2); + + b43_lo_b_measure(dev); + + b43_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0026, 0xCE00); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1100); + b43_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x002A, 0x88C2); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI) { + b43_calc_nrssi_slope(dev); + b43_calc_nrssi_threshold(dev); + } + b43_phy_init_pctl(dev); +} + +static void b43_phy_initb5(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + u16 offset, value; + u8 old_channel; + + if (phy->analog == 1) { + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) + | 0x0050); + } + if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type != SSB_BOARD_BU4306)) { + value = 0x2120; + for (offset = 0x00A8; offset < 0x00C7; offset++) { + b43_phy_write(dev, offset, value); + value += 0x202; + } + } + b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF) + | 0x0700); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0038, 0x0667); + + if (phy->gmode || phy->rev >= 2) { + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) + | 0x0020); + b43_radio_write16(dev, 0x0051, + b43_radio_read16(dev, 0x0051) + | 0x0004); + } + b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000); + + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100); + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000); + + b43_phy_write(dev, 0x001C, 0x186A); + + b43_phy_write(dev, 0x0013, + (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900); + b43_phy_write(dev, 0x0035, + (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064); + b43_phy_write(dev, 0x005D, + (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A); + } + + if (dev->bad_frames_preempt) { + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, + B43_PHY_RADIO_BITFIELD) | (1 << 11)); + } + + if (phy->analog == 1) { + b43_phy_write(dev, 0x0026, 0xCE00); + b43_phy_write(dev, 0x0021, 0x3763); + b43_phy_write(dev, 0x0022, 0x1BC3); + b43_phy_write(dev, 0x0023, 0x06F9); + b43_phy_write(dev, 0x0024, 0x037E); + } else + b43_phy_write(dev, 0x0026, 0xCC00); + b43_phy_write(dev, 0x0030, 0x00C6); + b43_write16(dev, 0x03EC, 0x3F22); + + if (phy->analog == 1) + b43_phy_write(dev, 0x0020, 0x3E1C); + else + b43_phy_write(dev, 0x0020, 0x301C); + + if (phy->analog == 0) + b43_write16(dev, 0x03E4, 0x3000); + + old_channel = phy->channel; + /* Force to channel 7, 
even if not supported. */ + b43_radio_selectchannel(dev, 7, 0); + + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + } + + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007); + + b43_radio_selectchannel(dev, old_channel, 0); + + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + b43_phy_write(dev, 0x002A, 0x88A3); + + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + + if (phy->radio_ver == 0x2050) + b43_radio_write16(dev, 0x005D, 0x000D); + + b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); +} + +static void b43_phy_initb6(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset, val; + u8 old_channel; + + b43_phy_write(dev, 0x003E, 0x817A); + b43_radio_write16(dev, 0x007A, + (b43_radio_read16(dev, 0x007A) | 0x0058)); + if (phy->radio_rev == 4 || phy->radio_rev == 5) { + b43_radio_write16(dev, 0x51, 0x37); + b43_radio_write16(dev, 0x52, 0x70); + b43_radio_write16(dev, 0x53, 0xB3); + b43_radio_write16(dev, 0x54, 0x9B); + b43_radio_write16(dev, 0x5A, 0x88); + b43_radio_write16(dev, 0x5B, 0x88); + b43_radio_write16(dev, 0x5D, 0x88); + b43_radio_write16(dev, 0x5E, 0x88); + b43_radio_write16(dev, 0x7D, 0x88); + b43_hf_write(dev, b43_hf_read(dev) + | B43_HF_TSSIRPSMW); + } + B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... */ + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x51, 0); + b43_radio_write16(dev, 0x52, 0x40); + b43_radio_write16(dev, 0x53, 0xB7); + b43_radio_write16(dev, 0x54, 0x98); + b43_radio_write16(dev, 0x5A, 0x88); + b43_radio_write16(dev, 0x5B, 0x6B); + b43_radio_write16(dev, 0x5C, 0x0F); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_ALTIQ) { + b43_radio_write16(dev, 0x5D, 0xFA); + b43_radio_write16(dev, 0x5E, 0xD8); + } else { + b43_radio_write16(dev, 0x5D, 0xF5); + b43_radio_write16(dev, 0x5E, 0xB8); + } + b43_radio_write16(dev, 0x0073, 0x0003); + b43_radio_write16(dev, 0x007D, 0x00A8); + b43_radio_write16(dev, 0x007C, 0x0001); + b43_radio_write16(dev, 0x007E, 0x0008); + } + val = 0x1E1F; + for (offset = 0x0088; offset < 0x0098; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x3E3F; + for (offset = 0x0098; offset < 0x00A8; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x2120; + for (offset = 0x00A8; offset < 0x00C8; offset++) { + b43_phy_write(dev, offset, (val & 0x3F3F)); + val += 0x0202; + } + if (phy->type == B43_PHYTYPE_G) { + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0020); + b43_radio_write16(dev, 0x0051, + b43_radio_read16(dev, 0x0051) | 0x0004); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100); + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000); + b43_phy_write(dev, 0x5B, 0); + b43_phy_write(dev, 0x5C, 0); + } + + old_channel = phy->channel; + if (old_channel >= 8) + b43_radio_selectchannel(dev, 1, 0); + else + b43_radio_selectchannel(dev, 13, 0); + + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + udelay(40); + if (phy->radio_rev < 6 || phy->radio_rev == 8) { + b43_radio_write16(dev, 0x7C, 
(b43_radio_read16(dev, 0x7C) + | 0x0002)); + b43_radio_write16(dev, 0x50, 0x20); + } + if (phy->radio_rev <= 2) { + b43_radio_write16(dev, 0x7C, 0x20); + b43_radio_write16(dev, 0x5A, 0x70); + b43_radio_write16(dev, 0x5B, 0x7B); + b43_radio_write16(dev, 0x5C, 0xB0); + } + b43_radio_write16(dev, 0x007A, + (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007); + + b43_radio_selectchannel(dev, old_channel, 0); + + b43_phy_write(dev, 0x0014, 0x0200); + if (phy->radio_rev >= 6) + b43_phy_write(dev, 0x2A, 0x88C2); + else + b43_phy_write(dev, 0x2A, 0x8AC0); + b43_phy_write(dev, 0x0038, 0x0668); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + if (phy->radio_rev <= 5) { + b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D) + & 0xFF80) | 0x0003); + } + if (phy->radio_rev <= 2) + b43_radio_write16(dev, 0x005D, 0x000D); + + if (phy->analog == 4) { + b43_write16(dev, 0x3E4, 9); + b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61) + & 0x0FFF); + } else { + b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0) + | 0x0004); + } + if (phy->type == B43_PHYTYPE_B) { + b43_write16(dev, 0x03E6, 0x8140); + b43_phy_write(dev, 0x0016, 0x0410); + b43_phy_write(dev, 0x0017, 0x0820); + b43_phy_write(dev, 0x0062, 0x0007); + b43_radio_init2050(dev); + b43_lo_g_measure(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI) { + b43_calc_nrssi_slope(dev); + b43_calc_nrssi_threshold(dev); + } + b43_phy_init_pctl(dev); + } else if (phy->type == B43_PHYTYPE_G) + b43_write16(dev, 0x03E6, 0x0); +} + +static void b43_calc_loopback_gain(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup_phy[16] = { 0 }; + u16 backup_radio[3]; + u16 backup_bband; + u16 i, j, loop_i_max; + u16 trsw_rx; + u16 loop1_outer_done, loop1_inner_done; + + backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0); + backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG); + backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER); + backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER); + backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + } + backup_phy[6] = b43_phy_read(dev, B43_PHY_BASE(0x5A)); + backup_phy[7] = b43_phy_read(dev, B43_PHY_BASE(0x59)); + backup_phy[8] = b43_phy_read(dev, B43_PHY_BASE(0x58)); + backup_phy[9] = b43_phy_read(dev, B43_PHY_BASE(0x0A)); + backup_phy[10] = b43_phy_read(dev, B43_PHY_BASE(0x03)); + backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK); + backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL); + backup_phy[13] = b43_phy_read(dev, B43_PHY_BASE(0x2B)); + backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL); + backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + backup_bband = phy->bbatt.att; + backup_radio[0] = b43_radio_read16(dev, 0x52); + backup_radio[1] = b43_radio_read16(dev, 0x43); + backup_radio[2] = b43_radio_read16(dev, 0x7A); + + b43_phy_write(dev, B43_PHY_CRS0, + b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF); + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, + b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000); + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD); + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + 
b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, + B43_PHY_ANALOGOVERVAL) & 0xFFFE); + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, + B43_PHY_ANALOGOVERVAL) & 0xFFFD); + } + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C); + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + (b43_phy_read(dev, B43_PHY_RFOVERVAL) + & 0xFFCF) | 0x10); + + b43_phy_write(dev, B43_PHY_BASE(0x5A), 0x0780); + b43_phy_write(dev, B43_PHY_BASE(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0x000D); + + b43_phy_write(dev, B43_PHY_BASE(0x0A), + b43_phy_read(dev, B43_PHY_BASE(0x0A)) | 0x2000); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, + B43_PHY_ANALOGOVERVAL) & 0xFFFB); + } + b43_phy_write(dev, B43_PHY_BASE(0x03), + (b43_phy_read(dev, B43_PHY_BASE(0x03)) + & 0xFF9F) | 0x40); + + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, 0x000F); + } else { + b43_radio_write16(dev, 0x52, 0); + b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | 0x9); + } + b43_phy_set_baseband_attenuation(dev, 11); + + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); + b43_phy_write(dev, B43_PHY_LO_CTL, 0); + + b43_phy_write(dev, B43_PHY_BASE(0x2B), + (b43_phy_read(dev, B43_PHY_BASE(0x2B)) + & 0xFFC0) | 0x01); + b43_phy_write(dev, B43_PHY_BASE(0x2B), + (b43_phy_read(dev, B43_PHY_BASE(0x2B)) + & 0xC0FF) | 0x800); + + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF); + + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_EXTLNA) { + if (phy->rev >= 7) { + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) + | 0x0800); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) + | 0x8000); + } + } + b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A) + & 0x00F7); + + j = 0; + loop_i_max = (phy->radio_rev == 8) ? 
15 : 9; + for (i = 0; i < loop_i_max; i++) { + for (j = 0; j < 16; j++) { + b43_radio_write16(dev, 0x43, i); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + (b43_phy_read(dev, B43_PHY_RFOVERVAL) + & 0xF0FF) | (j << 8)); + b43_phy_write(dev, B43_PHY_PGACTL, + (b43_phy_read(dev, B43_PHY_PGACTL) + & 0x0FFF) | 0xA000); + b43_phy_write(dev, B43_PHY_PGACTL, + b43_phy_read(dev, B43_PHY_PGACTL) + | 0xF000); + udelay(20); + if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) + goto exit_loop1; + } + } + exit_loop1: + loop1_outer_done = i; + loop1_inner_done = j; + if (j >= 8) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) + | 0x30); + trsw_rx = 0x1B; + for (j = j - 8; j < 16; j++) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + (b43_phy_read(dev, B43_PHY_RFOVERVAL) + & 0xF0FF) | (j << 8)); + b43_phy_write(dev, B43_PHY_PGACTL, + (b43_phy_read(dev, B43_PHY_PGACTL) + & 0x0FFF) | 0xA000); + b43_phy_write(dev, B43_PHY_PGACTL, + b43_phy_read(dev, B43_PHY_PGACTL) + | 0xF000); + udelay(20); + trsw_rx -= 3; + if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) + goto exit_loop2; + } + } else + trsw_rx = 0x18; + exit_loop2: + + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]); + } + b43_phy_write(dev, B43_PHY_BASE(0x5A), backup_phy[6]); + b43_phy_write(dev, B43_PHY_BASE(0x59), backup_phy[7]); + b43_phy_write(dev, B43_PHY_BASE(0x58), backup_phy[8]); + b43_phy_write(dev, B43_PHY_BASE(0x0A), backup_phy[9]); + b43_phy_write(dev, B43_PHY_BASE(0x03), backup_phy[10]); + b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]); + b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]); + b43_phy_write(dev, B43_PHY_BASE(0x2B), backup_phy[13]); + b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]); + + b43_phy_set_baseband_attenuation(dev, backup_bband); + + b43_radio_write16(dev, 0x52, backup_radio[0]); + b43_radio_write16(dev, 0x43, backup_radio[1]); + b43_radio_write16(dev, 0x7A, backup_radio[2]); + + b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003); + udelay(10); + b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]); + b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]); + b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]); + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]); + + phy->max_lb_gain = + ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11; + phy->trsw_rx_gain = trsw_rx * 2; +} + +static void b43_phy_initg(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + + if (phy->rev == 1) + b43_phy_initb5(dev); + else + b43_phy_initb6(dev); + + if (phy->rev >= 2 || phy->gmode) + b43_phy_inita(dev); + + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_ANALOGOVER, 0); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0); + } + if (phy->rev == 2) { + b43_phy_write(dev, B43_PHY_RFOVER, 0); + b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); + } + if (phy->rev > 5) { + b43_phy_write(dev, B43_PHY_RFOVER, 0x400); + b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); + } + if (phy->gmode || phy->rev >= 2) { + tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM); + tmp &= B43_PHYVER_VERSION; + if (tmp == 3 || tmp == 5) { + b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816); + b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006); + } + if (tmp == 5) { + b43_phy_write(dev, B43_PHY_OFDM(0xCC), + (b43_phy_read(dev, B43_PHY_OFDM(0xCC)) + & 0x00FF) | 0x1F00); + } + } + if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2) + b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78); + if 
(phy->radio_rev == 8) { + b43_phy_write(dev, B43_PHY_EXTG(0x01), + b43_phy_read(dev, B43_PHY_EXTG(0x01)) + | 0x80); + b43_phy_write(dev, B43_PHY_OFDM(0x3E), + b43_phy_read(dev, B43_PHY_OFDM(0x3E)) + | 0x4); + } + if (has_loopback_gain(phy)) + b43_calc_loopback_gain(dev); + + if (phy->radio_rev != 8) { + if (phy->initval == 0xFFFF) + phy->initval = b43_radio_init2050(dev); + else + b43_radio_write16(dev, 0x0078, phy->initval); + } + if (phy->lo_control->tx_bias == 0xFF) { + b43_lo_g_measure(dev); + } else { + if (has_tx_magnification(phy)) { + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) & 0xFF00) + | phy->lo_control->tx_bias | phy-> + lo_control->tx_magn); + } else { + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) & 0xFFF0) + | phy->lo_control->tx_bias); + } + if (phy->rev >= 6) { + b43_phy_write(dev, B43_PHY_BASE(0x36), + (b43_phy_read(dev, B43_PHY_BASE(0x36)) + & 0x0FFF) | (phy->lo_control-> + tx_bias << 12)); + } + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_PACTRL) + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x8075); + else + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x807F); + if (phy->rev < 2) + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x101); + else + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x202); + } + if (phy->gmode || phy->rev >= 2) { + b43_lo_g_adjust(dev); + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); + } + + if (!(dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI)) { + /* The specs state to update the NRSSI LT with + * the value 0x7FFFFFFF here. I think that is some weird + * compiler optimization in the original driver. + * Essentially, what we do here is resetting all NRSSI LT + * entries to -32 (see the limit_value() in nrssi_hw_update()) + */ + b43_nrssi_hw_update(dev, 0xFFFF); //FIXME? + b43_calc_nrssi_threshold(dev); + } else if (phy->gmode || phy->rev >= 2) { + if (phy->nrssi[0] == -1000) { + B43_WARN_ON(phy->nrssi[1] != -1000); + b43_calc_nrssi_slope(dev); + } else + b43_calc_nrssi_threshold(dev); + } + if (phy->radio_rev == 8) + b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230); + b43_phy_init_pctl(dev); + /* FIXME: The spec says in the following if, the 0 should be replaced + 'if OFDM may not be used in the current locale' + but OFDM is legal everywhere */ + if ((dev->dev->bus->chip_id == 0x4306 + && dev->dev->bus->chip_package == 2) || 0) { + b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0) + & 0xBFFF); + b43_phy_write(dev, B43_PHY_OFDM(0xC3), + b43_phy_read(dev, B43_PHY_OFDM(0xC3)) + & 0x7FFF); + } +} + +/* Set the baseband attenuation value on chip. 
*/ +void b43_phy_set_baseband_attenuation(struct b43_wldev *dev, + u16 baseband_attenuation) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->analog == 0) { + b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0) + & 0xFFF0) | + baseband_attenuation); + } else if (phy->analog > 1) { + b43_phy_write(dev, B43_PHY_DACCTL, + (b43_phy_read(dev, B43_PHY_DACCTL) + & 0xFFC3) | (baseband_attenuation << 2)); + } else { + b43_phy_write(dev, B43_PHY_DACCTL, + (b43_phy_read(dev, B43_PHY_DACCTL) + & 0xFF87) | (baseband_attenuation << 3)); + } +} + +/* http://bcm-specs.sipsolutions.net/EstimatePowerOut + * This function converts a TSSI value to dBm in Q5.2 + */ +static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi) +{ + struct b43_phy *phy = &dev->phy; + s8 dbm = 0; + s32 tmp; + + tmp = (phy->tgt_idle_tssi - phy->cur_idle_tssi + tssi); + + switch (phy->type) { + case B43_PHYTYPE_A: + tmp += 0x80; + tmp = limit_value(tmp, 0x00, 0xFF); + dbm = phy->tssi2dbm[tmp]; + //TODO: There's a FIXME on the specs + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + tmp = limit_value(tmp, 0x00, 0x3F); + dbm = phy->tssi2dbm[tmp]; + break; + default: + B43_WARN_ON(1); + } + + return dbm; +} + +void b43_put_attenuation_into_ranges(struct b43_wldev *dev, + int *_bbatt, int *_rfatt) +{ + int rfatt = *_rfatt; + int bbatt = *_bbatt; + struct b43_txpower_lo_control *lo = dev->phy.lo_control; + + /* Get baseband and radio attenuation values into their permitted ranges. + * Radio attenuation affects power level 4 times as much as baseband. */ + + /* Range constants */ + const int rf_min = lo->rfatt_list.min_val; + const int rf_max = lo->rfatt_list.max_val; + const int bb_min = lo->bbatt_list.min_val; + const int bb_max = lo->bbatt_list.max_val; + + while (1) { + if (rfatt > rf_max && bbatt > bb_max - 4) + break; /* Can not get it into ranges */ + if (rfatt < rf_min && bbatt < bb_min + 4) + break; /* Can not get it into ranges */ + if (bbatt > bb_max && rfatt > rf_max - 1) + break; /* Can not get it into ranges */ + if (bbatt < bb_min && rfatt < rf_min + 1) + break; /* Can not get it into ranges */ + + if (bbatt > bb_max) { + bbatt -= 4; + rfatt += 1; + continue; + } + if (bbatt < bb_min) { + bbatt += 4; + rfatt -= 1; + continue; + } + if (rfatt > rf_max) { + rfatt -= 1; + bbatt += 4; + continue; + } + if (rfatt < rf_min) { + rfatt += 1; + bbatt -= 4; + continue; + } + break; + } + + *_rfatt = limit_value(rfatt, rf_min, rf_max); + *_bbatt = limit_value(bbatt, bb_min, bb_max); +} + +/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ +void b43_phy_xmitpower(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + if (phy->cur_idle_tssi == 0) + return; + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306)) + return; +#ifdef CONFIG_B43_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + switch (phy->type) { + case B43_PHYTYPE_A:{ + + //TODO: Nothing for A PHYs yet :-/ + + break; + } + case B43_PHYTYPE_B: + case B43_PHYTYPE_G:{ + u16 tmp; + s8 v0, v1, v2, v3; + s8 average; + int max_pwr; + int desired_pwr, estimated_pwr, pwr_adjust; + int rfatt_delta, bbatt_delta; + int rfatt, bbatt; + u8 tx_control; + unsigned long phylock_flags; + + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x0058); + v0 = (s8) (tmp & 0x00FF); + v1 = (s8) ((tmp & 0xFF00) >> 8); + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x005A); + v2 = (s8) (tmp & 0x00FF); + v3 = (s8) ((tmp & 0xFF00) >> 8); + tmp = 0; + + if (v0 == 
0x7F || v1 == 0x7F || v2 == 0x7F + || v3 == 0x7F) { + tmp = + b43_shm_read16(dev, B43_SHM_SHARED, 0x0070); + v0 = (s8) (tmp & 0x00FF); + v1 = (s8) ((tmp & 0xFF00) >> 8); + tmp = + b43_shm_read16(dev, B43_SHM_SHARED, 0x0072); + v2 = (s8) (tmp & 0x00FF); + v3 = (s8) ((tmp & 0xFF00) >> 8); + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F + || v3 == 0x7F) + return; + v0 = (v0 + 0x20) & 0x3F; + v1 = (v1 + 0x20) & 0x3F; + v2 = (v2 + 0x20) & 0x3F; + v3 = (v3 + 0x20) & 0x3F; + tmp = 1; + } + b43_shm_clear_tssi(dev); + + average = (v0 + v1 + v2 + v3 + 2) / 4; + + if (tmp + && (b43_shm_read16(dev, B43_SHM_SHARED, 0x005E) & + 0x8)) + average -= 13; + + estimated_pwr = + b43_phy_estimate_power_out(dev, average); + + max_pwr = dev->dev->bus->sprom.r1.maxpwr_bg; + if ((dev->dev->bus->sprom.r1. + boardflags_lo & B43_BFL_PACTRL) + && (phy->type == B43_PHYTYPE_G)) + max_pwr -= 0x3; + if (unlikely(max_pwr <= 0)) { + b43warn(dev->wl, + "Invalid max-TX-power value in SPROM.\n"); + max_pwr = 60; /* fake it */ + dev->dev->bus->sprom.r1.maxpwr_bg = max_pwr; + } + + /*TODO: + max_pwr = min(REG - dev->dev->bus->sprom.antennagain_bgphy - 0x6, max_pwr) + where REG is the max power as per the regulatory domain + */ + + /* Get desired power (in Q5.2) */ + desired_pwr = INT_TO_Q52(phy->power_level); + /* And limit it. max_pwr already is Q5.2 */ + desired_pwr = limit_value(desired_pwr, 0, max_pwr); + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + b43dbg(dev->wl, + "Current TX power output: " Q52_FMT + " dBm, " "Desired TX power output: " + Q52_FMT " dBm\n", Q52_ARG(estimated_pwr), + Q52_ARG(desired_pwr)); + } + + /* Calculate the adjustment delta. */ + pwr_adjust = desired_pwr - estimated_pwr; + + /* RF attenuation delta. */ + rfatt_delta = ((pwr_adjust + 7) / 8); + /* Lower attenuation => Bigger power output. Negate it. */ + rfatt_delta = -rfatt_delta; + + /* Baseband attenuation delta. */ + bbatt_delta = pwr_adjust / 2; + /* Lower attenuation => Bigger power output. Negate it. */ + bbatt_delta = -bbatt_delta; + /* RF att affects power level 4 times as much as + * Baseband attennuation. Subtract it. */ + bbatt_delta -= 4 * rfatt_delta; + + /* So do we finally need to adjust something? */ + if ((rfatt_delta == 0) && (bbatt_delta == 0)) { + b43_lo_g_ctl_mark_cur_used(dev); + return; + } + + /* Calculate the new attenuation values. */ + bbatt = phy->bbatt.att; + bbatt += bbatt_delta; + rfatt = phy->rfatt.att; + rfatt += rfatt_delta; + + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + tx_control = phy->tx_control; + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { + if (rfatt <= 1) { + if (tx_control == 0) { + tx_control = + B43_TXCTL_PA2DB | + B43_TXCTL_TXMIX; + rfatt += 2; + bbatt += 2; + } else if (dev->dev->bus->sprom.r1. 
+                    boardflags_lo &
+                    B43_BFL_PACTRL) {
+                    bbatt += 4 * (rfatt - 2);
+                    rfatt = 2;
+                }
+            } else if (rfatt > 4 && tx_control) {
+                tx_control = 0;
+                if (bbatt < 3) {
+                    rfatt -= 3;
+                    bbatt += 2;
+                } else {
+                    rfatt -= 2;
+                    bbatt -= 2;
+                }
+            }
+        }
+        /* Save the control values */
+        phy->tx_control = tx_control;
+        b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
+        phy->rfatt.att = rfatt;
+        phy->bbatt.att = bbatt;
+
+        /* Adjust the hardware */
+        b43_phy_lock(dev, phylock_flags);
+        b43_radio_lock(dev);
+        b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt,
+                          phy->tx_control);
+        b43_lo_g_ctl_mark_cur_used(dev);
+        b43_radio_unlock(dev);
+        b43_phy_unlock(dev, phylock_flags);
+        break;
+    }
+    default:
+        B43_WARN_ON(1);
+    }
+}
+
+static inline s32 b43_tssi2dbm_ad(s32 num, s32 den)
+{
+    if (num < 0)
+        return num / den;
+    else
+        return (num + den / 2) / den;
+}
+
+static inline
+    s8 b43_tssi2dbm_entry(s8 entry[], u8 index, s16 pab0, s16 pab1, s16 pab2)
+{
+    s32 m1, m2, f = 256, q, delta;
+    s8 i = 0;
+
+    m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
+    m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
+    do {
+        if (i > 15)
+            return -EINVAL;
+        q = b43_tssi2dbm_ad(f * 4096 -
+                            b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
+        delta = abs(q - f);
+        f = q;
+        i++;
+    } while (delta >= 2);
+    entry[index] = limit_value(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
+    return 0;
+}
+
+/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */
+int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev)
+{
+    struct b43_phy *phy = &dev->phy;
+    s16 pab0, pab1, pab2;
+    u8 idx;
+    s8 *dyn_tssi2dbm;
+
+    if (phy->type == B43_PHYTYPE_A) {
+        pab0 = (s16) (dev->dev->bus->sprom.r1.pa1b0);
+        pab1 = (s16) (dev->dev->bus->sprom.r1.pa1b1);
+        pab2 = (s16) (dev->dev->bus->sprom.r1.pa1b2);
+    } else {
+        pab0 = (s16) (dev->dev->bus->sprom.r1.pa0b0);
+        pab1 = (s16) (dev->dev->bus->sprom.r1.pa0b1);
+        pab2 = (s16) (dev->dev->bus->sprom.r1.pa0b2);
+    }
+
+    if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) {
+        phy->tgt_idle_tssi = 0x34;
+        phy->tssi2dbm = b43_tssi2dbm_b_table;
+        return 0;
+    }
+
+    if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
+        pab0 != -1 && pab1 != -1 && pab2 != -1) {
+        /* The pabX values are set in SPROM. Use them. */
+        if (phy->type == B43_PHYTYPE_A) {
+            if ((s8) dev->dev->bus->sprom.r1.itssi_a != 0 &&
+                (s8) dev->dev->bus->sprom.r1.itssi_a != -1)
+                phy->tgt_idle_tssi =
+                    (s8) (dev->dev->bus->sprom.r1.itssi_a);
+            else
+                phy->tgt_idle_tssi = 62;
+        } else {
+            if ((s8) dev->dev->bus->sprom.r1.itssi_bg != 0 &&
+                (s8) dev->dev->bus->sprom.r1.itssi_bg != -1)
+                phy->tgt_idle_tssi =
+                    (s8) (dev->dev->bus->sprom.r1.itssi_bg);
+            else
+                phy->tgt_idle_tssi = 62;
+        }
+        dyn_tssi2dbm = kmalloc(64, GFP_KERNEL);
+        if (dyn_tssi2dbm == NULL) {
+            b43err(dev->wl, "Could not allocate memory "
+                   "for tssi2dbm table\n");
+            return -ENOMEM;
+        }
+        for (idx = 0; idx < 64; idx++)
+            if (b43_tssi2dbm_entry
+                (dyn_tssi2dbm, idx, pab0, pab1, pab2)) {
+                phy->tssi2dbm = NULL;
+                b43err(dev->wl, "Could not generate "
+                       "tssi2dBm table\n");
+                kfree(dyn_tssi2dbm);
+                return -ENODEV;
+            }
+        phy->tssi2dbm = dyn_tssi2dbm;
+        phy->dyn_tssi_tbl = 1;
+    } else {
+        /* pabX values not set in SPROM. */
+        switch (phy->type) {
+        case B43_PHYTYPE_A:
+            /* APHY needs a generated table.
*/ + phy->tssi2dbm = NULL; + b43err(dev->wl, "Could not generate tssi2dBm " + "table (wrong SPROM info)!\n"); + return -ENODEV; + case B43_PHYTYPE_B: + phy->tgt_idle_tssi = 0x34; + phy->tssi2dbm = b43_tssi2dbm_b_table; + break; + case B43_PHYTYPE_G: + phy->tgt_idle_tssi = 0x34; + phy->tssi2dbm = b43_tssi2dbm_g_table; + break; + } + } + + return 0; +} + +int b43_phy_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err = -ENODEV; + + switch (phy->type) { + case B43_PHYTYPE_A: + if (phy->rev == 2 || phy->rev == 3) { + b43_phy_inita(dev); + err = 0; + } + break; + case B43_PHYTYPE_B: + switch (phy->rev) { + case 2: + b43_phy_initb2(dev); + err = 0; + break; + case 4: + b43_phy_initb4(dev); + err = 0; + break; + case 5: + b43_phy_initb5(dev); + err = 0; + break; + case 6: + b43_phy_initb6(dev); + err = 0; + break; + } + break; + case B43_PHYTYPE_G: + b43_phy_initg(dev); + err = 0; + break; + } + if (err) + b43err(dev->wl, "Unknown PHYTYPE found\n"); + + return err; +} + +void b43_set_rx_antenna(struct b43_wldev *dev, int antenna) +{ + struct b43_phy *phy = &dev->phy; + u32 hf; + u16 tmp; + int autodiv = 0; + + if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1) + autodiv = 1; + + hf = b43_hf_read(dev); + hf &= ~B43_HF_ANTDIVHELP; + b43_hf_write(dev, hf); + + switch (phy->type) { + case B43_PHYTYPE_A: + case B43_PHYTYPE_G: + tmp = b43_phy_read(dev, B43_PHY_BBANDCFG); + tmp &= ~B43_PHY_BBANDCFG_RXANT; + tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna) + << B43_PHY_BBANDCFG_RXANT_SHIFT; + b43_phy_write(dev, B43_PHY_BBANDCFG, tmp); + + if (autodiv) { + tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); + if (antenna == B43_ANTENNA_AUTO0) + tmp &= ~B43_PHY_ANTDWELL_AUTODIV1; + else + tmp |= B43_PHY_ANTDWELL_AUTODIV1; + b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); + } + if (phy->type == B43_PHYTYPE_G) { + tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT); + if (autodiv) + tmp |= B43_PHY_ANTWRSETT_ARXDIV; + else + tmp &= ~B43_PHY_ANTWRSETT_ARXDIV; + b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp); + if (phy->rev >= 2) { + tmp = b43_phy_read(dev, B43_PHY_OFDM61); + tmp |= B43_PHY_OFDM61_10; + b43_phy_write(dev, B43_PHY_OFDM61, tmp); + + tmp = + b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK); + tmp = (tmp & 0xFF00) | 0x15; + b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK, + tmp); + + if (phy->rev == 2) { + b43_phy_write(dev, B43_PHY_ADIVRELATED, + 8); + } else { + tmp = + b43_phy_read(dev, + B43_PHY_ADIVRELATED); + tmp = (tmp & 0xFF00) | 8; + b43_phy_write(dev, B43_PHY_ADIVRELATED, + tmp); + } + } + if (phy->rev >= 6) + b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC); + } else { + if (phy->rev < 3) { + tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); + tmp = (tmp & 0xFF00) | 0x24; + b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); + } else { + tmp = b43_phy_read(dev, B43_PHY_OFDM61); + tmp |= 0x10; + b43_phy_write(dev, B43_PHY_OFDM61, tmp); + if (phy->analog == 3) { + b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, + 0x1D); + b43_phy_write(dev, B43_PHY_ADIVRELATED, + 8); + } else { + b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, + 0x3A); + tmp = + b43_phy_read(dev, + B43_PHY_ADIVRELATED); + tmp = (tmp & 0xFF00) | 8; + b43_phy_write(dev, B43_PHY_ADIVRELATED, + tmp); + } + } + } + break; + case B43_PHYTYPE_B: + tmp = b43_phy_read(dev, B43_PHY_CCKBBANDCFG); + tmp &= ~B43_PHY_BBANDCFG_RXANT; + tmp |= (autodiv ? 
B43_ANTENNA_AUTO0 : antenna) + << B43_PHY_BBANDCFG_RXANT_SHIFT; + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, tmp); + break; + default: + B43_WARN_ON(1); + } + + hf |= B43_HF_ANTDIVHELP; + b43_hf_write(dev, hf); +} + +/* Get the freq, as it has to be written to the device. */ +static inline u16 channel2freq_bg(u8 channel) +{ + B43_WARN_ON(!(channel >= 1 && channel <= 14)); + + return b43_radio_channel_codes_bg[channel - 1]; +} + +/* Get the freq, as it has to be written to the device. */ +static inline u16 channel2freq_a(u8 channel) +{ + B43_WARN_ON(channel > 200); + + return (5000 + 5 * channel); +} + +void b43_radio_lock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_RADIOLOCK; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write and wait for the device + * to exit any radio register access. */ + b43_read32(dev, B43_MMIO_MACCTL); + udelay(10); +} + +void b43_radio_unlock(struct b43_wldev *dev) +{ + u32 macctl; + + /* Commit any write */ + b43_read16(dev, B43_MMIO_PHY_VER); + /* unlock */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_RADIOLOCK; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +u16 b43_radio_read16(struct b43_wldev *dev, u16 offset) +{ + struct b43_phy *phy = &dev->phy; + + switch (phy->type) { + case B43_PHYTYPE_A: + offset |= 0x0040; + break; + case B43_PHYTYPE_B: + if (phy->radio_ver == 0x2053) { + if (offset < 0x70) + offset += 0x80; + else if (offset < 0x80) + offset += 0x70; + } else if (phy->radio_ver == 0x2050) { + offset |= 0x80; + } else + B43_WARN_ON(1); + break; + case B43_PHYTYPE_G: + offset |= 0x80; + break; + } + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset); + return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); +} + +void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val) +{ + b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset); + mmiowb(); + b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, val); +} + +static void b43_set_all_gains(struct b43_wldev *dev, + s16 first, s16 second, s16 third) +{ + struct b43_phy *phy = &dev->phy; + u16 i; + u16 start = 0x08, end = 0x18; + u16 tmp; + u16 table; + + if (phy->rev <= 1) { + start = 0x10; + end = 0x20; + } + + table = B43_OFDMTAB_GAINX; + if (phy->rev <= 1) + table = B43_OFDMTAB_GAINX_R1; + for (i = 0; i < 4; i++) + b43_ofdmtab_write16(dev, table, i, first); + + for (i = start; i < end; i++) + b43_ofdmtab_write16(dev, table, i, second); + + if (third != -1) { + tmp = ((u16) third << 14) | ((u16) third << 6); + b43_phy_write(dev, 0x04A0, + (b43_phy_read(dev, 0x04A0) & 0xBFBF) | tmp); + b43_phy_write(dev, 0x04A1, + (b43_phy_read(dev, 0x04A1) & 0xBFBF) | tmp); + b43_phy_write(dev, 0x04A2, + (b43_phy_read(dev, 0x04A2) & 0xBFBF) | tmp); + } + b43_dummy_transmission(dev); +} + +static void b43_set_original_gains(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 i, tmp; + u16 table; + u16 start = 0x0008, end = 0x0018; + + if (phy->rev <= 1) { + start = 0x0010; + end = 0x0020; + } + + table = B43_OFDMTAB_GAINX; + if (phy->rev <= 1) + table = B43_OFDMTAB_GAINX_R1; + for (i = 0; i < 4; i++) { + tmp = (i & 0xFFFC); + tmp |= (i & 0x0001) << 1; + tmp |= (i & 0x0002) >> 1; + + b43_ofdmtab_write16(dev, table, i, tmp); + } + + for (i = start; i < end; i++) + b43_ofdmtab_write16(dev, table, i, i - start); + + b43_phy_write(dev, 0x04A0, + (b43_phy_read(dev, 0x04A0) & 0xBFBF) | 0x4040); + b43_phy_write(dev, 0x04A1, + (b43_phy_read(dev, 0x04A1) & 0xBFBF) | 0x4040); + b43_phy_write(dev, 0x04A2, + (b43_phy_read(dev, 
0x04A2) & 0xBFBF) | 0x4000); + b43_dummy_transmission(dev); +} + +/* Synthetic PU workaround */ +static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel) +{ + struct b43_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) { + /* We do not need the workaround. */ + return; + } + + if (channel <= 10) { + b43_write16(dev, B43_MMIO_CHANNEL, + channel2freq_bg(channel + 4)); + } else { + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1)); + } + msleep(1); + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); +} + +u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel) +{ + struct b43_phy *phy = &dev->phy; + u8 ret = 0; + u16 saved, rssi, temp; + int i, j = 0; + + saved = b43_phy_read(dev, 0x0403); + b43_radio_selectchannel(dev, channel, 0); + b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); + if (phy->aci_hw_rssi) + rssi = b43_phy_read(dev, 0x048A) & 0x3F; + else + rssi = saved & 0x3F; + /* clamp temp to signed 5bit */ + if (rssi > 32) + rssi -= 64; + for (i = 0; i < 100; i++) { + temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F; + if (temp > 32) + temp -= 64; + if (temp < rssi) + j++; + if (j >= 20) + ret = 1; + } + b43_phy_write(dev, 0x0403, saved); + + return ret; +} + +u8 b43_radio_aci_scan(struct b43_wldev * dev) +{ + struct b43_phy *phy = &dev->phy; + u8 ret[13]; + unsigned int channel = phy->channel; + unsigned int i, j, start, end; + unsigned long phylock_flags; + + if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0))) + return 0; + + b43_phy_lock(dev, phylock_flags); + b43_radio_lock(dev); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF); + b43_set_all_gains(dev, 3, 8, 1); + + start = (channel - 5 > 0) ? channel - 5 : 1; + end = (channel + 5 < 14) ? channel + 5 : 13; + + for (i = start; i <= end; i++) { + if (abs(channel - i) > 2) + ret[i - 1] = b43_radio_aci_detect(dev, i); + } + b43_radio_selectchannel(dev, channel, 0); + b43_phy_write(dev, 0x0802, + (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003); + b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000); + b43_set_original_gains(dev); + for (i = 0; i < 13; i++) { + if (!ret[i]) + continue; + end = (i + 5 < 13) ? 
i + 5 : 13; + for (j = i; j < end; j++) + ret[j] = 1; + } + b43_radio_unlock(dev); + b43_phy_unlock(dev, phylock_flags); + + return ret[channel - 1]; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) +{ + b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); + mmiowb(); + b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) +{ + u16 val; + + b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); + val = b43_phy_read(dev, B43_PHY_NRSSILT_DATA); + + return (s16) val; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) +{ + u16 i; + s16 tmp; + + for (i = 0; i < 64; i++) { + tmp = b43_nrssi_hw_read(dev, i); + tmp -= val; + tmp = limit_value(tmp, -32, 31); + b43_nrssi_hw_write(dev, i, tmp); + } +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43_nrssi_mem_update(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + s16 i, delta; + s32 tmp; + + delta = 0x1F - phy->nrssi[0]; + for (i = 0; i < 64; i++) { + tmp = (i - delta) * phy->nrssislope; + tmp /= 0x10000; + tmp += 0x3A; + tmp = limit_value(tmp, 0, 0x3F); + phy->nrssi_lt[i] = tmp; + } +} + +static void b43_calc_nrssi_offset(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup[20] = { 0 }; + s16 v47F; + u16 i; + u16 saved = 0xFFFF; + + backup[0] = b43_phy_read(dev, 0x0001); + backup[1] = b43_phy_read(dev, 0x0811); + backup[2] = b43_phy_read(dev, 0x0812); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + backup[3] = b43_phy_read(dev, 0x0814); + backup[4] = b43_phy_read(dev, 0x0815); + } + backup[5] = b43_phy_read(dev, 0x005A); + backup[6] = b43_phy_read(dev, 0x0059); + backup[7] = b43_phy_read(dev, 0x0058); + backup[8] = b43_phy_read(dev, 0x000A); + backup[9] = b43_phy_read(dev, 0x0003); + backup[10] = b43_radio_read16(dev, 0x007A); + backup[11] = b43_radio_read16(dev, 0x0043); + + b43_phy_write(dev, 0x0429, b43_phy_read(dev, 0x0429) & 0x7FFF); + b43_phy_write(dev, 0x0001, + (b43_phy_read(dev, 0x0001) & 0x3FFF) | 0x4000); + b43_phy_write(dev, 0x0811, b43_phy_read(dev, 0x0811) | 0x000C); + b43_phy_write(dev, 0x0812, + (b43_phy_read(dev, 0x0812) & 0xFFF3) | 0x0004); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & ~(0x1 | 0x2)); + if (phy->rev >= 6) { + backup[12] = b43_phy_read(dev, 0x002E); + backup[13] = b43_phy_read(dev, 0x002F); + backup[14] = b43_phy_read(dev, 0x080F); + backup[15] = b43_phy_read(dev, 0x0810); + backup[16] = b43_phy_read(dev, 0x0801); + backup[17] = b43_phy_read(dev, 0x0060); + backup[18] = b43_phy_read(dev, 0x0014); + backup[19] = b43_phy_read(dev, 0x0478); + + b43_phy_write(dev, 0x002E, 0); + b43_phy_write(dev, 0x002F, 0); + b43_phy_write(dev, 0x080F, 0); + b43_phy_write(dev, 0x0810, 0); + b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478) | 0x0100); + b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801) | 0x0040); + b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) | 0x0040); + b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014) | 0x0200); + } + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0070); + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0080); + udelay(30); + + v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == 31) { + for (i = 7; i >= 4; i--) { + 
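+ /* The 0x47F sample above came back at the 6-bit maximum (31); each + * pass of this loop presumably retries with radio register 0x7B set + * to 7..4 and records in "saved" the first setting that brings the + * reading back below 31 (4 is used as a fallback after the loop). */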
b43_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = + (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F < 31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 4; + } else { + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x007F); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, + b43_phy_read(dev, 0x0814) | 0x0001); + b43_phy_write(dev, 0x0815, + b43_phy_read(dev, 0x0815) & 0xFFFE); + } + b43_phy_write(dev, 0x0811, b43_phy_read(dev, 0x0811) | 0x000C); + b43_phy_write(dev, 0x0812, b43_phy_read(dev, 0x0812) | 0x000C); + b43_phy_write(dev, 0x0811, b43_phy_read(dev, 0x0811) | 0x0030); + b43_phy_write(dev, 0x0812, b43_phy_read(dev, 0x0812) | 0x0030); + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + if (phy->rev == 0) { + b43_phy_write(dev, 0x0003, 0x0122); + } else { + b43_phy_write(dev, 0x000A, b43_phy_read(dev, 0x000A) + | 0x2000); + } + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, + b43_phy_read(dev, 0x0814) | 0x0004); + b43_phy_write(dev, 0x0815, + b43_phy_read(dev, 0x0815) & 0xFFFB); + } + b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003) & 0xFF9F) + | 0x0040); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x000F); + b43_set_all_gains(dev, 3, 0, 1); + b43_radio_write16(dev, 0x0043, (b43_radio_read16(dev, 0x0043) + & 0x00F0) | 0x000F); + udelay(30); + v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == -32) { + for (i = 0; i < 4; i++) { + b43_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = + (s16) ((b43_phy_read(dev, 0x047F) >> 8) & + 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F > -31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 3; + } else + saved = 0; + } + b43_radio_write16(dev, 0x007B, saved); + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x002E, backup[12]); + b43_phy_write(dev, 0x002F, backup[13]); + b43_phy_write(dev, 0x080F, backup[14]); + b43_phy_write(dev, 0x0810, backup[15]); + } + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, backup[3]); + b43_phy_write(dev, 0x0815, backup[4]); + } + b43_phy_write(dev, 0x005A, backup[5]); + b43_phy_write(dev, 0x0059, backup[6]); + b43_phy_write(dev, 0x0058, backup[7]); + b43_phy_write(dev, 0x000A, backup[8]); + b43_phy_write(dev, 0x0003, backup[9]); + b43_radio_write16(dev, 0x0043, backup[11]); + b43_radio_write16(dev, 0x007A, backup[10]); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x1 | 0x2); + b43_phy_write(dev, 0x0429, b43_phy_read(dev, 0x0429) | 0x8000); + b43_set_original_gains(dev); + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0801, backup[16]); + b43_phy_write(dev, 0x0060, backup[17]); + b43_phy_write(dev, 0x0014, backup[18]); + b43_phy_write(dev, 0x0478, backup[19]); + } + b43_phy_write(dev, 0x0001, backup[0]); + b43_phy_write(dev, 0x0812, backup[2]); + b43_phy_write(dev, 0x0811, backup[1]); +} + +void b43_calc_nrssi_slope(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup[18] = { 0 }; + u16 tmp; + s16 nrssi0, nrssi1; + + switch (phy->type) { + case B43_PHYTYPE_B: + backup[0] = b43_radio_read16(dev, 0x007A); + backup[1] = b43_radio_read16(dev, 0x0052); + backup[2] = b43_radio_read16(dev, 0x0043); + backup[3] = b43_phy_read(dev, 
0x0030); + backup[4] = b43_phy_read(dev, 0x0026); + backup[5] = b43_phy_read(dev, 0x0015); + backup[6] = b43_phy_read(dev, 0x002A); + backup[7] = b43_phy_read(dev, 0x0020); + backup[8] = b43_phy_read(dev, 0x005A); + backup[9] = b43_phy_read(dev, 0x0059); + backup[10] = b43_phy_read(dev, 0x0058); + backup[11] = b43_read16(dev, 0x03E2); + backup[12] = b43_read16(dev, 0x03E6); + backup[13] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); + + tmp = b43_radio_read16(dev, 0x007A); + tmp &= (phy->rev >= 5) ? 0x007F : 0x000F; + b43_radio_write16(dev, 0x007A, tmp); + b43_phy_write(dev, 0x0030, 0x00FF); + b43_write16(dev, 0x03EC, 0x7F7F); + b43_phy_write(dev, 0x0026, 0x0000); + b43_phy_write(dev, 0x0015, b43_phy_read(dev, 0x0015) | 0x0020); + b43_phy_write(dev, 0x002A, 0x08A3); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0080); + + nrssi0 = (s16) b43_phy_read(dev, 0x0027); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x007F); + if (phy->rev >= 2) { + b43_write16(dev, 0x03E6, 0x0040); + } else if (phy->rev == 0) { + b43_write16(dev, 0x03E6, 0x0122); + } else { + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, + B43_MMIO_CHANNEL_EXT) & 0x2000); + } + b43_phy_write(dev, 0x0020, 0x3F3F); + b43_phy_write(dev, 0x0015, 0xF330); + b43_radio_write16(dev, 0x005A, 0x0060); + b43_radio_write16(dev, 0x0043, + b43_radio_read16(dev, 0x0043) & 0x00F0); + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + udelay(20); + + nrssi1 = (s16) b43_phy_read(dev, 0x0027); + b43_phy_write(dev, 0x0030, backup[3]); + b43_radio_write16(dev, 0x007A, backup[0]); + b43_write16(dev, 0x03E2, backup[11]); + b43_phy_write(dev, 0x0026, backup[4]); + b43_phy_write(dev, 0x0015, backup[5]); + b43_phy_write(dev, 0x002A, backup[6]); + b43_synth_pu_workaround(dev, phy->channel); + if (phy->rev != 0) + b43_write16(dev, 0x03F4, backup[13]); + + b43_phy_write(dev, 0x0020, backup[7]); + b43_phy_write(dev, 0x005A, backup[8]); + b43_phy_write(dev, 0x0059, backup[9]); + b43_phy_write(dev, 0x0058, backup[10]); + b43_radio_write16(dev, 0x0052, backup[1]); + b43_radio_write16(dev, 0x0043, backup[2]); + + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + + if (nrssi0 <= -4) { + phy->nrssi[0] = nrssi0; + phy->nrssi[1] = nrssi1; + } + break; + case B43_PHYTYPE_G: + if (phy->radio_rev >= 9) + return; + if (phy->radio_rev == 8) + b43_calc_nrssi_offset(dev); + + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC); + backup[7] = b43_read16(dev, 0x03E2); + b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000); + backup[0] = b43_radio_read16(dev, 0x007A); + backup[1] = b43_radio_read16(dev, 0x0052); + backup[2] = b43_radio_read16(dev, 0x0043); + backup[3] = b43_phy_read(dev, 0x0015); + backup[4] = b43_phy_read(dev, 0x005A); + backup[5] = b43_phy_read(dev, 0x0059); + backup[6] = b43_phy_read(dev, 0x0058); + backup[8] = b43_read16(dev, 0x03E6); + backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); + if (phy->rev >= 3) { + backup[10] = b43_phy_read(dev, 0x002E); + backup[11] = b43_phy_read(dev, 0x002F); + backup[12] = b43_phy_read(dev, 0x080F); + backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL); + backup[14] = b43_phy_read(dev, 0x0801); + backup[15] = b43_phy_read(dev, 0x0060); + backup[16] = b43_phy_read(dev, 0x0014); + backup[17] = b43_phy_read(dev, 0x0478); + b43_phy_write(dev, 0x002E, 0); 
+ b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0); + switch (phy->rev) { + case 4: + case 6: + case 7: + b43_phy_write(dev, 0x0478, + b43_phy_read(dev, 0x0478) + | 0x0100); + b43_phy_write(dev, 0x0801, + b43_phy_read(dev, 0x0801) + | 0x0040); + break; + case 3: + case 5: + b43_phy_write(dev, 0x0801, + b43_phy_read(dev, 0x0801) + & 0xFFBF); + break; + } + b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) + | 0x0040); + b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014) + | 0x0200); + } + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0070); + b43_set_all_gains(dev, 0, 8, 0); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x00F7); + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0811, + (b43_phy_read(dev, 0x0811) & 0xFFCF) | + 0x0030); + b43_phy_write(dev, 0x0812, + (b43_phy_read(dev, 0x0812) & 0xFFCF) | + 0x0010); + } + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0080); + udelay(20); + + nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi0 >= 0x0020) + nrssi0 -= 0x0040; + + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x007F); + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + } + + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + | 0x2000); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x000F); + b43_phy_write(dev, 0x0015, 0xF330); + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0812, + (b43_phy_read(dev, 0x0812) & 0xFFCF) | + 0x0020); + b43_phy_write(dev, 0x0811, + (b43_phy_read(dev, 0x0811) & 0xFFCF) | + 0x0020); + } + + b43_set_all_gains(dev, 3, 0, 1); + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x0043, 0x001F); + } else { + tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F; + b43_radio_write16(dev, 0x0052, tmp | 0x0060); + tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0; + b43_radio_write16(dev, 0x0043, tmp | 0x0009); + } + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + udelay(20); + nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi1 >= 0x0020) + nrssi1 -= 0x0040; + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + if (nrssi0 >= -4) { + phy->nrssi[0] = nrssi1; + phy->nrssi[1] = nrssi0; + } + if (phy->rev >= 3) { + b43_phy_write(dev, 0x002E, backup[10]); + b43_phy_write(dev, 0x002F, backup[11]); + b43_phy_write(dev, 0x080F, backup[12]); + b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]); + } + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0812, + b43_phy_read(dev, 0x0812) & 0xFFCF); + b43_phy_write(dev, 0x0811, + b43_phy_read(dev, 0x0811) & 0xFFCF); + } + + b43_radio_write16(dev, 0x007A, backup[0]); + b43_radio_write16(dev, 0x0052, backup[1]); + b43_radio_write16(dev, 0x0043, backup[2]); + b43_write16(dev, 0x03E2, backup[7]); + b43_write16(dev, 0x03E6, backup[8]); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]); + b43_phy_write(dev, 0x0015, backup[3]); + b43_phy_write(dev, 0x005A, backup[4]); + b43_phy_write(dev, 0x0059, backup[5]); + b43_phy_write(dev, 0x0058, backup[6]); + b43_synth_pu_workaround(dev, phy->channel); + b43_phy_write(dev, 0x0802, + b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002)); + b43_set_original_gains(dev); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000); + if (phy->rev >= 3) { + b43_phy_write(dev, 0x0801, backup[14]); + b43_phy_write(dev, 0x0060, backup[15]); + 
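+ /* The slope computed above appears to be kept in 1/65536 units + * (0x00400000 / delta == (64 << 16) / delta): b43_nrssi_mem_update() + * a few lines below divides by 0x10000 when it rebuilds the NRSSI + * lookup table from it, and b43_calc_nrssi_threshold() then + * reprograms the threshold from the updated nrssi[] values. */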
b43_phy_write(dev, 0x0014, backup[16]); + b43_phy_write(dev, 0x0478, backup[17]); + } + b43_nrssi_mem_update(dev); + b43_calc_nrssi_threshold(dev); + break; + default: + B43_WARN_ON(1); + } +} + +void b43_calc_nrssi_threshold(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + s32 threshold; + s32 a, b; + s16 tmp16; + u16 tmp_u16; + + switch (phy->type) { + case B43_PHYTYPE_B:{ + if (phy->radio_ver != 0x2050) + return; + if (! + (dev->dev->bus->sprom.r1. + boardflags_lo & B43_BFL_RSSI)) + return; + + if (phy->radio_rev >= 6) { + threshold = + (phy->nrssi[1] - phy->nrssi[0]) * 32; + threshold += 20 * (phy->nrssi[0] + 1); + threshold /= 40; + } else + threshold = phy->nrssi[1] - 5; + + threshold = limit_value(threshold, 0, 0x3E); + b43_phy_read(dev, 0x0020); /* dummy read */ + b43_phy_write(dev, 0x0020, + (((u16) threshold) << 8) | 0x001C); + + if (phy->radio_rev >= 6) { + b43_phy_write(dev, 0x0087, 0x0E0D); + b43_phy_write(dev, 0x0086, 0x0C0B); + b43_phy_write(dev, 0x0085, 0x0A09); + b43_phy_write(dev, 0x0084, 0x0808); + b43_phy_write(dev, 0x0083, 0x0808); + b43_phy_write(dev, 0x0082, 0x0604); + b43_phy_write(dev, 0x0081, 0x0302); + b43_phy_write(dev, 0x0080, 0x0100); + } + break; + } + case B43_PHYTYPE_G: + if (!phy->gmode || + !(dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI)) { + tmp16 = b43_nrssi_hw_read(dev, 0x20); + if (tmp16 >= 0x20) + tmp16 -= 0x40; + if (tmp16 < 3) { + b43_phy_write(dev, 0x048A, + (b43_phy_read(dev, 0x048A) + & 0xF000) | 0x09EB); + } else { + b43_phy_write(dev, 0x048A, + (b43_phy_read(dev, 0x048A) + & 0xF000) | 0x0AED); + } + } else { + if (phy->interfmode == B43_INTERFMODE_NONWLAN) { + a = 0xE; + b = 0xA; + } else if (!phy->aci_wlan_automatic && phy->aci_enable) { + a = 0x13; + b = 0x12; + } else { + a = 0xE; + b = 0x11; + } + + a = a * (phy->nrssi[1] - phy->nrssi[0]); + a += (phy->nrssi[0] << 6); + if (a < 32) + a += 31; + else + a += 32; + a = a >> 6; + a = limit_value(a, -31, 31); + + b = b * (phy->nrssi[1] - phy->nrssi[0]); + b += (phy->nrssi[0] << 6); + if (b < 32) + b += 31; + else + b += 32; + b = b >> 6; + b = limit_value(b, -31, 31); + + tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000; + tmp_u16 |= ((u32) b & 0x0000003F); + tmp_u16 |= (((u32) a & 0x0000003F) << 6); + b43_phy_write(dev, 0x048A, tmp_u16); + } + break; + default: + B43_WARN_ON(1); + } +} + +/* Stack implementation to save/restore values from the + * interference mitigation code. + * It is save to restore values in random order. 
+ */ +static void _stack_save(u32 * _stackptr, size_t * stackidx, + u8 id, u16 offset, u16 value) +{ + u32 *stackptr = &(_stackptr[*stackidx]); + + B43_WARN_ON(offset & 0xF000); + B43_WARN_ON(id & 0xF0); + *stackptr = offset; + *stackptr |= ((u32) id) << 12; + *stackptr |= ((u32) value) << 16; + (*stackidx)++; + B43_WARN_ON(*stackidx >= B43_INTERFSTACK_SIZE); +} + +static u16 _stack_restore(u32 * stackptr, u8 id, u16 offset) +{ + size_t i; + + B43_WARN_ON(offset & 0xF000); + B43_WARN_ON(id & 0xF0); + for (i = 0; i < B43_INTERFSTACK_SIZE; i++, stackptr++) { + if ((*stackptr & 0x00000FFF) != offset) + continue; + if (((*stackptr & 0x0000F000) >> 12) != id) + continue; + return ((*stackptr & 0xFFFF0000) >> 16); + } + B43_WARN_ON(1); + + return 0; +} + +#define phy_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x1, (offset), \ + b43_phy_read(dev, (offset))); \ + } while (0) +#define phy_stackrestore(offset) \ + do { \ + b43_phy_write(dev, (offset), \ + _stack_restore(stack, 0x1, \ + (offset))); \ + } while (0) +#define radio_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x2, (offset), \ + b43_radio_read16(dev, (offset))); \ + } while (0) +#define radio_stackrestore(offset) \ + do { \ + b43_radio_write16(dev, (offset), \ + _stack_restore(stack, 0x2, \ + (offset))); \ + } while (0) +#define ofdmtab_stacksave(table, offset) \ + do { \ + _stack_save(stack, &stackidx, 0x3, (offset)|(table), \ + b43_ofdmtab_read16(dev, (table), (offset))); \ + } while (0) +#define ofdmtab_stackrestore(table, offset) \ + do { \ + b43_ofdmtab_write16(dev, (table), (offset), \ + _stack_restore(stack, 0x3, \ + (offset)|(table))); \ + } while (0) + +static void +b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp, flipped; + size_t stackidx = 0; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43_phy_write(dev, 0x042B, + b43_phy_read(dev, 0x042B) | 0x0800); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, + B43_PHY_G_CRS) & ~0x4000); + break; + } + radio_stacksave(0x0078); + tmp = (b43_radio_read16(dev, 0x0078) & 0x001E); + flipped = flip_4bit(tmp); + if (flipped < 10 && flipped >= 8) + flipped = 7; + else if (flipped >= 10) + flipped -= 3; + flipped = flip_4bit(flipped); + flipped = (flipped << 1) | 0x0020; + b43_radio_write16(dev, 0x0078, flipped); + + b43_calc_nrssi_threshold(dev); + + phy_stacksave(0x0406); + b43_phy_write(dev, 0x0406, 0x7E28); + + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x0800); + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, + B43_PHY_RADIO_BITFIELD) | 0x1000); + + phy_stacksave(0x04A0); + b43_phy_write(dev, 0x04A0, + (b43_phy_read(dev, 0x04A0) & 0xC0C0) | 0x0008); + phy_stacksave(0x04A1); + b43_phy_write(dev, 0x04A1, + (b43_phy_read(dev, 0x04A1) & 0xC0C0) | 0x0605); + phy_stacksave(0x04A2); + b43_phy_write(dev, 0x04A2, + (b43_phy_read(dev, 0x04A2) & 0xC0C0) | 0x0204); + phy_stacksave(0x04A8); + b43_phy_write(dev, 0x04A8, + (b43_phy_read(dev, 0x04A8) & 0xC0C0) | 0x0803); + phy_stacksave(0x04AB); + b43_phy_write(dev, 0x04AB, + (b43_phy_read(dev, 0x04AB) & 0xC0C0) | 0x0605); + + phy_stacksave(0x04A7); + b43_phy_write(dev, 0x04A7, 0x0002); + phy_stacksave(0x04A3); + b43_phy_write(dev, 0x04A3, 0x287A); + phy_stacksave(0x04A9); + b43_phy_write(dev, 0x04A9, 0x2027); + phy_stacksave(0x0493); + b43_phy_write(dev, 0x0493, 0x32F5); + phy_stacksave(0x04AA); + b43_phy_write(dev, 0x04AA, 0x2027); + 
phy_stacksave(0x04AC); + b43_phy_write(dev, 0x04AC, 0x32F5); + break; + case B43_INTERFMODE_MANUALWLAN: + if (b43_phy_read(dev, 0x0033) & 0x0800) + break; + + phy->aci_enable = 1; + + phy_stacksave(B43_PHY_RADIO_BITFIELD); + phy_stacksave(B43_PHY_G_CRS); + if (phy->rev < 2) { + phy_stacksave(0x0406); + } else { + phy_stacksave(0x04C0); + phy_stacksave(0x04C1); + } + phy_stacksave(0x0033); + phy_stacksave(0x04A7); + phy_stacksave(0x04A3); + phy_stacksave(0x04A9); + phy_stacksave(0x04AA); + phy_stacksave(0x04AC); + phy_stacksave(0x0493); + phy_stacksave(0x04A1); + phy_stacksave(0x04A0); + phy_stacksave(0x04A2); + phy_stacksave(0x048A); + phy_stacksave(0x04A8); + phy_stacksave(0x04AB); + if (phy->rev == 2) { + phy_stacksave(0x04AD); + phy_stacksave(0x04AE); + } else if (phy->rev >= 3) { + phy_stacksave(0x04AD); + phy_stacksave(0x0415); + phy_stacksave(0x0416); + phy_stacksave(0x0417); + ofdmtab_stacksave(0x1A00, 0x2); + ofdmtab_stacksave(0x1A00, 0x3); + } + phy_stacksave(0x042B); + phy_stacksave(0x048C); + + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, B43_PHY_RADIO_BITFIELD) + & ~0x1000); + b43_phy_write(dev, B43_PHY_G_CRS, + (b43_phy_read(dev, B43_PHY_G_CRS) + & 0xFFFC) | 0x0002); + + b43_phy_write(dev, 0x0033, 0x0800); + b43_phy_write(dev, 0x04A3, 0x2027); + b43_phy_write(dev, 0x04A9, 0x1CA8); + b43_phy_write(dev, 0x0493, 0x287A); + b43_phy_write(dev, 0x04AA, 0x1CA8); + b43_phy_write(dev, 0x04AC, 0x287A); + + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xFFC0) | 0x001A); + b43_phy_write(dev, 0x04A7, 0x000D); + + if (phy->rev < 2) { + b43_phy_write(dev, 0x0406, 0xFF0D); + } else if (phy->rev == 2) { + b43_phy_write(dev, 0x04C0, 0xFFFF); + b43_phy_write(dev, 0x04C1, 0x00A9); + } else { + b43_phy_write(dev, 0x04C0, 0x00C1); + b43_phy_write(dev, 0x04C1, 0x0059); + } + + b43_phy_write(dev, 0x04A1, (b43_phy_read(dev, 0x04A1) + & 0xC0FF) | 0x1800); + b43_phy_write(dev, 0x04A1, (b43_phy_read(dev, 0x04A1) + & 0xFFC0) | 0x0015); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xCFFF) | 0x1000); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xF0FF) | 0x0A00); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xCFFF) | 0x1000); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0800); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xFFCF) | 0x0010); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xFFF0) | 0x0005); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xFFCF) | 0x0010); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xFFF0) | 0x0006); + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0800); + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0500); + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x000B); + + if (phy->rev >= 3) { + b43_phy_write(dev, 0x048A, b43_phy_read(dev, 0x048A) + & ~0x8000); + b43_phy_write(dev, 0x0415, (b43_phy_read(dev, 0x0415) + & 0x8000) | 0x36D8); + b43_phy_write(dev, 0x0416, (b43_phy_read(dev, 0x0416) + & 0x8000) | 0x36D8); + b43_phy_write(dev, 0x0417, (b43_phy_read(dev, 0x0417) + & 0xFE00) | 0x016D); + } else { + b43_phy_write(dev, 0x048A, b43_phy_read(dev, 0x048A) + | 0x1000); + b43_phy_write(dev, 0x048A, (b43_phy_read(dev, 0x048A) + & 0x9FFF) | 0x2000); + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ACIW); + } + if (phy->rev >= 2) { + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) + | 0x0800); + } + b43_phy_write(dev, 0x048C, (b43_phy_read(dev, 0x048C) + & 
0xF0FF) | 0x0200); + if (phy->rev == 2) { + b43_phy_write(dev, 0x04AE, (b43_phy_read(dev, 0x04AE) + & 0xFF00) | 0x007F); + b43_phy_write(dev, 0x04AD, (b43_phy_read(dev, 0x04AD) + & 0x00FF) | 0x1300); + } else if (phy->rev >= 6) { + b43_ofdmtab_write16(dev, 0x1A00, 0x3, 0x007F); + b43_ofdmtab_write16(dev, 0x1A00, 0x2, 0x007F); + b43_phy_write(dev, 0x04AD, b43_phy_read(dev, 0x04AD) + & 0x00FF); + } + b43_calc_nrssi_slope(dev); + break; + default: + B43_WARN_ON(1); + } +} + +static void +b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43_phy_write(dev, 0x042B, + b43_phy_read(dev, 0x042B) & ~0x0800); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, + B43_PHY_G_CRS) | 0x4000); + break; + } + radio_stackrestore(0x0078); + b43_calc_nrssi_threshold(dev); + phy_stackrestore(0x0406); + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) & ~0x0800); + if (!dev->bad_frames_preempt) { + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, B43_PHY_RADIO_BITFIELD) + & ~(1 << 11)); + } + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) | 0x4000); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A7); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + break; + case B43_INTERFMODE_MANUALWLAN: + if (!(b43_phy_read(dev, 0x0033) & 0x0800)) + break; + + phy->aci_enable = 0; + + phy_stackrestore(B43_PHY_RADIO_BITFIELD); + phy_stackrestore(B43_PHY_G_CRS); + phy_stackrestore(0x0033); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A7); + if (phy->rev >= 2) { + phy_stackrestore(0x04C0); + phy_stackrestore(0x04C1); + } else + phy_stackrestore(0x0406); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A8); + if (phy->rev == 2) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x04AE); + } else if (phy->rev >= 3) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x0415); + phy_stackrestore(0x0416); + phy_stackrestore(0x0417); + ofdmtab_stackrestore(0x1A00, 0x2); + ofdmtab_stackrestore(0x1A00, 0x3); + } + phy_stackrestore(0x04A2); + phy_stackrestore(0x048A); + phy_stackrestore(0x042B); + phy_stackrestore(0x048C); + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ACIW); + b43_calc_nrssi_slope(dev); + break; + default: + B43_WARN_ON(1); + } +} + +#undef phy_stacksave +#undef phy_stackrestore +#undef radio_stacksave +#undef radio_stackrestore +#undef ofdmtab_stacksave +#undef ofdmtab_stackrestore + +int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + int currentmode; + + if ((phy->type != B43_PHYTYPE_G) || (phy->rev == 0) || (!phy->gmode)) + return -ENODEV; + + phy->aci_wlan_automatic = 0; + switch (mode) { + case B43_INTERFMODE_AUTOWLAN: + phy->aci_wlan_automatic = 1; + if (phy->aci_enable) + mode = B43_INTERFMODE_MANUALWLAN; + else + mode = B43_INTERFMODE_NONE; + break; + case B43_INTERFMODE_NONE: + case B43_INTERFMODE_NONWLAN: + case B43_INTERFMODE_MANUALWLAN: + break; + default: + return -EINVAL; + } + + currentmode = phy->interfmode; + if (currentmode == mode) + return 0; + if 
(currentmode != B43_INTERFMODE_NONE) + b43_radio_interference_mitigation_disable(dev, currentmode); + + if (mode == B43_INTERFMODE_NONE) { + phy->aci_enable = 0; + phy->aci_hw_rssi = 0; + } else + b43_radio_interference_mitigation_enable(dev, mode); + phy->interfmode = mode; + + return 0; +} + +static u16 b43_radio_core_calibration_value(struct b43_wldev *dev) +{ + u16 reg, index, ret; + + static const u8 rcc_table[] = { + 0x02, 0x03, 0x01, 0x0F, + 0x06, 0x07, 0x05, 0x0F, + 0x0A, 0x0B, 0x09, 0x0F, + 0x0E, 0x0F, 0x0D, 0x0F, + }; + + reg = b43_radio_read16(dev, 0x60); + index = (reg & 0x001E) >> 1; + ret = rcc_table[index] << 1; + ret |= (reg & 0x0001); + ret |= 0x0020; + + return ret; +} + +#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) +static u16 radio2050_rfover_val(struct b43_wldev *dev, + u16 phy_register, unsigned int lpd) +{ + struct b43_phy *phy = &dev->phy; + struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + + if (!phy->gmode) + return 0; + + if (has_loopback_gain(phy)) { + int max_lb_gain = phy->max_lb_gain; + u16 extlna; + u16 i; + + if (phy->radio_rev == 8) + max_lb_gain += 0x3E; + else + max_lb_gain += 0x26; + if (max_lb_gain >= 0x46) { + extlna = 0x3000; + max_lb_gain -= 0x46; + } else if (max_lb_gain >= 0x3A) { + extlna = 0x1000; + max_lb_gain -= 0x3A; + } else if (max_lb_gain >= 0x2E) { + extlna = 0x2000; + max_lb_gain -= 0x2E; + } else { + extlna = 0; + max_lb_gain -= 0x10; + } + + for (i = 0; i < 16; i++) { + max_lb_gain -= (i * 6); + if (max_lb_gain < 6) + break; + } + + if ((phy->rev < 7) || + !(sprom->r1.boardflags_lo & B43_BFL_EXTLNA)) { + if (phy_register == B43_PHY_RFOVER) { + return 0x1B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + extlna |= (i << 8); + switch (lpd) { + case LPD(0, 1, 1): + return 0x0F92; + case LPD(0, 0, 1): + case LPD(1, 0, 1): + return (0x0092 | extlna); + case LPD(1, 0, 0): + return (0x0093 | extlna); + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } else { + if (phy_register == B43_PHY_RFOVER) { + return 0x9B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + if (extlna) + extlna |= 0x8000; + extlna |= (i << 8); + switch (lpd) { + case LPD(0, 1, 1): + return 0x8F92; + case LPD(0, 0, 1): + return (0x8092 | extlna); + case LPD(1, 0, 1): + return (0x2092 | extlna); + case LPD(1, 0, 0): + return (0x2093 | extlna); + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } + } else { + if ((phy->rev < 7) || + !(sprom->r1.boardflags_lo & B43_BFL_EXTLNA)) { + if (phy_register == B43_PHY_RFOVER) { + return 0x1B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0FB2; + case LPD(0, 0, 1): + return 0x00B2; + case LPD(1, 0, 1): + return 0x30B2; + case LPD(1, 0, 0): + return 0x30B3; + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } else { + if (phy_register == B43_PHY_RFOVER) { + return 0x9B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x8FB2; + case LPD(0, 0, 1): + return 0x80B2; + case LPD(1, 0, 1): + return 0x20B2; + case LPD(1, 0, 0): + return 0x20B3; + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } + } + return 0; +} + +struct init2050_saved_values { + /* Core registers */ + u16 reg_3EC; + u16 reg_3E6; + u16 reg_3F4; + /* Radio registers */ + u16 radio_43; + u16 radio_51; + u16 radio_52; + /* PHY registers */ + u16 phy_pgactl; + u16 phy_base_5A; + u16 phy_base_59; + u16 phy_base_58; + u16 phy_base_30; + u16 phy_rfover; + u16 phy_rfoverval; + u16 phy_analogover; + u16 phy_analogoverval; + u16 phy_crs0; + u16 phy_classctl; + u16 
phy_lo_mask; + u16 phy_lo_ctl; + u16 phy_syncctl; +}; + +u16 b43_radio_init2050(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct init2050_saved_values sav; + u16 rcc; + u16 radio78; + u16 ret; + u16 i, j; + u32 tmp1 = 0, tmp2 = 0; + + memset(&sav, 0, sizeof(sav)); /* get rid of "may be used uninitialized..." */ + + sav.radio_43 = b43_radio_read16(dev, 0x43); + sav.radio_51 = b43_radio_read16(dev, 0x51); + sav.radio_52 = b43_radio_read16(dev, 0x52); + sav.phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); + sav.phy_base_5A = b43_phy_read(dev, B43_PHY_BASE(0x5A)); + sav.phy_base_59 = b43_phy_read(dev, B43_PHY_BASE(0x59)); + sav.phy_base_58 = b43_phy_read(dev, B43_PHY_BASE(0x58)); + + if (phy->type == B43_PHYTYPE_B) { + sav.phy_base_30 = b43_phy_read(dev, B43_PHY_BASE(0x30)); + sav.reg_3EC = b43_read16(dev, 0x3EC); + + b43_phy_write(dev, B43_PHY_BASE(0x30), 0xFF); + b43_write16(dev, 0x3EC, 0x3F3F); + } else if (phy->gmode || phy->rev >= 2) { + sav.phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); + sav.phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + sav.phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); + sav.phy_analogoverval = + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + sav.phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); + sav.phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); + + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) + | 0x0003); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL) + & 0xFFFC); + b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_CLASSCTL, + b43_phy_read(dev, B43_PHY_CLASSCTL) + & 0xFFFC); + if (has_loopback_gain(phy)) { + sav.phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); + sav.phy_lo_ctl = b43_phy_read(dev, B43_PHY_LO_CTL); + + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); + b43_phy_write(dev, B43_PHY_LO_CTL, 0); + } + + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 1, 1))); + b43_phy_write(dev, B43_PHY_RFOVER, + radio2050_rfover_val(dev, B43_PHY_RFOVER, 0)); + } + b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) | 0x8000); + + sav.phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); + b43_phy_write(dev, B43_PHY_SYNCCTL, b43_phy_read(dev, B43_PHY_SYNCCTL) + & 0xFF7F); + sav.reg_3E6 = b43_read16(dev, 0x3E6); + sav.reg_3F4 = b43_read16(dev, 0x3F4); + + if (phy->analog == 0) { + b43_write16(dev, 0x03E6, 0x0122); + } else { + if (phy->analog >= 2) { + b43_phy_write(dev, B43_PHY_BASE(0x03), + (b43_phy_read(dev, B43_PHY_BASE(0x03)) + & 0xFFBF) | 0x40); + } + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + (b43_read16(dev, B43_MMIO_CHANNEL_EXT) | 0x2000)); + } + + rcc = b43_radio_core_calibration_value(dev); + + if (phy->type == B43_PHYTYPE_B) + b43_radio_write16(dev, 0x78, 0x26); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 1, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xBFAF); + b43_phy_write(dev, B43_PHY_BASE(0x2B), 0x1403); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xBFA0); + b43_radio_write16(dev, 0x51, b43_radio_read16(dev, 0x51) + | 0x0004); + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, 0x1F); + } else { + b43_radio_write16(dev, 0x52, 0); + 
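+ /* A rough reading of the measurement loops further below (inferred + * from the code, not from the specs): the first 16-pass loop sums + * B43_PHY_LO_LEAKAGE readings into tmp1 as a reference, the nested + * loop then accumulates leakage into tmp2 while stepping radio 0x78 + * through the candidates ((flip_4bit(i) << 1) | 0x20) and stops once + * tmp1 < tmp2; b43_radio_init2050() returns the rcc calibration word + * if the scan stopped early, and the last 0x78 value otherwise. */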
b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | 0x0009); + } + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_BASE(0x5A), 0x0480); + b43_phy_write(dev, B43_PHY_BASE(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0x000D); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 0))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); + udelay(20); + tmp1 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + } + udelay(10); + + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + tmp1++; + tmp1 >>= 9; + + for (i = 0; i < 16; i++) { + radio78 = ((flip_4bit(i) << 1) | 0x20); + b43_radio_write16(dev, 0x78, radio78); + udelay(10); + for (j = 0; j < 16; j++) { + b43_phy_write(dev, B43_PHY_BASE(0x5A), 0x0D80); + b43_phy_write(dev, B43_PHY_BASE(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0x000D); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 0))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); + udelay(10); + tmp2 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + } + tmp2++; + tmp2 >>= 8; + if (tmp1 < tmp2) + break; + } + + /* Restore the registers */ + b43_phy_write(dev, B43_PHY_PGACTL, sav.phy_pgactl); + b43_radio_write16(dev, 0x51, sav.radio_51); + b43_radio_write16(dev, 0x52, sav.radio_52); + b43_radio_write16(dev, 0x43, sav.radio_43); + b43_phy_write(dev, B43_PHY_BASE(0x5A), sav.phy_base_5A); + b43_phy_write(dev, B43_PHY_BASE(0x59), sav.phy_base_59); + b43_phy_write(dev, B43_PHY_BASE(0x58), sav.phy_base_58); + b43_write16(dev, 0x3E6, sav.reg_3E6); + if (phy->analog != 0) + b43_write16(dev, 0x3F4, sav.reg_3F4); + b43_phy_write(dev, B43_PHY_SYNCCTL, sav.phy_syncctl); + b43_synth_pu_workaround(dev, phy->channel); + if (phy->type == B43_PHYTYPE_B) { + b43_phy_write(dev, B43_PHY_BASE(0x30), sav.phy_base_30); + b43_write16(dev, 0x3EC, sav.reg_3EC); + } else if (phy->gmode) { + b43_write16(dev, B43_MMIO_PHY_RADIO, + b43_read16(dev, B43_MMIO_PHY_RADIO) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_RFOVER, sav.phy_rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, 
sav.phy_rfoverval); + b43_phy_write(dev, B43_PHY_ANALOGOVER, sav.phy_analogover); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + sav.phy_analogoverval); + b43_phy_write(dev, B43_PHY_CRS0, sav.phy_crs0); + b43_phy_write(dev, B43_PHY_CLASSCTL, sav.phy_classctl); + if (has_loopback_gain(phy)) { + b43_phy_write(dev, B43_PHY_LO_MASK, sav.phy_lo_mask); + b43_phy_write(dev, B43_PHY_LO_CTL, sav.phy_lo_ctl); + } + } + if (i > 15) + ret = radio78; + else + ret = rcc; + + return ret; +} + +void b43_radio_init2060(struct b43_wldev *dev) +{ + int err; + + b43_radio_write16(dev, 0x0004, 0x00C0); + b43_radio_write16(dev, 0x0005, 0x0008); + b43_radio_write16(dev, 0x0009, 0x0040); + b43_radio_write16(dev, 0x0005, 0x00AA); + b43_radio_write16(dev, 0x0032, 0x008F); + b43_radio_write16(dev, 0x0006, 0x008F); + b43_radio_write16(dev, 0x0034, 0x008F); + b43_radio_write16(dev, 0x002C, 0x0007); + b43_radio_write16(dev, 0x0082, 0x0080); + b43_radio_write16(dev, 0x0080, 0x0000); + b43_radio_write16(dev, 0x003F, 0x00DA); + b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); + msleep(1); /* delay 400usec */ + + b43_radio_write16(dev, 0x0081, + (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010); + msleep(1); /* delay 400usec */ + + b43_radio_write16(dev, 0x0005, + (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008); + b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010); + b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040); + b43_radio_write16(dev, 0x0081, + (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040); + b43_radio_write16(dev, 0x0005, + (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008); + b43_phy_write(dev, 0x0063, 0xDDC6); + b43_phy_write(dev, 0x0069, 0x07BE); + b43_phy_write(dev, 0x006A, 0x0000); + + err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_A, 0); + B43_WARN_ON(err); + + msleep(1); +} + +static inline u16 freq_r3A_value(u16 frequency) +{ + u16 value; + + if (frequency < 5091) + value = 0x0040; + else if (frequency < 5321) + value = 0x0000; + else if (frequency < 5806) + value = 0x0080; + else + value = 0x0040; + + return value; +} + +void b43_radio_set_tx_iq(struct b43_wldev *dev) +{ + static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; + static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; + u16 tmp = b43_radio_read16(dev, 0x001E); + int i, j; + + for (i = 0; i < 5; i++) { + for (j = 0; j < 5; j++) { + if (tmp == (data_high[i] << 4 | data_low[j])) { + b43_phy_write(dev, 0x0069, + (i - j) << 8 | 0x00C0); + return; + } + } + } +} + +int b43_radio_selectchannel(struct b43_wldev *dev, + u8 channel, int synthetic_pu_workaround) +{ + struct b43_phy *phy = &dev->phy; + u16 r8, tmp; + u16 freq; + u16 channelcookie; + + if (channel == 0xFF) { + switch (phy->type) { + case B43_PHYTYPE_A: + channel = B43_DEFAULT_CHANNEL_A; + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + channel = B43_DEFAULT_CHANNEL_BG; + break; + default: + B43_WARN_ON(1); + } + } + + /* First we set the channel radio code to prevent the + * firmware from sending ghost packets. 
+ */ + channelcookie = channel; + if (phy->type == B43_PHYTYPE_A) + channelcookie |= 0x100; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie); + + if (phy->type == B43_PHYTYPE_A) { + if (channel > 200) + return -EINVAL; + freq = channel2freq_a(channel); + + r8 = b43_radio_read16(dev, 0x0008); + b43_write16(dev, 0x03F0, freq); + b43_radio_write16(dev, 0x0008, r8); + + //TODO: write max channel TX power? to Radio 0x2D + tmp = b43_radio_read16(dev, 0x002E); + tmp &= 0x0080; + //TODO: OR tmp with the Power out estimation for this channel? + b43_radio_write16(dev, 0x002E, tmp); + + if (freq >= 4920 && freq <= 5500) { + /* + * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F; + * = (freq * 0.025862069 + */ + r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */ + } + b43_radio_write16(dev, 0x0007, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0020, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0021, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022) + & 0x000F) | (r8 << 4)); + b43_radio_write16(dev, 0x002A, (r8 << 4)); + b43_radio_write16(dev, 0x002B, (r8 << 4)); + b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008) + & 0x00F0) | (r8 << 4)); + b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029) + & 0xFF0F) | 0x00B0); + b43_radio_write16(dev, 0x0035, 0x00AA); + b43_radio_write16(dev, 0x0036, 0x0085); + b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A) + & 0xFF20) | + freq_r3A_value(freq)); + b43_radio_write16(dev, 0x003D, + b43_radio_read16(dev, 0x003D) & 0x00FF); + b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081) + & 0xFF7F) | 0x0080); + b43_radio_write16(dev, 0x0035, + b43_radio_read16(dev, 0x0035) & 0xFFEF); + b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035) + & 0xFFEF) | 0x0010); + b43_radio_set_tx_iq(dev); + //TODO: TSSI2dbm workaround + b43_phy_xmitpower(dev); //FIXME correct? + } else { + if ((channel < 1) || (channel > 14)) + return -EINVAL; + + if (synthetic_pu_workaround) + b43_synth_pu_workaround(dev, channel); + + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); + + if (channel == 14) { + if (dev->dev->bus->sprom.r1.country_code == + SSB_SPROM1CCODE_JAPAN) + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_ACPR); + else + b43_hf_write(dev, + b43_hf_read(dev) | B43_HF_ACPR); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + | (1 << 11)); + } else { + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + & 0xF7BF); + } + } + + phy->channel = channel; + /* Wait for the radio to tune to the channel and stabilize. 
*/ + msleep(8); + + return 0; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */ +static u16 b43_get_txgain_base_band(u16 txpower) +{ + u16 ret; + + B43_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = 2; + else if (txpower >= 49) + ret = 4; + else if (txpower >= 44) + ret = 5; + else + ret = 6; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */ +static u16 b43_get_txgain_freq_power_amp(u16 txpower) +{ + u16 ret; + + B43_WARN_ON(txpower > 63); + + if (txpower >= 32) + ret = 0; + else if (txpower >= 25) + ret = 1; + else if (txpower >= 20) + ret = 2; + else if (txpower >= 12) + ret = 3; + else + ret = 4; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */ +static u16 b43_get_txgain_dac(u16 txpower) +{ + u16 ret; + + B43_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = txpower - 53; + else if (txpower >= 49) + ret = txpower - 42; + else if (txpower >= 44) + ret = txpower - 37; + else if (txpower >= 32) + ret = txpower - 32; + else if (txpower >= 25) + ret = txpower - 20; + else if (txpower >= 20) + ret = txpower - 13; + else if (txpower >= 12) + ret = txpower - 8; + else + ret = txpower; + + return ret; +} + +static void b43_radio_set_txpower_a(struct b43_wldev *dev, u16 txpower) +{ + struct b43_phy *phy = &dev->phy; + u16 pamp, base, dac, t; + + txpower = limit_value(txpower, 0, 63); + + pamp = b43_get_txgain_freq_power_amp(txpower); + pamp <<= 5; + pamp &= 0x00E0; + b43_phy_write(dev, 0x0019, pamp); + + base = b43_get_txgain_base_band(txpower); + base &= 0x000F; + b43_phy_write(dev, 0x0017, base | 0x0020); + + t = b43_ofdmtab_read16(dev, 0x3000, 1); + t &= 0x0007; + + dac = b43_get_txgain_dac(txpower); + dac <<= 3; + dac |= t; + + b43_ofdmtab_write16(dev, 0x3000, 1, dac); + + phy->txpwr_offset = txpower; + + //TODO: FuncPlaceholder (Adjust BB loft cancel) +} + +void b43_radio_turn_on(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err; + u8 channel; + + might_sleep(); + + if (phy->radio_on) + return; + + switch (phy->type) { + case B43_PHYTYPE_A: + b43_radio_write16(dev, 0x0004, 0x00C0); + b43_radio_write16(dev, 0x0005, 0x0008); + b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7); + b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7); + b43_radio_init2060(dev); + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + b43_phy_write(dev, 0x0015, 0x8000); + b43_phy_write(dev, 0x0015, 0xCC00); + b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000)); + if (phy->radio_off_context.valid) { + /* Restore the RFover values. 
*/ + b43_phy_write(dev, B43_PHY_RFOVER, + phy->radio_off_context.rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + phy->radio_off_context.rfoverval); + phy->radio_off_context.valid = 0; + } + channel = phy->channel; + err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_BG, 1); + err |= b43_radio_selectchannel(dev, channel, 0); + B43_WARN_ON(err); + break; + default: + B43_WARN_ON(1); + } + phy->radio_on = 1; +} + +void b43_radio_turn_off(struct b43_wldev *dev, bool force) +{ + struct b43_phy *phy = &dev->phy; + + if (!phy->radio_on && !force) + return; + + if (phy->type == B43_PHYTYPE_A) { + b43_radio_write16(dev, 0x0004, 0x00FF); + b43_radio_write16(dev, 0x0005, 0x00FB); + b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008); + b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008); + } + if (phy->type == B43_PHYTYPE_G && dev->dev->id.revision >= 5) { + u16 rfover, rfoverval; + + rfover = b43_phy_read(dev, B43_PHY_RFOVER); + rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + if (!force) { + phy->radio_off_context.rfover = rfover; + phy->radio_off_context.rfoverval = rfoverval; + phy->radio_off_context.valid = 1; + } + b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C); + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73); + } else + b43_phy_write(dev, 0x0015, 0xAA00); + phy->radio_on = 0; +} diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h new file mode 100644 index 000000000000..c64d74504fc0 --- /dev/null +++ b/drivers/net/wireless/b43/phy.h @@ -0,0 +1,297 @@ +#ifndef B43_PHY_H_ +#define B43_PHY_H_ + +#include <linux/types.h> + +struct b43_wldev; +struct b43_phy; + +/*** PHY Registers ***/ + +/* Routing */ +#define B43_PHYROUTE_OFDM_GPHY 0x400 +#define B43_PHYROUTE_EXT_GPHY 0x800 + +/* Base registers. 
*/ +#define B43_PHY_BASE(reg) (reg) +/* OFDM (A) registers of a G-PHY */ +#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY) +/* Extended G-PHY registers */ +#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY) + +/* OFDM (A) PHY Registers */ +#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */ +#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */ +#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */ +#define B43_PHY_BBANDCFG_RXANT_SHIFT 7 +#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */ +#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 */ +#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */ +#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */ +#define B43_PHY_CRS0 B43_PHY_OFDM(0x29) +#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */ +#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */ +#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */ +#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */ +#define B43_PHY_LMS B43_PHY_OFDM(0x55) +#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */ +#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */ +#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */ +#define B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */ +#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */ +#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */ +#define B43_PHY_OTABLENR_SHIFT 10 +#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */ +#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */ +#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */ +#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */ +#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */ +#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */ +#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */ +#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */ +#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0) +#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1) +#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2) +#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3) +#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4) +#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */ +#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9) +#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA) +#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB) +#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */ +#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */ +#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (rev 1 only) */ +#define B43_PHY_CRSTHRES2_R1 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (rev 1 only) */ +#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */ +#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */ +#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */ + +/* CCK (B) PHY Registers */ +#define B43_PHY_VERSION_CCK B43_PHY_BASE(0x00) /* Versioning register for B-PHY */ +#define B43_PHY_CCKBBANDCFG B43_PHY_BASE(0x01) /* Contains antenna 0/1 control bit */ +#define B43_PHY_PGACTL B43_PHY_BASE(0x15) /* PGA control */ +#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) 
*/ +#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */ +#define B43_PHY_PGACTL_UNKNOWN 0xEFA0 +#define B43_PHY_FBCTL1 B43_PHY_BASE(0x18) /* Frequency bandwidth control 1 */ +#define B43_PHY_ITSSI B43_PHY_BASE(0x29) /* Idle TSSI */ +#define B43_PHY_LO_LEAKAGE B43_PHY_BASE(0x2D) /* Measured LO leakage */ +#define B43_PHY_ENERGY B43_PHY_BASE(0x33) /* Energy */ +#define B43_PHY_SYNCCTL B43_PHY_BASE(0x35) +#define B43_PHY_FBCTL2 B43_PHY_BASE(0x38) /* Frequency bandwidth control 2 */ +#define B43_PHY_DACCTL B43_PHY_BASE(0x60) /* DAC control */ +#define B43_PHY_RCCALOVER B43_PHY_BASE(0x78) /* RC calibration override */ + +/* Extended G-PHY Registers */ +#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */ +#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */ +#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */ +#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */ +#define B43_PHY_GTABNR_SHIFT 10 +#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */ +#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */ +#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */ +#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */ +#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */ +#define B43_PHY_RFOVERVAL_EXTLNA 0x8000 +#define B43_PHY_RFOVERVAL_LNA 0x7000 +#define B43_PHY_RFOVERVAL_LNA_SHIFT 12 +#define B43_PHY_RFOVERVAL_PGA 0x0F00 +#define B43_PHY_RFOVERVAL_PGA_SHIFT 8 +#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */ +#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0 +#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */ +#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */ +#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */ +#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */ +#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */ + +/*** OFDM table numbers ***/ +#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset)) +#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0) +#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0) +#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename +#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4) +#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0) +#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3) +#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0) +#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0) +#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0) +#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0) +#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0) +#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0) +#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0) +#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0) +#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7) +#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12) +#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13) +//TODO +#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12) +#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0) +//TODO +#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0) +#define B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO rename +#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 1) +#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0) +#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4) +#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0) +#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0) +#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0) +#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0) + +u16 
b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset); +void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, + u16 offset, u16 value); +u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset); +void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, + u16 offset, u32 value); + +/*** G-PHY table numbers */ +#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset)) +#define B43_GTAB_NRSSI B43_GTAB(0x00, 0) +#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120) +#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298) + +u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset); //TODO implement +void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value); //TODO implement + +#define B43_DEFAULT_CHANNEL_A 36 +#define B43_DEFAULT_CHANNEL_BG 6 + +enum { + B43_ANTENNA0, /* Antenna 0 */ + B43_ANTENNA1, /* Antenna 0 */ + B43_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */ + B43_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */ + + B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0, + B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO, +}; + +enum { + B43_INTERFMODE_NONE, + B43_INTERFMODE_NONWLAN, + B43_INTERFMODE_MANUALWLAN, + B43_INTERFMODE_AUTOWLAN, +}; + +/* Masks for the different PHY versioning registers. */ +#define B43_PHYVER_ANALOG 0xF000 +#define B43_PHYVER_ANALOG_SHIFT 12 +#define B43_PHYVER_TYPE 0x0F00 +#define B43_PHYVER_TYPE_SHIFT 8 +#define B43_PHYVER_VERSION 0x00FF + +void b43_raw_phy_lock(struct b43_wldev *dev); +#define b43_phy_lock(dev, flags) \ + do { \ + local_irq_save(flags); \ + b43_raw_phy_lock(dev); \ + } while (0) +void b43_raw_phy_unlock(struct b43_wldev *dev); +#define b43_phy_unlock(dev, flags) \ + do { \ + b43_raw_phy_unlock(dev); \ + local_irq_restore(flags); \ + } while (0) + +u16 b43_phy_read(struct b43_wldev *dev, u16 offset); +void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val); + +int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev); + +void b43_phy_early_init(struct b43_wldev *dev); +int b43_phy_init(struct b43_wldev *dev); + +void b43_set_rx_antenna(struct b43_wldev *dev, int antenna); + +void b43_phy_xmitpower(struct b43_wldev *dev); +void b43_gphy_dc_lt_init(struct b43_wldev *dev); + +/* Returns the boolean whether the board has HardwarePowerControl */ +bool b43_has_hardware_pctl(struct b43_phy *phy); +/* Returns the boolean whether "TX Magnification" is enabled. */ +#define has_tx_magnification(phy) \ + (((phy)->rev >= 2) && \ + ((phy)->radio_ver == 0x2050) && \ + ((phy)->radio_rev == 8)) +/* Card uses the loopback gain stuff */ +#define has_loopback_gain(phy) \ + (((phy)->rev > 1) || ((phy)->gmode)) + +/* Radio Attenuation (RF Attenuation) */ +struct b43_rfatt { + u8 att; /* Attenuation value */ + bool with_padmix; /* Flag, PAD Mixer enabled. */ +}; +struct b43_rfatt_list { + /* Attenuation values list */ + const struct b43_rfatt *list; + u8 len; + /* Minimum/Maximum attenuation values */ + u8 min_val; + u8 max_val; +}; + +/* Baseband Attenuation */ +struct b43_bbatt { + u8 att; /* Attenuation value */ +}; +struct b43_bbatt_list { + /* Attenuation values list */ + const struct b43_bbatt *list; + u8 len; + /* Minimum/Maximum attenuation values */ + u8 min_val; + u8 max_val; +}; + +/* tx_control bits. */ +#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */ +#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */ +#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */ + +/* Write BasebandAttenuation value to the device. 
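Both table selectors defined earlier in this header, B43_OFDMTAB() and B43_GTAB(), pack a table number into the upper six bits (shift 10) and an offset into the lower ten bits of a single 16-bit word; the access routines in tables.c write that word (plus the caller's extra offset) to B43_PHY_OTABLECTL or B43_PHY_GTABCTL and then move data through OTABLEI/OTABLEQ or GTABDATA. A stand-alone sketch of the encoding; the assertions only restate the macros:

#include <assert.h>
#include <stdint.h>

#define OTABLENR_SHIFT	10
#define OFDMTAB(number, offset)	(((number) << OTABLENR_SHIFT) | (offset))

int main(void)
{
	/* B43_OFDMTAB_GAIN2 is table 0x02, offset 3 */
	uint16_t sel = OFDMTAB(0x02, 3);

	assert(sel == 0x0803);
	/* the driver would then do roughly:
	 *   b43_phy_write(dev, B43_PHY_OTABLECTL, sel + extra_offset);
	 *   value = b43_phy_read(dev, B43_PHY_OTABLEI);
	 */
	assert((sel >> OTABLENR_SHIFT) == 0x02);	/* table number */
	assert((sel & 0x03FF) == 3);			/* offset within the table */
	return 0;
}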
*/ +void b43_phy_set_baseband_attenuation(struct b43_wldev *dev, + u16 baseband_attenuation); + +extern const u8 b43_radio_channel_codes_bg[]; + +void b43_radio_lock(struct b43_wldev *dev); +void b43_radio_unlock(struct b43_wldev *dev); + +u16 b43_radio_read16(struct b43_wldev *dev, u16 offset); +void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val); + +u16 b43_radio_init2050(struct b43_wldev *dev); +void b43_radio_init2060(struct b43_wldev *dev); + +void b43_radio_turn_on(struct b43_wldev *dev); +void b43_radio_turn_off(struct b43_wldev *dev, bool force); + +int b43_radio_selectchannel(struct b43_wldev *dev, u8 channel, + int synthetic_pu_workaround); + +u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel); +u8 b43_radio_aci_scan(struct b43_wldev *dev); + +int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode); + +void b43_calc_nrssi_slope(struct b43_wldev *dev); +void b43_calc_nrssi_threshold(struct b43_wldev *dev); +s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset); +void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val); +void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val); +void b43_nrssi_mem_update(struct b43_wldev *dev); + +void b43_radio_set_tx_iq(struct b43_wldev *dev); +u16 b43_radio_calibrationvalue(struct b43_wldev *dev); + +void b43_put_attenuation_into_ranges(struct b43_wldev *dev, + int *_bbatt, int *_rfatt); + +void b43_set_txpower_g(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt, u8 tx_control); + +#endif /* B43_PHY_H_ */ diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c new file mode 100644 index 000000000000..67752a28eb9c --- /dev/null +++ b/drivers/net/wireless/b43/pio.c @@ -0,0 +1,652 @@ +/* + + Broadcom B43 wireless driver + + PIO Transmission + + Copyright (c) 2005 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "pio.h" +#include "main.h" +#include "xmit.h" + +#include <linux/delay.h> + +static void tx_start(struct b43_pioqueue *queue) +{ + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_INIT); +} + +static void tx_octet(struct b43_pioqueue *queue, u8 octet) +{ + if (queue->need_workarounds) { + b43_pio_write(queue, B43_PIO_TXDATA, octet); + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_WRITELO); + } else { + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_WRITELO); + b43_pio_write(queue, B43_PIO_TXDATA, octet); + } +} + +static u16 tx_get_next_word(const u8 * txhdr, + const u8 * packet, + size_t txhdr_size, unsigned int *pos) +{ + const u8 *source; + unsigned int i = *pos; + u16 ret; + + if (i < txhdr_size) { + source = txhdr; + } else { + source = packet; + i -= txhdr_size; + } + ret = le16_to_cpu(*((__le16 *)(source + i))); + *pos += 2; + + return ret; +} + +static void tx_data(struct b43_pioqueue *queue, + u8 * txhdr, const u8 * packet, unsigned int octets) +{ + u16 data; + unsigned int i = 0; + + if (queue->need_workarounds) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43_txhdr_fw4), &i); + b43_pio_write(queue, B43_PIO_TXDATA, data); + } + b43_pio_write(queue, B43_PIO_TXCTL, + B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI); + while (i < octets - 1) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43_txhdr_fw4), &i); + b43_pio_write(queue, B43_PIO_TXDATA, data); + } + if (octets % 2) + tx_octet(queue, + packet[octets - sizeof(struct b43_txhdr_fw4) - 1]); +} + +static void tx_complete(struct b43_pioqueue *queue, struct sk_buff *skb) +{ + if (queue->need_workarounds) { + b43_pio_write(queue, B43_PIO_TXDATA, skb->data[skb->len - 1]); + b43_pio_write(queue, B43_PIO_TXCTL, + B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_COMPLETE); + } else { + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_COMPLETE); + } +} + +static u16 generate_cookie(struct b43_pioqueue *queue, + struct b43_pio_txpacket *packet) +{ + u16 cookie = 0x0000; + u16 packetindex; + + /* We use the upper 4 bits for the PIO + * controller ID and the lower 12 bits + * for the packet index (in the cache). 
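As the comment above says, the cookie that later comes back in the TX status report carries the PIO controller in its upper four bits and the index into tx_packets_cache in its lower twelve bits, which is why B43_PIO_MAXTXPACKETS has to stay below 0x1000. A stand-alone round trip of the same encoding (plain queue numbers stand in for the MMIO base addresses that generate_cookie() switches on):

#include <assert.h>
#include <stdint.h>

#define MAX_TXPACKETS 256		/* B43_PIO_MAXTXPACKETS */

static uint16_t make_cookie(unsigned int queue_nr, uint16_t pktidx)
{
	assert(queue_nr < 4 && pktidx < 0x1000);
	return (uint16_t)((queue_nr << 12) | pktidx);
}

static void parse(uint16_t cookie, unsigned int *queue_nr, uint16_t *pktidx)
{
	*queue_nr = cookie >> 12;	/* 0x0000..0x3000 -> queue0..queue3 */
	*pktidx = cookie & 0x0FFF;	/* index into tx_packets_cache */
	assert(*pktidx < MAX_TXPACKETS);
}

int main(void)
{
	unsigned int q;
	uint16_t idx;

	parse(make_cookie(1, 42), &q, &idx);
	assert(q == 1 && idx == 42);
	return 0;
}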
+ */ + switch (queue->mmio_base) { + case B43_MMIO_PIO1_BASE: + break; + case B43_MMIO_PIO2_BASE: + cookie = 0x1000; + break; + case B43_MMIO_PIO3_BASE: + cookie = 0x2000; + break; + case B43_MMIO_PIO4_BASE: + cookie = 0x3000; + break; + default: + B43_WARN_ON(1); + } + packetindex = packet->index; + B43_WARN_ON(packetindex & ~0x0FFF); + cookie |= (u16) packetindex; + + return cookie; +} + +static +struct b43_pioqueue *parse_cookie(struct b43_wldev *dev, + u16 cookie, struct b43_pio_txpacket **packet) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pioqueue *queue = NULL; + int packetindex; + + switch (cookie & 0xF000) { + case 0x0000: + queue = pio->queue0; + break; + case 0x1000: + queue = pio->queue1; + break; + case 0x2000: + queue = pio->queue2; + break; + case 0x3000: + queue = pio->queue3; + break; + default: + B43_WARN_ON(1); + } + packetindex = (cookie & 0x0FFF); + B43_WARN_ON(!(packetindex >= 0 && packetindex < B43_PIO_MAXTXPACKETS)); + *packet = &(queue->tx_packets_cache[packetindex]); + + return queue; +} + +union txhdr_union { + struct b43_txhdr_fw4 txhdr_fw4; +}; + +static void pio_tx_write_fragment(struct b43_pioqueue *queue, + struct sk_buff *skb, + struct b43_pio_txpacket *packet, + size_t txhdr_size) +{ + union txhdr_union txhdr_data; + u8 *txhdr = NULL; + unsigned int octets; + + txhdr = (u8 *) (&txhdr_data.txhdr_fw4); + + B43_WARN_ON(skb_shinfo(skb)->nr_frags); + b43_generate_txhdr(queue->dev, + txhdr, skb->data, skb->len, + &packet->txstat.control, + generate_cookie(queue, packet)); + + tx_start(queue); + octets = skb->len + txhdr_size; + if (queue->need_workarounds) + octets--; + tx_data(queue, txhdr, (u8 *) skb->data, octets); + tx_complete(queue, skb); +} + +static void free_txpacket(struct b43_pio_txpacket *packet) +{ + struct b43_pioqueue *queue = packet->queue; + + if (packet->skb) + dev_kfree_skb_any(packet->skb); + list_move(&packet->list, &queue->txfree); + queue->nr_txfree++; +} + +static int pio_tx_packet(struct b43_pio_txpacket *packet) +{ + struct b43_pioqueue *queue = packet->queue; + struct sk_buff *skb = packet->skb; + u16 octets; + + octets = (u16) skb->len + sizeof(struct b43_txhdr_fw4); + if (queue->tx_devq_size < octets) { + b43warn(queue->dev->wl, "PIO queue too small. " + "Dropping packet.\n"); + /* Drop it silently (return success) */ + free_txpacket(packet); + return 0; + } + B43_WARN_ON(queue->tx_devq_packets > B43_PIO_MAXTXDEVQPACKETS); + B43_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); + /* Check if there is sufficient free space on the device + * TX queue. If not, return and let the TX tasklet + * retry later. + */ + if (queue->tx_devq_packets == B43_PIO_MAXTXDEVQPACKETS) + return -EBUSY; + if (queue->tx_devq_used + octets > queue->tx_devq_size) + return -EBUSY; + /* Now poke the device. */ + pio_tx_write_fragment(queue, skb, packet, sizeof(struct b43_txhdr_fw4)); + + /* Account for the packet size. + * (We must not overflow the device TX queue) + */ + queue->tx_devq_packets++; + queue->tx_devq_used += octets; + + /* Transmission started, everything ok, move the + * packet to the txrunning list. 
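pio_tx_packet() above drops oversized frames outright and returns -EBUSY when either the device packet counter or the octet budget of the device-internal buffer would overflow, so the tasklet can retry once TX status reports have freed space. The admission test reduces to a small predicate; a sketch with hypothetical type and helper names:

#include <stdbool.h>
#include <stddef.h>

#define MAXTXDEVQPACKETS 31		/* B43_PIO_MAXTXDEVQPACKETS */

struct devq_state {
	unsigned int packets;	/* packets currently in the device buffer */
	size_t used;		/* octets currently in the device buffer */
	size_t size;		/* adjusted buffer size (qsize - B43_PIO_TXQADJUST) */
};

/* Returns true if 'octets' (skb length plus sizeof(struct b43_txhdr_fw4),
 * as computed in pio_tx_packet()) can be pushed to the device right now. */
static bool devq_has_room(const struct devq_state *q, size_t octets)
{
	if (q->packets == MAXTXDEVQPACKETS)
		return false;		/* no free packet slot in hardware */
	if (q->used + octets > q->size)
		return false;		/* not enough buffer space left */
	return true;
}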
+ */ + list_move_tail(&packet->list, &queue->txrunning); + + return 0; +} + +static void tx_tasklet(unsigned long d) +{ + struct b43_pioqueue *queue = (struct b43_pioqueue *)d; + struct b43_wldev *dev = queue->dev; + unsigned long flags; + struct b43_pio_txpacket *packet, *tmp_packet; + int err; + u16 txctl; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + if (queue->tx_frozen) + goto out_unlock; + txctl = b43_pio_read(queue, B43_PIO_TXCTL); + if (txctl & B43_PIO_TXCTL_SUSPEND) + goto out_unlock; + + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { + /* Try to transmit the packet. This can fail, if + * the device queue is full. In case of failure, the + * packet is left in the txqueue. + * If transmission succeed, the packet is moved to txrunning. + * If it is impossible to transmit the packet, it + * is dropped. + */ + err = pio_tx_packet(packet); + if (err) + break; + } + out_unlock: + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void setup_txqueues(struct b43_pioqueue *queue) +{ + struct b43_pio_txpacket *packet; + int i; + + queue->nr_txfree = B43_PIO_MAXTXPACKETS; + for (i = 0; i < B43_PIO_MAXTXPACKETS; i++) { + packet = &(queue->tx_packets_cache[i]); + + packet->queue = queue; + INIT_LIST_HEAD(&packet->list); + packet->index = i; + + list_add(&packet->list, &queue->txfree); + } +} + +static +struct b43_pioqueue *b43_setup_pioqueue(struct b43_wldev *dev, + u16 pio_mmio_base) +{ + struct b43_pioqueue *queue; + u16 qsize; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + goto out; + + queue->dev = dev; + queue->mmio_base = pio_mmio_base; + queue->need_workarounds = (dev->dev->id.revision < 3); + + INIT_LIST_HEAD(&queue->txfree); + INIT_LIST_HEAD(&queue->txqueue); + INIT_LIST_HEAD(&queue->txrunning); + tasklet_init(&queue->txtask, tx_tasklet, (unsigned long)queue); + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_BE); + + qsize = b43_read16(dev, queue->mmio_base + B43_PIO_TXQBUFSIZE); + if (qsize == 0) { + b43err(dev->wl, "This card does not support PIO " + "operation mode. 
Please use DMA mode " + "(module parameter pio=0).\n"); + goto err_freequeue; + } + if (qsize <= B43_PIO_TXQADJUST) { + b43err(dev->wl, "PIO tx device-queue too small (%u)\n", qsize); + goto err_freequeue; + } + qsize -= B43_PIO_TXQADJUST; + queue->tx_devq_size = qsize; + + setup_txqueues(queue); + + out: + return queue; + + err_freequeue: + kfree(queue); + queue = NULL; + goto out; +} + +static void cancel_transfers(struct b43_pioqueue *queue) +{ + struct b43_pio_txpacket *packet, *tmp_packet; + + tasklet_disable(&queue->txtask); + + list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) + free_txpacket(packet); + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) + free_txpacket(packet); +} + +static void b43_destroy_pioqueue(struct b43_pioqueue *queue) +{ + if (!queue) + return; + + cancel_transfers(queue); + kfree(queue); +} + +void b43_pio_free(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + if (!b43_using_pio(dev)) + return; + pio = &dev->pio; + + b43_destroy_pioqueue(pio->queue3); + pio->queue3 = NULL; + b43_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + b43_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + b43_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; +} + +int b43_pio_init(struct b43_wldev *dev) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pioqueue *queue; + int err = -ENOMEM; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO1_BASE); + if (!queue) + goto out; + pio->queue0 = queue; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO2_BASE); + if (!queue) + goto err_destroy0; + pio->queue1 = queue; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO3_BASE); + if (!queue) + goto err_destroy1; + pio->queue2 = queue; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO4_BASE); + if (!queue) + goto err_destroy2; + pio->queue3 = queue; + + if (dev->dev->id.revision < 3) + dev->irq_savedstate |= B43_IRQ_PIO_WORKAROUND; + + b43dbg(dev->wl, "PIO initialized\n"); + err = 0; + out: + return err; + + err_destroy2: + b43_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + err_destroy1: + b43_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + err_destroy0: + b43_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; + goto out; +} + +int b43_pio_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct b43_pioqueue *queue = dev->pio.queue1; + struct b43_pio_txpacket *packet; + + B43_WARN_ON(queue->tx_suspended); + B43_WARN_ON(list_empty(&queue->txfree)); + + packet = list_entry(queue->txfree.next, struct b43_pio_txpacket, list); + packet->skb = skb; + + memset(&packet->txstat, 0, sizeof(packet->txstat)); + memcpy(&packet->txstat.control, ctl, sizeof(*ctl)); + + list_move_tail(&packet->list, &queue->txqueue); + queue->nr_txfree--; + queue->nr_tx_packets++; + B43_WARN_ON(queue->nr_txfree >= B43_PIO_MAXTXPACKETS); + + tasklet_schedule(&queue->txtask); + + return 0; +} + +void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + struct b43_pioqueue *queue; + struct b43_pio_txpacket *packet; + + queue = parse_cookie(dev, status->cookie, &packet); + if (B43_WARN_ON(!queue)) + return; + + queue->tx_devq_packets--; + queue->tx_devq_used -= + (packet->skb->len + sizeof(struct b43_txhdr_fw4)); + + if (status->acked) { + packet->txstat.flags |= IEEE80211_TX_STATUS_ACK; + } else { + if (!(packet->txstat.control.flags & IEEE80211_TXCTL_NO_ACK)) + packet->txstat.excessive_retries = 1; + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. 
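 * (One attempt therefore maps to zero retries: the code below reports
 * retry_count = frame_count ? frame_count - 1 : 0, so a frame that went
 * out on the first try and a frame that never went out at all both end
 * up with zero retries.)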
*/ + packet->txstat.retry_count = 0; + } else + packet->txstat.retry_count = status->frame_count - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb, + &(packet->txstat)); + packet->skb = NULL; + + free_txpacket(packet); + /* If there are packets on the txqueue, poke the tasklet + * to transmit them. + */ + if (!list_empty(&queue->txqueue)) + tasklet_schedule(&queue->txtask); +} + +void b43_pio_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pioqueue *queue; + struct ieee80211_tx_queue_stats_data *data; + + queue = pio->queue1; + data = &(stats->data[0]); + data->len = B43_PIO_MAXTXPACKETS - queue->nr_txfree; + data->limit = B43_PIO_MAXTXPACKETS; + data->count = queue->nr_tx_packets; +} + +static void pio_rx_error(struct b43_pioqueue *queue, + int clear_buffers, const char *error) +{ + int i; + + b43err(queue->dev->wl, "PIO RX error: %s\n", error); + b43_pio_write(queue, B43_PIO_RXCTL, B43_PIO_RXCTL_READY); + if (clear_buffers) { + B43_WARN_ON(queue->mmio_base != B43_MMIO_PIO1_BASE); + for (i = 0; i < 15; i++) { + /* Dummy read. */ + b43_pio_read(queue, B43_PIO_RXDATA); + } + } +} + +void b43_pio_rx(struct b43_pioqueue *queue) +{ + __le16 preamble[21] = { 0 }; + struct b43_rxhdr_fw4 *rxhdr; + u16 tmp, len; + u32 macstat; + int i, preamble_readwords; + struct sk_buff *skb; + + tmp = b43_pio_read(queue, B43_PIO_RXCTL); + if (!(tmp & B43_PIO_RXCTL_DATAAVAILABLE)) + return; + b43_pio_write(queue, B43_PIO_RXCTL, B43_PIO_RXCTL_DATAAVAILABLE); + + for (i = 0; i < 10; i++) { + tmp = b43_pio_read(queue, B43_PIO_RXCTL); + if (tmp & B43_PIO_RXCTL_READY) + goto data_ready; + udelay(10); + } + b43dbg(queue->dev->wl, "PIO RX timed out\n"); + return; +data_ready: + + len = b43_pio_read(queue, B43_PIO_RXDATA); + if (unlikely(len > 0x700)) { + pio_rx_error(queue, 0, "len > 0x700"); + return; + } + if (unlikely(len == 0 && queue->mmio_base != B43_MMIO_PIO4_BASE)) { + pio_rx_error(queue, 0, "len == 0"); + return; + } + preamble[0] = cpu_to_le16(len); + if (queue->mmio_base == B43_MMIO_PIO4_BASE) + preamble_readwords = 14 / sizeof(u16); + else + preamble_readwords = 18 / sizeof(u16); + for (i = 0; i < preamble_readwords; i++) { + tmp = b43_pio_read(queue, B43_PIO_RXDATA); + preamble[i + 1] = cpu_to_le16(tmp); + } + rxhdr = (struct b43_rxhdr_fw4 *)preamble; + macstat = le32_to_cpu(rxhdr->mac_status); + if (macstat & B43_RX_MAC_FCSERR) { + pio_rx_error(queue, + (queue->mmio_base == B43_MMIO_PIO1_BASE), + "Frame FCS error"); + return; + } + if (queue->mmio_base == B43_MMIO_PIO4_BASE) { + /* We received an xmit status. */ + struct b43_hwtxstatus *hw; + + hw = (struct b43_hwtxstatus *)(preamble + 1); + b43_handle_hwtxstatus(queue->dev, hw); + + return; + } + + skb = dev_alloc_skb(len); + if (unlikely(!skb)) { + pio_rx_error(queue, 1, "OOM"); + return; + } + skb_put(skb, len); + for (i = 0; i < len - 1; i += 2) { + tmp = b43_pio_read(queue, B43_PIO_RXDATA); + *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp); + } + if (len % 2) { + tmp = b43_pio_read(queue, B43_PIO_RXDATA); + skb->data[len - 1] = (tmp & 0x00FF); +/* The specs say the following is required, but + * it is wrong and corrupts the PLCP. If we don't do + * this, the PLCP seems to be correct. So ifdef it out for now. 
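 * (In other words: for an odd-length frame the last device word carries the
 * final payload byte in its low half; the disabled code below would copy the
 * high half over the start of the buffer, which is where the PLCP sits.)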
+ */ +#if 0 + if (rxflags2 & B43_RXHDR_FLAGS2_TYPE2FRAME) + skb->data[2] = (tmp & 0xFF00) >> 8; + else + skb->data[0] = (tmp & 0xFF00) >> 8; +#endif + } + b43_rx(queue->dev, skb, rxhdr); +} + +void b43_pio_tx_suspend(struct b43_pioqueue *queue) +{ + b43_power_saving_ctl_bits(queue->dev, B43_PS_AWAKE); + b43_pio_write(queue, B43_PIO_TXCTL, b43_pio_read(queue, B43_PIO_TXCTL) + | B43_PIO_TXCTL_SUSPEND); +} + +void b43_pio_tx_resume(struct b43_pioqueue *queue) +{ + b43_pio_write(queue, B43_PIO_TXCTL, b43_pio_read(queue, B43_PIO_TXCTL) + & ~B43_PIO_TXCTL_SUSPEND); + b43_power_saving_ctl_bits(queue->dev, 0); + tasklet_schedule(&queue->txtask); +} + +void b43_pio_freeze_txqueues(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + B43_WARN_ON(!b43_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 1; + pio->queue1->tx_frozen = 1; + pio->queue2->tx_frozen = 1; + pio->queue3->tx_frozen = 1; +} + +void b43_pio_thaw_txqueues(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + B43_WARN_ON(!b43_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 0; + pio->queue1->tx_frozen = 0; + pio->queue2->tx_frozen = 0; + pio->queue3->tx_frozen = 0; + if (!list_empty(&pio->queue0->txqueue)) + tasklet_schedule(&pio->queue0->txtask); + if (!list_empty(&pio->queue1->txqueue)) + tasklet_schedule(&pio->queue1->txtask); + if (!list_empty(&pio->queue2->txqueue)) + tasklet_schedule(&pio->queue2->txtask); + if (!list_empty(&pio->queue3->txqueue)) + tasklet_schedule(&pio->queue3->txtask); +} diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h new file mode 100644 index 000000000000..34a44c1b6314 --- /dev/null +++ b/drivers/net/wireless/b43/pio.h @@ -0,0 +1,152 @@ +#ifndef B43_PIO_H_ +#define B43_PIO_H_ + +#include "b43.h" + +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/skbuff.h> + +#define B43_PIO_TXCTL 0x00 +#define B43_PIO_TXDATA 0x02 +#define B43_PIO_TXQBUFSIZE 0x04 +#define B43_PIO_RXCTL 0x08 +#define B43_PIO_RXDATA 0x0A + +#define B43_PIO_TXCTL_WRITELO (1 << 0) +#define B43_PIO_TXCTL_WRITEHI (1 << 1) +#define B43_PIO_TXCTL_COMPLETE (1 << 2) +#define B43_PIO_TXCTL_INIT (1 << 3) +#define B43_PIO_TXCTL_SUSPEND (1 << 7) + +#define B43_PIO_RXCTL_DATAAVAILABLE (1 << 0) +#define B43_PIO_RXCTL_READY (1 << 1) + +/* PIO constants */ +#define B43_PIO_MAXTXDEVQPACKETS 31 +#define B43_PIO_TXQADJUST 80 + +/* PIO tuning knobs */ +#define B43_PIO_MAXTXPACKETS 256 + +#ifdef CONFIG_B43_PIO + +struct b43_pioqueue; +struct b43_xmitstatus; + +struct b43_pio_txpacket { + struct b43_pioqueue *queue; + struct sk_buff *skb; + struct ieee80211_tx_status txstat; + struct list_head list; + u16 index; /* Index in the tx_packets_cache */ +}; + +struct b43_pioqueue { + struct b43_wldev *dev; + u16 mmio_base; + + bool tx_suspended; + bool tx_frozen; + bool need_workarounds; /* Workarounds needed for core.rev < 3 */ + + /* Adjusted size of the device internal TX buffer. */ + u16 tx_devq_size; + /* Used octets of the device internal TX buffer. */ + u16 tx_devq_used; + /* Used packet slots in the device internal TX buffer. */ + u8 tx_devq_packets; + /* Packets from the txfree list can + * be taken on incoming TX requests. + */ + struct list_head txfree; + unsigned int nr_txfree; + /* Packets on the txqueue are queued, + * but not completely written to the chip, yet. + */ + struct list_head txqueue; + /* Packets on the txrunning queue are completely + * posted to the device. We are waiting for the txstatus. + */ + struct list_head txrunning; + /* Total number or packets sent. 
+ * (This counter can obviously wrap). + */ + unsigned int nr_tx_packets; + struct tasklet_struct txtask; + struct b43_pio_txpacket tx_packets_cache[B43_PIO_MAXTXPACKETS]; +}; + +static inline u16 b43_pio_read(struct b43_pioqueue *queue, u16 offset) +{ + return b43_read16(queue->dev, queue->mmio_base + offset); +} + +static inline + void b43_pio_write(struct b43_pioqueue *queue, u16 offset, u16 value) +{ + b43_write16(queue->dev, queue->mmio_base + offset, value); + mmiowb(); +} + +int b43_pio_init(struct b43_wldev *dev); +void b43_pio_free(struct b43_wldev *dev); + +int b43_pio_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl); +void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); +void b43_pio_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats); +void b43_pio_rx(struct b43_pioqueue *queue); + +/* Suspend TX queue in hardware. */ +void b43_pio_tx_suspend(struct b43_pioqueue *queue); +void b43_pio_tx_resume(struct b43_pioqueue *queue); +/* Suspend (freeze) the TX tasklet (software level). */ +void b43_pio_freeze_txqueues(struct b43_wldev *dev); +void b43_pio_thaw_txqueues(struct b43_wldev *dev); + +#else /* CONFIG_B43_PIO */ + +static inline int b43_pio_init(struct b43_wldev *dev) +{ + return 0; +} +static inline void b43_pio_free(struct b43_wldev *dev) +{ +} +static inline + int b43_pio_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline + void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} +static inline + void b43_pio_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline void b43_pio_rx(struct b43_pioqueue *queue) +{ +} +static inline void b43_pio_tx_suspend(struct b43_pioqueue *queue) +{ +} +static inline void b43_pio_tx_resume(struct b43_pioqueue *queue) +{ +} +static inline void b43_pio_freeze_txqueues(struct b43_wldev *dev) +{ +} +static inline void b43_pio_thaw_txqueues(struct b43_wldev *dev) +{ +} + +#endif /* CONFIG_B43_PIO */ +#endif /* B43_PIO_H_ */ diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c new file mode 100644 index 000000000000..800e0a61a7f5 --- /dev/null +++ b/drivers/net/wireless/b43/rfkill.c @@ -0,0 +1,184 @@ +/* + + Broadcom B43 wireless driver + RFKILL support + + Copyright (c) 2007 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "rfkill.h" +#include "b43.h" + + +/* Returns TRUE, if the radio is enabled in hardware. 
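 * Note the polarity difference below: on newer cores (phy.rev >= 3) the
 * HWENABLED_HI bit reads as 0 when the radio is enabled, while on older
 * cores the HWENABLED_LO bit reads as 1 when it is enabled.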
*/ +static bool b43_is_hw_radio_enabled(struct b43_wldev *dev) +{ + if (dev->phy.rev >= 3) { + if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI) + & B43_MMIO_RADIO_HWENABLED_HI_MASK)) + return 1; + } else { + if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) + & B43_MMIO_RADIO_HWENABLED_LO_MASK) + return 1; + } + return 0; +} + +/* The poll callback for the hardware button. */ +static void b43_rfkill_poll(struct input_polled_dev *poll_dev) +{ + struct b43_wldev *dev = poll_dev->private; + struct b43_wl *wl = dev->wl; + bool enabled; + + mutex_lock(&wl->mutex); + B43_WARN_ON(b43_status(dev) < B43_STAT_INITIALIZED); + enabled = b43_is_hw_radio_enabled(dev); + if (unlikely(enabled != dev->radio_hw_enable)) { + dev->radio_hw_enable = enabled; + b43info(wl, "Radio hardware status changed to %s\n", + enabled ? "ENABLED" : "DISABLED"); + mutex_unlock(&wl->mutex); + input_report_key(poll_dev->input, KEY_WLAN, enabled); + } else + mutex_unlock(&wl->mutex); +} + +/* Called when the RFKILL toggled in software. + * This is called without locking. */ +static int b43_rfkill_soft_toggle(void *data, enum rfkill_state state) +{ + struct b43_wldev *dev = data; + struct b43_wl *wl = dev->wl; + int err = 0; + + mutex_lock(&wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) + goto out_unlock; + + switch (state) { + case RFKILL_STATE_ON: + if (!dev->radio_hw_enable) { + /* No luck. We can't toggle the hardware RF-kill + * button from software. */ + err = -EBUSY; + goto out_unlock; + } + if (!dev->phy.radio_on) + b43_radio_turn_on(dev); + break; + case RFKILL_STATE_OFF: + if (dev->phy.radio_on) + b43_radio_turn_off(dev, 0); + break; + } + +out_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +char * b43_rfkill_led_name(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (!wl->rfkill.rfkill) + return NULL; + return rfkill_get_led_name(wl->rfkill.rfkill); +} + +void b43_rfkill_init(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_rfkill *rfk = &(wl->rfkill); + int err; + + if (rfk->rfkill) { + err = rfkill_register(rfk->rfkill); + if (err) { + b43warn(wl, "Failed to register RF-kill button\n"); + goto err_free_rfk; + } + } + if (rfk->poll_dev) { + err = input_register_polled_device(rfk->poll_dev); + if (err) { + b43warn(wl, "Failed to register RF-kill polldev\n"); + goto err_free_polldev; + } + } + + return; +err_free_rfk: + rfkill_free(rfk->rfkill); + rfk->rfkill = NULL; +err_free_polldev: + input_free_polled_device(rfk->poll_dev); + rfk->poll_dev = NULL; +} + +void b43_rfkill_exit(struct b43_wldev *dev) +{ + struct b43_rfkill *rfk = &(dev->wl->rfkill); + + if (rfk->poll_dev) + input_unregister_polled_device(rfk->poll_dev); + if (rfk->rfkill) + rfkill_unregister(rfk->rfkill); +} + +void b43_rfkill_alloc(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_rfkill *rfk = &(wl->rfkill); + + snprintf(rfk->name, sizeof(rfk->name), + "b43-%s", wiphy_name(wl->hw->wiphy)); + + rfk->rfkill = rfkill_allocate(dev->dev->dev, RFKILL_TYPE_WLAN); + if (!rfk->rfkill) { + b43warn(wl, "Failed to allocate RF-kill button\n"); + return; + } + rfk->rfkill->name = rfk->name; + rfk->rfkill->state = RFKILL_STATE_ON; + rfk->rfkill->data = dev; + rfk->rfkill->toggle_radio = b43_rfkill_soft_toggle; + rfk->rfkill->user_claim_unsupported = 1; + + rfk->poll_dev = input_allocate_polled_device(); + if (rfk->poll_dev) { + rfk->poll_dev->private = dev; + rfk->poll_dev->poll = b43_rfkill_poll; + rfk->poll_dev->poll_interval = 1000; /* msecs */ + } else + b43warn(wl, "Failed to 
allocate RF-kill polldev\n"); +} + +void b43_rfkill_free(struct b43_wldev *dev) +{ + struct b43_rfkill *rfk = &(dev->wl->rfkill); + + input_free_polled_device(rfk->poll_dev); + rfk->poll_dev = NULL; + rfkill_free(rfk->rfkill); + rfk->rfkill = NULL; +} diff --git a/drivers/net/wireless/b43/rfkill.h b/drivers/net/wireless/b43/rfkill.h new file mode 100644 index 000000000000..29544e8c9e5f --- /dev/null +++ b/drivers/net/wireless/b43/rfkill.h @@ -0,0 +1,58 @@ +#ifndef B43_RFKILL_H_ +#define B43_RFKILL_H_ + +struct b43_wldev; + + +#ifdef CONFIG_B43_RFKILL + +#include <linux/rfkill.h> +#include <linux/input-polldev.h> + + +struct b43_rfkill { + /* The RFKILL subsystem data structure */ + struct rfkill *rfkill; + /* The poll device for the RFKILL input button */ + struct input_polled_dev *poll_dev; + /* The unique name of this rfkill switch */ + char name[32]; +}; + +/* All the init functions return void, because we are not interested + * in failing the b43 init process when rfkill init failed. */ +void b43_rfkill_alloc(struct b43_wldev *dev); +void b43_rfkill_free(struct b43_wldev *dev); +void b43_rfkill_init(struct b43_wldev *dev); +void b43_rfkill_exit(struct b43_wldev *dev); + +char * b43_rfkill_led_name(struct b43_wldev *dev); + + +#else /* CONFIG_B43_RFKILL */ +/* No RFKILL support. */ + +struct b43_rfkill { + /* empty */ +}; + +static inline void b43_rfkill_alloc(struct b43_wldev *dev) +{ +} +static inline void b43_rfkill_free(struct b43_wldev *dev) +{ +} +static inline void b43_rfkill_init(struct b43_wldev *dev) +{ +} +static inline void b43_rfkill_exit(struct b43_wldev *dev) +{ +} +static inline char * b43_rfkill_led_name(struct b43_wldev *dev) +{ + return NULL; +} + +#endif /* CONFIG_B43_RFKILL */ + +#endif /* B43_RFKILL_H_ */ diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c new file mode 100644 index 000000000000..fcb777383e70 --- /dev/null +++ b/drivers/net/wireless/b43/sysfs.c @@ -0,0 +1,235 @@ +/* + + Broadcom B43 wireless driver + + SYSFS support routines + + Copyright (c) 2006 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "sysfs.h" +#include "main.h" +#include "phy.h" + +#include <linux/capability.h> + +#define GENERIC_FILESIZE 64 + +static int get_integer(const char *buf, size_t count) +{ + char tmp[10 + 1] = { 0 }; + int ret = -EINVAL; + + if (count == 0) + goto out; + count = min(count, (size_t) 10); + memcpy(tmp, buf, count); + ret = simple_strtol(tmp, NULL, 10); + out: + return ret; +} + +static int get_boolean(const char *buf, size_t count) +{ + if (count != 0) { + if (buf[0] == '1') + return 1; + if (buf[0] == '0') + return 0; + if (count >= 4 && memcmp(buf, "true", 4) == 0) + return 1; + if (count >= 5 && memcmp(buf, "false", 5) == 0) + return 0; + if (count >= 3 && memcmp(buf, "yes", 3) == 0) + return 1; + if (count >= 2 && memcmp(buf, "no", 2) == 0) + return 0; + if (count >= 2 && memcmp(buf, "on", 2) == 0) + return 1; + if (count >= 3 && memcmp(buf, "off", 3) == 0) + return 0; + } + return -EINVAL; +} + +static ssize_t b43_attr_interfmode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + ssize_t count = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + switch (wldev->phy.interfmode) { + case B43_INTERFMODE_NONE: + count = + snprintf(buf, PAGE_SIZE, + "0 (No Interference Mitigation)\n"); + break; + case B43_INTERFMODE_NONWLAN: + count = + snprintf(buf, PAGE_SIZE, + "1 (Non-WLAN Interference Mitigation)\n"); + break; + case B43_INTERFMODE_MANUALWLAN: + count = + snprintf(buf, PAGE_SIZE, + "2 (WLAN Interference Mitigation)\n"); + break; + default: + B43_WARN_ON(1); + } + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43_attr_interfmode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + unsigned long flags; + int err; + int mode; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mode = get_integer(buf, count); + switch (mode) { + case 0: + mode = B43_INTERFMODE_NONE; + break; + case 1: + mode = B43_INTERFMODE_NONWLAN; + break; + case 2: + mode = B43_INTERFMODE_MANUALWLAN; + break; + case 3: + mode = B43_INTERFMODE_AUTOWLAN; + break; + default: + return -EINVAL; + } + + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + err = b43_radio_set_interference_mitigation(wldev, mode); + if (err) { + b43err(wldev->wl, "Interference Mitigation not " + "supported by device\n"); + } + mmiowb(); + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return err ? 
err : count; +} + +static DEVICE_ATTR(interference, 0644, + b43_attr_interfmode_show, b43_attr_interfmode_store); + +static ssize_t b43_attr_preamble_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + ssize_t count; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + if (wldev->short_preamble) + count = + snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n"); + else + count = + snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n"); + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43_attr_preamble_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + unsigned long flags; + int value; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + value = get_boolean(buf, count); + if (value < 0) + return value; + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + wldev->short_preamble = !!value; + + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static DEVICE_ATTR(shortpreamble, 0644, + b43_attr_preamble_show, b43_attr_preamble_store); + +int b43_sysfs_register(struct b43_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + int err; + + B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED); + + err = device_create_file(dev, &dev_attr_interference); + if (err) + goto out; + err = device_create_file(dev, &dev_attr_shortpreamble); + if (err) + goto err_remove_interfmode; + + out: + return err; + err_remove_interfmode: + device_remove_file(dev, &dev_attr_interference); + goto out; +} + +void b43_sysfs_unregister(struct b43_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + device_remove_file(dev, &dev_attr_shortpreamble); + device_remove_file(dev, &dev_attr_interference); +} diff --git a/drivers/net/wireless/b43/sysfs.h b/drivers/net/wireless/b43/sysfs.h new file mode 100644 index 000000000000..12bda9ef1a85 --- /dev/null +++ b/drivers/net/wireless/b43/sysfs.h @@ -0,0 +1,9 @@ +#ifndef B43_SYSFS_H_ +#define B43_SYSFS_H_ + +struct b43_wldev; + +int b43_sysfs_register(struct b43_wldev *dev); +void b43_sysfs_unregister(struct b43_wldev *dev); + +#endif /* B43_SYSFS_H_ */ diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c new file mode 100644 index 000000000000..15a87183a572 --- /dev/null +++ b/drivers/net/wireless/b43/tables.c @@ -0,0 +1,375 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Copyright (c) 2005 Stefano Brivio <st3@riseup.net> + Copyright (c) 2006, 2006 Michael Buesch <mb@bu3sch.de> + Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> + Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. 
If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "tables.h" +#include "phy.h" + +const u32 b43_tab_rotor[] = { + 0xFEB93FFD, 0xFEC63FFD, /* 0 */ + 0xFED23FFD, 0xFEDF3FFD, + 0xFEEC3FFE, 0xFEF83FFE, + 0xFF053FFE, 0xFF113FFE, + 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ + 0xFF373FFF, 0xFF443FFF, + 0xFF503FFF, 0xFF5D3FFF, + 0xFF693FFF, 0xFF763FFF, + 0xFF824000, 0xFF8F4000, /* 16 */ + 0xFF9B4000, 0xFFA84000, + 0xFFB54000, 0xFFC14000, + 0xFFCE4000, 0xFFDA4000, + 0xFFE74000, 0xFFF34000, /* 24 */ + 0x00004000, 0x000D4000, + 0x00194000, 0x00264000, + 0x00324000, 0x003F4000, + 0x004B4000, 0x00584000, /* 32 */ + 0x00654000, 0x00714000, + 0x007E4000, 0x008A3FFF, + 0x00973FFF, 0x00A33FFF, + 0x00B03FFF, 0x00BC3FFF, /* 40 */ + 0x00C93FFF, 0x00D63FFF, + 0x00E23FFE, 0x00EF3FFE, + 0x00FB3FFE, 0x01083FFE, + 0x01143FFE, 0x01213FFD, /* 48 */ + 0x012E3FFD, 0x013A3FFD, + 0x01473FFD, +}; + +const u32 b43_tab_retard[] = { + 0xDB93CB87, 0xD666CF64, /* 0 */ + 0xD1FDD358, 0xCDA6D826, + 0xCA38DD9F, 0xC729E2B4, + 0xC469E88E, 0xC26AEE2B, + 0xC0DEF46C, 0xC073FA62, /* 8 */ + 0xC01D00D5, 0xC0760743, + 0xC1560D1E, 0xC2E51369, + 0xC4ED18FF, 0xC7AC1ED7, + 0xCB2823B2, 0xCEFA28D9, /* 16 */ + 0xD2F62D3F, 0xD7BB3197, + 0xDCE53568, 0xE1FE3875, + 0xE7D13B35, 0xED663D35, + 0xF39B3EC4, 0xF98E3FA7, /* 24 */ + 0x00004000, 0x06723FA7, + 0x0C653EC4, 0x129A3D35, + 0x182F3B35, 0x1E023875, + 0x231B3568, 0x28453197, /* 32 */ + 0x2D0A2D3F, 0x310628D9, + 0x34D823B2, 0x38541ED7, + 0x3B1318FF, 0x3D1B1369, + 0x3EAA0D1E, 0x3F8A0743, /* 40 */ + 0x3FE300D5, 0x3F8DFA62, + 0x3F22F46C, 0x3D96EE2B, + 0x3B97E88E, 0x38D7E2B4, + 0x35C8DD9F, 0x325AD826, /* 48 */ + 0x2E03D358, 0x299ACF64, + 0x246DCB87, +}; + +const u16 b43_tab_finefreqa[] = { + 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ + 0x0202, 0x0282, 0x0302, 0x0382, + 0x0402, 0x0482, 0x0502, 0x0582, + 0x05E2, 0x0662, 0x06E2, 0x0762, + 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ + 0x09C2, 0x0A22, 0x0AA2, 0x0B02, + 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, + 0x0D42, 0x0DA2, 0x0E02, 0x0E62, + 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ + 0x1062, 0x10C2, 0x1122, 0x1182, + 0x11E2, 0x1242, 0x12A2, 0x12E2, + 0x1342, 0x13A2, 0x1402, 0x1442, + 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ + 0x15E2, 0x1622, 0x1662, 0x16C1, + 0x1701, 0x1741, 0x1781, 0x17E1, + 0x1821, 0x1861, 0x18A1, 0x18E1, + 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ + 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, + 0x1B01, 0x1B41, 0x1B81, 0x1BA1, + 0x1BE1, 0x1C21, 0x1C41, 0x1C81, + 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ + 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, + 0x1E21, 0x1E61, 0x1E81, 0x1EA1, + 0x1EE1, 0x1F01, 0x1F21, 0x1F41, + 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ + 0x2001, 0x2041, 0x2061, 0x2081, + 0x20A1, 0x20C1, 0x20E1, 0x2101, + 0x2121, 0x2141, 0x2161, 0x2181, + 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ + 0x2221, 0x2241, 0x2261, 0x2281, + 0x22A1, 0x22C1, 0x22C1, 0x22E1, + 0x2301, 0x2321, 0x2341, 0x2361, + 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ + 0x23E1, 0x23E1, 0x2401, 0x2421, + 0x2441, 0x2441, 0x2461, 0x2481, + 0x2481, 0x24A1, 0x24C1, 0x24C1, + 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ + 0x2541, 0x2541, 0x2561, 0x2561, + 0x2581, 0x25A1, 0x25A1, 0x25C1, + 0x25C1, 0x25E1, 0x2601, 0x2601, + 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ + 0x2661, 0x2661, 0x2681, 0x2681, + 0x26A1, 0x26A1, 0x26C1, 0x26C1, + 0x26E1, 0x26E1, 0x2701, 0x2701, + 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ + 0x2760, 0x2760, 0x2780, 0x2780, + 0x2780, 0x27A0, 0x27A0, 0x27C0, + 0x27C0, 0x27E0, 0x27E0, 0x27E0, + 0x2800, 
0x2800, 0x2820, 0x2820, /* 192 */ + 0x2820, 0x2840, 0x2840, 0x2840, + 0x2860, 0x2860, 0x2880, 0x2880, + 0x2880, 0x28A0, 0x28A0, 0x28A0, + 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ + 0x28E0, 0x28E0, 0x2900, 0x2900, + 0x2900, 0x2920, 0x2920, 0x2920, + 0x2940, 0x2940, 0x2940, 0x2960, + 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ + 0x2980, 0x2980, 0x29A0, 0x29A0, + 0x29A0, 0x29A0, 0x29C0, 0x29C0, + 0x29C0, 0x29E0, 0x29E0, 0x29E0, + 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ + 0x2A00, 0x2A20, 0x2A20, 0x2A20, + 0x2A20, 0x2A40, 0x2A40, 0x2A40, + 0x2A40, 0x2A60, 0x2A60, 0x2A60, +}; + +const u16 b43_tab_finefreqg[] = { + 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ + 0x05A9, 0x0669, 0x0709, 0x0789, + 0x0829, 0x08A9, 0x0929, 0x0989, + 0x0A09, 0x0A69, 0x0AC9, 0x0B29, + 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ + 0x0D09, 0x0D69, 0x0DA9, 0x0E09, + 0x0E69, 0x0EA9, 0x0F09, 0x0F49, + 0x0FA9, 0x0FE9, 0x1029, 0x1089, + 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ + 0x11E9, 0x1229, 0x1289, 0x12C9, + 0x1309, 0x1349, 0x1389, 0x13C9, + 0x1409, 0x1449, 0x14A9, 0x14E9, + 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ + 0x1629, 0x1669, 0x16A9, 0x16E8, + 0x1728, 0x1768, 0x17A8, 0x17E8, + 0x1828, 0x1868, 0x18A8, 0x18E8, + 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ + 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, + 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, + 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, + 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ + 0x1E48, 0x1E88, 0x1EC8, 0x1F08, + 0x1F48, 0x1F88, 0x1FE8, 0x2028, + 0x2068, 0x20A8, 0x2108, 0x2148, + 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ + 0x22C8, 0x2308, 0x2348, 0x23A8, + 0x23E8, 0x2448, 0x24A8, 0x24E8, + 0x2548, 0x25A8, 0x2608, 0x2668, + 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ + 0x2847, 0x28C7, 0x2947, 0x29A7, + 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, + 0x2CA7, 0x2D67, 0x2E47, 0x2F67, + 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ + 0x3806, 0x38A6, 0x3946, 0x39E6, + 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, + 0x3C45, 0x3CA5, 0x3D05, 0x3D85, + 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ + 0x3F45, 0x3FA5, 0x4005, 0x4045, + 0x40A5, 0x40E5, 0x4145, 0x4185, + 0x41E5, 0x4225, 0x4265, 0x42C5, + 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ + 0x4424, 0x4464, 0x44C4, 0x4504, + 0x4544, 0x4584, 0x45C4, 0x4604, + 0x4644, 0x46A4, 0x46E4, 0x4724, + 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ + 0x4864, 0x48A4, 0x48E4, 0x4924, + 0x4964, 0x49A4, 0x49E4, 0x4A24, + 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, + 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ + 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, + 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, + 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, + 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ + 0x5083, 0x50C3, 0x5103, 0x5143, + 0x5183, 0x51E2, 0x5222, 0x5262, + 0x52A2, 0x52E2, 0x5342, 0x5382, + 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ + 0x5502, 0x5542, 0x55A2, 0x55E2, + 0x5642, 0x5682, 0x56E2, 0x5722, + 0x5782, 0x57E1, 0x5841, 0x58A1, + 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ + 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, + 0x5C61, 0x5D01, 0x5D80, 0x5E20, + 0x5EE0, 0x5FA0, 0x6080, 0x61C0, +}; + +const u16 b43_tab_noisea2[] = { + 0x0001, 0x0001, 0x0001, 0xFFFE, + 0xFFFE, 0x3FFF, 0x1000, 0x0393, +}; + +const u16 b43_tab_noisea3[] = { + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, +}; + +const u16 b43_tab_noiseg1[] = { + 0x013C, 0x01F5, 0x031A, 0x0631, + 0x0001, 0x0001, 0x0001, 0x0001, +}; + +const u16 b43_tab_noiseg2[] = { + 0x5484, 0x3C40, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, +}; + +const u16 b43_tab_noisescaleg1[] = { + 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ + 0x2F2D, 0x2A2A, 0x2527, 0x1F21, + 0x1A1D, 0x1719, 0x1616, 0x1414, + 0x1414, 0x1400, 0x1414, 
0x1614, + 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ + 0x2A27, 0x2F2A, 0x332D, 0x3B35, + 0x5140, 0x6C62, 0x0077, +}; + +const u16 b43_tab_noisescaleg2[] = { + 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ + 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, + 0x969B, 0x9195, 0x8F8F, 0x8A8A, + 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, + 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ + 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, + 0xCBC0, 0xD8D4, 0x00DD, +}; + +const u16 b43_tab_noisescaleg3[] = { + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0x00A4, +}; + +const u16 b43_tab_sigmasqr1[] = { + 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ + 0x0067, 0x0063, 0x005E, 0x0059, + 0x0054, 0x0050, 0x004B, 0x0046, + 0x0042, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x0000, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x0042, 0x0046, 0x004B, 0x0050, + 0x0054, 0x0059, 0x005E, 0x0063, + 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ + 0x007A, +}; + +const u16 b43_tab_sigmasqr2[] = { + 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ + 0x00D6, 0x00D4, 0x00D2, 0x00CF, + 0x00CD, 0x00CA, 0x00C7, 0x00C4, + 0x00C1, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x0000, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00C1, 0x00C4, 0x00C7, 0x00CA, + 0x00CD, 0x00CF, 0x00D2, 0x00D4, + 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ + 0x00DE, +}; + +static inline void assert_sizes(void) +{ + BUILD_BUG_ON(B43_TAB_ROTOR_SIZE != ARRAY_SIZE(b43_tab_rotor)); + BUILD_BUG_ON(B43_TAB_RETARD_SIZE != ARRAY_SIZE(b43_tab_retard)); + BUILD_BUG_ON(B43_TAB_FINEFREQA_SIZE != ARRAY_SIZE(b43_tab_finefreqa)); + BUILD_BUG_ON(B43_TAB_FINEFREQG_SIZE != ARRAY_SIZE(b43_tab_finefreqg)); + BUILD_BUG_ON(B43_TAB_NOISEA2_SIZE != ARRAY_SIZE(b43_tab_noisea2)); + BUILD_BUG_ON(B43_TAB_NOISEA3_SIZE != ARRAY_SIZE(b43_tab_noisea3)); + BUILD_BUG_ON(B43_TAB_NOISEG1_SIZE != ARRAY_SIZE(b43_tab_noiseg1)); + BUILD_BUG_ON(B43_TAB_NOISEG2_SIZE != ARRAY_SIZE(b43_tab_noiseg2)); + BUILD_BUG_ON(B43_TAB_NOISESCALEG_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg1)); + BUILD_BUG_ON(B43_TAB_NOISESCALEG_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg2)); + BUILD_BUG_ON(B43_TAB_NOISESCALEG_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg3)); + BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr1)); + BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr2)); +} + +u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) +{ + assert_sizes(); + + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + return b43_phy_read(dev, B43_PHY_OTABLEI); +} + +void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, + u16 offset, u16 value) +{ + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + b43_phy_write(dev, B43_PHY_OTABLEI, value); +} + +u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) +{ + u32 ret; + + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + ret = b43_phy_read(dev, B43_PHY_OTABLEQ); + ret <<= 16; + ret |= b43_phy_read(dev, B43_PHY_OTABLEI); + + return ret; +} + +void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, + u16 offset, u32 value) +{ + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + 
b43_phy_write(dev, B43_PHY_OTABLEI, value); + b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16)); +} + +u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset) +{ + b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); + return b43_phy_read(dev, B43_PHY_GTABDATA); +} + +void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value) +{ + b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); + b43_phy_write(dev, B43_PHY_GTABDATA, value); +} diff --git a/drivers/net/wireless/b43/tables.h b/drivers/net/wireless/b43/tables.h new file mode 100644 index 000000000000..64635d7b518c --- /dev/null +++ b/drivers/net/wireless/b43/tables.h @@ -0,0 +1,28 @@ +#ifndef B43_TABLES_H_ +#define B43_TABLES_H_ + +#define B43_TAB_ROTOR_SIZE 53 +extern const u32 b43_tab_rotor[]; +#define B43_TAB_RETARD_SIZE 53 +extern const u32 b43_tab_retard[]; +#define B43_TAB_FINEFREQA_SIZE 256 +extern const u16 b43_tab_finefreqa[]; +#define B43_TAB_FINEFREQG_SIZE 256 +extern const u16 b43_tab_finefreqg[]; +#define B43_TAB_NOISEA2_SIZE 8 +extern const u16 b43_tab_noisea2[]; +#define B43_TAB_NOISEA3_SIZE 8 +extern const u16 b43_tab_noisea3[]; +#define B43_TAB_NOISEG1_SIZE 8 +extern const u16 b43_tab_noiseg1[]; +#define B43_TAB_NOISEG2_SIZE 8 +extern const u16 b43_tab_noiseg2[]; +#define B43_TAB_NOISESCALEG_SIZE 27 +extern const u16 b43_tab_noisescaleg1[]; +extern const u16 b43_tab_noisescaleg2[]; +extern const u16 b43_tab_noisescaleg3[]; +#define B43_TAB_SIGMASQR_SIZE 53 +extern const u16 b43_tab_sigmasqr1[]; +extern const u16 b43_tab_sigmasqr2[]; + +#endif /* B43_TABLES_H_ */ diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c new file mode 100644 index 000000000000..0bd6f8a348a8 --- /dev/null +++ b/drivers/net/wireless/b43/xmit.c @@ -0,0 +1,650 @@ +/* + + Broadcom B43 wireless driver + + Transmission (TX/RX) related functions. + + Copyright (C) 2005 Martin Langer <martin-langer@gmx.de> + Copyright (C) 2005 Stefano Brivio <st3@riseup.net> + Copyright (C) 2005, 2006 Michael Buesch <mb@bu3sch.de> + Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org> + Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "xmit.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" + +/* Extract the bitrate out of a CCK PLCP header. */ +static u8 b43_plcp_get_bitrate_cck(struct b43_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0]) { + case 0x0A: + return B43_CCK_RATE_1MB; + case 0x14: + return B43_CCK_RATE_2MB; + case 0x37: + return B43_CCK_RATE_5MB; + case 0x6E: + return B43_CCK_RATE_11MB; + } + B43_WARN_ON(1); + return 0; +} + +/* Extract the bitrate out of an OFDM PLCP header. 
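The CCK signal byte decoded above is simply the bitrate expressed in 100 kbit/s units (0x0A is 1 Mbit/s, 0x6E is 11 Mbit/s), while the OFDM code handled next is the 4-bit RATE pattern from the OFDM PHY spec. A stand-alone check of the CCK mapping, assuming the driver's rate constants (defined elsewhere in the driver headers) count 500 kbit/s units:

#include <assert.h>
#include <stdint.h>

#define CCK_RATE_1MB	0x02	/* rate in 500 kbit/s units */
#define CCK_RATE_2MB	0x04
#define CCK_RATE_5MB	0x0B
#define CCK_RATE_11MB	0x16

static uint8_t cck_ratecode(uint8_t rate)
{
	/* PLCP signal field = rate in 100 kbit/s units = rate * 5 */
	return (uint8_t)(rate * 5);
}

int main(void)
{
	assert(cck_ratecode(CCK_RATE_1MB)  == 0x0A);	/* 1 Mbit/s   */
	assert(cck_ratecode(CCK_RATE_2MB)  == 0x14);	/* 2 Mbit/s   */
	assert(cck_ratecode(CCK_RATE_5MB)  == 0x37);	/* 5.5 Mbit/s */
	assert(cck_ratecode(CCK_RATE_11MB) == 0x6E);	/* 11 Mbit/s  */
	return 0;
}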
*/ +static u8 b43_plcp_get_bitrate_ofdm(struct b43_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0] & 0xF) { + case 0xB: + return B43_OFDM_RATE_6MB; + case 0xF: + return B43_OFDM_RATE_9MB; + case 0xA: + return B43_OFDM_RATE_12MB; + case 0xE: + return B43_OFDM_RATE_18MB; + case 0x9: + return B43_OFDM_RATE_24MB; + case 0xD: + return B43_OFDM_RATE_36MB; + case 0x8: + return B43_OFDM_RATE_48MB; + case 0xC: + return B43_OFDM_RATE_54MB; + } + B43_WARN_ON(1); + return 0; +} + +u8 b43_plcp_get_ratecode_cck(const u8 bitrate) +{ + switch (bitrate) { + case B43_CCK_RATE_1MB: + return 0x0A; + case B43_CCK_RATE_2MB: + return 0x14; + case B43_CCK_RATE_5MB: + return 0x37; + case B43_CCK_RATE_11MB: + return 0x6E; + } + B43_WARN_ON(1); + return 0; +} + +u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate) +{ + switch (bitrate) { + case B43_OFDM_RATE_6MB: + return 0xB; + case B43_OFDM_RATE_9MB: + return 0xF; + case B43_OFDM_RATE_12MB: + return 0xA; + case B43_OFDM_RATE_18MB: + return 0xE; + case B43_OFDM_RATE_24MB: + return 0x9; + case B43_OFDM_RATE_36MB: + return 0xD; + case B43_OFDM_RATE_48MB: + return 0x8; + case B43_OFDM_RATE_54MB: + return 0xC; + } + B43_WARN_ON(1); + return 0; +} + +void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate) +{ + __le32 *data = &(plcp->data); + __u8 *raw = plcp->raw; + + if (b43_is_ofdm_rate(bitrate)) { + u32 d; + + d = b43_plcp_get_ratecode_ofdm(bitrate); + B43_WARN_ON(octets & 0xF000); + d |= (octets << 5); + *data = cpu_to_le32(d); + } else { + u32 plen; + + plen = octets * 16 / bitrate; + if ((octets * 16 % bitrate) > 0) { + plen++; + if ((bitrate == B43_CCK_RATE_11MB) + && ((octets * 8 % 11) < 4)) { + raw[1] = 0x84; + } else + raw[1] = 0x04; + } else + raw[1] = 0x04; + *data |= cpu_to_le32(plen << 16); + raw[0] = b43_plcp_get_ratecode_cck(bitrate); + } +} + +static u8 b43_calc_fallback_rate(u8 bitrate) +{ + switch (bitrate) { + case B43_CCK_RATE_1MB: + return B43_CCK_RATE_1MB; + case B43_CCK_RATE_2MB: + return B43_CCK_RATE_1MB; + case B43_CCK_RATE_5MB: + return B43_CCK_RATE_2MB; + case B43_CCK_RATE_11MB: + return B43_CCK_RATE_5MB; + case B43_OFDM_RATE_6MB: + return B43_CCK_RATE_5MB; + case B43_OFDM_RATE_9MB: + return B43_OFDM_RATE_6MB; + case B43_OFDM_RATE_12MB: + return B43_OFDM_RATE_9MB; + case B43_OFDM_RATE_18MB: + return B43_OFDM_RATE_12MB; + case B43_OFDM_RATE_24MB: + return B43_OFDM_RATE_18MB; + case B43_OFDM_RATE_36MB: + return B43_OFDM_RATE_24MB; + case B43_OFDM_RATE_48MB: + return B43_OFDM_RATE_36MB; + case B43_OFDM_RATE_54MB: + return B43_OFDM_RATE_48MB; + } + B43_WARN_ON(1); + return 0; +} + +static void generate_txhdr_fw4(struct b43_wldev *dev, + struct b43_txhdr_fw4 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie) +{ + const struct b43_phy *phy = &dev->phy; + const struct ieee80211_hdr *wlhdr = + (const struct ieee80211_hdr *)fragment_data; + int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); + u16 fctl = le16_to_cpu(wlhdr->frame_control); + u8 rate, rate_fb; + int rate_ofdm, rate_fb_ofdm; + unsigned int plcp_fragment_len; + u32 mac_ctl = 0; + u16 phy_ctl = 0; + u8 extra_ft = 0; + + memset(txhdr, 0, sizeof(*txhdr)); + + rate = txctl->tx_rate; + rate_ofdm = b43_is_ofdm_rate(rate); + rate_fb = (txctl->alt_retry_rate == -1) ? 
rate : txctl->alt_retry_rate; + rate_fb_ofdm = b43_is_ofdm_rate(rate_fb); + + if (rate_ofdm) + txhdr->phy_rate = b43_plcp_get_ratecode_ofdm(rate); + else + txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate); + txhdr->mac_frame_ctl = wlhdr->frame_control; + memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); + + /* Calculate duration for fallback rate */ + if ((rate_fb == rate) || + (wlhdr->duration_id & cpu_to_le16(0x8000)) || + (wlhdr->duration_id == cpu_to_le16(0))) { + /* If the fallback rate equals the normal rate or the + * dur_id field contains an AID, CFP magic or 0, + * use the original dur_id field. */ + txhdr->dur_fb = wlhdr->duration_id; + } else { + int fbrate_base100kbps = B43_RATE_TO_BASE100KBPS(rate_fb); + txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + fragment_len, + fbrate_base100kbps); + } + + plcp_fragment_len = fragment_len + FCS_LEN; + if (use_encryption) { + u8 key_idx = (u16) (txctl->key_idx); + struct b43_key *key; + int wlhdr_len; + size_t iv_len; + + B43_WARN_ON(key_idx >= dev->max_nr_keys); + key = &(dev->key[key_idx]); + B43_WARN_ON(!key->keyconf); + + /* Hardware appends ICV. */ + plcp_fragment_len += txctl->icv_len; + + key_idx = b43_kidx_to_fw(dev, key_idx); + mac_ctl |= (key_idx << B43_TX4_MAC_KEYIDX_SHIFT) & + B43_TX4_MAC_KEYIDX; + mac_ctl |= (key->algorithm << B43_TX4_MAC_KEYALG_SHIFT) & + B43_TX4_MAC_KEYALG; + wlhdr_len = ieee80211_get_hdrlen(fctl); + iv_len = min((size_t) txctl->iv_len, + ARRAY_SIZE(txhdr->iv)); + memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); + } + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->plcp), + plcp_fragment_len, rate); + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->plcp_fb), + plcp_fragment_len, rate_fb); + + /* Extra Frame Types */ + if (rate_fb_ofdm) + extra_ft |= B43_TX4_EFT_FBOFDM; + + /* Set channel radio code. Note that the micrcode ORs 0x100 to + * this value before comparing it to the value in SHM, if this + * is a 5Ghz packet. 
+ */ + txhdr->chan_radio_code = phy->channel; + + /* PHY TX Control word */ + if (rate_ofdm) + phy_ctl |= B43_TX4_PHY_OFDM; + if (dev->short_preamble) + phy_ctl |= B43_TX4_PHY_SHORTPRMBL; + switch (txctl->antenna_sel_tx) { + case 0: + phy_ctl |= B43_TX4_PHY_ANTLAST; + break; + case 1: + phy_ctl |= B43_TX4_PHY_ANT0; + break; + case 2: + phy_ctl |= B43_TX4_PHY_ANT1; + break; + default: + B43_WARN_ON(1); + } + + /* MAC control */ + if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) + mac_ctl |= B43_TX4_MAC_ACK; + if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) + mac_ctl |= B43_TX4_MAC_HWSEQ; + if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) + mac_ctl |= B43_TX4_MAC_STMSDU; + if (phy->type == B43_PHYTYPE_A) + mac_ctl |= B43_TX4_MAC_5GHZ; + + /* Generate the RTS or CTS-to-self frame */ + if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || + (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { + unsigned int len; + struct ieee80211_hdr *hdr; + int rts_rate, rts_rate_fb; + int rts_rate_ofdm, rts_rate_fb_ofdm; + + rts_rate = txctl->rts_cts_rate; + rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); + rts_rate_fb = b43_calc_fallback_rate(rts_rate); + rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); + + if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + ieee80211_ctstoself_get(dev->wl->hw, dev->wl->if_id, + fragment_data, fragment_len, + txctl, + (struct ieee80211_cts *)(txhdr-> + rts_frame)); + mac_ctl |= B43_TX4_MAC_SENDCTS; + len = sizeof(struct ieee80211_cts); + } else { + ieee80211_rts_get(dev->wl->hw, dev->wl->if_id, + fragment_data, fragment_len, txctl, + (struct ieee80211_rts *)(txhdr-> + rts_frame)); + mac_ctl |= B43_TX4_MAC_SENDRTS; + len = sizeof(struct ieee80211_rts); + } + len += FCS_LEN; + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr-> + rts_plcp), len, + rts_rate); + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr-> + rts_plcp_fb), + len, rts_rate_fb); + hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); + txhdr->rts_dur_fb = hdr->duration_id; + if (rts_rate_ofdm) { + extra_ft |= B43_TX4_EFT_RTSOFDM; + txhdr->phy_rate_rts = + b43_plcp_get_ratecode_ofdm(rts_rate); + } else + txhdr->phy_rate_rts = + b43_plcp_get_ratecode_cck(rts_rate); + if (rts_rate_fb_ofdm) + extra_ft |= B43_TX4_EFT_RTSFBOFDM; + mac_ctl |= B43_TX4_MAC_LONGFRAME; + } + + /* Magic cookie */ + txhdr->cookie = cpu_to_le16(cookie); + + /* Apply the bitfields */ + txhdr->mac_ctl = cpu_to_le32(mac_ctl); + txhdr->phy_ctl = cpu_to_le16(phy_ctl); + txhdr->extra_ft = extra_ft; +} + +void b43_generate_txhdr(struct b43_wldev *dev, + u8 * txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, u16 cookie) +{ + generate_txhdr_fw4(dev, (struct b43_txhdr_fw4 *)txhdr, + fragment_data, fragment_len, txctl, cookie); +} + +static s8 b43_rssi_postprocess(struct b43_wldev *dev, + u8 in_rssi, int ofdm, + int adjust_2053, int adjust_2050) +{ + struct b43_phy *phy = &dev->phy; + s32 tmp; + + switch (phy->radio_ver) { + case 0x2050: + if (ofdm) { + tmp = in_rssi; + if (tmp > 127) + tmp -= 256; + tmp *= 73; + tmp /= 64; + if (adjust_2050) + tmp += 25; + else + tmp -= 3; + } else { + if (dev->dev->bus->sprom.r1. 
+ boardflags_lo & B43_BFL_RSSI) { + if (in_rssi > 63) + in_rssi = 63; + tmp = phy->nrssi_lt[in_rssi]; + tmp = 31 - tmp; + tmp *= -131; + tmp /= 128; + tmp -= 57; + } else { + tmp = in_rssi; + tmp = 31 - tmp; + tmp *= -149; + tmp /= 128; + tmp -= 68; + } + if (phy->type == B43_PHYTYPE_G && adjust_2050) + tmp += 25; + } + break; + case 0x2060: + if (in_rssi > 127) + tmp = in_rssi - 256; + else + tmp = in_rssi; + break; + default: + tmp = in_rssi; + tmp -= 11; + tmp *= 103; + tmp /= 64; + if (adjust_2053) + tmp -= 109; + else + tmp -= 83; + } + + return (s8) tmp; +} + +//TODO +#if 0 +static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi) +{ + struct b43_phy *phy = &dev->phy; + s8 ret; + + if (phy->type == B43_PHYTYPE_A) { + //TODO: Incomplete specs. + ret = 0; + } else + ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); + + return ret; +} +#endif + +void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) +{ + struct ieee80211_rx_status status; + struct b43_plcp_hdr6 *plcp; + struct ieee80211_hdr *wlhdr; + const struct b43_rxhdr_fw4 *rxhdr = _rxhdr; + u16 fctl; + u16 phystat0, phystat3, chanstat, mactime; + u32 macstat; + u16 chanid; + u8 jssi; + int padding; + + memset(&status, 0, sizeof(status)); + + /* Get metadata about the frame from the header. */ + phystat0 = le16_to_cpu(rxhdr->phy_status0); + phystat3 = le16_to_cpu(rxhdr->phy_status3); + jssi = rxhdr->jssi; + macstat = le32_to_cpu(rxhdr->mac_status); + mactime = le16_to_cpu(rxhdr->mac_time); + chanstat = le16_to_cpu(rxhdr->channel); + + if (macstat & B43_RX_MAC_FCSERR) + dev->wl->ieee_stats.dot11FCSErrorCount++; + if (macstat & B43_RX_MAC_DECERR) { + /* Decryption with the given key failed. + * Drop the packet. We also won't be able to decrypt it with + * the key in software. */ + goto drop; + } + + /* Skip PLCP and padding */ + padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0; + if (unlikely(skb->len < (sizeof(struct b43_plcp_hdr6) + padding))) { + b43dbg(dev->wl, "RX: Packet size underrun (1)\n"); + goto drop; + } + plcp = (struct b43_plcp_hdr6 *)(skb->data + padding); + skb_pull(skb, sizeof(struct b43_plcp_hdr6) + padding); + /* The skb contains the Wireless Header + payload data now */ + if (unlikely(skb->len < (2 + 2 + 6 /*minimum hdr */ + FCS_LEN))) { + b43dbg(dev->wl, "RX: Packet size underrun (2)\n"); + goto drop; + } + wlhdr = (struct ieee80211_hdr *)(skb->data); + fctl = le16_to_cpu(wlhdr->frame_control); + skb_trim(skb, skb->len - FCS_LEN); + + if (macstat & B43_RX_MAC_DEC) { + unsigned int keyidx; + int wlhdr_len; + + keyidx = ((macstat & B43_RX_MAC_KEYIDX) + >> B43_RX_MAC_KEYIDX_SHIFT); + /* We must adjust the key index here. We want the "physical" + * key index, but the ucode passed it slightly different. 
+ */ + keyidx = b43_kidx_to_raw(dev, keyidx); + B43_WARN_ON(keyidx >= dev->max_nr_keys); + + if (dev->key[keyidx].algorithm != B43_SEC_ALGO_NONE) { + wlhdr_len = ieee80211_get_hdrlen(fctl); + if (unlikely(skb->len < (wlhdr_len + 3))) { + b43dbg(dev->wl, + "RX: Packet size underrun (3)\n"); + goto drop; + } + status.flag |= RX_FLAG_DECRYPTED; + } + } + + status.ssi = b43_rssi_postprocess(dev, jssi, + (phystat0 & B43_RX_PHYST0_OFDM), + (phystat0 & B43_RX_PHYST0_GAINCTL), + (phystat3 & B43_RX_PHYST3_TRSTATE)); + status.noise = dev->stats.link_noise; + /* the next line looks wrong, but is what mac80211 wants */ + status.signal = (jssi * 100) / B43_RX_MAX_SSI; + if (phystat0 & B43_RX_PHYST0_OFDM) + status.rate = b43_plcp_get_bitrate_ofdm(plcp); + else + status.rate = b43_plcp_get_bitrate_cck(plcp); + status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT); + status.mactime = mactime; + + chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; + switch (chanstat & B43_RX_CHAN_PHYTYPE) { + case B43_PHYTYPE_A: + status.phymode = MODE_IEEE80211A; + status.freq = chanid; + status.channel = b43_freq_to_channel_a(chanid); + break; + case B43_PHYTYPE_B: + status.phymode = MODE_IEEE80211B; + status.freq = chanid + 2400; + status.channel = b43_freq_to_channel_bg(chanid + 2400); + break; + case B43_PHYTYPE_G: + status.phymode = MODE_IEEE80211G; + status.freq = chanid + 2400; + status.channel = b43_freq_to_channel_bg(chanid + 2400); + break; + default: + B43_WARN_ON(1); + } + + dev->stats.last_rx = jiffies; + ieee80211_rx_irqsafe(dev->wl->hw, skb, &status); + + return; +drop: + b43dbg(dev->wl, "RX: Packet dropped\n"); + dev_kfree_skb_any(skb); +} + +void b43_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + b43_debugfs_log_txstat(dev, status); + + if (status->intermediate) + return; + if (status->for_ampdu) + return; + if (!status->acked) + dev->wl->ieee_stats.dot11ACKFailureCount++; + if (status->rts_count) { + if (status->rts_count == 0xF) //FIXME + dev->wl->ieee_stats.dot11RTSFailureCount++; + else + dev->wl->ieee_stats.dot11RTSSuccessCount++; + } + + if (b43_using_pio(dev)) + b43_pio_handle_txstatus(dev, status); + else + b43_dma_handle_txstatus(dev, status); +} + +/* Handle TX status report as received through DMA/PIO queues */ +void b43_handle_hwtxstatus(struct b43_wldev *dev, + const struct b43_hwtxstatus *hw) +{ + struct b43_txstatus status; + u8 tmp; + + status.cookie = le16_to_cpu(hw->cookie); + status.seq = le16_to_cpu(hw->seq); + status.phy_stat = hw->phy_stat; + tmp = hw->count; + status.frame_count = (tmp >> 4); + status.rts_count = (tmp & 0x0F); + tmp = hw->flags; + status.supp_reason = ((tmp & 0x1C) >> 2); + status.pm_indicated = !!(tmp & 0x80); + status.intermediate = !!(tmp & 0x40); + status.for_ampdu = !!(tmp & 0x20); + status.acked = !!(tmp & 0x02); + + b43_handle_txstatus(dev, &status); +} + +/* Stop any TX operation on the device (suspend the hardware queues) */ +void b43_tx_suspend(struct b43_wldev *dev) +{ + if (b43_using_pio(dev)) + b43_pio_freeze_txqueues(dev); + else + b43_dma_tx_suspend(dev); +} + +/* Resume any TX operation on the device (resume the hardware queues) */ +void b43_tx_resume(struct b43_wldev *dev) +{ + if (b43_using_pio(dev)) + b43_pio_thaw_txqueues(dev); + else + b43_dma_tx_resume(dev); +} + +#if 0 +static void upload_qos_parms(struct b43_wldev *dev, + const u16 * parms, u16 offset) +{ + int i; + + for (i = 0; i < B43_NR_QOSPARMS; i++) { + b43_shm_write16(dev, B43_SHM_SHARED, + offset + (i * 2), parms[i]); + } +} +#endif + +/* 
Initialize the QoS parameters */ +void b43_qos_init(struct b43_wldev *dev) +{ + /* FIXME: This function must probably be called from the mac80211 + * config callback. */ + return; + + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); + //FIXME kill magic + b43_write16(dev, 0x688, b43_read16(dev, 0x688) | 0x4); + + /*TODO: We might need some stack support here to get the values. */ +} diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h new file mode 100644 index 000000000000..03bddd251618 --- /dev/null +++ b/drivers/net/wireless/b43/xmit.h @@ -0,0 +1,250 @@ +#ifndef B43_XMIT_H_ +#define B43_XMIT_H_ + +#include "main.h" + +#define _b43_declare_plcp_hdr(size) \ + struct b43_plcp_hdr##size { \ + union { \ + __le32 data; \ + __u8 raw[size]; \ + } __attribute__((__packed__)); \ + } __attribute__((__packed__)) + +/* struct b43_plcp_hdr4 */ +_b43_declare_plcp_hdr(4); +/* struct b43_plcp_hdr6 */ +_b43_declare_plcp_hdr(6); + +#undef _b43_declare_plcp_hdr + +/* TX header for v4 firmware */ +struct b43_txhdr_fw4 { + __le32 mac_ctl; /* MAC TX control */ + __le16 mac_frame_ctl; /* Copy of the FrameControl field */ + __le16 tx_fes_time_norm; /* TX FES Time Normal */ + __le16 phy_ctl; /* PHY TX control */ + __le16 phy_ctl_0; /* Unused */ + __le16 phy_ctl_1; /* Unused */ + __le16 phy_ctl_rts_0; /* Unused */ + __le16 phy_ctl_rts_1; /* Unused */ + __u8 phy_rate; /* PHY rate */ + __u8 phy_rate_rts; /* PHY rate for RTS/CTS */ + __u8 extra_ft; /* Extra Frame Types */ + __u8 chan_radio_code; /* Channel Radio Code */ + __u8 iv[16]; /* Encryption IV */ + __u8 tx_receiver[6]; /* TX Frame Receiver address */ + __le16 tx_fes_time_fb; /* TX FES Time Fallback */ + struct b43_plcp_hdr6 rts_plcp_fb; /* RTS fallback PLCP */ + __le16 rts_dur_fb; /* RTS fallback duration */ + struct b43_plcp_hdr6 plcp_fb; /* Fallback PLCP */ + __le16 dur_fb; /* Fallback duration */ + __le16 mm_dur_time; /* Unused */ + __le16 mm_dur_time_fb; /* Unused */ + __le32 time_stamp; /* Timestamp */ + PAD_BYTES(2); + __le16 cookie; /* TX frame cookie */ + __le16 tx_status; /* TX status */ + struct b43_plcp_hdr6 rts_plcp; /* RTS PLCP */ + __u8 rts_frame[16]; /* The RTS frame (if used) */ + PAD_BYTES(2); + struct b43_plcp_hdr6 plcp; /* Main PLCP */ +} __attribute__ ((__packed__)); + +/* MAC TX control */ +#define B43_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ +#define B43_TX4_MAC_KEYIDX_SHIFT 20 +#define B43_TX4_MAC_KEYALG 0x00070000 /* Security key algorithm */ +#define B43_TX4_MAC_KEYALG_SHIFT 16 +#define B43_TX4_MAC_LIFETIME 0x00001000 +#define B43_TX4_MAC_FRAMEBURST 0x00000800 +#define B43_TX4_MAC_SENDCTS 0x00000400 +#define B43_TX4_MAC_AMPDU 0x00000300 +#define B43_TX4_MAC_AMPDU_SHIFT 8 +#define B43_TX4_MAC_5GHZ 0x00000080 +#define B43_TX4_MAC_IGNPMQ 0x00000020 +#define B43_TX4_MAC_HWSEQ 0x00000010 /* Use Hardware Sequence Number */ +#define B43_TX4_MAC_STMSDU 0x00000008 /* Start MSDU */ +#define B43_TX4_MAC_SENDRTS 0x00000004 +#define B43_TX4_MAC_LONGFRAME 0x00000002 +#define B43_TX4_MAC_ACK 0x00000001 + +/* Extra Frame Types */ +#define B43_TX4_EFT_FBOFDM 0x0001 /* Data frame fallback rate type */ +#define B43_TX4_EFT_RTSOFDM 0x0004 /* RTS/CTS rate type */ +#define B43_TX4_EFT_RTSFBOFDM 0x0010 /* RTS/CTS fallback rate type */ + +/* PHY TX control word */ +#define B43_TX4_PHY_OFDM 0x0001 /* Data frame rate type */ +#define B43_TX4_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ +#define B43_TX4_PHY_ANT 0x03C0 /* Antenna selection */ +#define B43_TX4_PHY_ANT0 0x0000 /* Use antenna 0 */ +#define B43_TX4_PHY_ANT1 
0x0100 /* Use antenna 1 */ +#define B43_TX4_PHY_ANTLAST 0x0300 /* Use last used antenna */ + +void b43_generate_txhdr(struct b43_wldev *dev, + u8 * txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, u16 cookie); + +/* Transmit Status */ +struct b43_txstatus { + u16 cookie; /* The cookie from the txhdr */ + u16 seq; /* Sequence number */ + u8 phy_stat; /* PHY TX status */ + u8 frame_count; /* Frame transmit count */ + u8 rts_count; /* RTS transmit count */ + u8 supp_reason; /* Suppression reason */ + /* flags */ + u8 pm_indicated; /* PM mode indicated to AP */ + u8 intermediate; /* Intermediate status notification (not final) */ + u8 for_ampdu; /* Status is for an AMPDU (afterburner) */ + u8 acked; /* Wireless ACK received */ +}; + +/* txstatus supp_reason values */ +enum { + B43_TXST_SUPP_NONE, /* Not suppressed */ + B43_TXST_SUPP_PMQ, /* Suppressed due to PMQ entry */ + B43_TXST_SUPP_FLUSH, /* Suppressed due to flush request */ + B43_TXST_SUPP_PREV, /* Previous fragment failed */ + B43_TXST_SUPP_CHAN, /* Channel mismatch */ + B43_TXST_SUPP_LIFE, /* Lifetime expired */ + B43_TXST_SUPP_UNDER, /* Buffer underflow */ + B43_TXST_SUPP_ABNACK, /* Afterburner NACK */ +}; + +/* Transmit Status as received through DMA/PIO on old chips */ +struct b43_hwtxstatus { + PAD_BYTES(4); + __le16 cookie; + u8 flags; + u8 count; + PAD_BYTES(2); + __le16 seq; + u8 phy_stat; + PAD_BYTES(1); +} __attribute__ ((__packed__)); + +/* Receive header for v4 firmware. */ +struct b43_rxhdr_fw4 { + __le16 frame_len; /* Frame length */ + PAD_BYTES(2); + __le16 phy_status0; /* PHY RX Status 0 */ + __u8 jssi; /* PHY RX Status 1: JSSI */ + __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ + __le16 phy_status2; /* PHY RX Status 2 */ + __le16 phy_status3; /* PHY RX Status 3 */ + __le32 mac_status; /* MAC RX status */ + __le16 mac_time; + __le16 channel; +} __attribute__ ((__packed__)); + +/* PHY RX Status 0 */ +#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ +#define B43_RX_PHYST0_PLCPHCF 0x0200 +#define B43_RX_PHYST0_PLCPFV 0x0100 +#define B43_RX_PHYST0_SHORTPRMBL 0x0080 /* Received with Short Preamble */ +#define B43_RX_PHYST0_LCRS 0x0040 +#define B43_RX_PHYST0_ANT 0x0020 /* Antenna */ +#define B43_RX_PHYST0_UNSRATE 0x0010 +#define B43_RX_PHYST0_CLIP 0x000C +#define B43_RX_PHYST0_CLIP_SHIFT 2 +#define B43_RX_PHYST0_FTYPE 0x0003 /* Frame type */ +#define B43_RX_PHYST0_CCK 0x0000 /* Frame type: CCK */ +#define B43_RX_PHYST0_OFDM 0x0001 /* Frame type: OFDM */ +#define B43_RX_PHYST0_PRE_N 0x0002 /* Pre-standard N-PHY frame */ +#define B43_RX_PHYST0_STD_N 0x0003 /* Standard N-PHY frame */ + +/* PHY RX Status 2 */ +#define B43_RX_PHYST2_LNAG 0xC000 /* LNA Gain */ +#define B43_RX_PHYST2_LNAG_SHIFT 14 +#define B43_RX_PHYST2_PNAG 0x3C00 /* PNA Gain */ +#define B43_RX_PHYST2_PNAG_SHIFT 10 +#define B43_RX_PHYST2_FOFF 0x03FF /* F offset */ + +/* PHY RX Status 3 */ +#define B43_RX_PHYST3_DIGG 0x1800 /* DIG Gain */ +#define B43_RX_PHYST3_DIGG_SHIFT 11 +#define B43_RX_PHYST3_TRSTATE 0x0400 /* TR state */ + +/* MAC RX Status */ +#define B43_RX_MAC_BEACONSENT 0x00008000 /* Beacon send flag */ +#define B43_RX_MAC_KEYIDX 0x000007E0 /* Key index */ +#define B43_RX_MAC_KEYIDX_SHIFT 5 +#define B43_RX_MAC_DECERR 0x00000010 /* Decrypt error */ +#define B43_RX_MAC_DEC 0x00000008 /* Decryption attempted */ +#define B43_RX_MAC_PADDING 0x00000004 /* Pad bytes present */ +#define B43_RX_MAC_RESP 0x00000002 /* Response frame transmitted */ +#define B43_RX_MAC_FCSERR 0x00000001 
/* FCS error */ + +/* RX channel */ +#define B43_RX_CHAN_GAIN 0xFC00 /* Gain */ +#define B43_RX_CHAN_GAIN_SHIFT 10 +#define B43_RX_CHAN_ID 0x03FC /* Channel ID */ +#define B43_RX_CHAN_ID_SHIFT 2 +#define B43_RX_CHAN_PHYTYPE 0x0003 /* PHY type */ + +u8 b43_plcp_get_ratecode_cck(const u8 bitrate); +u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate); + +void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate); + +void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr); + +void b43_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); + +void b43_handle_hwtxstatus(struct b43_wldev *dev, + const struct b43_hwtxstatus *hw); + +void b43_tx_suspend(struct b43_wldev *dev); +void b43_tx_resume(struct b43_wldev *dev); + +#define B43_NR_QOSPARMS 22 +enum { + B43_QOSPARM_TXOP = 0, + B43_QOSPARM_CWMIN, + B43_QOSPARM_CWMAX, + B43_QOSPARM_CWCUR, + B43_QOSPARM_AIFS, + B43_QOSPARM_BSLOTS, + B43_QOSPARM_REGGAP, + B43_QOSPARM_STATUS, +}; +void b43_qos_init(struct b43_wldev *dev); + +/* Helper functions for converting the key-table index from "firmware-format" + * to "raw-format" and back. The firmware API changed for this at some revision. + * We need to account for that here. */ +static inline int b43_new_kidx_api(struct b43_wldev *dev) +{ + /* FIXME: Not sure the change was at rev 351 */ + return (dev->fw.rev >= 351); +} +static inline u8 b43_kidx_to_fw(struct b43_wldev *dev, u8 raw_kidx) +{ + u8 firmware_kidx; + if (b43_new_kidx_api(dev)) { + firmware_kidx = raw_kidx; + } else { + if (raw_kidx >= 4) /* Is per STA key? */ + firmware_kidx = raw_kidx - 4; + else + firmware_kidx = raw_kidx; /* TX default key */ + } + return firmware_kidx; +} +static inline u8 b43_kidx_to_raw(struct b43_wldev *dev, u8 firmware_kidx) +{ + u8 raw_kidx; + if (b43_new_kidx_api(dev)) + raw_kidx = firmware_kidx; + else + raw_kidx = firmware_kidx + 4; /* RX default keys or per STA keys */ + return raw_kidx; +} + +#endif /* B43_XMIT_H_ */ diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig new file mode 100644 index 000000000000..7e23ec23fc98 --- /dev/null +++ b/drivers/net/wireless/b43legacy/Kconfig @@ -0,0 +1,89 @@ +config B43LEGACY + tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" + depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 + select SSB + select FW_LOADER + select HW_RANDOM + ---help--- + b43legacy is a driver for 802.11b devices from Broadcom (BCM4301 and + BCM4303) and early model 802.11g chips (BCM4306 Ver. 2) used in the + Linksys WPC54G V1 PCMCIA devices. + + Newer 802.11g and 802.11a devices need b43. + + It is safe to include both b43 and b43legacy as the underlying glue + layer will automatically load the correct version for your device. + + This driver uses V3 firmware, which must be installed separately using + b43-fwcutter. + + This driver can be built as a module (recommended) that will be + called "b43legacy". If unsure, say M. + +# Auto-select SSB PCI-HOST support, if possible +config B43LEGACY_PCI_AUTOSELECT + bool + depends on B43LEGACY && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B43LEGACY_PCICORE_AUTOSELECT + bool + depends on B43LEGACY && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B43LEGACY_DEBUG + bool "Broadcom 43xx-legacy debugging" + depends on B43LEGACY + default y + ---help--- + Say Y, because this information will help you get the driver running. 
+ This option generates a minimum of log output. + +config B43LEGACY_DMA + bool + depends on B43LEGACY + +config B43LEGACY_PIO + bool + depends on B43LEGACY + +choice + prompt "Broadcom 43xx-legacy data transfer mode" + depends on B43LEGACY + default B43LEGACY_DMA_AND_PIO_MODE + +config B43LEGACY_DMA_AND_PIO_MODE + bool "DMA + PIO" + select B43LEGACY_DMA + select B43LEGACY_PIO + ---help--- + Include both, Direct Memory Access (DMA) and Programmed I/O (PIO) + data transfer modes. The mode actually used is selectable through + the module parameter "pio". With pio=0 as a module parameter, the + default DMA is used, otherwise PIO is used. + + If unsure, choose this option. + +config B43LEGACY_DMA_MODE + bool "DMA (Direct Memory Access) only" + select B43LEGACY_DMA + ---help--- + Only include Direct Memory Access (DMA). + This reduces the size of the driver module, by omitting the PIO code. + +config B43LEGACY_PIO_MODE + bool "PIO (Programmed I/O) only" + select B43LEGACY_PIO + ---help--- + Only include Programmed I/O (PIO). + This reduces the size of the driver module, by omitting the DMA code. + Please note that PIO transfers are slow (compared to DMA). + + Also note that not all devices of the b43legacy series support PIO. + + You should use PIO only if DMA does not work for you. + +endchoice diff --git a/drivers/net/wireless/b43legacy/Makefile b/drivers/net/wireless/b43legacy/Makefile new file mode 100644 index 000000000000..ec3a2482bbad --- /dev/null +++ b/drivers/net/wireless/b43legacy/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_B43LEGACY) += b43legacy.o +b43legacy-obj-$(CONFIG_B43LEGACY_DEBUG) += debugfs.o + +b43legacy-obj-$(CONFIG_B43LEGACY_DMA) += dma.o +b43legacy-obj-$(CONFIG_B43LEGACY_PIO) += pio.o + +b43legacy-objs := main.o \ + ilt.o \ + leds.o \ + phy.o \ + radio.o \ + sysfs.o \ + xmit.o \ + $(b43legacy-obj-y) diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h new file mode 100644 index 000000000000..afe145cec067 --- /dev/null +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -0,0 +1,832 @@ +#ifndef B43legacy_H_ +#define B43legacy_H_ + +#include <linux/hw_random.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/stringify.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <asm/atomic.h> +#include <linux/io.h> + +#include <linux/ssb/ssb.h> +#include <linux/ssb/ssb_driver_chipcommon.h> + +#include <linux/wireless.h> +#include <net/mac80211.h> + +#include "debugfs.h" +#include "leds.h" +#include "phy.h" + + +#define B43legacy_IRQWAIT_MAX_RETRIES 100 + +#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */ + +/* MMIO offsets */ +#define B43legacy_MMIO_DMA0_REASON 0x20 +#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24 +#define B43legacy_MMIO_DMA1_REASON 0x28 +#define B43legacy_MMIO_DMA1_IRQ_MASK 0x2C +#define B43legacy_MMIO_DMA2_REASON 0x30 +#define B43legacy_MMIO_DMA2_IRQ_MASK 0x34 +#define B43legacy_MMIO_DMA3_REASON 0x38 +#define B43legacy_MMIO_DMA3_IRQ_MASK 0x3C +#define B43legacy_MMIO_DMA4_REASON 0x40 +#define B43legacy_MMIO_DMA4_IRQ_MASK 0x44 +#define B43legacy_MMIO_DMA5_REASON 0x48 +#define B43legacy_MMIO_DMA5_IRQ_MASK 0x4C +#define B43legacy_MMIO_MACCTL 0x120 +#define B43legacy_MMIO_STATUS_BITFIELD 0x120 +#define B43legacy_MMIO_STATUS2_BITFIELD 0x124 +#define B43legacy_MMIO_GEN_IRQ_REASON 0x128 +#define B43legacy_MMIO_GEN_IRQ_MASK 0x12C +#define B43legacy_MMIO_RAM_CONTROL 0x130 +#define B43legacy_MMIO_RAM_DATA 0x134 +#define B43legacy_MMIO_PS_STATUS 0x140 
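+/* Hardware radio-enable (rfkill switch) status is split into a high word
+ * here and a low word at 0x49A; both are tested with the
+ * B43legacy_MMIO_RADIO_HWENABLED_*_MASK bits defined further down. */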
+#define B43legacy_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43legacy_MMIO_SHM_CONTROL 0x160 +#define B43legacy_MMIO_SHM_DATA 0x164 +#define B43legacy_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43legacy_MMIO_XMITSTAT_0 0x170 +#define B43legacy_MMIO_XMITSTAT_1 0x174 +#define B43legacy_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ +#define B43legacy_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ + +/* 32-bit DMA */ +#define B43legacy_MMIO_DMA32_BASE0 0x200 +#define B43legacy_MMIO_DMA32_BASE1 0x220 +#define B43legacy_MMIO_DMA32_BASE2 0x240 +#define B43legacy_MMIO_DMA32_BASE3 0x260 +#define B43legacy_MMIO_DMA32_BASE4 0x280 +#define B43legacy_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43legacy_MMIO_DMA64_BASE0 0x200 +#define B43legacy_MMIO_DMA64_BASE1 0x240 +#define B43legacy_MMIO_DMA64_BASE2 0x280 +#define B43legacy_MMIO_DMA64_BASE3 0x2C0 +#define B43legacy_MMIO_DMA64_BASE4 0x300 +#define B43legacy_MMIO_DMA64_BASE5 0x340 +/* PIO */ +#define B43legacy_MMIO_PIO1_BASE 0x300 +#define B43legacy_MMIO_PIO2_BASE 0x310 +#define B43legacy_MMIO_PIO3_BASE 0x320 +#define B43legacy_MMIO_PIO4_BASE 0x330 + +#define B43legacy_MMIO_PHY_VER 0x3E0 +#define B43legacy_MMIO_PHY_RADIO 0x3E2 +#define B43legacy_MMIO_PHY0 0x3E6 +#define B43legacy_MMIO_ANTENNA 0x3E8 +#define B43legacy_MMIO_CHANNEL 0x3F0 +#define B43legacy_MMIO_CHANNEL_EXT 0x3F4 +#define B43legacy_MMIO_RADIO_CONTROL 0x3F6 +#define B43legacy_MMIO_RADIO_DATA_HIGH 0x3F8 +#define B43legacy_MMIO_RADIO_DATA_LOW 0x3FA +#define B43legacy_MMIO_PHY_CONTROL 0x3FC +#define B43legacy_MMIO_PHY_DATA 0x3FE +#define B43legacy_MMIO_MACFILTER_CONTROL 0x420 +#define B43legacy_MMIO_MACFILTER_DATA 0x422 +#define B43legacy_MMIO_RCMTA_COUNT 0x43C /* Receive Match Transmitter Addr */ +#define B43legacy_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43legacy_MMIO_GPIO_CONTROL 0x49C +#define B43legacy_MMIO_GPIO_MASK 0x49E +#define B43legacy_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43legacy_MMIO_RNG 0x65A +#define B43legacy_MMIO_POWERUP_DELAY 0x6A8 + +/* SPROM boardflags_lo values */ +#define B43legacy_BFL_PACTRL 0x0002 +#define B43legacy_BFL_RSSI 0x0008 +#define B43legacy_BFL_EXTLNA 0x1000 + +/* GPIO register offset, in both ChipCommon and PCI core. 
*/ +#define B43legacy_GPIO_CONTROL 0x6c + +/* SHM Routing */ +#define B43legacy_SHM_SHARED 0x0001 +#define B43legacy_SHM_WIRELESS 0x0002 +#define B43legacy_SHM_HW 0x0004 +#define B43legacy_SHM_UCODE 0x0300 + +/* SHM Routing modifiers */ +#define B43legacy_SHM_AUTOINC_R 0x0200 /* Read Auto-increment */ +#define B43legacy_SHM_AUTOINC_W 0x0100 /* Write Auto-increment */ +#define B43legacy_SHM_AUTOINC_RW (B43legacy_SHM_AUTOINC_R | \ + B43legacy_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43legacy_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43legacy_SHM_SH_HOSTFLO 0x005E /* Hostflags ucode opts (low) */ +#define B43legacy_SHM_SH_HOSTFHI 0x0060 /* Hostflags ucode opts (high) */ +/* SHM_SHARED crypto engine */ +#define B43legacy_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block */ +/* SHM_SHARED beacon variables */ +#define B43legacy_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word */ +/* SHM_SHARED ACK/CTS control */ +#define B43legacy_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word */ +/* SHM_SHARED probe response variables */ +#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */ +#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +/* SHM_SHARED rate tables */ +/* SHM_SHARED microcode soft registers */ +#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43legacy_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43legacy_SHM_SH_UCODETIME 0x0006 /* Microcode time */ + +#define B43legacy_UCODEFLAGS_OFFSET 0x005E + +/* Hardware Radio Enable masks */ +#define B43legacy_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43legacy_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. See b43legacy_hf_read/write() */ +#define B43legacy_HF_SYMW 0x00000002 /* G-PHY SYM workaround */ +#define B43legacy_HF_GDCW 0x00000020 /* G-PHY DV cancel filter */ +#define B43legacy_HF_OFDMPABOOST 0x00000040 /* Enable PA boost OFDM */ +#define B43legacy_HF_EDCF 0x00000100 /* on if WME/MAC suspended */ + +/* MacFilter offsets. 
*/ +#define B43legacy_MACFILTER_SELF 0x0000 +#define B43legacy_MACFILTER_BSSID 0x0003 +#define B43legacy_MACFILTER_MAC 0x0010 + +/* PHYVersioning */ +#define B43legacy_PHYTYPE_B 0x01 +#define B43legacy_PHYTYPE_G 0x02 + +/* PHYRegisters */ +#define B43legacy_PHY_G_LO_CONTROL 0x0810 +#define B43legacy_PHY_ILT_G_CTRL 0x0472 +#define B43legacy_PHY_ILT_G_DATA1 0x0473 +#define B43legacy_PHY_ILT_G_DATA2 0x0474 +#define B43legacy_PHY_G_PCTL 0x0029 +#define B43legacy_PHY_RADIO_BITFIELD 0x0401 +#define B43legacy_PHY_G_CRS 0x0429 +#define B43legacy_PHY_NRSSILT_CTRL 0x0803 +#define B43legacy_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43legacy_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43legacy_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43legacy_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43legacy_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43legacy_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43legacy_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep bad PLCP frames */ +#define B43legacy_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43legacy_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43legacy_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43legacy_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* StatusBitField */ +#define B43legacy_SBF_MAC_ENABLED 0x00000001 +#define B43legacy_SBF_CORE_READY 0x00000004 +#define B43legacy_SBF_400 0x00000400 /*FIXME: fix name*/ +#define B43legacy_SBF_XFER_REG_BYTESWAP 0x00010000 +#define B43legacy_SBF_MODE_NOTADHOC 0x00020000 +#define B43legacy_SBF_MODE_AP 0x00040000 +#define B43legacy_SBF_RADIOREG_LOCK 0x00080000 +#define B43legacy_SBF_MODE_MONITOR 0x00400000 +#define B43legacy_SBF_MODE_PROMISC 0x01000000 +#define B43legacy_SBF_PS1 0x02000000 +#define B43legacy_SBF_PS2 0x04000000 +#define B43legacy_SBF_NO_SSID_BCAST 0x08000000 +#define B43legacy_SBF_TIME_UPDATE 0x10000000 + +/* 802.11 core specific TM State Low flags */ +#define B43legacy_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43legacy_TMSLOW_PLLREFSEL 0x00200000 /* PLL Freq Ref Select */ +#define B43legacy_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Ctrl Enbl */ +#define B43legacy_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43legacy_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High flags */ +#define B43legacy_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available */ +#define B43legacy_TMSHIGH_GPHY 0x00010000 /* G-PHY avail (rev >= 5) */ + +#define B43legacy_UCODEFLAG_AUTODIV 0x0001 + +/* Generic-Interrupt reasons. 
*/ +#define B43legacy_IRQ_MAC_SUSPENDED 0x00000001 +#define B43legacy_IRQ_BEACON 0x00000002 +#define B43legacy_IRQ_TBTT_INDI 0x00000004 /* Target Beacon Transmit Time */ +#define B43legacy_IRQ_BEACON_TX_OK 0x00000008 +#define B43legacy_IRQ_BEACON_CANCEL 0x00000010 +#define B43legacy_IRQ_ATIM_END 0x00000020 +#define B43legacy_IRQ_PMQ 0x00000040 +#define B43legacy_IRQ_PIO_WORKAROUND 0x00000100 +#define B43legacy_IRQ_MAC_TXERR 0x00000200 +#define B43legacy_IRQ_PHY_TXERR 0x00000800 +#define B43legacy_IRQ_PMEVENT 0x00001000 +#define B43legacy_IRQ_TIMER0 0x00002000 +#define B43legacy_IRQ_TIMER1 0x00004000 +#define B43legacy_IRQ_DMA 0x00008000 +#define B43legacy_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43legacy_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43legacy_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43legacy_IRQ_UCODE_DEBUG 0x08000000 +#define B43legacy_IRQ_RFKILL 0x10000000 +#define B43legacy_IRQ_TX_OK 0x20000000 +#define B43legacy_IRQ_PHY_G_CHANGED 0x40000000 +#define B43legacy_IRQ_TIMEOUT 0x80000000 + +#define B43legacy_IRQ_ALL 0xFFFFFFFF +#define B43legacy_IRQ_MASKTEMPLATE (B43legacy_IRQ_MAC_SUSPENDED | \ + B43legacy_IRQ_BEACON | \ + B43legacy_IRQ_TBTT_INDI | \ + B43legacy_IRQ_ATIM_END | \ + B43legacy_IRQ_PMQ | \ + B43legacy_IRQ_MAC_TXERR | \ + B43legacy_IRQ_PHY_TXERR | \ + B43legacy_IRQ_DMA | \ + B43legacy_IRQ_TXFIFO_FLUSH_OK | \ + B43legacy_IRQ_NOISESAMPLE_OK | \ + B43legacy_IRQ_UCODE_DEBUG | \ + B43legacy_IRQ_RFKILL | \ + B43legacy_IRQ_TX_OK) + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. */ +#define B43legacy_CCK_RATE_1MB 2 +#define B43legacy_CCK_RATE_2MB 4 +#define B43legacy_CCK_RATE_5MB 11 +#define B43legacy_CCK_RATE_11MB 22 +#define B43legacy_OFDM_RATE_6MB 12 +#define B43legacy_OFDM_RATE_9MB 18 +#define B43legacy_OFDM_RATE_12MB 24 +#define B43legacy_OFDM_RATE_18MB 36 +#define B43legacy_OFDM_RATE_24MB 48 +#define B43legacy_OFDM_RATE_36MB 72 +#define B43legacy_OFDM_RATE_48MB 96 +#define B43legacy_OFDM_RATE_54MB 108 +/* Convert a b43legacy rate value to a rate in 100kbps */ +#define B43legacy_RATE_TO_100KBPS(rate) (((rate) * 10) / 2) + + +#define B43legacy_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43legacy_DEFAULT_LONG_RETRY_LIMIT 4 + +/* Max size of a security key */ +#define B43legacy_SEC_KEYSIZE 16 +/* Security algorithms. */ +enum { + B43legacy_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. 
*/ + B43legacy_SEC_ALGO_WEP40, + B43legacy_SEC_ALGO_TKIP, + B43legacy_SEC_ALGO_AES, + B43legacy_SEC_ALGO_WEP104, + B43legacy_SEC_ALGO_AES_LEGACY, +}; + +/* Core Information Registers */ +#define B43legacy_CIR_BASE 0xf00 +#define B43legacy_CIR_SBTPSFLAG (B43legacy_CIR_BASE + 0x18) +#define B43legacy_CIR_SBIMSTATE (B43legacy_CIR_BASE + 0x90) +#define B43legacy_CIR_SBINTVEC (B43legacy_CIR_BASE + 0x94) +#define B43legacy_CIR_SBTMSTATELOW (B43legacy_CIR_BASE + 0x98) +#define B43legacy_CIR_SBTMSTATEHIGH (B43legacy_CIR_BASE + 0x9c) +#define B43legacy_CIR_SBIMCONFIGLOW (B43legacy_CIR_BASE + 0xa8) +#define B43legacy_CIR_SB_ID_HI (B43legacy_CIR_BASE + 0xfc) + +/* sbtmstatehigh state flags */ +#define B43legacy_SBTMSTATEHIGH_SERROR 0x00000001 +#define B43legacy_SBTMSTATEHIGH_BUSY 0x00000004 +#define B43legacy_SBTMSTATEHIGH_TIMEOUT 0x00000020 +#define B43legacy_SBTMSTATEHIGH_G_PHY_AVAIL 0x00010000 +#define B43legacy_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000 +#define B43legacy_SBTMSTATEHIGH_DMA64BIT 0x10000000 +#define B43legacy_SBTMSTATEHIGH_GATEDCLK 0x20000000 +#define B43legacy_SBTMSTATEHIGH_BISTFAILED 0x40000000 +#define B43legacy_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000 + +/* sbimstate flags */ +#define B43legacy_SBIMSTATE_IB_ERROR 0x20000 +#define B43legacy_SBIMSTATE_TIMEOUT 0x40000 + +#define PFX KBUILD_MODNAME ": " +#ifdef assert +# undef assert +#endif +#ifdef CONFIG_B43LEGACY_DEBUG +# define B43legacy_WARN_ON(expr) \ + do { \ + if (unlikely((expr))) { \ + printk(KERN_INFO PFX "Test (%s) failed at:" \ + " %s:%d:%s()\n", \ + #expr, __FILE__, \ + __LINE__, __FUNCTION__); \ + } \ + } while (0) +# define B43legacy_BUG_ON(expr) \ + do { \ + if (unlikely((expr))) { \ + printk(KERN_INFO PFX "Test (%s) failed\n", \ + #expr); \ + BUG_ON(expr); \ + } \ + } while (0) +# define B43legacy_DEBUG 1 +#else +# define B43legacy_WARN_ON(x) do { /* nothing */ } while (0) +# define B43legacy_BUG_ON(x) do { /* nothing */ } while (0) +# define B43legacy_DEBUG 0 +#endif + + +struct net_device; +struct pci_dev; +struct b43legacy_dmaring; +struct b43legacy_pioqueue; + +/* The firmware file header */ +#define B43legacy_FW_TYPE_UCODE 'u' +#define B43legacy_FW_TYPE_PCM 'p' +#define B43legacy_FW_TYPE_IV 'i' +struct b43legacy_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43legacy_IV_OFFSET_MASK 0x7FFF +#define B43legacy_IV_32BIT 0x8000 +struct b43legacy_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + +#define B43legacy_PHYMODE(phytype) (1 << (phytype)) +#define B43legacy_PHYMODE_B B43legacy_PHYMODE \ + ((B43legacy_PHYTYPE_B)) +#define B43legacy_PHYMODE_G B43legacy_PHYMODE \ + ((B43legacy_PHYTYPE_G)) + +/* Value pair to measure the LocalOscillator. */ +struct b43legacy_lopair { + s8 low; + s8 high; + u8 used:1; +}; +#define B43legacy_LO_COUNT (14*4) + +struct b43legacy_phy { + /* Possible PHYMODEs on this PHY */ + u8 possible_phymodes; + /* GMODE bit enabled in MACCTL? */ + bool gmode; + /* Possible ieee80211 subsystem hwmodes for this PHY. + * Which mode is selected, depends on thr GMODE enabled bit */ +#define B43legacy_MAX_PHYHWMODES 2 + struct ieee80211_hw_mode hwmodes[B43legacy_MAX_PHYHWMODES]; + + /* Analog Type */ + u8 analog; + /* B43legacy_PHYTYPE_ */ + u8 type; + /* PHY revision number. 
*/ + u8 rev; + + u16 antenna_diversity; + u16 savedpctlreg; + /* Radio versioning */ + u16 radio_manuf; /* Radio manufacturer */ + u16 radio_ver; /* Radio version */ + u8 calibrated:1; + u8 radio_rev; /* Radio revision */ + + bool locked; /* Only used in b43legacy_phy_{un}lock() */ + bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */ + + /* ACI (adjacent channel interference) flags. */ + bool aci_enable; + bool aci_wlan_automatic; + bool aci_hw_rssi; + + /* Radio switched on/off */ + bool radio_on; + struct { + /* Values saved when turning the radio off. + * They are needed when turning it on again. */ + bool valid; + u16 rfover; + u16 rfoverval; + } radio_off_context; + + u16 minlowsig[2]; + u16 minlowsigpos[2]; + + /* LO Measurement Data. + * Use b43legacy_get_lopair() to get a value. + */ + struct b43legacy_lopair *_lo_pairs; + /* TSSI to dBm table in use */ + const s8 *tssi2dbm; + /* idle TSSI value */ + s8 idle_tssi; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi; + + /* LocalOscillator control values. */ + struct b43legacy_txpower_lo_control *lo_control; + /* Values from b43legacy_calc_loopback_gain() */ + s16 max_lb_gain; /* Maximum Loopback gain in hdB */ + s16 trsw_rx_gain; /* TRSW RX gain in hdB */ + s16 lna_lod_gain; /* LNA lod */ + s16 lna_gain; /* LNA */ + s16 pga_gain; /* PGA */ + + /* PHY lock for core.rev < 3 + * This lock is only used by b43legacy_phy_{un}lock() + */ + spinlock_t lock; + + /* Desired TX power level (in dBm). This is set by the user and + * adjusted in b43legacy_phy_xmitpower(). */ + u8 power_level; + + /* Values from b43legacy_calc_loopback_gain() */ + u16 loopback_gain[2]; + + /* TX Power control values. */ + /* B/G PHY */ + struct { + /* Current Radio Attenuation for TXpower recalculation. */ + u16 rfatt; + /* Current Baseband Attenuation for TXpower recalculation. */ + u16 bbatt; + /* Current TXpower control value for TXpower recalculation. */ + u16 txctl1; + u16 txctl2; + }; + /* A PHY */ + struct { + u16 txpwr_offset; + }; + +#ifdef CONFIG_B43LEGACY_DEBUG + bool manual_txpower_control; /* Manual TX-power control enabled? */ +#endif + /* Current Interference Mitigation mode */ + int interfmode; + /* Stack of saved values from the Interference Mitigation code. + * Each value in the stack is layed out as follows: + * bit 0-11: offset + * bit 12-15: register ID + * bit 16-32: value + * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT + */ +#define B43legacy_INTERFSTACK_SIZE 26 + u32 interfstack[B43legacy_INTERFSTACK_SIZE]; + + /* Saved values from the NRSSI Slope calculation */ + s16 nrssi[2]; + s32 nrssislope; + /* In memory nrssi lookup table. */ + s8 nrssi_lt[64]; + + /* current channel */ + u8 channel; + + u16 lofcal; + + u16 initval; +}; + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43legacy_dma { + struct b43legacy_dmaring *tx_ring0; + struct b43legacy_dmaring *tx_ring1; + struct b43legacy_dmaring *tx_ring2; + struct b43legacy_dmaring *tx_ring3; + struct b43legacy_dmaring *tx_ring4; + struct b43legacy_dmaring *tx_ring5; + + struct b43legacy_dmaring *rx_ring0; + struct b43legacy_dmaring *rx_ring3; /* only on core.rev < 5 */ +}; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43legacy_pio { + struct b43legacy_pioqueue *queue0; + struct b43legacy_pioqueue *queue1; + struct b43legacy_pioqueue *queue2; + struct b43legacy_pioqueue *queue3; +}; + +/* Context information for a noise calculation (Link Quality). 
*/ +struct b43legacy_noise_calculation { + u8 channel_at_start; + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43legacy_stats { + u8 link_noise; + /* Store the last TX/RX times here for updating the leds. */ + unsigned long last_tx; + unsigned long last_rx; +}; + +struct b43legacy_key { + void *keyconf; + bool enabled; + u8 algorithm; +}; + +struct b43legacy_wldev; + +/* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */ +struct b43legacy_wl { + /* Pointer to the active wireless device on this chip */ + struct b43legacy_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + spinlock_t irq_lock; /* locks IRQ */ + struct mutex mutex; /* locks wireless core state */ + spinlock_t leds_lock; /* lock for leds */ + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + /* Opaque ID of the operating interface from the ieee80211 + * subsystem. Do not modify. + */ + int if_id; + /* MAC address (can be NULL). */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID (can be NULL). */ + u8 bssid[ETH_ALEN]; + /* Interface type. (IEEE80211_IF_TYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + + struct hwrng rng; + u8 rng_initialized; + char rng_name[30 + 1]; + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43legacy_firmware { + /* Microcode */ + const struct firmware *ucode; + /* PCM code */ + const struct firmware *pcm; + /* Initial MMIO values for the firmware */ + const struct firmware *initvals; + /* Initial MMIO values for the firmware, band-specific */ + const struct firmware *initvals_band; + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43legacy_STAT_UNINIT = 0, /* Uninitialized. */ + B43legacy_STAT_INITIALIZED = 1, /* Initialized, not yet started. */ + B43legacy_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43legacy_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43legacy_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* *** --- HOW LOCKING WORKS IN B43legacy --- *** + * + * You should always acquire both, wl->mutex and wl->irq_lock unless: + * - You don't need to acquire wl->irq_lock, if the interface is stopped. + * - You don't need to acquire wl->mutex in the IRQ handler, IRQ tasklet + * and packet TX path (and _ONLY_ there.) + */ + +/* Data structure for one wireless device (802.11 core) */ +struct b43legacy_wldev { + struct ssb_device *dev; + struct b43legacy_wl *wl; + + /* The device initialization status. + * Use b43legacy_status() to query. */ + atomic_t __init_status; + /* Saved init status for handling suspend. */ + int suspend_init_status; + + bool __using_pio; /* Using pio rather than dma. */ + bool bad_frames_preempt;/* Use "Bad Frames Preemption". */ + bool reg124_set_0x4; /* Variable to keep track of IRQ. */ + bool short_preamble; /* TRUE if using short preamble. */ + bool short_slot; /* TRUE if using short slot timing. */ + bool radio_hw_enable; /* State of radio hardware enable bit. 
*/ + + /* PHY/Radio device. */ + struct b43legacy_phy phy; + union { + /* DMA engines. */ + struct b43legacy_dma dma; + /* PIO engines. */ + struct b43legacy_pio pio; + }; + + /* Various statistics about the physical device. */ + struct b43legacy_stats stats; + +#define B43legacy_NR_LEDS 4 + struct b43legacy_led leds[B43legacy_NR_LEDS]; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* saved irq enable/disable state bitfield. */ + u32 irq_savedstate; + /* Link Quality calculation context. */ + struct b43legacy_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Interrupt Service Routine tasklet (bottom-half) */ + struct tasklet_struct isr_tasklet; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + u8 max_nr_keys; + struct b43legacy_key key[58]; + + /* Cached beacon template while uploading the template. */ + struct sk_buff *cached_beacon; + + /* Firmware data */ + struct b43legacy_firmware fw; + + /* Devicelist in struct b43legacy_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43LEGACY_DEBUG + struct b43legacy_dfsentry *dfsentry; +#endif +}; + + +static inline +struct b43legacy_wl *hw_to_b43legacy_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +/* Helper function, which returns a boolean. + * TRUE, if PIO is used; FALSE, if DMA is used. + */ +#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return dev->__using_pio; +} +#elif defined(CONFIG_B43LEGACY_DMA) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return 0; +} +#elif defined(CONFIG_B43LEGACY_PIO) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return 1; +} +#else +# error "Using neither DMA nor PIO? Confused..." +#endif + + +static inline +struct b43legacy_wldev *dev_to_b43legacy_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (IEEE80211_IF_TYPE_XXX). */ +static inline +int b43legacy_is_mode(struct b43legacy_wl *wl, int type) +{ + return (wl->operating && + wl->if_type == type); +} + +static inline +bool is_bcm_board_vendor(struct b43legacy_wldev *dev) +{ + return (dev->dev->bus->boardinfo.vendor == PCI_VENDOR_ID_BROADCOM); +} + +static inline +u16 b43legacy_read16(struct b43legacy_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline +void b43legacy_write16(struct b43legacy_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline +u32 b43legacy_read32(struct b43legacy_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline +void b43legacy_write32(struct b43legacy_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +static inline +struct b43legacy_lopair *b43legacy_get_lopair(struct b43legacy_phy *phy, + u16 radio_attenuation, + u16 baseband_attenuation) +{ + return phy->_lo_pairs + (radio_attenuation + + 14 * (baseband_attenuation / 2)); +} + + + +/* Message printing */ +void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) 
+ __attribute__((format(printf, 2, 3))); +void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#if B43legacy_DEBUG +void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#else /* DEBUG */ +# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0) +#endif /* DEBUG */ + + +/** Limit a value between two limits */ +#ifdef limit_value +# undef limit_value +#endif +#define limit_value(value, min, max) \ + ({ \ + typeof(value) __value = (value); \ + typeof(value) __min = (min); \ + typeof(value) __max = (max); \ + if (__value < __min) \ + __value = __min; \ + else if (__value > __max) \ + __value = __max; \ + __value; \ + }) + +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4) + +#endif /* B43legacy_H_ */ diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c new file mode 100644 index 000000000000..eefa6fb79685 --- /dev/null +++ b/drivers/net/wireless/b43legacy/debugfs.c @@ -0,0 +1,505 @@ +/* + + Broadcom B43legacy wireless driver + + debugfs driver debugging code + + Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/mutex.h> + +#include "b43legacy.h" +#include "main.h" +#include "debugfs.h" +#include "dma.h" +#include "pio.h" +#include "xmit.h" + + +/* The root directory. */ +static struct dentry *rootdir; + +struct b43legacy_debugfs_fops { + ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); + struct file_operations fops; + /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? */ + bool take_irqlock; +}; + +static inline +struct b43legacy_dfs_file * fops_to_dfs_file(struct b43legacy_wldev *dev, + const struct b43legacy_debugfs_fops *dfops) +{ + void *p; + + p = dev->dfsentry; + p += dfops->file_struct_offset; + + return p; +} + + +#define fappend(fmt, x...) 
\ + do { \ + if (bufsize - count) \ + count += snprintf(buf + count, \ + bufsize - count, \ + fmt , ##x); \ + else \ + printk(KERN_ERR "b43legacy: fappend overflow\n"); \ + } while (0) + + +/* wl->irq_lock is locked */ +static ssize_t tsf_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + u64 tsf; + + b43legacy_tsf_read(dev, &tsf); + fappend("0x%08x%08x\n", + (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(tsf & 0xFFFFFFFFULL)); + + return count; +} + +/* wl->irq_lock is locked */ +static int tsf_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) +{ + u64 tsf; + + if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1) + return -EINVAL; + b43legacy_tsf_write(dev, tsf); + + return 0; +} + +/* wl->irq_lock is locked */ +static ssize_t ucode_regs_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + + for (i = 0; i < 64; i++) { + fappend("r%d = 0x%04x\n", i, + b43legacy_shm_read16(dev, B43legacy_SHM_WIRELESS, i)); + } + + return count; +} + +/* wl->irq_lock is locked */ +static ssize_t shm_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + u16 tmp; + __le16 *le16buf = (__le16 *)buf; + + for (i = 0; i < 0x1000; i++) { + if (bufsize <= 0) + break; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 2 * i); + le16buf[i] = cpu_to_le16(tmp); + count += sizeof(tmp); + bufsize -= sizeof(tmp); + } + + return count; +} + +static ssize_t txstat_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + struct b43legacy_txstatus_log *log = &dev->dfsentry->txstatlog; + ssize_t count = 0; + unsigned long flags; + int i, idx; + struct b43legacy_txstatus *stat; + + spin_lock_irqsave(&log->lock, flags); + if (log->end < 0) { + fappend("Nothing transmitted, yet\n"); + goto out_unlock; + } + fappend("b43legacy TX status reports:\n\n" + "index | cookie | seq | phy_stat | frame_count | " + "rts_count | supp_reason | pm_indicated | " + "intermediate | for_ampdu | acked\n" "---\n"); + i = log->end + 1; + idx = 0; + while (1) { + if (i == B43legacy_NR_LOGGED_TXSTATUS) + i = 0; + stat = &(log->log[i]); + if (stat->cookie) { + fappend("%03d | " + "0x%04X | 0x%04X | 0x%02X | " + "0x%X | 0x%X | " + "%u | %u | " + "%u | %u | %u\n", + idx, + stat->cookie, stat->seq, stat->phy_stat, + stat->frame_count, stat->rts_count, + stat->supp_reason, stat->pm_indicated, + stat->intermediate, stat->for_ampdu, + stat->acked); + idx++; + } + if (i == log->end) + break; + i++; + } +out_unlock: + spin_unlock_irqrestore(&log->lock, flags); + + return count; +} + +/* wl->irq_lock is locked */ +static int restart_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) +{ + int err = 0; + + if (count > 0 && buf[0] == '1') { + b43legacy_controller_restart(dev, "manually restarted"); + } else + err = -EINVAL; + + return err; +} + +#undef fappend + +static int b43legacy_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43legacy_wldev *dev; + struct b43legacy_debugfs_fops *dfops; + struct b43legacy_dfs_file *dfile; + ssize_t ret = 0; + char *buf; + const size_t bufsize = 1024 * 128; + const size_t buforder = get_order(bufsize); + int err = 0; + + if (!count) + return 0; + dev = file->private_data; + if (!dev) + return -ENODEV; + + 
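+	/* The whole file is rendered into a 128 KiB buffer on the first read
+	 * (under wl->mutex, plus wl->irq_lock when the fops requests it) and
+	 * then handed out piecewise via simple_read_from_buffer(); the buffer
+	 * is freed once the reader has consumed all of it. */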
mutex_lock(&dev->wl->mutex); + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; + } + dfile = fops_to_dfs_file(dev, dfops); + + if (!dfile->buffer) { + buf = (char *)__get_free_pages(GFP_KERNEL, buforder); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + memset(buf, 0, bufsize); + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + ret = dfops->read(dev, buf, bufsize); + spin_unlock_irq(&dev->wl->irq_lock); + } else + ret = dfops->read(dev, buf, bufsize); + if (ret <= 0) { + free_pages((unsigned long)buf, buforder); + err = ret; + goto out_unlock; + } + dfile->data_len = ret; + dfile->buffer = buf; + } + + ret = simple_read_from_buffer(userbuf, count, ppos, + dfile->buffer, + dfile->data_len); + if (*ppos >= dfile->data_len) { + free_pages((unsigned long)dfile->buffer, buforder); + dfile->buffer = NULL; + dfile->data_len = 0; + } +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? err : ret; +} + +static ssize_t b43legacy_debugfs_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43legacy_wldev *dev; + struct b43legacy_debugfs_fops *dfops; + char *buf; + int err = 0; + + if (!count) + return 0; + if (count > PAGE_SIZE) + return -E2BIG; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; + } + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, userbuf, count)) { + err = -EFAULT; + goto out_freepage; + } + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + err = dfops->write(dev, buf, count); + spin_unlock_irq(&dev->wl->irq_lock); + } else + err = dfops->write(dev, buf, count); + if (err) + goto out_freepage; + +out_freepage: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? 
err : count; +} + + +#define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ + static struct b43legacy_debugfs_fops fops_##name = { \ + .read = _read, \ + .write = _write, \ + .fops = { \ + .open = b43legacy_debugfs_open, \ + .read = b43legacy_debugfs_read, \ + .write = b43legacy_debugfs_write, \ + }, \ + .file_struct_offset = offsetof(struct b43legacy_dfsentry, \ + file_##name), \ + .take_irqlock = _take_irqlock, \ + } + +B43legacy_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); +B43legacy_DEBUGFS_FOPS(ucode_regs, ucode_regs_read_file, NULL, 1); +B43legacy_DEBUGFS_FOPS(shm, shm_read_file, NULL, 1); +B43legacy_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); +B43legacy_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); + + +int b43legacy_debug(struct b43legacy_wldev *dev, enum b43legacy_dyndbg feature) +{ + return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); +} + +static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + int i; + + for (i = 0; i < __B43legacy_NR_DYNDBG; i++) + debugfs_remove(e->dyn_debug_dentries[i]); +} + +static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + struct dentry *d; + +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + d = debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + if (!IS_ERR(d)) \ + e->dyn_debug_dentries[id] = d; \ + } while (0) + + add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, 0); + add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, 0); + add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, 0); + add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, 0); + add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, 0); + +#undef add_dyn_dbg +} + +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e; + struct b43legacy_txstatus_log *log; + char devdir[16]; + + B43legacy_WARN_ON(!dev); + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + b43legacyerr(dev->wl, "debugfs: add device OOM\n"); + return; + } + e->dev = dev; + log = &e->txstatlog; + log->log = kcalloc(B43legacy_NR_LOGGED_TXSTATUS, + sizeof(struct b43legacy_txstatus), GFP_KERNEL); + if (!log->log) { + b43legacyerr(dev->wl, "debugfs: add device txstatus OOM\n"); + kfree(e); + return; + } + log->end = -1; + spin_lock_init(&log->lock); + + dev->dfsentry = e; + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); + e->subdir = debugfs_create_dir(devdir, rootdir); + if (!e->subdir || IS_ERR(e->subdir)) { + if (e->subdir == ERR_PTR(-ENODEV)) { + b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + } else { + b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n", + devdir); + } + dev->dfsentry = NULL; + kfree(log->log); + kfree(e); + return; + } + +#define ADD_FILE(name, mode) \ + do { \ + struct dentry *d; \ + d = debugfs_create_file(__stringify(name), \ + mode, e->subdir, dev, \ + &fops_##name.fops); \ + e->file_##name.dentry = NULL; \ + if (!IS_ERR(d)) \ + e->file_##name.dentry = d; \ + } while (0) + + + ADD_FILE(tsf, 0600); + ADD_FILE(ucode_regs, 0400); + ADD_FILE(shm, 0400); + ADD_FILE(txstat, 0400); + ADD_FILE(restart, 0200); + +#undef ADD_FILE + + b43legacy_add_dynamic_debug(dev); +} + +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e; + + if (!dev) + return; + e = dev->dfsentry; + if (!e) + 
return; + b43legacy_remove_dynamic_debug(dev); + + debugfs_remove(e->file_tsf.dentry); + debugfs_remove(e->file_ucode_regs.dentry); + debugfs_remove(e->file_shm.dentry); + debugfs_remove(e->file_txstat.dentry); + debugfs_remove(e->file_restart.dentry); + + debugfs_remove(e->subdir); + kfree(e->txstatlog.log); + kfree(e); +} + +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + struct b43legacy_txstatus_log *log; + struct b43legacy_txstatus *cur; + int i; + + if (!e) + return; + log = &e->txstatlog; + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&log->lock); + i = log->end + 1; + if (i == B43legacy_NR_LOGGED_TXSTATUS) + i = 0; + log->end = i; + cur = &(log->log[i]); + memcpy(cur, status, sizeof(*cur)); + spin_unlock(&log->lock); +} + +void b43legacy_debugfs_init(void) +{ + rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(rootdir)) + rootdir = NULL; +} + +void b43legacy_debugfs_exit(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/net/wireless/b43legacy/debugfs.h b/drivers/net/wireless/b43legacy/debugfs.h new file mode 100644 index 000000000000..ae3b0d0fa849 --- /dev/null +++ b/drivers/net/wireless/b43legacy/debugfs.h @@ -0,0 +1,89 @@ +#ifndef B43legacy_DEBUGFS_H_ +#define B43legacy_DEBUGFS_H_ + +struct b43legacy_wldev; +struct b43legacy_txstatus; + +enum b43legacy_dyndbg { /* Dynamic debugging features */ + B43legacy_DBG_XMITPOWER, + B43legacy_DBG_DMAOVERFLOW, + B43legacy_DBG_DMAVERBOSE, + B43legacy_DBG_PWORK_FAST, + B43legacy_DBG_PWORK_STOP, + __B43legacy_NR_DYNDBG, +}; + + +#ifdef CONFIG_B43LEGACY_DEBUG + +struct dentry; + +#define B43legacy_NR_LOGGED_TXSTATUS 100 + +struct b43legacy_txstatus_log { + struct b43legacy_txstatus *log; + int end; + spinlock_t lock; /* lock for debugging */ +}; + +struct b43legacy_dfs_file { + struct dentry *dentry; + char *buffer; + size_t data_len; +}; + +struct b43legacy_dfsentry { + struct b43legacy_wldev *dev; + struct dentry *subdir; + + struct b43legacy_dfs_file file_tsf; + struct b43legacy_dfs_file file_ucode_regs; + struct b43legacy_dfs_file file_shm; + struct b43legacy_dfs_file file_txstat; + struct b43legacy_dfs_file file_txpower_g; + struct b43legacy_dfs_file file_restart; + struct b43legacy_dfs_file file_loctls; + + struct b43legacy_txstatus_log txstatlog; + + /* Enabled/Disabled list for the dynamic debugging features. */ + u32 dyn_debug[__B43legacy_NR_DYNDBG]; + /* Dentries for the dynamic debugging entries. 
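An aside on the txstatus log kept by this file: b43legacy_debugfs_log_txstat() above overwrites the oldest of B43legacy_NR_LOGGED_TXSTATUS entries by advancing `end` and wrapping back to slot 0, and txstat_read_file() replays the surviving entries oldest-first starting at end + 1. Below is a minimal stand-alone sketch of that wrap-around bookkeeping; it is illustrative only and the demo_* names are hypothetical, not part of the driver.

#include <stdio.h>

#define NR_LOGGED 100                       // mirrors B43legacy_NR_LOGGED_TXSTATUS

struct demo_log {
	int cookie[NR_LOGGED];              // stand-in for struct b43legacy_txstatus
	int end;                            // index of the newest entry, -1 while empty
};

static void demo_log_add(struct demo_log *log, int cookie)
{
	int i = log->end + 1;

	if (i == NR_LOGGED)
		i = 0;                      // wrap back over the oldest slot
	log->cookie[i] = cookie;
	log->end = i;
}

int main(void)
{
	struct demo_log log = { .end = -1 };
	int i;

	for (i = 0; i < 250; i++)           // overflow the ring a couple of times
		demo_log_add(&log, i);
	// A reader would now start at (end + 1) % NR_LOGGED and walk forward,
	// dumping the 100 most recent entries oldest-first.
	printf("newest cookie %d sits in slot %d\n", log.cookie[log.end], log.end);
	return 0;
}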
*/ + struct dentry *dyn_debug_dentries[__B43legacy_NR_DYNDBG]; +}; + +int b43legacy_debug(struct b43legacy_wldev *dev, + enum b43legacy_dyndbg feature); + +void b43legacy_debugfs_init(void); +void b43legacy_debugfs_exit(void); +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev); +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev); +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +#else /* CONFIG_B43LEGACY_DEBUG*/ + +static inline +int b43legacy_debug(struct b43legacy_wldev *dev, + enum b43legacy_dyndbg feature) +{ + return 0; +} + +static inline +void b43legacy_debugfs_init(void) { } +static inline +void b43legacy_debugfs_exit(void) { } +static inline +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) { } +static inline +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) { } +static inline +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) + { } + +#endif /* CONFIG_B43LEGACY_DEBUG*/ + +#endif /* B43legacy_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c new file mode 100644 index 000000000000..8cb3dc4c4745 --- /dev/null +++ b/drivers/net/wireless/b43legacy/dma.c @@ -0,0 +1,1565 @@ +/* + + Broadcom B43legacy wireless driver + + DMA ringbuffer and descriptor allocation/management + + Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> + + Some code in this file is derived from the b44.c driver + Copyright (C) 2002 David S. Miller + Copyright (C) Pekka Pietikainen + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43legacy.h" +#include "dma.h" +#include "main.h" +#include "debugfs.h" +#include "xmit.h" + +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <net/dst.h> + +/* 32bit DMA ops. 
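Before the descriptor ops: op32_fill_descriptor() below cannot store a full bus address in a 32-bit descriptor, so it splits the address into the directly addressable part and an "address extension" that lands in the control word, then ORs the SSB translation bits back into the address field. Here is a stand-alone sketch of that split, assuming the SSB translation mask covers the top two address bits (0xC0000000, shift 30) as in the ssb headers of this era; the DEMO_* names are stand-ins, not driver symbols.

#include <stdint.h>
#include <stdio.h>

#define DEMO_TRANSLATION_MASK  0xC0000000u   // assumed SSB_DMA_TRANSLATION_MASK
#define DEMO_TRANSLATION_SHIFT 30            // assumed SSB_DMA_TRANSLATION_SHIFT

int main(void)
{
	uint32_t dmaaddr = 0xD7654321u;      // example bus address
	uint32_t addr    = dmaaddr & ~DEMO_TRANSLATION_MASK;
	uint32_t addrext = (dmaaddr & DEMO_TRANSLATION_MASK) >> DEMO_TRANSLATION_SHIFT;

	// The driver stores addrext in the ADDREXT field of the control word and
	// ORs ssb_dma_translation(dev) back into addr before writing the descriptor.
	printf("addr=0x%08X addrext=%u\n", (unsigned int)addr, (unsigned int)addrext);
	return 0;
}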
*/ +static +struct b43legacy_dmadesc_generic *op32_idx2desc( + struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta **meta) +{ + struct b43legacy_dmadesc32 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43legacy_dmadesc_generic *)desc; +} + +static void op32_fill_descriptor(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43legacy_dmadesc32 *descbase = ring->descbase; + int slot; + u32 ctl; + u32 addr; + u32 addrext; + + slot = (int)(&(desc->dma32) - descbase); + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK); + addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addr |= ssb_dma_translation(ring->dev->dev); + ctl = (bufsize - ring->frameoffset) + & B43legacy_DMA32_DCTL_BYTECNT; + if (slot == ring->nr_slots - 1) + ctl |= B43legacy_DMA32_DCTL_DTABLEEND; + if (start) + ctl |= B43legacy_DMA32_DCTL_FRAMESTART; + if (end) + ctl |= B43legacy_DMA32_DCTL_FRAMEEND; + if (irq) + ctl |= B43legacy_DMA32_DCTL_IRQ; + ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT) + & B43legacy_DMA32_DCTL_ADDREXT_MASK; + + desc->dma32.control = cpu_to_le32(ctl); + desc->dma32.address = cpu_to_le32(addr); +} + +static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc32))); +} + +static void op32_tx_suspend(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) + | B43legacy_DMA32_TXSUSPEND); +} + +static void op32_tx_resume(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) + & ~B43legacy_DMA32_TXSUSPEND); +} + +static int op32_get_current_rxslot(struct b43legacy_dmaring *ring) +{ + u32 val; + + val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS); + val &= B43legacy_DMA32_RXDPTR; + + return (val / sizeof(struct b43legacy_dmadesc32)); +} + +static void op32_set_current_rxslot(struct b43legacy_dmaring *ring, + int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc32))); +} + +static const struct b43legacy_dma_ops dma32_ops = { + .idx2desc = op32_idx2desc, + .fill_descriptor = op32_fill_descriptor, + .poke_tx = op32_poke_tx, + .tx_suspend = op32_tx_suspend, + .tx_resume = op32_tx_resume, + .get_current_rxslot = op32_get_current_rxslot, + .set_current_rxslot = op32_set_current_rxslot, +}; + +/* 64bit DMA ops. 
*/ +static +struct b43legacy_dmadesc_generic *op64_idx2desc( + struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta + **meta) +{ + struct b43legacy_dmadesc64 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43legacy_dmadesc_generic *)desc; +} + +static void op64_fill_descriptor(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43legacy_dmadesc64 *descbase = ring->descbase; + int slot; + u32 ctl0 = 0; + u32 ctl1 = 0; + u32 addrlo; + u32 addrhi; + u32 addrext; + + slot = (int)(&(desc->dma64) - descbase); + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addrlo = (u32)(dmaaddr & 0xFFFFFFFF); + addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); + addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addrhi |= ssb_dma_translation(ring->dev->dev); + if (slot == ring->nr_slots - 1) + ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND; + if (start) + ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART; + if (end) + ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND; + if (irq) + ctl0 |= B43legacy_DMA64_DCTL0_IRQ; + ctl1 |= (bufsize - ring->frameoffset) + & B43legacy_DMA64_DCTL1_BYTECNT; + ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT) + & B43legacy_DMA64_DCTL1_ADDREXT_MASK; + + desc->dma64.control0 = cpu_to_le32(ctl0); + desc->dma64.control1 = cpu_to_le32(ctl1); + desc->dma64.address_low = cpu_to_le32(addrlo); + desc->dma64.address_high = cpu_to_le32(addrhi); +} + +static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc64))); +} + +static void op64_tx_suspend(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) + | B43legacy_DMA64_TXSUSPEND); +} + +static void op64_tx_resume(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) + & ~B43legacy_DMA64_TXSUSPEND); +} + +static int op64_get_current_rxslot(struct b43legacy_dmaring *ring) +{ + u32 val; + + val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS); + val &= B43legacy_DMA64_RXSTATDPTR; + + return (val / sizeof(struct b43legacy_dmadesc64)); +} + +static void op64_set_current_rxslot(struct b43legacy_dmaring *ring, + int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc64))); +} + +static const struct b43legacy_dma_ops dma64_ops = { + .idx2desc = op64_idx2desc, + .fill_descriptor = op64_fill_descriptor, + .poke_tx = op64_poke_tx, + .tx_suspend = op64_tx_suspend, + .tx_resume = op64_tx_resume, + .get_current_rxslot = op64_get_current_rxslot, + .set_current_rxslot = op64_set_current_rxslot, +}; + + +static inline int free_slots(struct b43legacy_dmaring *ring) +{ + return (ring->nr_slots - ring->used_slots); +} + +static inline int next_slot(struct b43legacy_dmaring *ring, int slot) +{ + B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); + if (slot == ring->nr_slots - 1) + return 0; + return slot + 1; +} + +static inline int prev_slot(struct b43legacy_dmaring *ring, int slot) +{ + B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); + if (slot == 0) + return ring->nr_slots - 1; + return slot - 1; +} + +#ifdef CONFIG_B43LEGACY_DEBUG +static void 
update_max_used_slots(struct b43legacy_dmaring *ring, + int current_used_slots) +{ + if (current_used_slots <= ring->max_used_slots) + return; + ring->max_used_slots = current_used_slots; + if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(ring->dev->wl, + "max_used_slots increased to %d on %s ring %d\n", + ring->max_used_slots, + ring->tx ? "TX" : "RX", + ring->index); +} +#else +static inline +void update_max_used_slots(struct b43legacy_dmaring *ring, + int current_used_slots) +{ } +#endif /* DEBUG */ + +/* Request a slot for usage. */ +static inline +int request_slot(struct b43legacy_dmaring *ring) +{ + int slot; + + B43legacy_WARN_ON(!ring->tx); + B43legacy_WARN_ON(ring->stopped); + B43legacy_WARN_ON(free_slots(ring) == 0); + + slot = next_slot(ring, ring->current_slot); + ring->current_slot = slot; + ring->used_slots++; + + update_max_used_slots(ring, ring->used_slots); + + return slot; +} + +/* Mac80211-queue to b43legacy-ring mapping */ +static struct b43legacy_dmaring *priority_to_txring( + struct b43legacy_wldev *dev, + int queue_priority) +{ + struct b43legacy_dmaring *ring; + +/*FIXME: For now we always run on TX-ring-1 */ +return dev->dma.tx_ring1; + + /* 0 = highest priority */ + switch (queue_priority) { + default: + B43legacy_WARN_ON(1); + /* fallthrough */ + case 0: + ring = dev->dma.tx_ring3; + break; + case 1: + ring = dev->dma.tx_ring2; + break; + case 2: + ring = dev->dma.tx_ring1; + break; + case 3: + ring = dev->dma.tx_ring0; + break; + case 4: + ring = dev->dma.tx_ring4; + break; + case 5: + ring = dev->dma.tx_ring5; + break; + } + + return ring; +} + +/* Bcm4301-ring to mac80211-queue mapping */ +static inline int txring_to_priority(struct b43legacy_dmaring *ring) +{ + static const u8 idx_to_prio[] = + { 3, 2, 1, 0, 4, 5, }; + +/*FIXME: have only one queue, for now */ +return 0; + + return idx_to_prio[ring->index]; +} + + +u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx) +{ + static const u16 map64[] = { + B43legacy_MMIO_DMA64_BASE0, + B43legacy_MMIO_DMA64_BASE1, + B43legacy_MMIO_DMA64_BASE2, + B43legacy_MMIO_DMA64_BASE3, + B43legacy_MMIO_DMA64_BASE4, + B43legacy_MMIO_DMA64_BASE5, + }; + static const u16 map32[] = { + B43legacy_MMIO_DMA32_BASE0, + B43legacy_MMIO_DMA32_BASE1, + B43legacy_MMIO_DMA32_BASE2, + B43legacy_MMIO_DMA32_BASE3, + B43legacy_MMIO_DMA32_BASE4, + B43legacy_MMIO_DMA32_BASE5, + }; + + if (dma64bit) { + B43legacy_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map64))); + return map64[controller_idx]; + } + B43legacy_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map32))); + return map32[controller_idx]; +} + +static inline +dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, + unsigned char *buf, + size_t len, + int tx) +{ + dma_addr_t dmaaddr; + + if (tx) + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, + DMA_TO_DEVICE); + else + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, + DMA_FROM_DEVICE); + + return dmaaddr; +} + +static inline +void unmap_descbuffer(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len, + int tx) +{ + if (tx) + dma_unmap_single(ring->dev->dev->dev, + addr, len, + DMA_TO_DEVICE); + else + dma_unmap_single(ring->dev->dev->dev, + addr, len, + DMA_FROM_DEVICE); +} + +static inline +void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len) +{ + B43legacy_WARN_ON(ring->tx); + + dma_sync_single_for_cpu(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline +void 
sync_descbuffer_for_device(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len) +{ + B43legacy_WARN_ON(ring->tx); + + dma_sync_single_for_device(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline +void free_descriptor_buffer(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_meta *meta, + int irq_context) +{ + if (meta->skb) { + if (irq_context) + dev_kfree_skb_irq(meta->skb); + else + dev_kfree_skb(meta->skb); + meta->skb = NULL; + } +} + +static int alloc_ringmemory(struct b43legacy_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + ring->descbase = dma_alloc_coherent(dev, B43legacy_DMA_RINGMEMSIZE, + &(ring->dmabase), GFP_KERNEL); + if (!ring->descbase) { + b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" + " failed\n"); + return -ENOMEM; + } + memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE); + + return 0; +} + +static void free_ringmemory(struct b43legacy_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + dma_free_coherent(dev, B43legacy_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase); +} + +/* Reset the RX DMA channel */ +int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + offset = dma64 ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL; + b43legacy_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43legacy_DMA64_RXSTATUS : + B43legacy_DMA32_RXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43legacy_DMA64_RXSTAT; + if (value == B43legacy_DMA64_RXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43legacy_DMA32_RXSTATE; + if (value == B43legacy_DMA32_RXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43legacyerr(dev->wl, "DMA RX reset timed out\n"); + return -ENODEV; + } + + return 0; +} + +/* Reset the RX DMA channel */ +int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + for (i = 0; i < 10; i++) { + offset = dma64 ? B43legacy_DMA64_TXSTATUS : + B43legacy_DMA32_TXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43legacy_DMA64_TXSTAT; + if (value == B43legacy_DMA64_TXSTAT_DISABLED || + value == B43legacy_DMA64_TXSTAT_IDLEWAIT || + value == B43legacy_DMA64_TXSTAT_STOPPED) + break; + } else { + value &= B43legacy_DMA32_TXSTATE; + if (value == B43legacy_DMA32_TXSTAT_DISABLED || + value == B43legacy_DMA32_TXSTAT_IDLEWAIT || + value == B43legacy_DMA32_TXSTAT_STOPPED) + break; + } + msleep(1); + } + offset = dma64 ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL; + b43legacy_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43legacy_DMA64_TXSTATUS : + B43legacy_DMA32_TXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43legacy_DMA64_TXSTAT; + if (value == B43legacy_DMA64_TXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43legacy_DMA32_TXSTATE; + if (value == B43legacy_DMA32_TXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43legacyerr(dev->wl, "DMA TX reset timed out\n"); + return -ENODEV; + } + /* ensure the reset is completed. 
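The two reset helpers above share a poll-with-timeout shape: read the status register, mask out the state field, and retry up to ten times with msleep(1) in between, treating DISABLED as success. A stand-alone sketch of that pattern with a stubbed register read follows; it is illustrative only, and fake_read_status is hypothetical.

#include <stdio.h>

// Pretend the engine needs a few polls before the state field reads DISABLED (0).
static unsigned int fake_read_status(int attempt)
{
	return attempt < 3 ? 0x1000u : 0x0000u;
}

int main(void)
{
	int i, done = 0;

	for (i = 0; i < 10; i++) {
		// 0xF000 plays the role of the state mask (B43legacy_DMA32_TXSTATE).
		if ((fake_read_status(i) & 0xF000u) == 0) {
			done = 1;
			break;
		}
		// the driver sleeps here: msleep(1);
	}
	if (done)
		printf("engine disabled after %d polls\n", i + 1);
	else
		printf("reset timed out\n");
	return 0;
}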
*/ + msleep(1); + + return 0; +} + +static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + struct b43legacy_dmadesc_meta *meta, + gfp_t gfp_flags) +{ + struct b43legacy_rxhdr_fw3 *rxhdr; + struct b43legacy_hwtxstatus *txstat; + dma_addr_t dmaaddr; + struct sk_buff *skb; + + B43legacy_WARN_ON(ring->tx); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + if (dma_mapping_error(dmaaddr)) { + /* ugh. try to realloc in zone_dma */ + gfp_flags |= GFP_DMA; + + dev_kfree_skb_any(skb); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + } + + if (dma_mapping_error(dmaaddr)) { + dev_kfree_skb_any(skb); + return -EIO; + } + + meta->skb = skb; + meta->dmaaddr = dmaaddr; + ring->ops->fill_descriptor(ring, desc, dmaaddr, + ring->rx_buffersize, 0, 0, 0); + + rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data); + rxhdr->frame_len = 0; + txstat = (struct b43legacy_hwtxstatus *)(skb->data); + txstat->cookie = 0; + + return 0; +} + +/* Allocate the initial descbuffers. + * This is used for an RX ring only. + */ +static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring) +{ + int i; + int err = -ENOMEM; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); + if (err) { + b43legacyerr(ring->dev->wl, + "Failed to allocate initial descbuffers\n"); + goto err_unwind; + } + } + mb(); /* all descbuffer setup before next line */ + ring->used_slots = ring->nr_slots; + err = 0; +out: + return err; + +err_unwind: + for (i--; i >= 0; i--) { + desc = ring->ops->idx2desc(ring, i, &meta); + + unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); + dev_kfree_skb(meta->skb); + } + goto out; +} + +/* Do initial setup of the DMA controller. + * Reset the controller, write the ring busaddress + * and switch the "enable" bit on. 
+ */ +static int dmacontroller_setup(struct b43legacy_dmaring *ring) +{ + int err = 0; + u32 value; + u32 addrext; + u32 trans = ssb_dma_translation(ring->dev->dev); + + if (ring->tx) { + if (ring->dma64) { + u64 ringbase = (u64)(ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43legacy_DMA64_TXENABLE; + value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT) + & B43legacy_DMA64_TXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, + ((ringbase >> 32) + & ~SSB_DMA_TRANSLATION_MASK) + | trans); + } else { + u32 ringbase = (u32)(ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43legacy_DMA32_TXENABLE; + value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT) + & B43legacy_DMA32_TXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, + (ringbase & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + } + } else { + err = alloc_initial_descbuffers(ring); + if (err) + goto out; + if (ring->dma64) { + u64 ringbase = (u64)(ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << + B43legacy_DMA64_RXFROFF_SHIFT); + value |= B43legacy_DMA64_RXENABLE; + value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT) + & B43legacy_DMA64_RXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) | + trans); + b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, + 200); + } else { + u32 ringbase = (u32)(ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << + B43legacy_DMA32_RXFROFF_SHIFT); + value |= B43legacy_DMA32_RXENABLE; + value |= (addrext << + B43legacy_DMA32_RXADDREXT_SHIFT) + & B43legacy_DMA32_RXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, + (ringbase & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, + 200); + } + } + +out: + return err; +} + +/* Shutdown the DMA controller. 
*/ +static void dmacontroller_cleanup(struct b43legacy_dmaring *ring) +{ + if (ring->tx) { + b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0); + } else + b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0); + } else { + b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0); + } else + b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0); + } +} + +static void free_all_descbuffers(struct b43legacy_dmaring *ring) +{ + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + int i; + + if (!ring->used_slots) + return; + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + if (!meta->skb) { + B43legacy_WARN_ON(!ring->tx); + continue; + } + if (ring->tx) + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + ring->rx_buffersize, 0); + free_descriptor_buffer(ring, meta, 0); + } +} + +static u64 supported_dma_mask(struct b43legacy_wldev *dev) +{ + u32 tmp; + u16 mmio_base; + + tmp = b43legacy_read32(dev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_64BIT_MASK; + mmio_base = b43legacy_dmacontroller_base(0, 0); + b43legacy_write32(dev, + mmio_base + B43legacy_DMA32_TXCTL, + B43legacy_DMA32_TXADDREXT_MASK); + tmp = b43legacy_read32(dev, mmio_base + + B43legacy_DMA32_TXCTL); + if (tmp & B43legacy_DMA32_TXADDREXT_MASK) + return DMA_32BIT_MASK; + + return DMA_30BIT_MASK; +} + +/* Main initialization function. */ +static +struct b43legacy_dmaring *b43legacy_setup_dmaring( + struct b43legacy_wldev *dev, + int controller_index, + int for_tx, + int dma64) +{ + struct b43legacy_dmaring *ring; + int err; + int nr_slots; + dma_addr_t dma_test; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto out; + + nr_slots = B43legacy_RXRING_SLOTS; + if (for_tx) + nr_slots = B43legacy_TXRING_SLOTS; + + ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta), + GFP_KERNEL); + if (!ring->meta) + goto err_kfree_ring; + if (for_tx) { + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43legacy_txhdr_fw3), + GFP_KERNEL); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + /* test for ability to dma to txhdr_cache */ + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) { + /* ugh realloc */ + kfree(ring->txhdr_cache); + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43legacy_txhdr_fw3), + GFP_KERNEL | GFP_DMA); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) + goto err_kfree_txhdr_cache; + } + + dma_unmap_single(dev->dev->dev, + dma_test, sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + } + + ring->dev = dev; + ring->nr_slots = nr_slots; + ring->mmio_base = b43legacy_dmacontroller_base(dma64, + controller_index); + ring->index = controller_index; + ring->dma64 = !!dma64; + if (dma64) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; + if (for_tx) { + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = 
B43legacy_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET; + } else if (ring->index == 3) { + ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE; + ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET; + } else + B43legacy_WARN_ON(1); + } + spin_lock_init(&ring->lock); +#ifdef CONFIG_B43LEGACY_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + err = alloc_ringmemory(ring); + if (err) + goto err_kfree_txhdr_cache; + err = dmacontroller_setup(ring); + if (err) + goto err_free_ringmemory; + +out: + return ring; + +err_free_ringmemory: + free_ringmemory(ring); +err_kfree_txhdr_cache: + kfree(ring->txhdr_cache); +err_kfree_meta: + kfree(ring->meta); +err_kfree_ring: + kfree(ring); + ring = NULL; + goto out; +} + +/* Main cleanup function. */ +static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring) +{ + if (!ring) + return; + + b43legacydbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots:" + " %d/%d\n", (ring->dma64) ? "64" : "32", ring->mmio_base, + (ring->tx) ? "TX" : "RX", + ring->max_used_slots, ring->nr_slots); + /* Device IRQs are disabled prior entering this function, + * so no need to take care of concurrency with rx handler stuff. + */ + dmacontroller_cleanup(ring); + free_all_descbuffers(ring); + free_ringmemory(ring); + + kfree(ring->txhdr_cache); + kfree(ring->meta); + kfree(ring); +} + +void b43legacy_dma_free(struct b43legacy_wldev *dev) +{ + struct b43legacy_dma *dma; + + if (b43legacy_using_pio(dev)) + return; + dma = &dev->dma; + + b43legacy_destroy_dmaring(dma->rx_ring3); + dma->rx_ring3 = NULL; + b43legacy_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + + b43legacy_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; +} + +int b43legacy_dma_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_dma *dma = &dev->dma; + struct b43legacy_dmaring *ring; + int err; + u64 dmamask; + int dma64 = 0; + + dmamask = supported_dma_mask(dev); + if (dmamask == DMA_64BIT_MASK) + dma64 = 1; + + err = ssb_dma_set_mask(dev->dev, dmamask); + if (err) { +#ifdef BCM43XX_PIO + b43legacywarn(dev->wl, "DMA for this device not supported. " + "Falling back to PIO\n"); + dev->__using_pio = 1; + return -EAGAIN; +#else + b43legacyerr(dev->wl, "DMA for this device not supported and " + "no PIO support compiled in\n"); + return -EOPNOTSUPP; +#endif + } + + err = -ENOMEM; + /* setup TX DMA channels. */ + ring = b43legacy_setup_dmaring(dev, 0, 1, dma64); + if (!ring) + goto out; + dma->tx_ring0 = ring; + + ring = b43legacy_setup_dmaring(dev, 1, 1, dma64); + if (!ring) + goto err_destroy_tx0; + dma->tx_ring1 = ring; + + ring = b43legacy_setup_dmaring(dev, 2, 1, dma64); + if (!ring) + goto err_destroy_tx1; + dma->tx_ring2 = ring; + + ring = b43legacy_setup_dmaring(dev, 3, 1, dma64); + if (!ring) + goto err_destroy_tx2; + dma->tx_ring3 = ring; + + ring = b43legacy_setup_dmaring(dev, 4, 1, dma64); + if (!ring) + goto err_destroy_tx3; + dma->tx_ring4 = ring; + + ring = b43legacy_setup_dmaring(dev, 5, 1, dma64); + if (!ring) + goto err_destroy_tx4; + dma->tx_ring5 = ring; + + /* setup RX DMA channels. 
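b43legacy_dma_init() above builds the six TX rings with the usual kernel goto-unwind ladder: each successful allocation gains a matching error label, so a failure part-way through tears down exactly what was already built, in reverse order. A compressed stand-alone sketch of the same shape (illustrative only; the demo_* names are hypothetical):

#include <stdlib.h>

struct demo_ring { int id; };

static struct demo_ring *demo_setup_ring(int id)
{
	struct demo_ring *r = malloc(sizeof(*r));

	if (r)
		r->id = id;
	return r;
}

static int demo_dma_init(struct demo_ring **tx0, struct demo_ring **tx1)
{
	int err = -1;                       // stand-in for -ENOMEM

	*tx0 = demo_setup_ring(0);
	if (!*tx0)
		goto out;
	*tx1 = demo_setup_ring(1);
	if (!*tx1)
		goto err_destroy_tx0;       // tear down only what was built
	return 0;

err_destroy_tx0:
	free(*tx0);
	*tx0 = NULL;
out:
	return err;
}

int main(void)
{
	struct demo_ring *tx0 = NULL, *tx1 = NULL;

	if (demo_dma_init(&tx0, &tx1) == 0) {
		free(tx1);
		free(tx0);
	}
	return 0;
}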
*/ + ring = b43legacy_setup_dmaring(dev, 0, 0, dma64); + if (!ring) + goto err_destroy_tx5; + dma->rx_ring0 = ring; + + if (dev->dev->id.revision < 5) { + ring = b43legacy_setup_dmaring(dev, 3, 0, dma64); + if (!ring) + goto err_destroy_rx0; + dma->rx_ring3 = ring; + } + + b43legacydbg(dev->wl, "%d-bit DMA initialized\n", + (dmamask == DMA_64BIT_MASK) ? 64 : + (dmamask == DMA_32BIT_MASK) ? 32 : 30); + err = 0; +out: + return err; + +err_destroy_rx0: + b43legacy_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; +err_destroy_tx5: + b43legacy_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; +err_destroy_tx4: + b43legacy_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; +err_destroy_tx3: + b43legacy_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; +err_destroy_tx2: + b43legacy_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; +err_destroy_tx1: + b43legacy_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; +err_destroy_tx0: + b43legacy_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; + goto out; +} + +/* Generate a cookie for the TX header. */ +static u16 generate_cookie(struct b43legacy_dmaring *ring, + int slot) +{ + u16 cookie = 0x1000; + + /* Use the upper 4 bits of the cookie as + * DMA controller ID and store the slot number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. + */ + switch (ring->index) { + case 0: + cookie = 0xA000; + break; + case 1: + cookie = 0xB000; + break; + case 2: + cookie = 0xC000; + break; + case 3: + cookie = 0xD000; + break; + case 4: + cookie = 0xE000; + break; + case 5: + cookie = 0xF000; + break; + } + B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000)); + cookie |= (u16)slot; + + return cookie; +} + +/* Inspect a cookie and find out to which controller/slot it belongs. */ +static +struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev, + u16 cookie, int *slot) +{ + struct b43legacy_dma *dma = &dev->dma; + struct b43legacy_dmaring *ring = NULL; + + switch (cookie & 0xF000) { + case 0xA000: + ring = dma->tx_ring0; + break; + case 0xB000: + ring = dma->tx_ring1; + break; + case 0xC000: + ring = dma->tx_ring2; + break; + case 0xD000: + ring = dma->tx_ring3; + break; + case 0xE000: + ring = dma->tx_ring4; + break; + case 0xF000: + ring = dma->tx_ring5; + break; + default: + B43legacy_WARN_ON(1); + } + *slot = (cookie & 0x0FFF); + B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); + + return ring; +} + +static int dma_tx_fragment(struct b43legacy_dmaring *ring, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + u8 *header; + int slot; + int err; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + struct b43legacy_dmadesc_meta *meta_hdr; + struct sk_buff *bounce_skb; + +#define SLOTS_PER_PACKET 2 + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); + + /* Get a slot for the header. 
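generate_cookie() and parse_cookie() above tag every queued frame: the upper 4 bits of the 16-bit cookie identify the TX ring (0xA through 0xF for rings 0-5, so a valid cookie can never be 0, which the RX path treats specially) and the lower 12 bits carry the slot. A stand-alone round trip of that packing (illustrative only; the demo_* helpers are hypothetical):

#include <assert.h>
#include <stdio.h>

static unsigned short demo_make_cookie(int ring_index, int slot)
{
	// 0xA000..0xF000 select TX rings 0..5; the low 12 bits hold the slot.
	return (unsigned short)(((0xA + ring_index) << 12) | (slot & 0x0FFF));
}

static void demo_parse_cookie(unsigned short cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 0xA;
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	int ring, slot;
	unsigned short cookie = demo_make_cookie(4, 57);   // ring 4, slot 57

	demo_parse_cookie(cookie, &ring, &slot);
	assert(ring == 4 && slot == 57);
	printf("cookie=0x%04X -> ring %d, slot %d\n", cookie, ring, slot);
	return 0;
}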
*/ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta_hdr); + memset(meta_hdr, 0, sizeof(*meta_hdr)); + + header = &(ring->txhdr_cache[slot * sizeof( + struct b43legacy_txhdr_fw3)]); + b43legacy_generate_txhdr(ring->dev, header, + skb->data, skb->len, ctl, + generate_cookie(ring, slot)); + + meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, + sizeof(struct b43legacy_txhdr_fw3), 1); + if (dma_mapping_error(meta_hdr->dmaaddr)) + return -EIO; + ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0); + + /* Get a slot for the payload. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta); + memset(meta, 0, sizeof(*meta)); + + memcpy(&meta->txstat.control, ctl, sizeof(*ctl)); + meta->skb = skb; + meta->is_last_fragment = 1; + + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + /* create a bounce buffer in zone_dma on mapping failure. */ + if (dma_mapping_error(meta->dmaaddr)) { + bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) { + err = -ENOMEM; + goto out_unmap_hdr; + } + + memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + meta->skb = skb; + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + if (dma_mapping_error(meta->dmaaddr)) { + err = -EIO; + goto out_free_bounce; + } + } + + ops->fill_descriptor(ring, desc, meta->dmaaddr, + skb->len, 0, 1, 1); + + wmb(); /* previous stuff MUST be done */ + /* Now transfer the whole frame. */ + ops->poke_tx(ring, next_slot(ring, slot)); + return 0; + +out_free_bounce: + dev_kfree_skb_any(skb); +out_unmap_hdr: + unmap_descbuffer(ring, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1); + return err; +} + +static inline +int should_inject_overflow(struct b43legacy_dmaring *ring) +{ +#ifdef CONFIG_B43LEGACY_DEBUG + if (unlikely(b43legacy_debug(ring->dev, + B43legacy_DBG_DMAOVERFLOW))) { + /* Check if we should inject another ringbuffer overflow + * to test handling of this situation in the stack. */ + unsigned long next_overflow; + + next_overflow = ring->last_injected_overflow + HZ; + if (time_after(jiffies, next_overflow)) { + ring->last_injected_overflow = jiffies; + b43legacydbg(ring->dev->wl, + "Injecting TX ring overflow on " + "DMA controller %d\n", ring->index); + return 1; + } + } +#endif /* CONFIG_B43LEGACY_DEBUG */ + return 0; +} + +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct b43legacy_dmaring *ring; + int err = 0; + unsigned long flags; + + ring = priority_to_txring(dev, ctl->queue); + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { + b43legacywarn(dev->wl, "DMA queue overflow\n"); + err = -ENOSPC; + goto out_unlock; + } + /* Check if the queue was stopped in mac80211, + * but we got called nevertheless. + * That would be a mac80211 bug. */ + B43legacy_BUG_ON(ring->stopped); + + err = dma_tx_fragment(ring, skb, ctl); + if (unlikely(err)) { + b43legacyerr(dev->wl, "DMA tx mapping failure\n"); + goto out_unlock; + } + ring->nr_tx_packets++; + if ((free_slots(ring) < SLOTS_PER_PACKET) || + should_inject_overflow(ring)) { + /* This TX ring is full. 
*/ + ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 1; + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(dev->wl, "Stopped TX ring %d\n", + ring->index); + } +out_unlock: + spin_unlock_irqrestore(&ring->lock, flags); + + return err; +} + +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + const struct b43legacy_dma_ops *ops; + struct b43legacy_dmaring *ring; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + int slot; + + ring = parse_cookie(dev, status->cookie, &slot); + if (unlikely(!ring)) + return; + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&ring->lock); + + B43legacy_WARN_ON(!ring->tx); + ops = ring->ops; + while (1) { + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + desc = ops->idx2desc(ring, slot, &meta); + + if (meta->skb) + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), + 1); + + if (meta->is_last_fragment) { + B43legacy_WARN_ON(!meta->skb); + /* Call back to inform the ieee80211 subsystem about the + * status of the transmission. + * Some fields of txstat are already filled in dma_tx(). + */ + if (status->acked) { + meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; + } else { + if (!(meta->txstat.control.flags + & IEEE80211_TXCTL_NO_ACK)) + meta->txstat.excessive_retries = 1; + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. */ + meta->txstat.retry_count = 0; + } else + meta->txstat.retry_count = status->frame_count + - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, + &(meta->txstat)); + /* skb is freed by ieee80211_tx_status_irqsafe() */ + meta->skb = NULL; + } else { + /* No need to call free_descriptor_buffer here, as + * this is only the txhdr, which is not allocated. + */ + B43legacy_WARN_ON(meta->skb != NULL); + } + + /* Everything unmapped and free'd. So it's not used anymore. 
*/ + ring->used_slots--; + + if (meta->is_last_fragment) + break; + slot = next_slot(ring, slot); + } + dev->stats.last_tx = jiffies; + if (ring->stopped) { + B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); + ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 0; + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(dev->wl, "Woke up TX ring %d\n", + ring->index); + } + + spin_unlock(&ring->lock); +} + +void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + const int nr_queues = dev->wl->hw->queues; + struct b43legacy_dmaring *ring; + struct ieee80211_tx_queue_stats_data *data; + unsigned long flags; + int i; + + for (i = 0; i < nr_queues; i++) { + data = &(stats->data[i]); + ring = priority_to_txring(dev, i); + + spin_lock_irqsave(&ring->lock, flags); + data->len = ring->used_slots / SLOTS_PER_PACKET; + data->limit = ring->nr_slots / SLOTS_PER_PACKET; + data->count = ring->nr_tx_packets; + spin_unlock_irqrestore(&ring->lock, flags); + } +} + +static void dma_rx(struct b43legacy_dmaring *ring, + int *slot) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + struct b43legacy_rxhdr_fw3 *rxhdr; + struct sk_buff *skb; + u16 len; + int err; + dma_addr_t dmaaddr; + + desc = ops->idx2desc(ring, *slot, &meta); + + sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); + skb = meta->skb; + + if (ring->index == 3) { + /* We received an xmit status. */ + struct b43legacy_hwtxstatus *hw = + (struct b43legacy_hwtxstatus *)skb->data; + int i = 0; + + while (hw->cookie == 0) { + if (i > 100) + break; + i++; + udelay(2); + barrier(); + } + b43legacy_handle_hwtxstatus(ring->dev, hw); + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + + return; + } + rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data; + len = le16_to_cpu(rxhdr->frame_len); + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rxhdr->frame_len); + } while (len == 0 && i++ < 5); + if (unlikely(len == 0)) { + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + goto drop; + } + } + if (unlikely(len > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. + * This should never happen, as we try to allocate buffers + * big enough. So simply ignore this packet. + */ + int cnt = 0; + s32 tmp = len; + + while (1) { + desc = ops->idx2desc(ring, *slot, &meta); + /* recycle the descriptor buffer. 
*/ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + *slot = next_slot(ring, *slot); + cnt++; + tmp -= ring->rx_buffersize; + if (tmp <= 0) + break; + } + b43legacyerr(ring->dev->wl, "DMA RX buffer too small " + "(len: %u, buffer: %u, nr-dropped: %d)\n", + len, ring->rx_buffersize, cnt); + goto drop; + } + + dmaaddr = meta->dmaaddr; + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); + if (unlikely(err)) { + b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()" + " failed\n"); + sync_descbuffer_for_device(ring, dmaaddr, + ring->rx_buffersize); + goto drop; + } + + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); + skb_put(skb, len + ring->frameoffset); + skb_pull(skb, ring->frameoffset); + + b43legacy_rx(ring->dev, skb, rxhdr); +drop: + return; +} + +void b43legacy_dma_rx(struct b43legacy_dmaring *ring) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + int slot; + int current_slot; + int used_slots = 0; + + B43legacy_WARN_ON(ring->tx); + current_slot = ops->get_current_rxslot(ring); + B43legacy_WARN_ON(!(current_slot >= 0 && current_slot < + ring->nr_slots)); + + slot = ring->current_slot; + for (; slot != current_slot; slot = next_slot(ring, slot)) { + dma_rx(ring, &slot); + update_max_used_slots(ring, ++used_slots); + } + ops->set_current_rxslot(ring, slot); + ring->current_slot = slot; +} + +static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + ring->ops->tx_suspend(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + ring->ops->tx_resume(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) +{ + b43legacy_power_saving_ctl_bits(dev, -1, 1); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5); +} + +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) +{ + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0); + b43legacy_power_saving_ctl_bits(dev, -1, -1); +} diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h new file mode 100644 index 000000000000..26f6ab08de75 --- /dev/null +++ b/drivers/net/wireless/b43legacy/dma.h @@ -0,0 +1,367 @@ +#ifndef B43legacy_DMA_H_ +#define B43legacy_DMA_H_ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/linkage.h> +#include <asm/atomic.h> + +#include "b43legacy.h" + + +/* DMA-Interrupt reasons. */ +#define B43legacy_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ + | (1 << 14) | (1 << 15)) +#define B43legacy_DMAIRQ_NONFATALMASK (1 << 13) +#define B43legacy_DMAIRQ_RX_DONE (1 << 16) + + +/*** 32-bit DMA Engine. ***/ + +/* 32-bit DMA controller registers. 
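A note on the index registers defined below: TXINDEX/RXINDEX and the DPTR status fields hold byte offsets into the descriptor table, which is why the 32-bit ops multiply and divide by sizeof(struct b43legacy_dmadesc32) (8 bytes) when converting to and from slot numbers. A stand-alone round trip of that conversion (illustrative only; demo_desc32 mirrors the descriptor layout):

#include <assert.h>
#include <stdint.h>

struct demo_desc32 {                        // same 8-byte layout as b43legacy_dmadesc32
	uint32_t control;
	uint32_t address;
};

int main(void)
{
	int slot = 37;
	uint32_t byte_offset = (uint32_t)(slot * sizeof(struct demo_desc32));
	int slot_back = (int)(byte_offset / sizeof(struct demo_desc32));

	// op32_poke_tx() writes byte_offset into TXINDEX; op32_get_current_rxslot()
	// performs the reverse division on the DPTR field of RXSTATUS.
	assert(byte_offset == 296 && slot_back == 37);
	return 0;
}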
*/ +#define B43legacy_DMA32_TXCTL 0x00 +#define B43legacy_DMA32_TXENABLE 0x00000001 +#define B43legacy_DMA32_TXSUSPEND 0x00000002 +#define B43legacy_DMA32_TXLOOPBACK 0x00000004 +#define B43legacy_DMA32_TXFLUSH 0x00000010 +#define B43legacy_DMA32_TXADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_TXADDREXT_SHIFT 16 +#define B43legacy_DMA32_TXRING 0x04 +#define B43legacy_DMA32_TXINDEX 0x08 +#define B43legacy_DMA32_TXSTATUS 0x0C +#define B43legacy_DMA32_TXDPTR 0x00000FFF +#define B43legacy_DMA32_TXSTATE 0x0000F000 +#define B43legacy_DMA32_TXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA32_TXSTAT_ACTIVE 0x00001000 +#define B43legacy_DMA32_TXSTAT_IDLEWAIT 0x00002000 +#define B43legacy_DMA32_TXSTAT_STOPPED 0x00003000 +#define B43legacy_DMA32_TXSTAT_SUSP 0x00004000 +#define B43legacy_DMA32_TXERROR 0x000F0000 +#define B43legacy_DMA32_TXERR_NOERR 0x00000000 +#define B43legacy_DMA32_TXERR_PROT 0x00010000 +#define B43legacy_DMA32_TXERR_UNDERRUN 0x00020000 +#define B43legacy_DMA32_TXERR_BUFREAD 0x00030000 +#define B43legacy_DMA32_TXERR_DESCREAD 0x00040000 +#define B43legacy_DMA32_TXACTIVE 0xFFF00000 +#define B43legacy_DMA32_RXCTL 0x10 +#define B43legacy_DMA32_RXENABLE 0x00000001 +#define B43legacy_DMA32_RXFROFF_MASK 0x000000FE +#define B43legacy_DMA32_RXFROFF_SHIFT 1 +#define B43legacy_DMA32_RXDIRECTFIFO 0x00000100 +#define B43legacy_DMA32_RXADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_RXADDREXT_SHIFT 16 +#define B43legacy_DMA32_RXRING 0x14 +#define B43legacy_DMA32_RXINDEX 0x18 +#define B43legacy_DMA32_RXSTATUS 0x1C +#define B43legacy_DMA32_RXDPTR 0x00000FFF +#define B43legacy_DMA32_RXSTATE 0x0000F000 +#define B43legacy_DMA32_RXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA32_RXSTAT_ACTIVE 0x00001000 +#define B43legacy_DMA32_RXSTAT_IDLEWAIT 0x00002000 +#define B43legacy_DMA32_RXSTAT_STOPPED 0x00003000 +#define B43legacy_DMA32_RXERROR 0x000F0000 +#define B43legacy_DMA32_RXERR_NOERR 0x00000000 +#define B43legacy_DMA32_RXERR_PROT 0x00010000 +#define B43legacy_DMA32_RXERR_OVERFLOW 0x00020000 +#define B43legacy_DMA32_RXERR_BUFWRITE 0x00030000 +#define B43legacy_DMA32_RXERR_DESCREAD 0x00040000 +#define B43legacy_DMA32_RXACTIVE 0xFFF00000 + +/* 32-bit DMA descriptor. */ +struct b43legacy_dmadesc32 { + __le32 control; + __le32 address; +} __attribute__((__packed__)); +#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF +#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 +#define B43legacy_DMA32_DCTL_DTABLEEND 0x10000000 +#define B43legacy_DMA32_DCTL_IRQ 0x20000000 +#define B43legacy_DMA32_DCTL_FRAMEEND 0x40000000 +#define B43legacy_DMA32_DCTL_FRAMESTART 0x80000000 + + + +/*** 64-bit DMA Engine. ***/ + +/* 64-bit DMA controller registers. 
*/ +#define B43legacy_DMA64_TXCTL 0x00 +#define B43legacy_DMA64_TXENABLE 0x00000001 +#define B43legacy_DMA64_TXSUSPEND 0x00000002 +#define B43legacy_DMA64_TXLOOPBACK 0x00000004 +#define B43legacy_DMA64_TXFLUSH 0x00000010 +#define B43legacy_DMA64_TXADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_TXADDREXT_SHIFT 16 +#define B43legacy_DMA64_TXINDEX 0x04 +#define B43legacy_DMA64_TXRINGLO 0x08 +#define B43legacy_DMA64_TXRINGHI 0x0C +#define B43legacy_DMA64_TXSTATUS 0x10 +#define B43legacy_DMA64_TXSTATDPTR 0x00001FFF +#define B43legacy_DMA64_TXSTAT 0xF0000000 +#define B43legacy_DMA64_TXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA64_TXSTAT_ACTIVE 0x10000000 +#define B43legacy_DMA64_TXSTAT_IDLEWAIT 0x20000000 +#define B43legacy_DMA64_TXSTAT_STOPPED 0x30000000 +#define B43legacy_DMA64_TXSTAT_SUSP 0x40000000 +#define B43legacy_DMA64_TXERROR 0x14 +#define B43legacy_DMA64_TXERRDPTR 0x0001FFFF +#define B43legacy_DMA64_TXERR 0xF0000000 +#define B43legacy_DMA64_TXERR_NOERR 0x00000000 +#define B43legacy_DMA64_TXERR_PROT 0x10000000 +#define B43legacy_DMA64_TXERR_UNDERRUN 0x20000000 +#define B43legacy_DMA64_TXERR_TRANSFER 0x30000000 +#define B43legacy_DMA64_TXERR_DESCREAD 0x40000000 +#define B43legacy_DMA64_TXERR_CORE 0x50000000 +#define B43legacy_DMA64_RXCTL 0x20 +#define B43legacy_DMA64_RXENABLE 0x00000001 +#define B43legacy_DMA64_RXFROFF_MASK 0x000000FE +#define B43legacy_DMA64_RXFROFF_SHIFT 1 +#define B43legacy_DMA64_RXDIRECTFIFO 0x00000100 +#define B43legacy_DMA64_RXADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_RXADDREXT_SHIFT 16 +#define B43legacy_DMA64_RXINDEX 0x24 +#define B43legacy_DMA64_RXRINGLO 0x28 +#define B43legacy_DMA64_RXRINGHI 0x2C +#define B43legacy_DMA64_RXSTATUS 0x30 +#define B43legacy_DMA64_RXSTATDPTR 0x00001FFF +#define B43legacy_DMA64_RXSTAT 0xF0000000 +#define B43legacy_DMA64_RXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA64_RXSTAT_ACTIVE 0x10000000 +#define B43legacy_DMA64_RXSTAT_IDLEWAIT 0x20000000 +#define B43legacy_DMA64_RXSTAT_STOPPED 0x30000000 +#define B43legacy_DMA64_RXSTAT_SUSP 0x40000000 +#define B43legacy_DMA64_RXERROR 0x34 +#define B43legacy_DMA64_RXERRDPTR 0x0001FFFF +#define B43legacy_DMA64_RXERR 0xF0000000 +#define B43legacy_DMA64_RXERR_NOERR 0x00000000 +#define B43legacy_DMA64_RXERR_PROT 0x10000000 +#define B43legacy_DMA64_RXERR_UNDERRUN 0x20000000 +#define B43legacy_DMA64_RXERR_TRANSFER 0x30000000 +#define B43legacy_DMA64_RXERR_DESCREAD 0x40000000 +#define B43legacy_DMA64_RXERR_CORE 0x50000000 + +/* 64-bit DMA descriptor. 
*/ +struct b43legacy_dmadesc64 { + __le32 control0; + __le32 control1; + __le32 address_low; + __le32 address_high; +} __attribute__((__packed__)); +#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 +#define B43legacy_DMA64_DCTL0_IRQ 0x20000000 +#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 +#define B43legacy_DMA64_DCTL0_FRAMESTART 0x80000000 +#define B43legacy_DMA64_DCTL1_BYTECNT 0x00001FFF +#define B43legacy_DMA64_DCTL1_ADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_DCTL1_ADDREXT_SHIFT 16 + + + +struct b43legacy_dmadesc_generic { + union { + struct b43legacy_dmadesc32 dma32; + struct b43legacy_dmadesc64 dma64; + } __attribute__((__packed__)); +} __attribute__((__packed__)); + + +/* Misc DMA constants */ +#define B43legacy_DMA_RINGMEMSIZE PAGE_SIZE +#define B43legacy_DMA0_RX_FRAMEOFFSET 30 +#define B43legacy_DMA3_RX_FRAMEOFFSET 0 + + +/* DMA engine tuning knobs */ +#define B43legacy_TXRING_SLOTS 128 +#define B43legacy_RXRING_SLOTS 64 +#define B43legacy_DMA0_RX_BUFFERSIZE (2304 + 100) +#define B43legacy_DMA3_RX_BUFFERSIZE 16 + + + +#ifdef CONFIG_B43LEGACY_DMA + + +struct sk_buff; +struct b43legacy_private; +struct b43legacy_txstatus; + + +struct b43legacy_dmadesc_meta { + /* The kernel DMA-able buffer. */ + struct sk_buff *skb; + /* DMA base bus-address of the descriptor buffer. */ + dma_addr_t dmaaddr; + /* ieee80211 TX status. Only used once per 802.11 frag. */ + bool is_last_fragment; + struct ieee80211_tx_status txstat; +}; + +struct b43legacy_dmaring; + +/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */ +struct b43legacy_dma_ops { + struct b43legacy_dmadesc_generic * (*idx2desc) + (struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta + **meta); + void (*fill_descriptor)(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq); + void (*poke_tx)(struct b43legacy_dmaring *ring, int slot); + void (*tx_suspend)(struct b43legacy_dmaring *ring); + void (*tx_resume)(struct b43legacy_dmaring *ring); + int (*get_current_rxslot)(struct b43legacy_dmaring *ring); + void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot); +}; + +struct b43legacy_dmaring { + /* Lowlevel DMA ops. */ + const struct b43legacy_dma_ops *ops; + /* Kernel virtual base address of the ring memory. */ + void *descbase; + /* Meta data about all descriptors. */ + struct b43legacy_dmadesc_meta *meta; + /* Cache of TX headers for each slot. + * This is to avoid an allocation on each TX. + * This is NULL for an RX ring. + */ + u8 *txhdr_cache; + /* (Unadjusted) DMA base bus-address of the ring memory. */ + dma_addr_t dmabase; + /* Number of descriptor slots in the ring. */ + int nr_slots; + /* Number of used descriptor slots. */ + int used_slots; + /* Currently used slot in the ring. */ + int current_slot; + /* Total number of packets sent. Statistics only. */ + unsigned int nr_tx_packets; + /* Frameoffset in octets. */ + u32 frameoffset; + /* Descriptor buffer size. */ + u16 rx_buffersize; + /* The MMIO base register of the DMA controller. */ + u16 mmio_base; + /* DMA controller index number (0-5). */ + int index; + /* Boolean. Is this a TX ring? */ + bool tx; + /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ + bool dma64; + /* Boolean. Is this ring stopped at ieee80211 level? */ + bool stopped; + /* Lock, only used for TX. */ + spinlock_t lock; + struct b43legacy_wldev *dev; +#ifdef CONFIG_B43LEGACY_DEBUG + /* Maximum number of used slots. 
*/ + int max_used_slots; + /* Last time we injected a ring overflow. */ + unsigned long last_injected_overflow; +#endif /* CONFIG_B43LEGACY_DEBUG*/ +}; + + +static inline +u32 b43legacy_dma_read(struct b43legacy_dmaring *ring, + u16 offset) +{ + return b43legacy_read32(ring->dev, ring->mmio_base + offset); +} + +static inline +void b43legacy_dma_write(struct b43legacy_dmaring *ring, + u16 offset, u32 value) +{ + b43legacy_write32(ring->dev, ring->mmio_base + offset, value); +} + + +int b43legacy_dma_init(struct b43legacy_wldev *dev); +void b43legacy_dma_free(struct b43legacy_wldev *dev); + +int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64); +int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64); + +u16 b43legacy_dmacontroller_base(int dma64bit, int dmacontroller_idx); + +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev); +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev); + +void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats); + +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl); +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +void b43legacy_dma_rx(struct b43legacy_dmaring *ring); + +#else /* CONFIG_B43LEGACY_DMA */ + + +static inline +int b43legacy_dma_init(struct b43legacy_wldev *dev) +{ + return 0; +} +static inline +void b43legacy_dma_free(struct b43legacy_wldev *dev) +{ +} +static inline +int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64) +{ + return 0; +} +static inline +int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64) +{ + return 0; +} +static inline +void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ +} +static inline +void b43legacy_dma_rx(struct b43legacy_dmaring *ring) +{ +} +static inline +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) +{ +} +static inline +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) +{ +} + +#endif /* CONFIG_B43LEGACY_DMA */ +#endif /* B43legacy_DMA_H_ */ diff --git a/drivers/net/wireless/b43legacy/ilt.c b/drivers/net/wireless/b43legacy/ilt.c new file mode 100644 index 000000000000..247fc780ffdb --- /dev/null +++ b/drivers/net/wireless/b43legacy/ilt.c @@ -0,0 +1,336 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mbuesch@freenet.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43legacy.h" +#include "ilt.h" +#include "phy.h" + + +/**** Initial Internal Lookup Tables ****/ + +const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE] = { + 0xFEB93FFD, 0xFEC63FFD, /* 0 */ + 0xFED23FFD, 0xFEDF3FFD, + 0xFEEC3FFE, 0xFEF83FFE, + 0xFF053FFE, 0xFF113FFE, + 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ + 0xFF373FFF, 0xFF443FFF, + 0xFF503FFF, 0xFF5D3FFF, + 0xFF693FFF, 0xFF763FFF, + 0xFF824000, 0xFF8F4000, /* 16 */ + 0xFF9B4000, 0xFFA84000, + 0xFFB54000, 0xFFC14000, + 0xFFCE4000, 0xFFDA4000, + 0xFFE74000, 0xFFF34000, /* 24 */ + 0x00004000, 0x000D4000, + 0x00194000, 0x00264000, + 0x00324000, 0x003F4000, + 0x004B4000, 0x00584000, /* 32 */ + 0x00654000, 0x00714000, + 0x007E4000, 0x008A3FFF, + 0x00973FFF, 0x00A33FFF, + 0x00B03FFF, 0x00BC3FFF, /* 40 */ + 0x00C93FFF, 0x00D63FFF, + 0x00E23FFE, 0x00EF3FFE, + 0x00FB3FFE, 0x01083FFE, + 0x01143FFE, 0x01213FFD, /* 48 */ + 0x012E3FFD, 0x013A3FFD, + 0x01473FFD, +}; + +const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE] = { + 0xDB93CB87, 0xD666CF64, /* 0 */ + 0xD1FDD358, 0xCDA6D826, + 0xCA38DD9F, 0xC729E2B4, + 0xC469E88E, 0xC26AEE2B, + 0xC0DEF46C, 0xC073FA62, /* 8 */ + 0xC01D00D5, 0xC0760743, + 0xC1560D1E, 0xC2E51369, + 0xC4ED18FF, 0xC7AC1ED7, + 0xCB2823B2, 0xCEFA28D9, /* 16 */ + 0xD2F62D3F, 0xD7BB3197, + 0xDCE53568, 0xE1FE3875, + 0xE7D13B35, 0xED663D35, + 0xF39B3EC4, 0xF98E3FA7, /* 24 */ + 0x00004000, 0x06723FA7, + 0x0C653EC4, 0x129A3D35, + 0x182F3B35, 0x1E023875, + 0x231B3568, 0x28453197, /* 32 */ + 0x2D0A2D3F, 0x310628D9, + 0x34D823B2, 0x38541ED7, + 0x3B1318FF, 0x3D1B1369, + 0x3EAA0D1E, 0x3F8A0743, /* 40 */ + 0x3FE300D5, 0x3F8DFA62, + 0x3F22F46C, 0x3D96EE2B, + 0x3B97E88E, 0x38D7E2B4, + 0x35C8DD9F, 0x325AD826, /* 48 */ + 0x2E03D358, 0x299ACF64, + 0x246DCB87, +}; + +const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE] = { + 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ + 0x0202, 0x0282, 0x0302, 0x0382, + 0x0402, 0x0482, 0x0502, 0x0582, + 0x05E2, 0x0662, 0x06E2, 0x0762, + 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ + 0x09C2, 0x0A22, 0x0AA2, 0x0B02, + 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, + 0x0D42, 0x0DA2, 0x0E02, 0x0E62, + 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ + 0x1062, 0x10C2, 0x1122, 0x1182, + 0x11E2, 0x1242, 0x12A2, 0x12E2, + 0x1342, 0x13A2, 0x1402, 0x1442, + 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ + 0x15E2, 0x1622, 0x1662, 0x16C1, + 0x1701, 0x1741, 0x1781, 0x17E1, + 0x1821, 0x1861, 0x18A1, 0x18E1, + 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ + 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, + 0x1B01, 0x1B41, 0x1B81, 0x1BA1, + 0x1BE1, 0x1C21, 0x1C41, 0x1C81, + 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ + 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, + 0x1E21, 0x1E61, 0x1E81, 0x1EA1, + 0x1EE1, 0x1F01, 0x1F21, 0x1F41, + 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ + 0x2001, 0x2041, 0x2061, 0x2081, + 0x20A1, 0x20C1, 0x20E1, 0x2101, + 0x2121, 0x2141, 0x2161, 0x2181, + 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ + 0x2221, 0x2241, 0x2261, 0x2281, + 0x22A1, 0x22C1, 0x22C1, 0x22E1, + 0x2301, 0x2321, 0x2341, 0x2361, + 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ + 0x23E1, 0x23E1, 0x2401, 0x2421, + 0x2441, 0x2441, 0x2461, 0x2481, + 0x2481, 0x24A1, 0x24C1, 0x24C1, + 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ + 0x2541, 0x2541, 0x2561, 0x2561, + 0x2581, 0x25A1, 0x25A1, 0x25C1, + 0x25C1, 0x25E1, 
0x2601, 0x2601, + 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ + 0x2661, 0x2661, 0x2681, 0x2681, + 0x26A1, 0x26A1, 0x26C1, 0x26C1, + 0x26E1, 0x26E1, 0x2701, 0x2701, + 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ + 0x2760, 0x2760, 0x2780, 0x2780, + 0x2780, 0x27A0, 0x27A0, 0x27C0, + 0x27C0, 0x27E0, 0x27E0, 0x27E0, + 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ + 0x2820, 0x2840, 0x2840, 0x2840, + 0x2860, 0x2860, 0x2880, 0x2880, + 0x2880, 0x28A0, 0x28A0, 0x28A0, + 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ + 0x28E0, 0x28E0, 0x2900, 0x2900, + 0x2900, 0x2920, 0x2920, 0x2920, + 0x2940, 0x2940, 0x2940, 0x2960, + 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ + 0x2980, 0x2980, 0x29A0, 0x29A0, + 0x29A0, 0x29A0, 0x29C0, 0x29C0, + 0x29C0, 0x29E0, 0x29E0, 0x29E0, + 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ + 0x2A00, 0x2A20, 0x2A20, 0x2A20, + 0x2A20, 0x2A40, 0x2A40, 0x2A40, + 0x2A40, 0x2A60, 0x2A60, 0x2A60, +}; + +const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE] = { + 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ + 0x05A9, 0x0669, 0x0709, 0x0789, + 0x0829, 0x08A9, 0x0929, 0x0989, + 0x0A09, 0x0A69, 0x0AC9, 0x0B29, + 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ + 0x0D09, 0x0D69, 0x0DA9, 0x0E09, + 0x0E69, 0x0EA9, 0x0F09, 0x0F49, + 0x0FA9, 0x0FE9, 0x1029, 0x1089, + 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ + 0x11E9, 0x1229, 0x1289, 0x12C9, + 0x1309, 0x1349, 0x1389, 0x13C9, + 0x1409, 0x1449, 0x14A9, 0x14E9, + 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ + 0x1629, 0x1669, 0x16A9, 0x16E8, + 0x1728, 0x1768, 0x17A8, 0x17E8, + 0x1828, 0x1868, 0x18A8, 0x18E8, + 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ + 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, + 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, + 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, + 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ + 0x1E48, 0x1E88, 0x1EC8, 0x1F08, + 0x1F48, 0x1F88, 0x1FE8, 0x2028, + 0x2068, 0x20A8, 0x2108, 0x2148, + 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ + 0x22C8, 0x2308, 0x2348, 0x23A8, + 0x23E8, 0x2448, 0x24A8, 0x24E8, + 0x2548, 0x25A8, 0x2608, 0x2668, + 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ + 0x2847, 0x28C7, 0x2947, 0x29A7, + 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, + 0x2CA7, 0x2D67, 0x2E47, 0x2F67, + 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ + 0x3806, 0x38A6, 0x3946, 0x39E6, + 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, + 0x3C45, 0x3CA5, 0x3D05, 0x3D85, + 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ + 0x3F45, 0x3FA5, 0x4005, 0x4045, + 0x40A5, 0x40E5, 0x4145, 0x4185, + 0x41E5, 0x4225, 0x4265, 0x42C5, + 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ + 0x4424, 0x4464, 0x44C4, 0x4504, + 0x4544, 0x4584, 0x45C4, 0x4604, + 0x4644, 0x46A4, 0x46E4, 0x4724, + 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ + 0x4864, 0x48A4, 0x48E4, 0x4924, + 0x4964, 0x49A4, 0x49E4, 0x4A24, + 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, + 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ + 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, + 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, + 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, + 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ + 0x5083, 0x50C3, 0x5103, 0x5143, + 0x5183, 0x51E2, 0x5222, 0x5262, + 0x52A2, 0x52E2, 0x5342, 0x5382, + 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ + 0x5502, 0x5542, 0x55A2, 0x55E2, + 0x5642, 0x5682, 0x56E2, 0x5722, + 0x5782, 0x57E1, 0x5841, 0x58A1, + 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ + 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, + 0x5C61, 0x5D01, 0x5D80, 0x5E20, + 0x5EE0, 0x5FA0, 0x6080, 0x61C0, +}; + +const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE] = { + 0x0001, 0x0001, 0x0001, 0xFFFE, + 0xFFFE, 0x3FFF, 0x1000, 0x0393, +}; + +const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE] = { + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, + 0x4C4C, 
0x4C4C, 0x4C4C, 0x2D36, +}; + +const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE] = { + 0x013C, 0x01F5, 0x031A, 0x0631, + 0x0001, 0x0001, 0x0001, 0x0001, +}; + +const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE] = { + 0x5484, 0x3C40, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, +}; + +const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ + 0x2F2D, 0x2A2A, 0x2527, 0x1F21, + 0x1A1D, 0x1719, 0x1616, 0x1414, + 0x1414, 0x1400, 0x1414, 0x1614, + 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ + 0x2A27, 0x2F2A, 0x332D, 0x3B35, + 0x5140, 0x6C62, 0x0077, +}; + +const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ + 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, + 0x969B, 0x9195, 0x8F8F, 0x8A8A, + 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, + 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ + 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, + 0xCBC0, 0xD8D4, 0x00DD, +}; + +const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0x00A4, +}; + +const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE] = { + 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ + 0x0067, 0x0063, 0x005E, 0x0059, + 0x0054, 0x0050, 0x004B, 0x0046, + 0x0042, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x0000, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x0042, 0x0046, 0x004B, 0x0050, + 0x0054, 0x0059, 0x005E, 0x0063, + 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ + 0x007A, +}; + +const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = { + 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ + 0x00D6, 0x00D4, 0x00D2, 0x00CF, + 0x00CD, 0x00CA, 0x00C7, 0x00C4, + 0x00C1, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x0000, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00C1, 0x00C4, 0x00C7, 0x00CA, + 0x00CD, 0x00CF, 0x00D2, 0x00D4, + 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ + 0x00DE, +}; + +/**** Helper functions to access the device Internal Lookup Tables ****/ + +void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val); +} + +void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2, + (val & 0xFFFF0000) >> 16); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, + val & 0x0000FFFF); +} + +u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + return b43legacy_phy_read(dev, B43legacy_PHY_ILT_G_DATA1); +} diff --git a/drivers/net/wireless/b43legacy/ilt.h b/drivers/net/wireless/b43legacy/ilt.h new file mode 100644 index 000000000000..48bcf37eccb8 --- /dev/null +++ b/drivers/net/wireless/b43legacy/ilt.h @@ -0,0 +1,34 @@ +#ifndef B43legacy_ILT_H_ +#define B43legacy_ILT_H_ + +#define B43legacy_ILT_ROTOR_SIZE 53 +extern const u32 
b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE]; +#define B43legacy_ILT_RETARD_SIZE 53 +extern const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE]; +#define B43legacy_ILT_FINEFREQA_SIZE 256 +extern const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE]; +#define B43legacy_ILT_FINEFREQG_SIZE 256 +extern const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE]; +#define B43legacy_ILT_NOISEA2_SIZE 8 +extern const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE]; +#define B43legacy_ILT_NOISEA3_SIZE 8 +extern const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE]; +#define B43legacy_ILT_NOISEG1_SIZE 8 +extern const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE]; +#define B43legacy_ILT_NOISEG2_SIZE 8 +extern const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE]; +#define B43legacy_ILT_NOISESCALEG_SIZE 27 +extern const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE]; +extern const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE]; +extern const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE]; +#define B43legacy_ILT_SIGMASQR_SIZE 53 +extern const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE]; +extern const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE]; + + +void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val); +void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, + u32 val); +u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset); + +#endif /* B43legacy_ILT_H_ */ diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c new file mode 100644 index 000000000000..a584ea810502 --- /dev/null +++ b/drivers/net/wireless/b43legacy/leds.c @@ -0,0 +1,298 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mb@bu3sch.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "leds.h" +#include "b43legacy.h" +#include "main.h" + +static void b43legacy_led_changestate(struct b43legacy_led *led) +{ + struct b43legacy_wldev *dev = led->dev; + const int index = led->index; + u16 ledctl; + + B43legacy_WARN_ON(!(index >= 0 && index < B43legacy_NR_LEDS)); + B43legacy_WARN_ON(!led->blink_interval); + ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + ledctl ^= (1 << index); + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl); +} + +static void b43legacy_led_blink(unsigned long d) +{ + struct b43legacy_led *led = (struct b43legacy_led *)d; + struct b43legacy_wldev *dev = led->dev; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->leds_lock, flags); + if (led->blink_interval) { + b43legacy_led_changestate(led); + mod_timer(&led->blink_timer, jiffies + led->blink_interval); + } + spin_unlock_irqrestore(&dev->wl->leds_lock, flags); +} + +static void b43legacy_led_blink_start(struct b43legacy_led *led, + unsigned long interval) +{ + if (led->blink_interval) + return; + led->blink_interval = interval; + b43legacy_led_changestate(led); + led->blink_timer.expires = jiffies + interval; + add_timer(&led->blink_timer); +} + +static void b43legacy_led_blink_stop(struct b43legacy_led *led, int sync) +{ + struct b43legacy_wldev *dev = led->dev; + const int index = led->index; + u16 ledctl; + + if (!led->blink_interval) + return; + if (unlikely(sync)) + del_timer_sync(&led->blink_timer); + else + del_timer(&led->blink_timer); + led->blink_interval = 0; + + /* Make sure the LED is turned off. */ + B43legacy_WARN_ON(!(index >= 0 && index < B43legacy_NR_LEDS)); + ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + if (led->activelow) + ledctl |= (1 << index); + else + ledctl &= ~(1 << index); + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl); +} + +static void b43legacy_led_init_hardcoded(struct b43legacy_wldev *dev, + struct b43legacy_led *led, + int led_index) +{ + struct ssb_bus *bus = dev->dev->bus; + + /* This function is called, if the behaviour (and activelow) + * information for a LED is missing in the SPROM. + * We hardcode the behaviour values for various devices here. + * Note that the B43legacy_LED_TEST_XXX behaviour values can + * be used to figure out which led is mapped to which index. 
+ */ + + switch (led_index) { + case 0: + led->behaviour = B43legacy_LED_ACTIVITY; + led->activelow = 1; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) + led->behaviour = B43legacy_LED_RADIO_ALL; + break; + case 1: + led->behaviour = B43legacy_LED_RADIO_B; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) + led->behaviour = B43legacy_LED_ASSOC; + break; + case 2: + led->behaviour = B43legacy_LED_RADIO_A; + break; + case 3: + led->behaviour = B43legacy_LED_OFF; + break; + default: + B43legacy_BUG_ON(1); + } +} + +int b43legacy_leds_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_led *led; + u8 sprom[4]; + int i; + + sprom[0] = dev->dev->bus->sprom.r1.gpio0; + sprom[1] = dev->dev->bus->sprom.r1.gpio1; + sprom[2] = dev->dev->bus->sprom.r1.gpio2; + sprom[3] = dev->dev->bus->sprom.r1.gpio3; + + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + led->index = i; + led->dev = dev; + setup_timer(&led->blink_timer, + b43legacy_led_blink, + (unsigned long)led); + + if (sprom[i] == 0xFF) + b43legacy_led_init_hardcoded(dev, led, i); + else { + led->behaviour = sprom[i] & B43legacy_LED_BEHAVIOUR; + led->activelow = !!(sprom[i] & + B43legacy_LED_ACTIVELOW); + } + } + + return 0; +} + +void b43legacy_leds_exit(struct b43legacy_wldev *dev) +{ + struct b43legacy_led *led; + int i; + + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + b43legacy_led_blink_stop(led, 1); + } + b43legacy_leds_switch_all(dev, 0); +} + +void b43legacy_leds_update(struct b43legacy_wldev *dev, int activity) +{ + struct b43legacy_led *led; + struct b43legacy_phy *phy = &dev->phy; + const int transferring = (jiffies - dev->stats.last_tx) + < B43legacy_LED_XFER_THRES; + int i; + int turn_on; + unsigned long interval = 0; + u16 ledctl; + unsigned long flags; + bool radio_enabled = (phy->radio_on && dev->radio_hw_enable); + + spin_lock_irqsave(&dev->wl->leds_lock, flags); + ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + + turn_on = 0; + switch (led->behaviour) { + case B43legacy_LED_INACTIVE: + continue; + case B43legacy_LED_OFF: + break; + case B43legacy_LED_ON: + turn_on = 1; + break; + case B43legacy_LED_ACTIVITY: + turn_on = activity; + break; + case B43legacy_LED_RADIO_ALL: + turn_on = radio_enabled; + break; + case B43legacy_LED_RADIO_A: + break; + case B43legacy_LED_RADIO_B: + turn_on = radio_enabled; + break; + case B43legacy_LED_MODE_BG: + if (phy->type == B43legacy_PHYTYPE_G && radio_enabled) + turn_on = 1; + break; + case B43legacy_LED_TRANSFER: + if (transferring) + b43legacy_led_blink_start(led, + B43legacy_LEDBLINK_MEDIUM); + else + b43legacy_led_blink_stop(led, 0); + continue; + case B43legacy_LED_APTRANSFER: + if (b43legacy_is_mode(dev->wl, + IEEE80211_IF_TYPE_AP)) { + if (transferring) { + interval = B43legacy_LEDBLINK_FAST; + turn_on = 1; + } + } else { + turn_on = 1; + if (transferring) + interval = B43legacy_LEDBLINK_FAST; + else + turn_on = 0; + } + if (turn_on) + b43legacy_led_blink_start(led, interval); + else + b43legacy_led_blink_stop(led, 0); + continue; + case B43legacy_LED_WEIRD: + break; + case B43legacy_LED_ASSOC: + turn_on = 1; +#ifdef CONFIG_B43LEGACY_DEBUG + case B43legacy_LED_TEST_BLINKSLOW: + b43legacy_led_blink_start(led, B43legacy_LEDBLINK_SLOW); + continue; + case B43legacy_LED_TEST_BLINKMEDIUM: + b43legacy_led_blink_start(led, + B43legacy_LEDBLINK_MEDIUM); + continue; + case B43legacy_LED_TEST_BLINKFAST: + b43legacy_led_blink_start(led, B43legacy_LEDBLINK_FAST); 
+ continue; +#endif /* CONFIG_B43LEGACY_DEBUG */ + default: + B43legacy_BUG_ON(1); + }; + + if (led->activelow) + turn_on = !turn_on; + if (turn_on) + ledctl |= (1 << i); + else + ledctl &= ~(1 << i); + } + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl); + spin_unlock_irqrestore(&dev->wl->leds_lock, flags); +} + +void b43legacy_leds_switch_all(struct b43legacy_wldev *dev, int on) +{ + struct b43legacy_led *led; + u16 ledctl; + int i; + int bit_on; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->leds_lock, flags); + ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + if (led->behaviour == B43legacy_LED_INACTIVE) + continue; + if (on) + bit_on = led->activelow ? 0 : 1; + else + bit_on = led->activelow ? 1 : 0; + if (bit_on) + ledctl |= (1 << i); + else + ledctl &= ~(1 << i); + } + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl); + spin_unlock_irqrestore(&dev->wl->leds_lock, flags); +} diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/b43legacy/leds.h new file mode 100644 index 000000000000..b989f503e684 --- /dev/null +++ b/drivers/net/wireless/b43legacy/leds.h @@ -0,0 +1,56 @@ +#ifndef B43legacy_LEDS_H_ +#define B43legacy_LEDS_H_ + +#include <linux/types.h> +#include <linux/timer.h> + + +struct b43legacy_led { + u8 behaviour; + bool activelow; + /* Index in the "leds" array in b43legacy_wldev */ + u8 index; + struct b43legacy_wldev *dev; + struct timer_list blink_timer; + unsigned long blink_interval; +}; + +/* Delay between state changes when blinking in jiffies */ +#define B43legacy_LEDBLINK_SLOW (HZ / 1) +#define B43legacy_LEDBLINK_MEDIUM (HZ / 4) +#define B43legacy_LEDBLINK_FAST (HZ / 8) + +#define B43legacy_LED_XFER_THRES (HZ / 100) + +#define B43legacy_LED_BEHAVIOUR 0x7F +#define B43legacy_LED_ACTIVELOW 0x80 +enum { /* LED behaviour values */ + B43legacy_LED_OFF, + B43legacy_LED_ON, + B43legacy_LED_ACTIVITY, + B43legacy_LED_RADIO_ALL, + B43legacy_LED_RADIO_A, + B43legacy_LED_RADIO_B, + B43legacy_LED_MODE_BG, + B43legacy_LED_TRANSFER, + B43legacy_LED_APTRANSFER, + B43legacy_LED_WEIRD, + B43legacy_LED_ASSOC, + B43legacy_LED_INACTIVE, + + /* Behaviour values for testing. + * With these values it is easier to figure out + * the real behaviour of leds, in case the SPROM + * is missing information. + */ + B43legacy_LED_TEST_BLINKSLOW, + B43legacy_LED_TEST_BLINKMEDIUM, + B43legacy_LED_TEST_BLINKFAST, +}; + +int b43legacy_leds_init(struct b43legacy_wldev *dev); +void b43legacy_leds_exit(struct b43legacy_wldev *dev); +void b43legacy_leds_update(struct b43legacy_wldev *dev, int activity); +void b43legacy_leds_switch_all(struct b43legacy_wldev *dev, int on); + +#endif /* B43legacy_LEDS_H_ */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c new file mode 100644 index 000000000000..f0749510bcd7 --- /dev/null +++ b/drivers/net/wireless/b43legacy/main.c @@ -0,0 +1,3856 @@ +/* + * + * Broadcom B43legacy wireless driver + * + * Copyright (c) 2005 Martin Langer <martin-langer@gmx.de> + * Copyright (c) 2005 Stefano Brivio <st3@riseup.net> + * Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> + * Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> + * Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + * Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net> + * + * Some parts of the code in this file are derived from the ipw2200 + * driver Copyright(c) 2003 - 2004 Intel Corporation. 
+ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/version.h> +#include <linux/firmware.h> +#include <linux/wireless.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <net/dst.h> +#include <asm/unaligned.h> + +#include "b43legacy.h" +#include "main.h" +#include "debugfs.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "radio.h" + + +MODULE_DESCRIPTION("Broadcom B43legacy wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_LICENSE("GPL"); + +#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) +static int modparam_pio; +module_param_named(pio, modparam_pio, int, 0444); +MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode"); +#elif defined(CONFIG_B43LEGACY_DMA) +# define modparam_pio 0 +#elif defined(CONFIG_B43LEGACY_PIO) +# define modparam_pio 1 +#endif + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, "enable(1) / disable(0) Bad Frames" + " Preemption"); + +static int modparam_short_retry = B43legacy_DEFAULT_SHORT_RETRY_LIMIT; +module_param_named(short_retry, modparam_short_retry, int, 0444); +MODULE_PARM_DESC(short_retry, "Short-Retry-Limit (0 - 15)"); + +static int modparam_long_retry = B43legacy_DEFAULT_LONG_RETRY_LIMIT; +module_param_named(long_retry, modparam_long_retry, int, 0444); +MODULE_PARM_DESC(long_retry, "Long-Retry-Limit (0 - 15)"); + +static int modparam_noleds; +module_param_named(noleds, modparam_noleds, int, 0444); +MODULE_PARM_DESC(noleds, "Turn off all LED activity"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the firmware files to load."); + +/* The following table supports BCM4301, BCM4303 and BCM4306/2 devices. */ +static const struct ssb_device_id b43legacy_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 2), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 4), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl); + + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. 
This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .rate = B43legacy_RATE_TO_100KBPS(_rateid), \ + .val = (_rateid), \ + .val2 = (_rateid), \ + .flags = (_flags), \ + } +static struct ieee80211_rate __b43legacy_ratetable[] = { + RATETAB_ENT(B43legacy_CCK_RATE_1MB, IEEE80211_RATE_CCK), + RATETAB_ENT(B43legacy_CCK_RATE_2MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43legacy_CCK_RATE_5MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43legacy_CCK_RATE_11MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43legacy_OFDM_RATE_6MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_9MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_12MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_18MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_24MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_36MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_48MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_54MB, IEEE80211_RATE_OFDM), +}; +#define b43legacy_a_ratetable (__b43legacy_ratetable + 4) +#define b43legacy_a_ratetable_size 8 +#define b43legacy_b_ratetable (__b43legacy_ratetable + 0) +#define b43legacy_b_ratetable_size 4 +#define b43legacy_g_ratetable (__b43legacy_ratetable + 0) +#define b43legacy_g_ratetable_size 12 + +#define CHANTAB_ENT(_chanid, _freq) \ + { \ + .chan = (_chanid), \ + .freq = (_freq), \ + .val = (_chanid), \ + .flag = IEEE80211_CHAN_W_SCAN | \ + IEEE80211_CHAN_W_ACTIVE_SCAN | \ + IEEE80211_CHAN_W_IBSS, \ + .power_level = 0x0A, \ + .antenna_max = 0xFF, \ + } +static struct ieee80211_channel b43legacy_bg_chantable[] = { + CHANTAB_ENT(1, 2412), + CHANTAB_ENT(2, 2417), + CHANTAB_ENT(3, 2422), + CHANTAB_ENT(4, 2427), + CHANTAB_ENT(5, 2432), + CHANTAB_ENT(6, 2437), + CHANTAB_ENT(7, 2442), + CHANTAB_ENT(8, 2447), + CHANTAB_ENT(9, 2452), + CHANTAB_ENT(10, 2457), + CHANTAB_ENT(11, 2462), + CHANTAB_ENT(12, 2467), + CHANTAB_ENT(13, 2472), + CHANTAB_ENT(14, 2484), +}; +#define b43legacy_bg_chantable_size ARRAY_SIZE(b43legacy_bg_chantable) + +static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev); +static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev); +static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev); +static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev); + + +static int b43legacy_ratelimit(struct b43legacy_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43legacy_status(wl->current_dev) < B43legacy_STAT_STARTED) + return 1; + /* We are up and running. + * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43legacy-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43legacy-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43legacy-%s warning: ", + (wl && wl->hw) ? 
wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +#if B43legacy_DEBUG +void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + printk(KERN_DEBUG "b43legacy-%s debug: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} +#endif /* DEBUG */ + +static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset, + u32 val) +{ + u32 status; + + B43legacy_WARN_ON(offset % 4 != 0); + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + if (status & B43legacy_SBF_XFER_REG_BYTESWAP) + val = swab32(val); + + b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val); +} + +static inline +void b43legacy_shm_control_word(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. */ + + control = routing; + control <<= 16; + control |= offset; + b43legacy_write32(dev, B43legacy_MMIO_SHM_CONTROL, control); +} + +u32 b43legacy_shm_read32(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + ret = b43legacy_read16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED); + ret <<= 16; + b43legacy_shm_control_word(dev, routing, + (offset >> 2) + 1); + ret |= b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA); + + return ret; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + ret = b43legacy_read32(dev, B43legacy_MMIO_SHM_DATA); + + return ret; +} + +u16 b43legacy_shm_read16(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + ret = b43legacy_read16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED); + + return ret; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + ret = b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA); + + return ret; +} + +void b43legacy_shm_write32(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u32 value) +{ + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43legacy_write16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED, + (value >> 16) & 0xffff); + mmiowb(); + b43legacy_shm_control_word(dev, routing, + (offset >> 2) + 1); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, + value & 0xffff); + return; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value); +} + +void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset, + u16 value) +{ + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43legacy_write16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED, + value); + return; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u32 
b43legacy_hf_read(struct b43legacy_wldev *dev) +{ + u32 ret; + + ret = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFHI); + ret <<= 16; + ret |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFLO); + + return ret; +} + +/* Write HostFlags */ +void b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value) +{ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFLO, + (value & 0x0000FFFF)); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFHI, + ((value & 0xFFFF0000) >> 16)); +} + +void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf) +{ + /* We need to be careful. As we read the TSF from multiple + * registers, we should take care of register overflows. + * In theory, the whole tsf read process should be atomic. + * We try to be atomic here, by restaring the read process, + * if any of the high registers changed (overflew). + */ + if (dev->dev->id.revision >= 3) { + u32 low; + u32 high; + u32 high2; + + do { + high = b43legacy_read32(dev, + B43legacy_MMIO_REV3PLUS_TSF_HIGH); + low = b43legacy_read32(dev, + B43legacy_MMIO_REV3PLUS_TSF_LOW); + high2 = b43legacy_read32(dev, + B43legacy_MMIO_REV3PLUS_TSF_HIGH); + } while (unlikely(high != high2)); + + *tsf = high; + *tsf <<= 32; + *tsf |= low; + } else { + u64 tmp; + u16 v0; + u16 v1; + u16 v2; + u16 v3; + u16 test1; + u16 test2; + u16 test3; + + do { + v3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3); + v2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2); + v1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1); + v0 = b43legacy_read16(dev, B43legacy_MMIO_TSF_0); + + test3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3); + test2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2); + test1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1); + } while (v3 != test3 || v2 != test2 || v1 != test1); + + *tsf = v3; + *tsf <<= 48; + tmp = v2; + tmp <<= 32; + *tsf |= tmp; + tmp = v1; + tmp <<= 16; + *tsf |= tmp; + *tsf |= v0; + } +} + +static void b43legacy_time_lock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + status |= B43legacy_SBF_TIME_UPDATE; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + mmiowb(); +} + +static void b43legacy_time_unlock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + status &= ~B43legacy_SBF_TIME_UPDATE; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); +} + +static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf) +{ + /* Be careful with the in-progress timer. + * First zero out the low register, so we have a full + * register-overflow duration to complete the operation. 
+ */ + if (dev->dev->id.revision >= 3) { + u32 lo = (tsf & 0x00000000FFFFFFFFULL); + u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32; + + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH, + hi); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, + lo); + } else { + u16 v0 = (tsf & 0x000000000000FFFFULL); + u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16; + u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32; + u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48; + + b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0); + } +} + +void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf) +{ + b43legacy_time_lock(dev); + b43legacy_tsf_write_locked(dev, tsf); + b43legacy_time_unlock(dev); +} + +static +void b43legacy_macfilter_set(struct b43legacy_wldev *dev, + u16 offset, const u8 *mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); +} + +static void b43legacy_write_mac_bssid_templates(struct b43legacy_wldev *dev) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + const u8 *mac = dev->wl->mac_addr; + const u8 *bssid = dev->wl->bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + if (!bssid) + bssid = zero_addr; + if (!mac) + mac = zero_addr; + + b43legacy_macfilter_set(dev, B43legacy_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32)(mac_bssid[i + 0]); + tmp |= (u32)(mac_bssid[i + 1]) << 8; + tmp |= (u32)(mac_bssid[i + 2]) << 16; + tmp |= (u32)(mac_bssid[i + 3]) << 24; + b43legacy_ram_write(dev, 0x20 + i, tmp); + b43legacy_ram_write(dev, 0x78 + i, tmp); + b43legacy_ram_write(dev, 0x478 + i, tmp); + } +} + +static void b43legacy_upload_card_macaddress(struct b43legacy_wldev *dev) +{ + b43legacy_write_mac_bssid_templates(dev); + b43legacy_macfilter_set(dev, B43legacy_MACFILTER_SELF, + dev->wl->mac_addr); +} + +static void b43legacy_set_slot_time(struct b43legacy_wldev *dev, + u16 slot_time) +{ + /* slot_time is in usec. */ + if (dev->phy.type != B43legacy_PHYTYPE_G) + return; + b43legacy_write16(dev, 0x684, 510 + slot_time); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0010, + slot_time); +} + +static void b43legacy_short_slot_timing_enable(struct b43legacy_wldev *dev) +{ + b43legacy_set_slot_time(dev, 9); + dev->short_slot = 1; +} + +static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev) +{ + b43legacy_set_slot_time(dev, 20); + dev->short_slot = 0; +} + +/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable. + * Returns the _previously_ enabled IRQ mask. 
+ */ +static inline u32 b43legacy_interrupt_enable(struct b43legacy_wldev *dev, + u32 mask) +{ + u32 old_mask; + + old_mask = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, old_mask | + mask); + + return old_mask; +} + +/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable. + * Returns the _previously_ enabled IRQ mask. + */ +static inline u32 b43legacy_interrupt_disable(struct b43legacy_wldev *dev, + u32 mask) +{ + u32 old_mask; + + old_mask = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, old_mask & ~mask); + + return old_mask; +} + +/* Synchronize IRQ top- and bottom-half. + * IRQs must be masked before calling this. + * This must not be called with the irq_lock held. + */ +static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev) +{ + synchronize_irq(dev->dev->irq); + tasklet_kill(&dev->isr_tasklet); +} + +/* DummyTransmission function, as documented on + * http://bcm-specs.sipsolutions.net/DummyTransmission + */ +void b43legacy_dummy_transmission(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + unsigned int i; + unsigned int max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + max_loop = 0xFA; + buffer[0] = 0x000B846E; + break; + default: + B43legacy_BUG_ON(1); + return; + } + + for (i = 0; i < 5; i++) + b43legacy_ram_write(dev, i * 4, buffer[i]); + + /* dummy read follows */ + b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + + b43legacy_write16(dev, 0x0568, 0x0000); + b43legacy_write16(dev, 0x07C0, 0x0000); + b43legacy_write16(dev, 0x050C, 0x0000); + b43legacy_write16(dev, 0x0508, 0x0000); + b43legacy_write16(dev, 0x050A, 0x0000); + b43legacy_write16(dev, 0x054C, 0x0000); + b43legacy_write16(dev, 0x056A, 0x0014); + b43legacy_write16(dev, 0x0568, 0x0826); + b43legacy_write16(dev, 0x0500, 0x0000); + b43legacy_write16(dev, 0x0502, 0x0030); + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43legacy_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43legacy_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43legacy_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43legacy_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43legacy_radio_write16(dev, 0x0051, 0x0037); +} + +/* Turn the Analog ON/OFF */ +static void b43legacy_switch_analog(struct b43legacy_wldev *dev, int on) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY0, on ? 0 : 0xF4); +} + +void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43legacy_TMSLOW_PHYCLKEN; + flags |= B43legacy_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. 
*/ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43legacy_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON */ + b43legacy_switch_analog(dev, 1); + + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + macctl &= ~B43legacy_MACCTL_GMODE; + if (flags & B43legacy_TMSLOW_GMODE) { + macctl |= B43legacy_MACCTL_GMODE; + dev->phy.gmode = 1; + } else + dev->phy.gmode = 0; + macctl |= B43legacy_MACCTL_IHR_ENABLED; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43legacy_wldev *dev) +{ + u32 v0; + u32 v1; + u16 tmp; + struct b43legacy_txstatus stat; + + while (1) { + v0 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43legacy_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43legacy_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1); + } +} + +static u32 b43legacy_jssi_read(struct b43legacy_wldev *dev) +{ + u32 val = 0; + + val = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x40A); + val <<= 16; + val |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x408); + + return val; +} + +static void b43legacy_jssi_write(struct b43legacy_wldev *dev, u32 jssi) +{ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x408, + (jssi & 0x0000FFFF)); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x40A, + (jssi & 0xFFFF0000) >> 16); +} + +static void b43legacy_generate_noise_sample(struct b43legacy_wldev *dev) +{ + b43legacy_jssi_write(dev, 0x7F7F7F7F); + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS2_BITFIELD) + | (1 << 4)); + B43legacy_WARN_ON(dev->noisecalc.channel_at_start != + dev->phy.channel); +} + +static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.channel_at_start = dev->phy.channel; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43legacy_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u8 noise[4]; + u8 i; + u8 j; + s32 average; + + /* Bottom half of Link Quality calculation. 
*/ + + B43legacy_WARN_ON(!dev->noisecalc.calculation_running); + if (dev->noisecalc.channel_at_start != phy->channel) + goto drop_calculation; + *((__le32 *)noise) = cpu_to_le32(b43legacy_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. */ + B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; +drop_calculation: + dev->noisecalc.calculation_running = 0; + return; + } +generate_new: + b43legacy_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) +{ + if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { + /* TODO: PS TBTT */ + } else { + if (1/*FIXME: the last PSpoll frame was sent successfully */) + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } + dev->reg124_set_0x4 = 0; + if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) + dev->reg124_set_0x4 = 1; +} + +static void handle_irq_atim_end(struct b43legacy_wldev *dev) +{ + if (!dev->reg124_set_0x4) /*FIXME rename this variable*/ + return; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + b43legacy_read32(dev, B43legacy_MMIO_STATUS2_BITFIELD) + | 0x4); +} + +static void handle_irq_pmq(struct b43legacy_wldev *dev) +{ + u32 tmp; + + /* TODO: AP mode. */ + + while (1) { + tmp = b43legacy_read32(dev, B43legacy_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43legacy_write16(dev, B43legacy_MMIO_PS_STATUS, 0x0002); +} + +static void b43legacy_write_template_common(struct b43legacy_wldev *dev, + const u8 *data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i; + u32 tmp; + struct b43legacy_plcp_hdr4 plcp; + + plcp.data = 0; + b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43legacy_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. 
+ */ + tmp = (u32)(data[0]) << 16; + tmp |= (u32)(data[1]) << 24; + b43legacy_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32)(data[i + 0]); + if (i + 1 < size) + tmp |= (u32)(data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32)(data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32)(data[i + 3]) << 24; + b43legacy_ram_write(dev, ram_offset + i - 2, tmp); + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43legacy_plcp_hdr6)); +} + +static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + int len; + const u8 *data; + + B43legacy_WARN_ON(!dev->cached_beacon); + len = min((size_t)dev->cached_beacon->len, + 0x200 - sizeof(struct b43legacy_plcp_hdr6)); + data = (const u8 *)(dev->cached_beacon->data); + b43legacy_write_template_common(dev, data, + len, ram_offset, + shm_size_offset, rate); +} + +static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev, + u16 shm_offset, u16 size, + u8 rate) +{ + struct b43legacy_plcp_hdr4 plcp; + u32 tmp; + __le16 dur; + + plcp.data = 0; + b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + size, + B43legacy_RATE_TO_100KBPS(rate)); + /* Write PLCP in two parts and timing for packet transfer */ + tmp = le32_to_cpu(plcp.data); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset, + tmp & 0xFFFF); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 2, + tmp >> 16); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 6, + le16_to_cpu(dur)); +} + +/* Instead of using custom probe response template, this function + * just patches custom beacon template by: + * 1) Changing packet type + * 2) Patching duration field + * 3) Stripping TIM + */ +static u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev, + u16 *dest_size, u8 rate) +{ + const u8 *src_data; + u8 *dest_data; + u16 src_size; + u16 elem_size; + u16 src_pos; + u16 dest_pos; + __le16 dur; + struct ieee80211_hdr *hdr; + + B43legacy_WARN_ON(!dev->cached_beacon); + src_size = dev->cached_beacon->len; + src_data = (const u8 *)dev->cached_beacon->data; + + if (unlikely(src_size < 0x24)) { + b43legacydbg(dev->wl, "b43legacy_generate_probe_resp: " + "invalid beacon\n"); + return NULL; + } + + dest_data = kmalloc(src_size, GFP_ATOMIC); + if (unlikely(!dest_data)) + return NULL; + + /* 0x24 is offset of first variable-len Information-Element + * in beacon frame. + */ + memcpy(dest_data, src_data, 0x24); + src_pos = 0x24; + dest_pos = 0x24; + for (; src_pos < src_size - 2; src_pos += elem_size) { + elem_size = src_data[src_pos + 1] + 2; + if (src_data[src_pos] != 0x05) { /* TIM */ + memcpy(dest_data + dest_pos, src_data + src_pos, + elem_size); + dest_pos += elem_size; + } + } + *dest_size = dest_pos; + hdr = (struct ieee80211_hdr *)dest_data; + + /* Set the frame control. 
*/ + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + *dest_size, + B43legacy_RATE_TO_100KBPS(rate)); + hdr->duration_id = dur; + + return dest_data; +} + +static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u8 *probe_resp_data; + u16 size; + + B43legacy_WARN_ON(!dev->cached_beacon); + size = dev->cached_beacon->len; + probe_resp_data = b43legacy_generate_probe_resp(dev, &size, rate); + if (unlikely(!probe_resp_data)) + return; + + /* Looks like PLCP headers plus packet timings are stored for + * all possible basic rates + */ + b43legacy_write_probe_resp_plcp(dev, 0x31A, size, + B43legacy_CCK_RATE_1MB); + b43legacy_write_probe_resp_plcp(dev, 0x32C, size, + B43legacy_CCK_RATE_2MB); + b43legacy_write_probe_resp_plcp(dev, 0x33E, size, + B43legacy_CCK_RATE_5MB); + b43legacy_write_probe_resp_plcp(dev, 0x350, size, + B43legacy_CCK_RATE_11MB); + + size = min((size_t)size, + 0x200 - sizeof(struct b43legacy_plcp_hdr6)); + b43legacy_write_template_common(dev, probe_resp_data, + size, ram_offset, + shm_size_offset, rate); + kfree(probe_resp_data); +} + +static int b43legacy_refresh_cached_beacon(struct b43legacy_wldev *dev, + struct sk_buff *beacon) +{ + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = beacon; + + return 0; +} + +static void b43legacy_update_templates(struct b43legacy_wldev *dev) +{ + u32 status; + + B43legacy_WARN_ON(!dev->cached_beacon); + + b43legacy_write_beacon_template(dev, 0x68, 0x18, + B43legacy_CCK_RATE_1MB); + b43legacy_write_beacon_template(dev, 0x468, 0x1A, + B43legacy_CCK_RATE_1MB); + b43legacy_write_probe_resp_template(dev, 0x268, 0x4A, + B43legacy_CCK_RATE_11MB); + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS2_BITFIELD); + status |= 0x03; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, status); +} + +static void b43legacy_refresh_templates(struct b43legacy_wldev *dev, + struct sk_buff *beacon) +{ + int err; + + err = b43legacy_refresh_cached_beacon(dev, beacon); + if (unlikely(err)) + return; + b43legacy_update_templates(dev); +} + +static void b43legacy_set_ssid(struct b43legacy_wldev *dev, + const u8 *ssid, u8 ssid_len) +{ + u32 tmp; + u16 i; + u16 len; + + len = min((u16)ssid_len, (u16)0x100); + for (i = 0; i < len; i += sizeof(u32)) { + tmp = (u32)(ssid[i + 0]); + if (i + 1 < len) + tmp |= (u32)(ssid[i + 1]) << 8; + if (i + 2 < len) + tmp |= (u32)(ssid[i + 2]) << 16; + if (i + 3 < len) + tmp |= (u32)(ssid[i + 3]) << 24; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + 0x380 + i, tmp); + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x48, len); +} + +static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev, + u16 beacon_int) +{ + b43legacy_time_lock(dev); + if (dev->dev->id.revision >= 3) + b43legacy_write32(dev, 0x188, (beacon_int << 16)); + else { + b43legacy_write16(dev, 0x606, (beacon_int >> 6)); + b43legacy_write16(dev, 0x610, beacon_int); + } + b43legacy_time_unlock(dev); +} + +static void handle_irq_beacon(struct b43legacy_wldev *dev) +{ + u32 status; + + if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + return; + + dev->irq_savedstate &= ~B43legacy_IRQ_BEACON; + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS2_BITFIELD); + + if (!dev->cached_beacon || ((status & 0x1) && (status & 0x2))) { + /* ACK beacon IRQ. 
*/ + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_BEACON); + dev->irq_savedstate |= B43legacy_IRQ_BEACON; + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = NULL; + return; + } + if (!(status & 0x1)) { + b43legacy_write_beacon_template(dev, 0x68, 0x18, + B43legacy_CCK_RATE_1MB); + status |= 0x1; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + status); + } + if (!(status & 0x2)) { + b43legacy_write_beacon_template(dev, 0x468, 0x1A, + B43legacy_CCK_RATE_1MB); + status |= 0x2; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + status); + } +} + +static void handle_irq_ucode_debug(struct b43legacy_wldev *dev) +{ +} + +/* Interrupt handler bottom-half */ +static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + int activity = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + B43legacy_WARN_ON(b43legacy_status(dev) < + B43legacy_STAT_INITIALIZED); + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43legacy_IRQ_MAC_TXERR)) + b43legacyerr(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43legacy_IRQ_PHY_TXERR)) + b43legacyerr(dev->wl, "PHY transmission error\n"); + + if (unlikely(merged_dma_reason & (B43legacy_DMAIRQ_FATALMASK | + B43legacy_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43legacy_DMAIRQ_FATALMASK) { + b43legacyerr(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43legacy_controller_restart(dev, "DMA error"); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + return; + } + if (merged_dma_reason & B43legacy_DMAIRQ_NONFATALMASK) + b43legacyerr(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + + if (unlikely(reason & B43legacy_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43legacy_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43legacy_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43legacy_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43legacy_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK) + ;/*TODO*/ + if (reason & B43legacy_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. */ + if (dma_reason[0] & B43legacy_DMAIRQ_RX_DONE) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_rx(dev->pio.queue0); + else + b43legacy_dma_rx(dev->dma.rx_ring0); + /* We intentionally don't set "activity" to 1, here. */ + } + B43legacy_WARN_ON(dma_reason[1] & B43legacy_DMAIRQ_RX_DONE); + B43legacy_WARN_ON(dma_reason[2] & B43legacy_DMAIRQ_RX_DONE); + if (dma_reason[3] & B43legacy_DMAIRQ_RX_DONE) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_rx(dev->pio.queue3); + else + b43legacy_dma_rx(dev->dma.rx_ring3); + activity = 1; + } + B43legacy_WARN_ON(dma_reason[4] & B43legacy_DMAIRQ_RX_DONE); + B43legacy_WARN_ON(dma_reason[5] & B43legacy_DMAIRQ_RX_DONE); + + if (reason & B43legacy_IRQ_TX_OK) { + handle_irq_transmit_status(dev); + activity = 1; + /* TODO: In AP mode, this also causes sending of powersave + responses. 
*/ + } + + if (!modparam_noleds) + b43legacy_leds_update(dev, activity); + b43legacy_interrupt_enable(dev, dev->irq_savedstate); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void pio_irq_workaround(struct b43legacy_wldev *dev, + u16 base, int queueidx) +{ + u16 rxctl; + + rxctl = b43legacy_read16(dev, base + B43legacy_PIO_RXCTL); + if (rxctl & B43legacy_PIO_RXCTL_DATAAVAILABLE) + dev->dma_reason[queueidx] |= B43legacy_DMAIRQ_RX_DONE; + else + dev->dma_reason[queueidx] &= ~B43legacy_DMAIRQ_RX_DONE; +} + +static void b43legacy_interrupt_ack(struct b43legacy_wldev *dev, u32 reason) +{ + if (b43legacy_using_pio(dev) && + (dev->dev->id.revision < 3) && + (!(reason & B43legacy_IRQ_PIO_WORKAROUND))) { + /* Apply a PIO specific workaround to the dma_reasons */ + pio_irq_workaround(dev, B43legacy_MMIO_PIO1_BASE, 0); + pio_irq_workaround(dev, B43legacy_MMIO_PIO2_BASE, 1); + pio_irq_workaround(dev, B43legacy_MMIO_PIO3_BASE, 2); + pio_irq_workaround(dev, B43legacy_MMIO_PIO4_BASE, 3); + } + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, reason); + + b43legacy_write32(dev, B43legacy_MMIO_DMA0_REASON, + dev->dma_reason[0]); + b43legacy_write32(dev, B43legacy_MMIO_DMA1_REASON, + dev->dma_reason[1]); + b43legacy_write32(dev, B43legacy_MMIO_DMA2_REASON, + dev->dma_reason[2]); + b43legacy_write32(dev, B43legacy_MMIO_DMA3_REASON, + dev->dma_reason[3]); + b43legacy_write32(dev, B43legacy_MMIO_DMA4_REASON, + dev->dma_reason[4]); + b43legacy_write32(dev, B43legacy_MMIO_DMA5_REASON, + dev->dma_reason[5]); +} + +/* Interrupt handler top-half */ +static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id) +{ + irqreturn_t ret = IRQ_NONE; + struct b43legacy_wldev *dev = dev_id; + u32 reason; + + if (!dev) + return IRQ_NONE; + + spin_lock(&dev->wl->irq_lock); + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) + goto out; + reason = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + goto out; + ret = IRQ_HANDLED; + reason &= b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); + if (!reason) + goto out; + + dev->dma_reason[0] = b43legacy_read32(dev, + B43legacy_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43legacy_read32(dev, + B43legacy_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43legacy_read32(dev, + B43legacy_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43legacy_read32(dev, + B43legacy_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43legacy_read32(dev, + B43legacy_MMIO_DMA4_REASON) + & 0x0000DC00; + dev->dma_reason[5] = b43legacy_read32(dev, + B43legacy_MMIO_DMA5_REASON) + & 0x0000DC00; + + b43legacy_interrupt_ack(dev, reason); + /* disable all IRQs. They are enabled again in the bottom half. */ + dev->irq_savedstate = b43legacy_interrupt_disable(dev, + B43legacy_IRQ_ALL); + /* save the reason code and call our bottom half. 
*/ + dev->irq_reason = reason; + tasklet_schedule(&dev->isr_tasklet); +out: + mmiowb(); + spin_unlock(&dev->wl->irq_lock); + + return ret; +} + +static void b43legacy_release_firmware(struct b43legacy_wldev *dev) +{ + release_firmware(dev->fw.ucode); + dev->fw.ucode = NULL; + release_firmware(dev->fw.pcm); + dev->fw.pcm = NULL; + release_firmware(dev->fw.initvals); + dev->fw.initvals = NULL; + release_firmware(dev->fw.initvals_band); + dev->fw.initvals_band = NULL; +} + +static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) +{ + b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" + "Drivers/bcm43xx#devicefirmware " + "and download the correct firmware (version 3).\n"); +} + +static int do_request_fw(struct b43legacy_wldev *dev, + const char *name, + const struct firmware **fw) +{ + char path[sizeof(modparam_fwpostfix) + 32]; + struct b43legacy_fw_header *hdr; + u32 size; + int err; + + if (!name) + return 0; + + snprintf(path, ARRAY_SIZE(path), + "b43legacy%s/%s.fw", + modparam_fwpostfix, name); + err = request_firmware(fw, path, dev->dev->dev); + if (err) { + b43legacyerr(dev->wl, "Firmware file \"%s\" not found " + "or load failed.\n", path); + return err; + } + if ((*fw)->size < sizeof(struct b43legacy_fw_header)) + goto err_format; + hdr = (struct b43legacy_fw_header *)((*fw)->data); + switch (hdr->type) { + case B43legacy_FW_TYPE_UCODE: + case B43legacy_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != (*fw)->size - sizeof(struct b43legacy_fw_header)) + goto err_format; + /* fallthrough */ + case B43legacy_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + return err; + +err_format: + b43legacyerr(dev->wl, "Firmware file \"%s\" format error.\n", path); + return -EPROTO; +} + +static int b43legacy_request_firmware(struct b43legacy_wldev *dev) +{ + struct b43legacy_firmware *fw = &dev->fw; + const u8 rev = dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if (!fw->ucode) { + if (rev == 2) + filename = "ucode2"; + else if (rev == 4) + filename = "ucode4"; + else + filename = "ucode5"; + err = do_request_fw(dev, filename, &fw->ucode); + if (err) + goto err_load; + } + if (!fw->pcm) { + if (rev < 5) + filename = "pcm4"; + else + filename = "pcm5"; + err = do_request_fw(dev, filename, &fw->pcm); + if (err) + goto err_load; + } + if (!fw->initvals) { + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev == 2 || rev == 4) + filename = "b0g0initvals2"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals); + if (err) + goto err_load; + } + if (!fw->initvals_band) { + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else if (rev == 2 || rev == 4) + filename = NULL; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals_band); + if (err) + goto err_load; + } + + return 0; + +err_load: + b43legacy_print_fw_helptext(dev->wl); + goto error; + +err_no_initvals: + err = -ENODEV; + b43legacyerr(dev->wl, "No Initial Values firmware file for PHY %u, " + "core rev %u\n", dev->phy.type, rev); + goto error; + +error: + b43legacy_release_firmware(dev); + return err; +} + +static int 
b43legacy_upload_microcode(struct b43legacy_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43legacy_fw_header); + const __be32 *data; + unsigned int i; + unsigned int len; + u16 fwrev; + u16 fwpatch; + u16 fwdate; + u16 fwtime; + u32 tmp; + int err = 0; + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode->data + hdr_len); + len = (dev->fw.ucode->size - hdr_len) / sizeof(__be32); + b43legacy_shm_control_word(dev, + B43legacy_SHM_UCODE | + B43legacy_SHM_AUTOINC_W, + 0x0000); + for (i = 0; i < len; i++) { + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, + be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm->data + hdr_len); + len = (dev->fw.pcm->size - hdr_len) / sizeof(__be32); + b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EA); + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, + be32_to_cpu(data[i])); + udelay(10); + } + } + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_ALL); + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, 0x00020402); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (tmp == B43legacy_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= B43legacy_IRQWAIT_MAX_RETRIES) { + b43legacyerr(dev->wl, "Microcode not responding\n"); + b43legacy_print_fw_helptext(dev->wl); + err = -ENODEV; + goto out; + } + udelay(10); + } + /* dummy read follows */ + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + + /* Get and check the revisions. */ + fwrev = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEREV); + fwpatch = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEPATCH); + fwdate = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEDATE); + fwtime = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODETIME); + + if (fwrev > 0x128) { + b43legacyerr(dev->wl, "YOU ARE TRYING TO LOAD V4 FIRMWARE." + " Only firmware from binary drivers version 3.x" + " is supported. 
You must change your firmware" + " files.\n"); + b43legacy_print_fw_helptext(dev->wl); + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, 0); + err = -EOPNOTSUPP; + goto out; + } + b43legacydbg(dev->wl, "Loading firmware version 0x%X, patch level %u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); + + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + +out: + return err; +} + +static int b43legacy_write_initvals(struct b43legacy_wldev *dev, + const struct b43legacy_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43legacy_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43legacy_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43legacy_IV_32BIT); + offset &= B43legacy_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = be32_to_cpu(get_unaligned(&iv->data.d32)); + b43legacy_write32(dev, offset, value); + + iv = (const struct b43legacy_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43legacy_write16(dev, offset, value); + + iv = (const struct b43legacy_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43legacyerr(dev->wl, "Initial Values Firmware file-format error.\n"); + b43legacy_print_fw_helptext(dev->wl); + + return -EPROTO; +} + +static int b43legacy_upload_initvals(struct b43legacy_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43legacy_fw_header); + const struct b43legacy_fw_header *hdr; + struct b43legacy_firmware *fw = &dev->fw; + const struct b43legacy_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43legacy_fw_header *)(fw->initvals->data); + ivals = (const struct b43legacy_iv *)(fw->initvals->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43legacy_write_initvals(dev, ivals, count, + fw->initvals->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band) { + hdr = (const struct b43legacy_fw_header *) + (fw->initvals_band->data); + ivals = (const struct b43legacy_iv *)(fw->initvals_band->data + + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43legacy_write_initvals(dev, ivals, count, + fw->initvals_band->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43legacy_gpio_init(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask; + u32 set; + + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS_BITFIELD) + & 0xFFFF3FFF); + + b43legacy_leds_switch_all(dev, 0); + b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK, + b43legacy_read16(dev, + B43legacy_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if 
(dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_PACTRL) { + b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK, + b43legacy_read16(dev, + B43legacy_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, + (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43legacy_gpio_cleanup(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43legacy_mac_enable(struct b43legacy_wldev *dev) +{ + dev->mac_suspended--; + B43legacy_WARN_ON(dev->mac_suspended < 0); + if (dev->mac_suspended == 0) { + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS_BITFIELD) + | B43legacy_SBF_MAC_ENABLED); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_MAC_SUSPENDED); + /* the next two are dummy reads */ + b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43legacy_mac_suspend(struct b43legacy_wldev *dev) +{ + int i; + u32 tmp; + + B43legacy_WARN_ON(dev->mac_suspended < 0); + if (dev->mac_suspended == 0) { + b43legacy_power_saving_ctl_bits(dev, -1, 1); + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS_BITFIELD) + & ~B43legacy_SBF_MAC_ENABLED); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + for (i = 10000; i; i--) { + tmp = b43legacy_read32(dev, + B43legacy_MMIO_GEN_IRQ_REASON); + if (tmp & B43legacy_IRQ_MAC_SUSPENDED) + goto out; + udelay(1); + } + b43legacyerr(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. 
*/ + ctl &= ~B43legacy_MACCTL_AP; + ctl &= ~B43legacy_MACCTL_KEEP_CTL; + ctl &= ~B43legacy_MACCTL_KEEP_BADPLCP; + ctl &= ~B43legacy_MACCTL_KEEP_BAD; + ctl &= ~B43legacy_MACCTL_PROMISC; + ctl &= ~B43legacy_MACCTL_BEACPROMISC; + ctl |= B43legacy_MACCTL_INFRA; + + if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) + ctl |= B43legacy_MACCTL_AP; + else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) + ctl &= ~B43legacy_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43legacy_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43legacy_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43legacy_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43legacy_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43legacy_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc and filter + * it in software. */ + if (dev->dev->id.revision <= 4) + ctl |= B43legacy_MACCTL_PROMISC; + + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43legacy_MACCTL_INFRA) && + !(ctl & B43legacy_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43legacy_write16(dev, 0x612, cfp_pretbtt); +} + +static void b43legacy_rate_memory_write(struct b43legacy_wldev *dev, + u16 rate, + int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43legacy_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43legacy_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, offset + 0x20, + b43legacy_shm_read16(dev, + B43legacy_SHM_SHARED, offset)); +} + +static void b43legacy_rate_memory_init(struct b43legacy_wldev *dev) +{ + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_6MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_12MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_18MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_24MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_36MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_48MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_54MB, 1); + /* fallthrough */ + case B43legacy_PHYTYPE_B: + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_1MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_2MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_5MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_11MB, 0); + break; + default: + B43legacy_BUG_ON(1); + } +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43legacy_mgmtframe_txantenna(struct b43legacy_wldev *dev, + int antenna) +{ + u16 ant = 0; + u16 tmp; + + switch (antenna) { + case B43legacy_ANTENNA0: + ant |= B43legacy_TX4_PHY_ANT0; + break; + case B43legacy_ANTENNA1: + ant |= B43legacy_TX4_PHY_ANT1; + break; + case B43legacy_ANTENNA_AUTO: + ant |= B43legacy_TX4_PHY_ANTLAST; + break; + default: + B43legacy_BUG_ON(1); + } + + /* FIXME We also need to set the other flags of the PHY control + * field somewhere. 
*/ + + /* For Beacons */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL, tmp); + /* For ACK/CTS */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Responses */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRPHYCTL, tmp); +} + +/* Returns TRUE, if the radio is enabled in hardware. */ +static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) +{ + if (dev->phy.rev >= 3) { + if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI) + & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) + return 1; + } else { + if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO) + & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK) + return 1; + } + return 0; +} + +/* This is the opposite of b43legacy_chip_init() */ +static void b43legacy_chip_exit(struct b43legacy_wldev *dev) +{ + b43legacy_radio_turn_off(dev); + if (!modparam_noleds) + b43legacy_leds_exit(dev); + b43legacy_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43legacy_chip_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err; + int tmp; + u32 value32; + u16 value16; + + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + B43legacy_SBF_CORE_READY + | B43legacy_SBF_400); + + err = b43legacy_request_firmware(dev); + if (err) + goto out; + err = b43legacy_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43legacy_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + err = b43legacy_upload_initvals(dev); + if (err) + goto err_gpio_cleanup; + b43legacy_radio_turn_on(dev); + + b43legacy_write16(dev, 0x03E6, 0x0000); + err = b43legacy_phy_init(dev); + if (err) + goto err_radio_off; + + /* Select initial Interference Mitigation. 
*/ + tmp = phy->interfmode; + phy->interfmode = B43legacy_INTERFMODE_NONE; + b43legacy_radio_set_interference_mitigation(dev, tmp); + + b43legacy_phy_set_antenna_diversity(dev); + b43legacy_mgmtframe_txantenna(dev, B43legacy_ANTENNA_DEFAULT); + + if (phy->type == B43legacy_PHYTYPE_B) { + value16 = b43legacy_read16(dev, 0x005E); + value16 |= 0x0004; + b43legacy_write16(dev, 0x005E, value16); + } + b43legacy_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43legacy_write32(dev, 0x010C, 0x01000000); + + value32 = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + value32 &= ~B43legacy_SBF_MODE_NOTADHOC; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, value32); + value32 = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + value32 |= B43legacy_SBF_MODE_NOTADHOC; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, value32); + + if (b43legacy_using_pio(dev)) { + b43legacy_write32(dev, 0x0210, 0x00000100); + b43legacy_write32(dev, 0x0230, 0x00000100); + b43legacy_write32(dev, 0x0250, 0x00000100); + b43legacy_write32(dev, 0x0270, 0x00000100); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0034, + 0x0000); + } + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. */ + b43legacy_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43legacy_write16(dev, 0x060E, 0x0000); + b43legacy_write16(dev, 0x0610, 0x8000); + b43legacy_write16(dev, 0x0604, 0x0000); + b43legacy_write16(dev, 0x0606, 0x0200); + } else { + b43legacy_write32(dev, 0x0188, 0x80000000); + b43legacy_write32(dev, 0x018C, 0x02000000); + } + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, 0x00004000); + b43legacy_write32(dev, B43legacy_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43legacy_write16(dev, B43legacy_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + B43legacy_WARN_ON(err != 0); + b43legacydbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_radio_off: + b43legacy_radio_turn_off(dev); +err_gpio_cleanup: + b43legacy_gpio_cleanup(dev); + goto out; +} + +static void b43legacy_periodic_every120sec(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->type != B43legacy_PHYTYPE_G || phy->rev < 2) + return; + + b43legacy_mac_suspend(dev); + b43legacy_phy_lo_g_measure(dev); + b43legacy_mac_enable(dev); +} + +static void b43legacy_periodic_every60sec(struct b43legacy_wldev *dev) +{ + b43legacy_phy_lo_mark_all_unused(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_RSSI) { + b43legacy_mac_suspend(dev); + b43legacy_calc_nrssi_slope(dev); + b43legacy_mac_enable(dev); + } +} + +static void b43legacy_periodic_every30sec(struct b43legacy_wldev *dev) +{ + /* Update device statistics. */ + b43legacy_calculate_link_quality(dev); +} + +static void b43legacy_periodic_every15sec(struct b43legacy_wldev *dev) +{ + b43legacy_phy_xmitpower(dev); /* FIXME: unless scanning? 
*/ +} + +static void b43legacy_periodic_every1sec(struct b43legacy_wldev *dev) +{ + bool radio_hw_enable; + + /* check if radio hardware enabled status changed */ + radio_hw_enable = b43legacy_is_hw_radio_enabled(dev); + if (unlikely(dev->radio_hw_enable != radio_hw_enable)) { + dev->radio_hw_enable = radio_hw_enable; + b43legacyinfo(dev->wl, "Radio hardware status changed to %s\n", + (radio_hw_enable) ? "enabled" : "disabled"); + b43legacy_leds_update(dev, 0); + } +} + +static void do_periodic_work(struct b43legacy_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 120 == 0) + b43legacy_periodic_every120sec(dev); + if (state % 60 == 0) + b43legacy_periodic_every60sec(dev); + if (state % 30 == 0) + b43legacy_periodic_every30sec(dev); + if (state % 15 == 0) + b43legacy_periodic_every15sec(dev); + b43legacy_periodic_every1sec(dev); +} + +/* Estimate a "Badness" value based on the periodic work + * state-machine state. "Badness" is worse (bigger), if the + * periodic work will take longer. + */ +static int estimate_periodic_work_badness(unsigned int state) +{ + int badness = 0; + + if (state % 120 == 0) /* every 120 sec */ + badness += 10; + if (state % 60 == 0) /* every 60 sec */ + badness += 5; + if (state % 30 == 0) /* every 30 sec */ + badness += 1; + if (state % 15 == 0) /* every 15 sec */ + badness += 1; + +#define BADNESS_LIMIT 4 + return badness; +} + +static void b43legacy_periodic_work_handler(struct work_struct *work) +{ + struct b43legacy_wldev *dev = + container_of(work, struct b43legacy_wldev, + periodic_work.work); + unsigned long flags; + unsigned long delay; + u32 savedirqs = 0; + int badness; + + mutex_lock(&dev->wl->mutex); + + if (unlikely(b43legacy_status(dev) != B43legacy_STAT_STARTED)) + goto out; + if (b43legacy_debug(dev, B43legacy_DBG_PWORK_STOP)) + goto out_requeue; + + badness = estimate_periodic_work_badness(dev->periodic_state); + if (badness > BADNESS_LIMIT) { + spin_lock_irqsave(&dev->wl->irq_lock, flags); + /* Suspend TX as we don't want to transmit packets while + * we recalibrate the hardware. */ + b43legacy_tx_suspend(dev); + savedirqs = b43legacy_interrupt_disable(dev, + B43legacy_IRQ_ALL); + /* Periodic work will take a long time, so we want it to + * be preemptible and release the spinlock. */ + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + do_periodic_work(dev); + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + b43legacy_interrupt_enable(dev, savedirqs); + b43legacy_tx_resume(dev); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + } else { + /* Take the global driver lock. This will lock any operation. 
*/ + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + do_periodic_work(dev); + + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + } + dev->periodic_state++; +out_requeue: + if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies(HZ); + queue_delayed_work(dev->wl->hw->workqueue, + &dev->periodic_work, delay); +out: + mutex_unlock(&dev->wl->mutex); +} + +static void b43legacy_periodic_tasks_setup(struct b43legacy_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43legacy_periodic_work_handler); + queue_delayed_work(dev->wl->hw->workqueue, work, 0); +} + +/* Validate access to the chip (SHM) */ +static int b43legacy_validate_chipaccess(struct b43legacy_wldev *dev) +{ + u32 value; + u32 shm_backup; + + shm_backup = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0xAA5555AA); + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) != + 0xAA5555AA) + goto error; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0x55AAAA55); + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) != + 0x55AAAA55) + goto error; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, shm_backup); + + value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + if ((value | B43legacy_MACCTL_GMODE) != + (B43legacy_MACCTL_GMODE | B43legacy_MACCTL_IHR_ENABLED)) + goto error; + + value = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (value) + goto error; + + return 0; +error: + b43legacyerr(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43legacy_security_init(struct b43legacy_wldev *dev) +{ + dev->max_nr_keys = (dev->dev->id.revision >= 5) ? 58 : 20; + B43legacy_WARN_ON(dev->max_nr_keys > ARRAY_SIZE(dev->key)); + dev->ktp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0056); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + if (dev->dev->id.revision >= 5) + /* Number of RCMTA address slots */ + b43legacy_write16(dev, B43legacy_MMIO_RCMTA_COUNT, + dev->max_nr_keys - 8); +} + +static int b43legacy_rng_read(struct hwrng *rng, u32 *data) +{ + struct b43legacy_wl *wl = (struct b43legacy_wl *)rng->priv; + unsigned long flags; + + /* Don't take wl->mutex here, as it could deadlock with + * hwrng internal locking. It's not needed to take + * wl->mutex here, anyway. 
*/ + + spin_lock_irqsave(&wl->irq_lock, flags); + *data = b43legacy_read16(wl->current_dev, B43legacy_MMIO_RNG); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return (sizeof(u16)); +} + +static void b43legacy_rng_exit(struct b43legacy_wl *wl) +{ + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +} + +static int b43legacy_rng_init(struct b43legacy_wl *wl) +{ + int err; + + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43legacy_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43legacyerr(wl, "Failed to register the random " + "number generator (%d)\n", err); + } + + return err; +} + +static int b43legacy_tx(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int err = -ENODEV; + unsigned long flags; + + if (unlikely(!dev)) + goto out; + if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED)) + goto out; + /* DMA-TX is done without a global lock. */ + if (b43legacy_using_pio(dev)) { + spin_lock_irqsave(&wl->irq_lock, flags); + err = b43legacy_pio_tx(dev, skb, ctl); + spin_unlock_irqrestore(&wl->irq_lock, flags); + } else + err = b43legacy_dma_tx(dev, skb, ctl); +out: + if (unlikely(err)) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +static int b43legacy_conf_tx(struct ieee80211_hw *hw, + int queue, + const struct ieee80211_tx_queue_params *params) +{ + return 0; +} + +static int b43legacy_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + int err = -ENODEV; + + if (!dev) + goto out; + spin_lock_irqsave(&wl->irq_lock, flags); + if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_get_tx_stats(dev, stats); + else + b43legacy_dma_get_tx_stats(dev, stats); + err = 0; + } + spin_unlock_irqrestore(&wl->irq_lock, flags); +out: + return err; +} + +static int b43legacy_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + unsigned long flags; + + spin_lock_irqsave(&wl->irq_lock, flags); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return 0; +} + +static const char *phymode_to_string(unsigned int phymode) +{ + switch (phymode) { + case B43legacy_PHYMODE_B: + return "B"; + case B43legacy_PHYMODE_G: + return "G"; + default: + B43legacy_BUG_ON(1); + } + return ""; +} + +static int find_wldev_for_phymode(struct b43legacy_wl *wl, + unsigned int phymode, + struct b43legacy_wldev **dev, + bool *gmode) +{ + struct b43legacy_wldev *d; + + list_for_each_entry(d, &wl->devlist, list) { + if (d->phy.possible_phymodes & phymode) { + /* Ok, this device supports the PHY-mode. + * Set the gmode bit. 
*/ + *gmode = 1; + *dev = d; + + return 0; + } + } + + return -ESRCH; +} + +static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43legacy_TMSLOW_GMODE; + tmslow |= B43legacy_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43legacy_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +/* Expects wl->mutex locked */ +static int b43legacy_switch_phymode(struct b43legacy_wl *wl, + unsigned int new_mode) +{ + struct b43legacy_wldev *up_dev; + struct b43legacy_wldev *down_dev; + int err; + bool gmode = 0; + int prev_status; + + err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); + if (err) { + b43legacyerr(wl, "Could not find a device for %s-PHY mode\n", + phymode_to_string(new_mode)); + return err; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) + /* This device is already running. */ + return 0; + b43legacydbg(wl, "Reconfiguring PHYmode to %s-PHY\n", + phymode_to_string(new_mode)); + down_dev = wl->current_dev; + + prev_status = b43legacy_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(down_dev); + if (prev_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(down_dev); + + if (down_dev != up_dev) + /* We switch to a different core, so we put PHY into + * RESET on the old core. */ + b43legacy_put_phy_into_reset(down_dev); + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(up_dev); + if (err) { + b43legacyerr(wl, "Fatal: Could not initialize device" + " for newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + goto init_failure; + } + } + if (prev_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(up_dev); + if (err) { + b43legacyerr(wl, "Fatal: Could not start device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + b43legacy_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43legacy_WARN_ON(b43legacy_status(up_dev) != prev_status); + + b43legacy_shm_write32(up_dev, B43legacy_SHM_SHARED, 0x003E, 0); + + wl->current_dev = up_dev; + + return 0; +init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +static int b43legacy_antenna_from_ieee80211(u8 antenna) +{ + switch (antenna) { + case 0: /* default/diversity */ + return B43legacy_ANTENNA_DEFAULT; + case 1: /* Antenna 0 */ + return B43legacy_ANTENNA0; + case 2: /* Antenna 1 */ + return B43legacy_ANTENNA1; + default: + return B43legacy_ANTENNA_DEFAULT; + } +} + +static int b43legacy_dev_config(struct ieee80211_hw *hw, + struct ieee80211_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + struct b43legacy_phy *phy; + unsigned long flags; + unsigned int new_phymode = 0xFFFF; + int antenna_tx; + int antenna_rx; + int err = 0; + u32 savedirqs; + + antenna_tx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_tx); + antenna_rx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_rx); + + mutex_lock(&wl->mutex); + + /* Switch the PHY mode (if necessary). 
*/ + switch (conf->phymode) { + case MODE_IEEE80211B: + new_phymode = B43legacy_PHYMODE_B; + break; + case MODE_IEEE80211G: + new_phymode = B43legacy_PHYMODE_G; + break; + default: + B43legacy_WARN_ON(1); + } + err = b43legacy_switch_phymode(wl, new_phymode); + if (err) + goto out_unlock_mutex; + dev = wl->current_dev; + phy = &dev->phy; + + /* Disable IRQs while reconfiguring the device. + * This makes it possible to drop the spinlock throughout + * the reconfiguration process. */ + spin_lock_irqsave(&wl->irq_lock, flags); + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + spin_unlock_irqrestore(&wl->irq_lock, flags); + goto out_unlock_mutex; + } + savedirqs = b43legacy_interrupt_disable(dev, B43legacy_IRQ_ALL); + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel_val != phy->channel) + b43legacy_radio_selectchannel(dev, conf->channel_val, 0); + + /* Enable/Disable ShortSlot timing. */ + if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) + != dev->short_slot) { + B43legacy_WARN_ON(phy->type != B43legacy_PHYTYPE_G); + if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) + b43legacy_short_slot_timing_enable(dev); + else + b43legacy_short_slot_timing_disable(dev); + } + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->power_level) { + phy->power_level = conf->power_level; + b43legacy_phy_xmitpower(dev); + } + } + + /* Antennas for RX and management frame TX. */ + b43legacy_mgmtframe_txantenna(dev, antenna_tx); + + /* Update templates for AP mode. */ + if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) + b43legacy_set_beacon_int(dev, conf->beacon_int); + + + if (!!conf->radio_enabled != phy->radio_on) { + if (conf->radio_enabled) { + b43legacy_radio_turn_on(dev); + b43legacyinfo(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) + b43legacyinfo(dev->wl, "The hardware RF-kill" + " button still turns the radio" + " physically off. 
Press the" + " button to turn it on.\n"); + } else { + b43legacy_radio_turn_off(dev); + b43legacyinfo(dev->wl, "Radio turned off by" + " software\n"); + } + } + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_interrupt_enable(dev, savedirqs); + mmiowb(); + spin_unlock_irqrestore(&wl->irq_lock, flags); +out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static int b43legacy_dev_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + int err = -EOPNOTSUPP; + DECLARE_MAC_BUF(mac); + + if (!dev) + return -ENODEV; + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); + b43legacydbg(wl, "Using software based encryption for " + "mac: %s\n", print_mac(mac, addr)); + return err; +} + +static void b43legacy_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, + unsigned int *fflags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) { + *fflags = 0; + return; + } + + spin_lock_irqsave(&wl->irq_lock, flags); + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) + b43legacy_adjust_opmode(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); +} + +static int b43legacy_config_interface(struct ieee80211_hw *hw, + int if_id, + struct ieee80211_if_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) + return -ENODEV; + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + B43legacy_WARN_ON(wl->if_id != if_id); + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { + if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) { + B43legacy_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); + b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len); + if (conf->beacon) + b43legacy_refresh_templates(dev, conf->beacon); + } + b43legacy_write_mac_bssid_templates(dev); + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); + + return 0; +} + +/* Locking: wl->mutex */ +static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + unsigned long flags; + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) + return; + b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel the possibly running self-rearming periodic work. */ + cancel_delayed_work_sync(&dev->periodic_work); + mutex_lock(&wl->mutex); + + ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */ + + /* Disable and sync interrupts. 
*/ + spin_lock_irqsave(&wl->irq_lock, flags); + dev->irq_savedstate = b43legacy_interrupt_disable(dev, + B43legacy_IRQ_ALL); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); /* flush */ + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + b43legacy_mac_suspend(dev); + free_irq(dev->dev->irq, dev); + b43legacydbg(wl, "Wireless interface stopped\n"); +} + +/* Locking: wl->mutex */ +static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev) +{ + int err; + + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + err = request_irq(dev->dev->irq, b43legacy_interrupt_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + b43legacyerr(dev->wl, "Cannot request IRQ-%d\n", + dev->dev->irq); + goto out; + } + /* We are ready to run. */ + b43legacy_set_status(dev, B43legacy_STAT_STARTED); + + /* Start data flow (TX/RX) */ + b43legacy_mac_enable(dev); + b43legacy_interrupt_enable(dev, dev->irq_savedstate); + ieee80211_start_queues(dev->wl->hw); + + /* Start maintenance work */ + b43legacy_periodic_tasks_setup(dev); + + b43legacydbg(dev->wl, "Wireless interface started\n"); +out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43legacy_phy_versioning(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); + analog_type = (tmp & B43legacy_PHYVER_ANALOG) + >> B43legacy_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43legacy_PHYVER_TYPE) >> B43legacy_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43legacy_PHYVER_VERSION); + switch (phy_type) { + case B43legacy_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 + && phy_rev != 6 && phy_rev != 7) + unsupported = 1; + break; + case B43legacy_PHYTYPE_G: + if (phy_rev > 8) + unsupported = 1; + break; + default: + unsupported = 1; + }; + if (unsupported) { + b43legacyerr(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43legacydbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, + B43legacy_RADIOCTL_ID); + tmp = b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_HIGH); + tmp <<= 16; + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, + B43legacy_RADIOCTL_ID); + tmp |= b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + switch (phy_type) { + case B43legacy_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43legacy_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + default: + B43legacy_BUG_ON(1); + } + if (unsupported) { + b43legacyerr(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43legacydbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X," + " Revision %u\n", radio_manuf, radio_ver, radio_rev); + + + phy->radio_manuf = radio_manuf; + 
phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + phy->analog = analog_type; + phy->type = phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, + struct b43legacy_phy *phy) +{ + struct b43legacy_lopair *lo; + int i; + + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->locked = 0; + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + phy->savedpctlreg = 0xFFFF; + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + lo = phy->_lo_pairs; + if (lo) + memset(lo, 0, sizeof(struct b43legacy_lopair) * + B43legacy_LO_COUNT); + phy->max_lb_gain = 0; + phy->trsw_rx_gain = 0; + + /* Set default attenuation values. */ + phy->bbatt = b43legacy_default_baseband_attenuation(dev); + phy->rfatt = b43legacy_default_radio_attenuation(dev); + phy->txctl1 = b43legacy_default_txctl1(dev); + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + spin_lock_init(&phy->lock); + phy->interfmode = B43legacy_INTERFMODE_NONE; + phy->channel = 0xFF; +} + +static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev) +{ + /* Flags */ + dev->reg124_set_0x4 = 0; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_savedstate = B43legacy_IRQ_MASKTEMPLATE; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43legacy_imcfglo_timeouts_workaround(struct b43legacy_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. */ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp |= 0x53; + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct b43legacy_phy *phy = &dev->phy; + + B43legacy_WARN_ON(b43legacy_status(dev) > B43legacy_STAT_INITIALIZED); + if (b43legacy_status(dev) != B43legacy_STAT_INITIALIZED) + return; + b43legacy_set_status(dev, B43legacy_STAT_UNINIT); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel possibly pending workqueues. 
*/ + cancel_work_sync(&dev->restart_work); + mutex_lock(&wl->mutex); + + b43legacy_rng_exit(dev->wl); + b43legacy_pio_free(dev); + b43legacy_dma_free(dev); + b43legacy_chip_exit(dev); + b43legacy_radio_turn_off(dev); + b43legacy_switch_analog(dev, 0); + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +static void prepare_phy_data_for_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int i; + + /* Set default attenuation values. */ + phy->bbatt = b43legacy_default_baseband_attenuation(dev); + phy->rfatt = b43legacy_default_radio_attenuation(dev); + phy->txctl1 = b43legacy_default_txctl1(dev); + phy->txctl2 = 0xFFFF; + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + phy->antenna_diversity = 0xFFFF; + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->calibrated = 0; + phy->locked = 0; + + if (phy->_lo_pairs) + memset(phy->_lo_pairs, 0, + sizeof(struct b43legacy_lopair) * B43legacy_LO_COUNT); + memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain)); +} + +/* Initialize a wireless core */ +static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct b43legacy_phy *phy = &dev->phy; + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + int err; + u32 hf; + u32 tmp; + + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + } + + if ((phy->type == B43legacy_PHYTYPE_B) || + (phy->type == B43legacy_PHYTYPE_G)) { + phy->_lo_pairs = kzalloc(sizeof(struct b43legacy_lopair) + * B43legacy_LO_COUNT, + GFP_KERNEL); + if (!phy->_lo_pairs) + return -ENOMEM; + } + setup_struct_wldev_for_init(dev); + + err = b43legacy_phy_init_tssi2dbm_table(dev); + if (err) + goto err_kfree_lo_control; + + /* Enable IRQ routing to this device. */ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43legacy_imcfglo_timeouts_workaround(dev); + prepare_phy_data_for_init(dev); + b43legacy_phy_calibrate(dev); + err = b43legacy_chip_init(dev); + if (err) + goto err_kfree_tssitbl; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_WLCOREREV, + dev->dev->id.revision); + hf = b43legacy_hf_read(dev); + if (phy->type == B43legacy_PHYTYPE_G) { + hf |= B43legacy_HF_SYMW; + if (phy->rev == 1) + hf |= B43legacy_HF_GDCW; + if (sprom->r1.boardflags_lo & B43legacy_BFL_PACTRL) + hf |= B43legacy_HF_OFDMPABOOST; + } else if (phy->type == B43legacy_PHYTYPE_B) { + hf |= B43legacy_HF_SYMW; + if (phy->rev >= 2 && phy->radio_ver == 0x2050) + hf &= ~B43legacy_HF_GDCW; + } + b43legacy_hf_write(dev, hf); + + /* Short/Long Retry Limit. + * The retry-limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter. 
+ */ + tmp = limit_value(modparam_short_retry, 0, 0xF); + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0006, tmp); + tmp = limit_value(modparam_long_retry, 0, 0xF); + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0007, tmp); + + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x0044, 3); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x0046, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRMAXTIME, 1); + + b43legacy_rate_memory_init(dev); + + /* Minimum Contention Window */ + if (phy->type == B43legacy_PHYTYPE_B) + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0003, 31); + else + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0003, 15); + /* Maximum Contention Window */ + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0004, 1023); + + do { + if (b43legacy_using_pio(dev)) + err = b43legacy_pio_init(dev); + else { + err = b43legacy_dma_init(dev); + if (!err) + b43legacy_qos_init(dev); + } + } while (err == -EAGAIN); + if (err) + goto err_chip_exit; + + b43legacy_write16(dev, 0x0612, 0x0050); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0416, 0x0050); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4); + + ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + b43legacy_upload_card_macaddress(dev); + b43legacy_security_init(dev); + b43legacy_rng_init(wl); + + b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); + +out: + return err; + +err_chip_exit: + b43legacy_chip_exit(dev); +err_kfree_tssitbl: + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); +err_kfree_lo_control: + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_bus_may_powerdown(bus); + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT); + return err; +} + +static int b43legacy_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + unsigned long flags; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (conf->type != IEEE80211_IF_TYPE_AP && + conf->type != IEEE80211_IF_TYPE_STA && + conf->type != IEEE80211_IF_TYPE_WDS && + conf->type != IEEE80211_IF_TYPE_IBSS) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43legacydbg(wl, "Adding Interface type %d\n", conf->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->if_id = conf->if_id; + wl->if_type = conf->type; + memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_adjust_opmode(dev); + b43legacy_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + err = 0; + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43legacy_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + b43legacydbg(wl, "Removing Interface type %d\n", conf->type); + + mutex_lock(&wl->mutex); + + B43legacy_WARN_ON(!wl->operating); + B43legacy_WARN_ON(wl->if_id != conf->if_id); + + wl->operating = 0; + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_adjust_opmode(dev); + 
memset(wl->mac_addr, 0, ETH_ALEN); + b43legacy_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + mutex_unlock(&wl->mutex); +} + +static int b43legacy_start(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int did_init = 0; + int err; + + mutex_lock(&wl->mutex); + + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(dev); + if (err) { + if (did_init) + b43legacy_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + +out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +void b43legacy_stop(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + + mutex_lock(&wl->mutex); + if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(dev); + b43legacy_wireless_core_exit(dev); + mutex_unlock(&wl->mutex); +} + + +static const struct ieee80211_ops b43legacy_hw_ops = { + .tx = b43legacy_tx, + .conf_tx = b43legacy_conf_tx, + .add_interface = b43legacy_add_interface, + .remove_interface = b43legacy_remove_interface, + .config = b43legacy_dev_config, + .config_interface = b43legacy_config_interface, + .set_key = b43legacy_dev_set_key, + .configure_filter = b43legacy_configure_filter, + .get_stats = b43legacy_get_stats, + .get_tx_stats = b43legacy_get_tx_stats, + .start = b43legacy_start, + .stop = b43legacy_stop, +}; + +/* Hard-reset the chip. Do not call this directly. + * Use b43legacy_controller_restart() + */ +static void b43legacy_chip_reset(struct work_struct *work) +{ + struct b43legacy_wldev *dev = + container_of(work, struct b43legacy_wldev, restart_work); + struct b43legacy_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43legacy_status(dev); + /* Bring the device down... */ + if (prev_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(dev); + if (prev_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(dev); + + /* ...and up again. 
*/ + if (prev_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(dev); + if (err) { + b43legacy_wireless_core_exit(dev); + goto out; + } + } +out: + mutex_unlock(&wl->mutex); + if (err) + b43legacyerr(wl, "Controller restart FAILED\n"); + else + b43legacyinfo(wl, "Controller restarted\n"); +} + +static int b43legacy_setup_modes(struct b43legacy_wldev *dev, + int have_bphy, + int have_gphy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + struct ieee80211_hw_mode *mode; + struct b43legacy_phy *phy = &dev->phy; + int cnt = 0; + int err; + + phy->possible_phymodes = 0; + for (; 1; cnt++) { + if (have_bphy) { + B43legacy_WARN_ON(cnt >= B43legacy_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211B; + mode->num_channels = b43legacy_bg_chantable_size; + mode->channels = b43legacy_bg_chantable; + mode->num_rates = b43legacy_b_ratetable_size; + mode->rates = b43legacy_b_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43legacy_PHYMODE_B; + have_bphy = 0; + continue; + } + if (have_gphy) { + B43legacy_WARN_ON(cnt >= B43legacy_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211G; + mode->num_channels = b43legacy_bg_chantable_size; + mode->channels = b43legacy_bg_chantable; + mode->num_rates = b43legacy_g_ratetable_size; + mode->rates = b43legacy_g_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43legacy_PHYMODE_G; + have_gphy = 0; + continue; + } + break; + } + + return 0; +} + +static void b43legacy_wireless_core_detach(struct b43legacy_wldev *dev) +{ + /* We release firmware that late to not be required to re-request + * it all the time when we reinit the core. */ + b43legacy_release_firmware(dev); +} + +static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = bus->host_pci; + int err; + int have_bphy = 0; + int have_gphy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to + * have that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43legacyerr(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_gphy = !!(tmshigh & B43legacy_TMSHIGH_GPHY); + if (!have_gphy) + have_bphy = 1; + } else if (dev->dev->id.revision == 4) + have_gphy = 1; + else + have_bphy = 1; + + /* Initialize LEDs structs. */ + err = b43legacy_leds_init(dev); + if (err) + goto err_powerdown; + + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + + err = b43legacy_phy_versioning(dev); + if (err) + goto err_leds_exit; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && + pdev->device != 0x4324)) { + /* No multiband support. 
*/ + have_bphy = 0; + have_gphy = 0; + switch (dev->phy.type) { + case B43legacy_PHYTYPE_B: + have_bphy = 1; + break; + case B43legacy_PHYTYPE_G: + have_gphy = 1; + break; + default: + B43legacy_BUG_ON(1); + } + } + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + + err = b43legacy_validate_chipaccess(dev); + if (err) + goto err_leds_exit; + err = b43legacy_setup_modes(dev, have_bphy, have_gphy); + if (err) + goto err_leds_exit; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43legacy_chip_reset); + + b43legacy_radio_turn_off(dev); + b43legacy_switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_leds_exit: + b43legacy_leds_exit(dev); +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43legacy_one_core_detach(struct ssb_device *dev) +{ + struct b43legacy_wldev *wldev; + struct b43legacy_wl *wl; + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + cancel_work_sync(&wldev->restart_work); + b43legacy_debugfs_remove_device(wldev); + b43legacy_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43legacy_one_core_attach(struct ssb_device *dev, + struct b43legacy_wl *wl) +{ + struct b43legacy_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = dev->bus->host_pci; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. 
+ */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && + (pdev->device != 0x431A))) { + b43legacydbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->dev = dev; + wldev->wl = wl; + b43legacy_set_status(wldev, B43legacy_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + tasklet_init(&wldev->isr_tasklet, + (void (*)(unsigned long))b43legacy_interrupt_tasklet, + (unsigned long)wldev); + if (modparam_pio) + wldev->__using_pio = 1; + INIT_LIST_HEAD(&wldev->list); + + err = b43legacy_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43legacy_debugfs_add_device(wldev); +out: + return err; + +err_kfree_wldev: + kfree(wldev); + return err; +} + +static void b43legacy_sprom_fixup(struct ssb_bus *bus) +{ + /* boardflags workarounds */ + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && + bus->boardinfo.rev > 0x40) + bus->sprom.r1.boardflags_lo |= B43legacy_BFL_PACTRL; + + /* Convert Antennagain values to Q5.2 */ + if (bus->sprom.r1.antenna_gain_bg == 0xFF) + bus->sprom.r1.antenna_gain_bg = 2; /* if unset, use 2 dBm */ + bus->sprom.r1.antenna_gain_bg <<= 2; +} + +static void b43legacy_wireless_exit(struct ssb_device *dev, + struct b43legacy_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43legacy_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43legacy_wl *wl; + int err = -ENOMEM; + + b43legacy_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43legacy_hw_ops); + if (!hw) { + b43legacyerr(NULL, "Could not allocate ieee80211 device\n"); + goto out; + } + + /* fill hw info */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_RX_INCLUDES_FCS; + hw->max_signal = 100; + hw->max_rssi = -110; + hw->max_noise = -110; + hw->queues = 1; /* FIXME: hardware has more queues */ + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->r1.et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.il0mac); + + /* Get and initialize struct b43legacy_wl */ + wl = hw_to_b43legacy_wl(hw); + memset(wl, 0, sizeof(*wl)); + wl->hw = hw; + spin_lock_init(&wl->irq_lock); + spin_lock_init(&wl->leds_lock); + mutex_init(&wl->mutex); + INIT_LIST_HEAD(&wl->devlist); + + ssb_set_devtypedata(dev, wl); + b43legacyinfo(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); + err = 0; +out: + return err; +} + +static int b43legacy_probe(struct ssb_device *dev, + const struct ssb_device_id *id) +{ + struct b43legacy_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core - setup common struct b43legacy_wl */ + first = 1; + err = b43legacy_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43legacy_WARN_ON(!wl); + } + err = b43legacy_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + } + +out: + return err; + +err_one_core_detach: + b43legacy_one_core_detach(dev); +err_wireless_exit: + if (first) + b43legacy_wireless_exit(dev, wl); + return err; +} + +static void b43legacy_remove(struct ssb_device *dev) +{ + struct 
b43legacy_wl *wl = ssb_get_devtypedata(dev); + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + + B43legacy_WARN_ON(!wl); + if (wl->current_dev == wldev) + ieee80211_unregister_hw(wl->hw); + + b43legacy_one_core_detach(dev); + + if (list_empty(&wl->devlist)) + /* Last core on the chip unregistered. + * We can destroy common struct b43legacy_wl. + */ + b43legacy_wireless_exit(dev, wl); +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43legacy_controller_restart(struct b43legacy_wldev *dev, + const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. */ + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) + return; + b43legacyinfo(dev->wl, "Controller RESET (%s) ...\n", reason); + queue_work(dev->wl->hw->workqueue, &dev->restart_work); +} + +#ifdef CONFIG_PM + +static int b43legacy_suspend(struct ssb_device *dev, pm_message_t state) +{ + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + struct b43legacy_wl *wl = wldev->wl; + + b43legacydbg(wl, "Suspending...\n"); + + mutex_lock(&wl->mutex); + wldev->suspend_init_status = b43legacy_status(wldev); + if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(wldev); + if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(wldev); + mutex_unlock(&wl->mutex); + + b43legacydbg(wl, "Device suspended.\n"); + + return 0; +} + +static int b43legacy_resume(struct ssb_device *dev) +{ + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + struct b43legacy_wl *wl = wldev->wl; + int err = 0; + + b43legacydbg(wl, "Resuming...\n"); + + mutex_lock(&wl->mutex); + if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(wldev); + if (err) { + b43legacyerr(wl, "Resume failed at core init\n"); + goto out; + } + } + if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(wldev); + if (err) { + b43legacy_wireless_core_exit(wldev); + b43legacyerr(wl, "Resume failed at core start\n"); + goto out; + } + } + mutex_unlock(&wl->mutex); + + b43legacydbg(wl, "Device resumed.\n"); +out: + return err; +} + +#else /* CONFIG_PM */ +# define b43legacy_suspend NULL +# define b43legacy_resume NULL +#endif /* CONFIG_PM */ + +static struct ssb_driver b43legacy_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43legacy_ssb_tbl, + .probe = b43legacy_probe, + .remove = b43legacy_remove, + .suspend = b43legacy_suspend, + .resume = b43legacy_resume, +}; + +static int __init b43legacy_init(void) +{ + int err; + + b43legacy_debugfs_init(); + + err = ssb_driver_register(&b43legacy_ssb_driver); + if (err) + goto err_dfs_exit; + + return err; + +err_dfs_exit: + b43legacy_debugfs_exit(); + return err; +} + +static void __exit b43legacy_exit(void) +{ + ssb_driver_unregister(&b43legacy_ssb_driver); + b43legacy_debugfs_exit(); +} + +module_init(b43legacy_init) +module_exit(b43legacy_exit) diff --git a/drivers/net/wireless/b43legacy/main.h b/drivers/net/wireless/b43legacy/main.h new file mode 100644 index 000000000000..68435c50d8e0 --- /dev/null +++ b/drivers/net/wireless/b43legacy/main.h @@ -0,0 +1,127 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Copyright (c) 2005 Stefano Brivio <st3@riseup.net> + Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> + Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> + Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + Copyright (c) 2007 Larry Finger 
<Larry.Finger@lwfinger.net> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_MAIN_H_ +#define B43legacy_MAIN_H_ + +#include "b43legacy.h" + + +#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] +#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) +/* Magic helper macro to pad structures. Ignore those above. It's magic. */ +#define PAD_BYTES(nr_bytes) P4D_BYTES(__LINE__ , (nr_bytes)) + + +/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ +static inline +u8 b43legacy_freq_to_channel_bg(int freq) +{ + u8 channel; + + if (freq == 2484) + channel = 14; + else + channel = (freq - 2407) / 5; + + return channel; +} +static inline +u8 b43legacy_freq_to_channel(struct b43legacy_wldev *dev, + int freq) +{ + return b43legacy_freq_to_channel_bg(freq); +} + +/* Lightweight function to convert a channel number to a frequency (in Mhz). */ +static inline +int b43legacy_channel_to_freq_bg(u8 channel) +{ + int freq; + + if (channel == 14) + freq = 2484; + else + freq = 2407 + (5 * channel); + + return freq; +} + +static inline +int b43legacy_channel_to_freq(struct b43legacy_wldev *dev, + u8 channel) +{ + return b43legacy_channel_to_freq_bg(channel); +} + +static inline +int b43legacy_is_cck_rate(int rate) +{ + return (rate == B43legacy_CCK_RATE_1MB || + rate == B43legacy_CCK_RATE_2MB || + rate == B43legacy_CCK_RATE_5MB || + rate == B43legacy_CCK_RATE_11MB); +} + +static inline +int b43legacy_is_ofdm_rate(int rate) +{ + return !b43legacy_is_cck_rate(rate); +} + +void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf); +void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf); + +u32 b43legacy_shm_read32(struct b43legacy_wldev *dev, + u16 routing, u16 offset); +u16 b43legacy_shm_read16(struct b43legacy_wldev *dev, + u16 routing, u16 offset); +void b43legacy_shm_write32(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u32 value); +void b43legacy_shm_write16(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u16 value); + +u32 b43legacy_hf_read(struct b43legacy_wldev *dev); +void b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value); + +void b43legacy_dummy_transmission(struct b43legacy_wldev *dev); + +void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags); + +void b43legacy_mac_suspend(struct b43legacy_wldev *dev); +void b43legacy_mac_enable(struct b43legacy_wldev *dev); + +void b43legacy_controller_restart(struct b43legacy_wldev *dev, + const char *reason); + +#endif /* B43legacy_MAIN_H_ */ diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c new file mode 100644 index 000000000000..22a4b3d0186d --- /dev/null +++ b/drivers/net/wireless/b43legacy/phy.c @@ 
-0,0 +1,2255 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mbuesch@freenet.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include "b43legacy.h" +#include "phy.h" +#include "main.h" +#include "radio.h" +#include "ilt.h" + + +static const s8 b43legacy_tssi2dbm_b_table[] = { + 0x4D, 0x4C, 0x4B, 0x4A, + 0x4A, 0x49, 0x48, 0x47, + 0x47, 0x46, 0x45, 0x45, + 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3F, 0x3E, + 0x3D, 0x3C, 0x3B, 0x3A, + 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x32, 0x31, + 0x30, 0x2F, 0x2D, 0x2C, + 0x2B, 0x29, 0x28, 0x26, + 0x25, 0x23, 0x21, 0x1F, + 0x1D, 0x1A, 0x17, 0x14, + 0x10, 0x0C, 0x06, 0x00, + -7, -7, -7, -7, + -7, -7, -7, -7, + -7, -7, -7, -7, +}; + +static const s8 b43legacy_tssi2dbm_g_table[] = { + 77, 77, 77, 76, + 76, 76, 75, 75, + 74, 74, 73, 73, + 73, 72, 72, 71, + 71, 70, 70, 69, + 68, 68, 67, 67, + 66, 65, 65, 64, + 63, 63, 62, 61, + 60, 59, 58, 57, + 56, 55, 54, 53, + 52, 50, 49, 47, + 45, 43, 40, 37, + 33, 28, 22, 14, + 5, -7, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, +}; + +static void b43legacy_phy_initg(struct b43legacy_wldev *dev); + + +static inline +void b43legacy_voluntary_preempt(void) +{ + B43legacy_BUG_ON(!(!in_atomic() && !in_irq() && + !in_interrupt() && !irqs_disabled())); +#ifndef CONFIG_PREEMPT + cond_resched(); +#endif /* CONFIG_PREEMPT */ +} + +void b43legacy_raw_phy_lock(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + B43legacy_WARN_ON(!irqs_disabled()); + if (b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD) == 0) { + phy->locked = 0; + return; + } + if (dev->dev->id.revision < 3) { + b43legacy_mac_suspend(dev); + spin_lock(&phy->lock); + } else { + if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43legacy_power_saving_ctl_bits(dev, -1, 1); + } + phy->locked = 1; +} + +void b43legacy_raw_phy_unlock(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + B43legacy_WARN_ON(!irqs_disabled()); + if (dev->dev->id.revision < 3) { + if (phy->locked) { + spin_unlock(&phy->lock); + b43legacy_mac_enable(dev); + } + } else { + if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } + phy->locked = 0; +} + +u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); + return b43legacy_read16(dev, B43legacy_MMIO_PHY_DATA); +} + +void 
b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val); +} + +void b43legacy_phy_calibrate(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); /* Dummy read. */ + if (phy->calibrated) + return; + if (phy->type == B43legacy_PHYTYPE_G && phy->rev == 1) { + b43legacy_wireless_core_reset(dev, 0); + b43legacy_phy_initg(dev); + b43legacy_wireless_core_reset(dev, B43legacy_TMSLOW_GMODE); + } + phy->calibrated = 1; +} + +/* intialize B PHY power control + * as described in http://bcm-specs.sipsolutions.net/InitPowerControl + */ +static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 saved_batt = 0; + u16 saved_ratt = 0; + u16 saved_txctl1 = 0; + int must_reset_txpower = 0; + + B43legacy_BUG_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416)) + return; + + b43legacy_phy_write(dev, 0x0028, 0x8018); + b43legacy_write16(dev, 0x03E6, b43legacy_read16(dev, 0x03E6) & 0xFFDF); + + if (phy->type == B43legacy_PHYTYPE_G) { + if (!phy->gmode) + return; + b43legacy_phy_write(dev, 0x047A, 0xC111); + } + if (phy->savedpctlreg != 0xFFFF) + return; +#ifdef CONFIG_B43LEGACY_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + if (phy->type == B43legacy_PHYTYPE_B && + phy->rev >= 2 && + phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x0076, + b43legacy_radio_read16(dev, 0x0076) + | 0x0084); + else { + saved_batt = phy->bbatt; + saved_ratt = phy->rfatt; + saved_txctl1 = phy->txctl1; + if ((phy->radio_rev >= 6) && (phy->radio_rev <= 8) + && /*FIXME: incomplete specs for 5 < revision < 9 */ 0) + b43legacy_radio_set_txpower_bg(dev, 0xB, 0x1F, 0); + else + b43legacy_radio_set_txpower_bg(dev, 0xB, 9, 0); + must_reset_txpower = 1; + } + b43legacy_dummy_transmission(dev); + + phy->savedpctlreg = b43legacy_phy_read(dev, B43legacy_PHY_G_PCTL); + + if (must_reset_txpower) + b43legacy_radio_set_txpower_bg(dev, saved_batt, saved_ratt, + saved_txctl1); + else + b43legacy_radio_write16(dev, 0x0076, b43legacy_radio_read16(dev, + 0x0076) & 0xFF7B); + b43legacy_radio_clear_tssi(dev); +} + +static void b43legacy_phy_agcsetup(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset = 0x0000; + + if (phy->rev == 1) + offset = 0x4C00; + + b43legacy_ilt_write(dev, offset, 0x00FE); + b43legacy_ilt_write(dev, offset + 1, 0x000D); + b43legacy_ilt_write(dev, offset + 2, 0x0013); + b43legacy_ilt_write(dev, offset + 3, 0x0019); + + if (phy->rev == 1) { + b43legacy_ilt_write(dev, 0x1800, 0x2710); + b43legacy_ilt_write(dev, 0x1801, 0x9B83); + b43legacy_ilt_write(dev, 0x1802, 0x9B83); + b43legacy_ilt_write(dev, 0x1803, 0x0F8D); + b43legacy_phy_write(dev, 0x0455, 0x0004); + } + + b43legacy_phy_write(dev, 0x04A5, (b43legacy_phy_read(dev, 0x04A5) + & 0x00FF) | 0x5700); + b43legacy_phy_write(dev, 0x041A, (b43legacy_phy_read(dev, 0x041A) + & 0xFF80) | 0x000F); + b43legacy_phy_write(dev, 0x041A, (b43legacy_phy_read(dev, 0x041A) + & 0xC07F) | 0x2B80); + b43legacy_phy_write(dev, 0x048C, (b43legacy_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0300); + + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0008); + + b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) + & 0xFFF0) | 
0x0008); + b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) + & 0xF0FF) | 0x0600); + b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0700); + b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0100); + + if (phy->rev == 1) + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x0007); + + b43legacy_phy_write(dev, 0x0488, (b43legacy_phy_read(dev, 0x0488) + & 0xFF00) | 0x001C); + b43legacy_phy_write(dev, 0x0488, (b43legacy_phy_read(dev, 0x0488) + & 0xC0FF) | 0x0200); + b43legacy_phy_write(dev, 0x0496, (b43legacy_phy_read(dev, 0x0496) + & 0xFF00) | 0x001C); + b43legacy_phy_write(dev, 0x0489, (b43legacy_phy_read(dev, 0x0489) + & 0xFF00) | 0x0020); + b43legacy_phy_write(dev, 0x0489, (b43legacy_phy_read(dev, 0x0489) + & 0xC0FF) | 0x0200); + b43legacy_phy_write(dev, 0x0482, (b43legacy_phy_read(dev, 0x0482) + & 0xFF00) | 0x002E); + b43legacy_phy_write(dev, 0x0496, (b43legacy_phy_read(dev, 0x0496) + & 0x00FF) | 0x1A00); + b43legacy_phy_write(dev, 0x0481, (b43legacy_phy_read(dev, 0x0481) + & 0xFF00) | 0x0028); + b43legacy_phy_write(dev, 0x0481, (b43legacy_phy_read(dev, 0x0481) + & 0x00FF) | 0x2C00); + + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x0430, 0x092B); + b43legacy_phy_write(dev, 0x041B, + (b43legacy_phy_read(dev, 0x041B) + & 0xFFE1) | 0x0002); + } else { + b43legacy_phy_write(dev, 0x041B, + b43legacy_phy_read(dev, 0x041B) & 0xFFE1); + b43legacy_phy_write(dev, 0x041F, 0x287A); + b43legacy_phy_write(dev, 0x0420, + (b43legacy_phy_read(dev, 0x0420) + & 0xFFF0) | 0x0004); + } + + if (phy->rev > 2) { + b43legacy_phy_write(dev, 0x0422, 0x287A); + b43legacy_phy_write(dev, 0x0420, + (b43legacy_phy_read(dev, 0x0420) + & 0x0FFF) | 0x3000); + } + + b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) + & 0x8080) | 0x7874); + b43legacy_phy_write(dev, 0x048E, 0x1C00); + + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0600); + b43legacy_phy_write(dev, 0x048B, 0x005E); + b43legacy_phy_write(dev, 0x048C, + (b43legacy_phy_read(dev, 0x048C) & 0xFF00) + | 0x001E); + b43legacy_phy_write(dev, 0x048D, 0x0002); + } + + b43legacy_ilt_write(dev, offset + 0x0800, 0); + b43legacy_ilt_write(dev, offset + 0x0801, 7); + b43legacy_ilt_write(dev, offset + 0x0802, 16); + b43legacy_ilt_write(dev, offset + 0x0803, 28); + + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x0426, + (b43legacy_phy_read(dev, 0x0426) & 0xFFFC)); + b43legacy_phy_write(dev, 0x0426, + (b43legacy_phy_read(dev, 0x0426) & 0xEFFF)); + } +} + +static void b43legacy_phy_setupg(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + + B43legacy_BUG_ON(phy->type != B43legacy_PHYTYPE_G); + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x0406, 0x4F19); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & 0xFC3F) | 0x0340); + b43legacy_phy_write(dev, 0x042C, 0x005A); + b43legacy_phy_write(dev, 0x0427, 0x001A); + + for (i = 0; i < B43legacy_ILT_FINEFREQG_SIZE; i++) + b43legacy_ilt_write(dev, 0x5800 + i, + b43legacy_ilt_finefreqg[i]); + for (i = 0; i < B43legacy_ILT_NOISEG1_SIZE; i++) + b43legacy_ilt_write(dev, 0x1800 + i, + b43legacy_ilt_noiseg1[i]); + for (i = 0; i < B43legacy_ILT_ROTOR_SIZE; i++) + b43legacy_ilt_write32(dev, 0x2000 + i, + b43legacy_ilt_rotor[i]); + } else { + /* nrssi values are signed 6-bit values. Why 0x7654 here? 
*/ + b43legacy_nrssi_hw_write(dev, 0xBA98, (s16)0x7654); + + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04C0, 0x1861); + b43legacy_phy_write(dev, 0x04C1, 0x0271); + } else if (phy->rev > 2) { + b43legacy_phy_write(dev, 0x04C0, 0x0098); + b43legacy_phy_write(dev, 0x04C1, 0x0070); + b43legacy_phy_write(dev, 0x04C9, 0x0080); + } + b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, + 0x042B) | 0x800); + + for (i = 0; i < 64; i++) + b43legacy_ilt_write(dev, 0x4000 + i, i); + for (i = 0; i < B43legacy_ILT_NOISEG2_SIZE; i++) + b43legacy_ilt_write(dev, 0x1800 + i, + b43legacy_ilt_noiseg2[i]); + } + + if (phy->rev <= 2) + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg1[i]); + else if ((phy->rev >= 7) && (b43legacy_phy_read(dev, 0x0449) & 0x0200)) + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg3[i]); + else + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg2[i]); + + if (phy->rev == 2) + for (i = 0; i < B43legacy_ILT_SIGMASQR_SIZE; i++) + b43legacy_ilt_write(dev, 0x5000 + i, + b43legacy_ilt_sigmasqr1[i]); + else if ((phy->rev > 2) && (phy->rev <= 8)) + for (i = 0; i < B43legacy_ILT_SIGMASQR_SIZE; i++) + b43legacy_ilt_write(dev, 0x5000 + i, + b43legacy_ilt_sigmasqr2[i]); + + if (phy->rev == 1) { + for (i = 0; i < B43legacy_ILT_RETARD_SIZE; i++) + b43legacy_ilt_write32(dev, 0x2400 + i, + b43legacy_ilt_retard[i]); + for (i = 4; i < 20; i++) + b43legacy_ilt_write(dev, 0x5400 + i, 0x0020); + b43legacy_phy_agcsetup(dev); + + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416) && + (dev->dev->bus->boardinfo.rev == 0x0017)) + return; + + b43legacy_ilt_write(dev, 0x5001, 0x0002); + b43legacy_ilt_write(dev, 0x5002, 0x0001); + } else { + for (i = 0; i <= 0x20; i++) + b43legacy_ilt_write(dev, 0x1000 + i, 0x0820); + b43legacy_phy_agcsetup(dev); + b43legacy_phy_read(dev, 0x0400); /* dummy read */ + b43legacy_phy_write(dev, 0x0403, 0x1000); + b43legacy_ilt_write(dev, 0x3C02, 0x000F); + b43legacy_ilt_write(dev, 0x3C03, 0x0014); + + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416) && + (dev->dev->bus->boardinfo.rev == 0x0017)) + return; + + b43legacy_ilt_write(dev, 0x0401, 0x0002); + b43legacy_ilt_write(dev, 0x0402, 0x0001); + } +} + +/* Initialize the APHY portion of a GPHY. 
*/ +static void b43legacy_phy_inita(struct b43legacy_wldev *dev) +{ + + might_sleep(); + + b43legacy_phy_setupg(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_PACTRL) + b43legacy_phy_write(dev, 0x046E, 0x03CF); +} + +static void b43legacy_phy_initb2(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + int val; + + b43legacy_write16(dev, 0x03EC, 0x3F22); + b43legacy_phy_write(dev, 0x0020, 0x301C); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + b43legacy_phy_write(dev, 0x03E4, 0x3000); + b43legacy_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + b43legacy_radio_write16(dev, 0x007A, 0x000F); + b43legacy_phy_write(dev, 0x0038, 0x0677); + b43legacy_radio_init2050(dev); + } + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + b43legacy_phy_write(dev, 0x0032, 0x00CC); + b43legacy_phy_write(dev, 0x0035, 0x07C2); + b43legacy_phy_lo_b_measure(dev); + b43legacy_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver != 0x2050) + b43legacy_phy_write(dev, 0x0026, 0xCE00); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x1000); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver != 0x2050) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + b43legacy_phy_init_pctl(dev); +} + +static void b43legacy_phy_initb4(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 val; + + b43legacy_write16(dev, 0x03EC, 0x3F22); + b43legacy_phy_write(dev, 0x0020, 0x301C); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + b43legacy_phy_write(dev, 0x03E4, 0x3000); + b43legacy_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + b43legacy_radio_write16(dev, 0x007A, 0x000F); + b43legacy_phy_write(dev, 0x0038, 0x0677); + b43legacy_radio_init2050(dev); + } + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0032, 0x00E0); + b43legacy_phy_write(dev, 0x0035, 0x07C2); + + b43legacy_phy_lo_b_measure(dev); + + b43legacy_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0026, 0xCE00); + 
b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x1100); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_RSSI) { + b43legacy_calc_nrssi_slope(dev); + b43legacy_calc_nrssi_threshold(dev); + } + b43legacy_phy_init_pctl(dev); +} + +static void b43legacy_phy_initb5(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 value; + u8 old_channel; + + if (phy->analog == 1) + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0050); + if (!is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type != 0x0416)) { + value = 0x2120; + for (offset = 0x00A8 ; offset < 0x00C7; offset++) { + b43legacy_phy_write(dev, offset, value); + value += 0x0202; + } + } + b43legacy_phy_write(dev, 0x0035, + (b43legacy_phy_read(dev, 0x0035) & 0xF0FF) + | 0x0700); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0038, 0x0667); + + if (phy->gmode) { + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0020); + b43legacy_radio_write16(dev, 0x0051, + b43legacy_radio_read16(dev, 0x0051) + | 0x0004); + } + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, 0x0000); + + b43legacy_phy_write(dev, 0x0802, b43legacy_phy_read(dev, 0x0802) + | 0x0100); + b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) + | 0x2000); + + b43legacy_phy_write(dev, 0x001C, 0x186A); + + b43legacy_phy_write(dev, 0x0013, (b43legacy_phy_read(dev, + 0x0013) & 0x00FF) | 0x1900); + b43legacy_phy_write(dev, 0x0035, (b43legacy_phy_read(dev, + 0x0035) & 0xFFC0) | 0x0064); + b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, + 0x005D) & 0xFF80) | 0x000A); + } + + if (dev->bad_frames_preempt) + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) | (1 << 11)); + + if (phy->analog == 1) { + b43legacy_phy_write(dev, 0x0026, 0xCE00); + b43legacy_phy_write(dev, 0x0021, 0x3763); + b43legacy_phy_write(dev, 0x0022, 0x1BC3); + b43legacy_phy_write(dev, 0x0023, 0x06F9); + b43legacy_phy_write(dev, 0x0024, 0x037E); + } else + b43legacy_phy_write(dev, 0x0026, 0xCC00); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_write16(dev, 0x03EC, 0x3F22); + + if (phy->analog == 1) + b43legacy_phy_write(dev, 0x0020, 0x3E1C); + else + b43legacy_phy_write(dev, 0x0020, 0x301C); + + if (phy->analog == 0) + b43legacy_write16(dev, 0x03E4, 0x3000); + + old_channel = (phy->channel == 0xFF) ? 1 : phy->channel; + /* Force to channel 7, even if not supported. 
*/ + b43legacy_radio_selectchannel(dev, 7, 0); + + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + } + + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + + b43legacy_radio_write16(dev, 0x007A, b43legacy_radio_read16(dev, + 0x007A) | 0x0007); + + b43legacy_radio_selectchannel(dev, old_channel, 0); + + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + + if (phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x005D, 0x000D); + + b43legacy_write16(dev, 0x03E4, (b43legacy_read16(dev, 0x03E4) & + 0xFFC0) | 0x0004); +} + +static void b43legacy_phy_initb6(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 val; + u8 old_channel; + + b43legacy_phy_write(dev, 0x003E, 0x817A); + b43legacy_radio_write16(dev, 0x007A, + (b43legacy_radio_read16(dev, 0x007A) | 0x0058)); + if (phy->radio_rev == 4 || + phy->radio_rev == 5) { + b43legacy_radio_write16(dev, 0x0051, 0x0037); + b43legacy_radio_write16(dev, 0x0052, 0x0070); + b43legacy_radio_write16(dev, 0x0053, 0x00B3); + b43legacy_radio_write16(dev, 0x0054, 0x009B); + b43legacy_radio_write16(dev, 0x005A, 0x0088); + b43legacy_radio_write16(dev, 0x005B, 0x0088); + b43legacy_radio_write16(dev, 0x005D, 0x0088); + b43legacy_radio_write16(dev, 0x005E, 0x0088); + b43legacy_radio_write16(dev, 0x007D, 0x0088); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + (b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + | 0x00000200)); + } + if (phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x0051, 0x0000); + b43legacy_radio_write16(dev, 0x0052, 0x0040); + b43legacy_radio_write16(dev, 0x0053, 0x00B7); + b43legacy_radio_write16(dev, 0x0054, 0x0098); + b43legacy_radio_write16(dev, 0x005A, 0x0088); + b43legacy_radio_write16(dev, 0x005B, 0x006B); + b43legacy_radio_write16(dev, 0x005C, 0x000F); + if (dev->dev->bus->sprom.r1.boardflags_lo & 0x8000) { + b43legacy_radio_write16(dev, 0x005D, 0x00FA); + b43legacy_radio_write16(dev, 0x005E, 0x00D8); + } else { + b43legacy_radio_write16(dev, 0x005D, 0x00F5); + b43legacy_radio_write16(dev, 0x005E, 0x00B8); + } + b43legacy_radio_write16(dev, 0x0073, 0x0003); + b43legacy_radio_write16(dev, 0x007D, 0x00A8); + b43legacy_radio_write16(dev, 0x007C, 0x0001); + b43legacy_radio_write16(dev, 0x007E, 0x0008); + } + val = 0x1E1F; + for (offset = 0x0088; offset < 0x0098; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x3E3F; + for (offset = 0x0098; offset < 0x00A8; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x2120; + for (offset = 0x00A8; offset < 0x00C8; offset++) { + b43legacy_phy_write(dev, offset, (val & 0x3F3F)); + val += 0x0202; + } + if (phy->type == B43legacy_PHYTYPE_G) { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | + 0x0020); + b43legacy_radio_write16(dev, 0x0051, + b43legacy_radio_read16(dev, 0x0051) | + 0x0004); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x0100); + b43legacy_phy_write(dev, 
0x042B, + b43legacy_phy_read(dev, 0x042B) | 0x2000); + b43legacy_phy_write(dev, 0x5B, 0x0000); + b43legacy_phy_write(dev, 0x5C, 0x0000); + } + + old_channel = phy->channel; + if (old_channel >= 8) + b43legacy_radio_selectchannel(dev, 1, 0); + else + b43legacy_radio_selectchannel(dev, 13, 0); + + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + udelay(40); + if (phy->radio_rev < 6 || phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x007C, + (b43legacy_radio_read16(dev, 0x007C) + | 0x0002)); + b43legacy_radio_write16(dev, 0x0050, 0x0020); + } + if (phy->radio_rev <= 2) { + b43legacy_radio_write16(dev, 0x007C, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + } + b43legacy_radio_write16(dev, 0x007A, + (b43legacy_radio_read16(dev, + 0x007A) & 0x00F8) | 0x0007); + + b43legacy_radio_selectchannel(dev, old_channel, 0); + + b43legacy_phy_write(dev, 0x0014, 0x0200); + if (phy->radio_rev >= 6) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + else + b43legacy_phy_write(dev, 0x002A, 0x8AC0); + b43legacy_phy_write(dev, 0x0038, 0x0668); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + if (phy->radio_rev <= 5) + b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, + 0x005D) & 0xFF80) | 0x0003); + if (phy->radio_rev <= 2) + b43legacy_radio_write16(dev, 0x005D, 0x000D); + + if (phy->analog == 4) { + b43legacy_write16(dev, 0x03E4, 0x0009); + b43legacy_phy_write(dev, 0x61, b43legacy_phy_read(dev, 0x61) + & 0xFFF); + } else + b43legacy_phy_write(dev, 0x0002, (b43legacy_phy_read(dev, + 0x0002) & 0xFFC0) | 0x0004); + if (phy->type == B43legacy_PHYTYPE_G) + b43legacy_write16(dev, 0x03E6, 0x0); + if (phy->type == B43legacy_PHYTYPE_B) { + b43legacy_write16(dev, 0x03E6, 0x8140); + b43legacy_phy_write(dev, 0x0016, 0x0410); + b43legacy_phy_write(dev, 0x0017, 0x0820); + b43legacy_phy_write(dev, 0x0062, 0x0007); + b43legacy_radio_init2050(dev); + b43legacy_phy_lo_g_measure(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_RSSI) { + b43legacy_calc_nrssi_slope(dev); + b43legacy_calc_nrssi_threshold(dev); + } + b43legacy_phy_init_pctl(dev); + } +} + +static void b43legacy_calc_loopback_gain(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup_phy[15] = {0}; + u16 backup_radio[3]; + u16 backup_bband; + u16 i; + u16 loop1_cnt; + u16 loop1_done; + u16 loop1_omitted; + u16 loop2_done; + + backup_phy[0] = b43legacy_phy_read(dev, 0x0429); + backup_phy[1] = b43legacy_phy_read(dev, 0x0001); + backup_phy[2] = b43legacy_phy_read(dev, 0x0811); + backup_phy[3] = b43legacy_phy_read(dev, 0x0812); + if (phy->rev != 1) { + backup_phy[4] = b43legacy_phy_read(dev, 0x0814); + backup_phy[5] = b43legacy_phy_read(dev, 0x0815); + } + backup_phy[6] = b43legacy_phy_read(dev, 0x005A); + backup_phy[7] = b43legacy_phy_read(dev, 0x0059); + backup_phy[8] = b43legacy_phy_read(dev, 0x0058); + backup_phy[9] = b43legacy_phy_read(dev, 0x000A); + backup_phy[10] = b43legacy_phy_read(dev, 0x0003); + backup_phy[11] = b43legacy_phy_read(dev, 0x080F); + backup_phy[12] = b43legacy_phy_read(dev, 0x0810); + backup_phy[13] = b43legacy_phy_read(dev, 0x002B); + backup_phy[14] = b43legacy_phy_read(dev, 0x0015); + b43legacy_phy_read(dev, 0x002D); /* dummy read */ + backup_bband = phy->bbatt; + backup_radio[0] = b43legacy_radio_read16(dev, 0x0052); + backup_radio[1] = b43legacy_radio_read16(dev, 0x0043); + backup_radio[2] = 
b43legacy_radio_read16(dev, 0x007A); + + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0x3FFF); + b43legacy_phy_write(dev, 0x0001, + b43legacy_phy_read(dev, 0x0001) & 0x8000); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0002); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xFFFD); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0001); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xFFFE); + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0001); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFE); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0002); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFD); + } + b43legacy_phy_write(dev, 0x0811, b43legacy_phy_read(dev, 0x0811) | + 0x000C); + b43legacy_phy_write(dev, 0x0812, b43legacy_phy_read(dev, 0x0812) | + 0x000C); + + b43legacy_phy_write(dev, 0x0811, (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0030); + b43legacy_phy_write(dev, 0x0812, (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0010); + + b43legacy_phy_write(dev, 0x005A, 0x0780); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->analog == 0) + b43legacy_phy_write(dev, 0x0003, 0x0122); + else + b43legacy_phy_write(dev, 0x000A, + b43legacy_phy_read(dev, 0x000A) + | 0x2000); + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0004); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFB); + } + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 2) { + b43legacy_radio_write16(dev, 0x0052, 0x0000); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0xFFF0) | 0x0009); + loop1_cnt = 9; + } else if (phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x0043, 0x000F); + loop1_cnt = 15; + } else + loop1_cnt = 0; + + b43legacy_phy_set_baseband_attenuation(dev, 11); + + if (phy->rev >= 3) + b43legacy_phy_write(dev, 0x080F, 0xC020); + else + b43legacy_phy_write(dev, 0x080F, 0x8020); + b43legacy_phy_write(dev, 0x0810, 0x0000); + + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, 0x002B) + & 0xFFC0) | 0x0001); + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, 0x002B) + & 0xC0FF) | 0x0800); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0100); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xCFFF); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_EXTLNA) { + if (phy->rev >= 7) { + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) + | 0x0800); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + | 0x8000); + } + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x00F7); + + for (i = 0; i < loop1_cnt; i++) { + b43legacy_radio_write16(dev, 0x0043, loop1_cnt); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xF0FF) | (i << 8)); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xA000); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xF000); + udelay(20); + if (b43legacy_phy_read(dev, 0x002D) >= 0x0DFC) + break; + } + loop1_done = i; + loop1_omitted = loop1_cnt - 
loop1_done; + + loop2_done = 0; + if (loop1_done >= 8) { + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + | 0x0030); + for (i = loop1_done - 8; i < 16; i++) { + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xF0FF) | (i << 8)); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xA000); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xF000); + udelay(20); + if (b43legacy_phy_read(dev, 0x002D) >= 0x0DFC) + break; + } + } + + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, backup_phy[4]); + b43legacy_phy_write(dev, 0x0815, backup_phy[5]); + } + b43legacy_phy_write(dev, 0x005A, backup_phy[6]); + b43legacy_phy_write(dev, 0x0059, backup_phy[7]); + b43legacy_phy_write(dev, 0x0058, backup_phy[8]); + b43legacy_phy_write(dev, 0x000A, backup_phy[9]); + b43legacy_phy_write(dev, 0x0003, backup_phy[10]); + b43legacy_phy_write(dev, 0x080F, backup_phy[11]); + b43legacy_phy_write(dev, 0x0810, backup_phy[12]); + b43legacy_phy_write(dev, 0x002B, backup_phy[13]); + b43legacy_phy_write(dev, 0x0015, backup_phy[14]); + + b43legacy_phy_set_baseband_attenuation(dev, backup_bband); + + b43legacy_radio_write16(dev, 0x0052, backup_radio[0]); + b43legacy_radio_write16(dev, 0x0043, backup_radio[1]); + b43legacy_radio_write16(dev, 0x007A, backup_radio[2]); + + b43legacy_phy_write(dev, 0x0811, backup_phy[2] | 0x0003); + udelay(10); + b43legacy_phy_write(dev, 0x0811, backup_phy[2]); + b43legacy_phy_write(dev, 0x0812, backup_phy[3]); + b43legacy_phy_write(dev, 0x0429, backup_phy[0]); + b43legacy_phy_write(dev, 0x0001, backup_phy[1]); + + phy->loopback_gain[0] = ((loop1_done * 6) - (loop1_omitted * 4)) - 11; + phy->loopback_gain[1] = (24 - (3 * loop2_done)) * 2; +} + +static void b43legacy_phy_initg(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + + if (phy->rev == 1) + b43legacy_phy_initb5(dev); + else + b43legacy_phy_initb6(dev); + if (phy->rev >= 2 || phy->gmode) + b43legacy_phy_inita(dev); + + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0814, 0x0000); + b43legacy_phy_write(dev, 0x0815, 0x0000); + } + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x0811, 0x0000); + b43legacy_phy_write(dev, 0x0015, 0x00C0); + } + if (phy->rev > 5) { + b43legacy_phy_write(dev, 0x0811, 0x0400); + b43legacy_phy_write(dev, 0x0015, 0x00C0); + } + if (phy->rev >= 2 || phy->gmode) { + tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF; + if (tmp == 3 || tmp == 5) { + b43legacy_phy_write(dev, 0x04C2, 0x1816); + b43legacy_phy_write(dev, 0x04C3, 0x8006); + if (tmp == 5) + b43legacy_phy_write(dev, 0x04CC, + (b43legacy_phy_read(dev, + 0x04CC) & 0x00FF) | + 0x1F00); + } + b43legacy_phy_write(dev, 0x047E, 0x0078); + } + if (phy->radio_rev == 8) { + b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801) + | 0x0080); + b43legacy_phy_write(dev, 0x043E, b43legacy_phy_read(dev, 0x043E) + | 0x0004); + } + if (phy->rev >= 2 && phy->gmode) + b43legacy_calc_loopback_gain(dev); + if (phy->radio_rev != 8) { + if (phy->initval == 0xFFFF) + phy->initval = b43legacy_radio_init2050(dev); + else + b43legacy_radio_write16(dev, 0x0078, phy->initval); + } + if (phy->txctl2 == 0xFFFF) + b43legacy_phy_lo_g_measure(dev); + else { + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0052, + (phy->txctl1 << 4) | + phy->txctl2); + else + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, + 0x0052) & 0xFFF0) | + phy->txctl1); + if (phy->rev >= 6) + 
b43legacy_phy_write(dev, 0x0036, + (b43legacy_phy_read(dev, 0x0036) + & 0x0FFF) | (phy->txctl2 << 12)); + if (dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_PACTRL) + b43legacy_phy_write(dev, 0x002E, 0x8075); + else + b43legacy_phy_write(dev, 0x002E, 0x807F); + if (phy->rev < 2) + b43legacy_phy_write(dev, 0x002F, 0x0101); + else + b43legacy_phy_write(dev, 0x002F, 0x0202); + } + if (phy->gmode || phy->rev >= 2) { + b43legacy_phy_lo_adjust(dev, 0); + b43legacy_phy_write(dev, 0x080F, 0x8078); + } + + if (!(dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_RSSI)) { + /* The specs state to update the NRSSI LT with + * the value 0x7FFFFFFF here. I think that is some weird + * compiler optimization in the original driver. + * Essentially, what we do here is resetting all NRSSI LT + * entries to -32 (see the limit_value() in nrssi_hw_update()) + */ + b43legacy_nrssi_hw_update(dev, 0xFFFF); + b43legacy_calc_nrssi_threshold(dev); + } else if (phy->gmode || phy->rev >= 2) { + if (phy->nrssi[0] == -1000) { + B43legacy_WARN_ON(phy->nrssi[1] != -1000); + b43legacy_calc_nrssi_slope(dev); + } else { + B43legacy_WARN_ON(phy->nrssi[1] == -1000); + b43legacy_calc_nrssi_threshold(dev); + } + } + if (phy->radio_rev == 8) + b43legacy_phy_write(dev, 0x0805, 0x3230); + b43legacy_phy_init_pctl(dev); + if (dev->dev->bus->chip_id == 0x4306 + && dev->dev->bus->chip_package == 2) { + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0xBFFF); + b43legacy_phy_write(dev, 0x04C3, + b43legacy_phy_read(dev, 0x04C3) & 0x7FFF); + } +} + +static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev) +{ + int i; + u16 ret = 0; + unsigned long flags; + + local_irq_save(flags); + for (i = 0; i < 10; i++) { + b43legacy_phy_write(dev, 0x0015, 0xAFA0); + udelay(1); + b43legacy_phy_write(dev, 0x0015, 0xEFA0); + udelay(10); + b43legacy_phy_write(dev, 0x0015, 0xFFA0); + udelay(40); + ret += b43legacy_phy_read(dev, 0x002C); + } + local_irq_restore(flags); + b43legacy_voluntary_preempt(); + + return ret; +} + +void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 regstack[12] = { 0 }; + u16 mls; + u16 fval; + int i; + int j; + + regstack[0] = b43legacy_phy_read(dev, 0x0015); + regstack[1] = b43legacy_radio_read16(dev, 0x0052) & 0xFFF0; + + if (phy->radio_ver == 0x2053) { + regstack[2] = b43legacy_phy_read(dev, 0x000A); + regstack[3] = b43legacy_phy_read(dev, 0x002A); + regstack[4] = b43legacy_phy_read(dev, 0x0035); + regstack[5] = b43legacy_phy_read(dev, 0x0003); + regstack[6] = b43legacy_phy_read(dev, 0x0001); + regstack[7] = b43legacy_phy_read(dev, 0x0030); + + regstack[8] = b43legacy_radio_read16(dev, 0x0043); + regstack[9] = b43legacy_radio_read16(dev, 0x007A); + regstack[10] = b43legacy_read16(dev, 0x03EC); + regstack[11] = b43legacy_radio_read16(dev, 0x0052) & 0x00F0; + + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x3F3F); + b43legacy_phy_write(dev, 0x0035, regstack[4] & 0xFF7F); + b43legacy_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0); + } + b43legacy_phy_write(dev, 0x0015, 0xB000); + b43legacy_phy_write(dev, 0x002B, 0x0004); + + if (phy->radio_ver == 0x2053) { + b43legacy_phy_write(dev, 0x002B, 0x0203); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + } + + phy->minlowsig[0] = 0xFFFF; + + for (i = 0; i < 4; i++) { + b43legacy_radio_write16(dev, 0x0052, regstack[1] | i); + b43legacy_phy_lo_b_r15_loop(dev); + } + for (i = 0; i < 10; i++) { + b43legacy_radio_write16(dev, 0x0052, regstack[1] | i); + mls = 
b43legacy_phy_lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[0]) { + phy->minlowsig[0] = mls; + phy->minlowsigpos[0] = i; + } + } + b43legacy_radio_write16(dev, 0x0052, regstack[1] + | phy->minlowsigpos[0]); + + phy->minlowsig[1] = 0xFFFF; + + for (i = -4; i < 5; i += 2) { + for (j = -4; j < 5; j += 2) { + if (j < 0) + fval = (0x0100 * i) + j + 0x0100; + else + fval = (0x0100 * i) + j; + b43legacy_phy_write(dev, 0x002F, fval); + mls = b43legacy_phy_lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[1]) { + phy->minlowsig[1] = mls; + phy->minlowsigpos[1] = fval; + } + } + } + phy->minlowsigpos[1] += 0x0101; + + b43legacy_phy_write(dev, 0x002F, phy->minlowsigpos[1]); + if (phy->radio_ver == 0x2053) { + b43legacy_phy_write(dev, 0x000A, regstack[2]); + b43legacy_phy_write(dev, 0x002A, regstack[3]); + b43legacy_phy_write(dev, 0x0035, regstack[4]); + b43legacy_phy_write(dev, 0x0003, regstack[5]); + b43legacy_phy_write(dev, 0x0001, regstack[6]); + b43legacy_phy_write(dev, 0x0030, regstack[7]); + + b43legacy_radio_write16(dev, 0x0043, regstack[8]); + b43legacy_radio_write16(dev, 0x007A, regstack[9]); + + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, 0x0052) + & 0x000F) | regstack[11]); + + b43legacy_write16(dev, 0x03EC, regstack[10]); + } + b43legacy_phy_write(dev, 0x0015, regstack[0]); +} + +static inline +u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev, + u16 control) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 ret; + unsigned long flags; + + local_irq_save(flags); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x15, 0xE300); + control <<= 8; + b43legacy_phy_write(dev, 0x0812, control | 0x00B0); + udelay(5); + b43legacy_phy_write(dev, 0x0812, control | 0x00B2); + udelay(2); + b43legacy_phy_write(dev, 0x0812, control | 0x00B3); + udelay(4); + b43legacy_phy_write(dev, 0x0015, 0xF300); + udelay(8); + } else { + b43legacy_phy_write(dev, 0x0015, control | 0xEFA0); + udelay(2); + b43legacy_phy_write(dev, 0x0015, control | 0xEFE0); + udelay(4); + b43legacy_phy_write(dev, 0x0015, control | 0xFFE0); + udelay(8); + } + ret = b43legacy_phy_read(dev, 0x002D); + local_irq_restore(flags); + b43legacy_voluntary_preempt(); + + return ret; +} + +static u32 b43legacy_phy_lo_g_singledeviation(struct b43legacy_wldev *dev, + u16 control) +{ + int i; + u32 ret = 0; + + for (i = 0; i < 8; i++) + ret += b43legacy_phy_lo_g_deviation_subval(dev, control); + + return ret; +} + +/* Write the LocalOscillator CONTROL */ +static inline +void b43legacy_lo_write(struct b43legacy_wldev *dev, + struct b43legacy_lopair *pair) +{ + u16 value; + + value = (u8)(pair->low); + value |= ((u8)(pair->high)) << 8; + +#ifdef CONFIG_B43LEGACY_DEBUG + /* Sanity check. 
*/ + if (pair->low < -8 || pair->low > 8 || + pair->high < -8 || pair->high > 8) { + struct b43legacy_phy *phy = &dev->phy; + b43legacydbg(dev->wl, + "WARNING: Writing invalid LOpair " + "(low: %d, high: %d, index: %lu)\n", + pair->low, pair->high, + (unsigned long)(pair - phy->_lo_pairs)); + dump_stack(); + } +#endif + + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, value); +} + +static inline +struct b43legacy_lopair *b43legacy_find_lopair(struct b43legacy_wldev *dev, + u16 bbatt, + u16 rfatt, + u16 tx) +{ + static const u8 dict[10] = { 11, 10, 11, 12, 13, 12, 13, 12, 13, 12 }; + struct b43legacy_phy *phy = &dev->phy; + + if (bbatt > 6) + bbatt = 6; + B43legacy_WARN_ON(rfatt >= 10); + + if (tx == 3) + return b43legacy_get_lopair(phy, rfatt, bbatt); + return b43legacy_get_lopair(phy, dict[rfatt], bbatt); +} + +static inline +struct b43legacy_lopair *b43legacy_current_lopair(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + return b43legacy_find_lopair(dev, phy->bbatt, + phy->rfatt, phy->txctl1); +} + +/* Adjust B/G LO */ +void b43legacy_phy_lo_adjust(struct b43legacy_wldev *dev, int fixed) +{ + struct b43legacy_lopair *pair; + + if (fixed) { + /* Use fixed values. Only for initialization. */ + pair = b43legacy_find_lopair(dev, 2, 3, 0); + } else + pair = b43legacy_current_lopair(dev); + b43legacy_lo_write(dev, pair); +} + +static void b43legacy_phy_lo_g_measure_txctl2(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 txctl2 = 0; + u16 i; + u32 smallest; + u32 tmp; + + b43legacy_radio_write16(dev, 0x0052, 0x0000); + udelay(10); + smallest = b43legacy_phy_lo_g_singledeviation(dev, 0); + for (i = 0; i < 16; i++) { + b43legacy_radio_write16(dev, 0x0052, i); + udelay(10); + tmp = b43legacy_phy_lo_g_singledeviation(dev, 0); + if (tmp < smallest) { + smallest = tmp; + txctl2 = i; + } + } + phy->txctl2 = txctl2; +} + +static +void b43legacy_phy_lo_g_state(struct b43legacy_wldev *dev, + const struct b43legacy_lopair *in_pair, + struct b43legacy_lopair *out_pair, + u16 r27) +{ + static const struct b43legacy_lopair transitions[8] = { + { .high = 1, .low = 1, }, + { .high = 1, .low = 0, }, + { .high = 1, .low = -1, }, + { .high = 0, .low = -1, }, + { .high = -1, .low = -1, }, + { .high = -1, .low = 0, }, + { .high = -1, .low = 1, }, + { .high = 0, .low = 1, }, + }; + struct b43legacy_lopair lowest_transition = { + .high = in_pair->high, + .low = in_pair->low, + }; + struct b43legacy_lopair tmp_pair; + struct b43legacy_lopair transition; + int i = 12; + int state = 0; + int found_lower; + int j; + int begin; + int end; + u32 lowest_deviation; + u32 tmp; + + /* Note that in_pair and out_pair can point to the same pair. + * Be careful. 
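+ * The search below therefore works on local copies (lowest_transition
+ * and tmp_pair) and only writes the result into out_pair at the end.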
*/ + + b43legacy_lo_write(dev, &lowest_transition); + lowest_deviation = b43legacy_phy_lo_g_singledeviation(dev, r27); + do { + found_lower = 0; + B43legacy_WARN_ON(!(state >= 0 && state <= 8)); + if (state == 0) { + begin = 1; + end = 8; + } else if (state % 2 == 0) { + begin = state - 1; + end = state + 1; + } else { + begin = state - 2; + end = state + 2; + } + if (begin < 1) + begin += 8; + if (end > 8) + end -= 8; + + j = begin; + tmp_pair.high = lowest_transition.high; + tmp_pair.low = lowest_transition.low; + while (1) { + B43legacy_WARN_ON(!(j >= 1 && j <= 8)); + transition.high = tmp_pair.high + + transitions[j - 1].high; + transition.low = tmp_pair.low + transitions[j - 1].low; + if ((abs(transition.low) < 9) + && (abs(transition.high) < 9)) { + b43legacy_lo_write(dev, &transition); + tmp = b43legacy_phy_lo_g_singledeviation(dev, + r27); + if (tmp < lowest_deviation) { + lowest_deviation = tmp; + state = j; + found_lower = 1; + + lowest_transition.high = + transition.high; + lowest_transition.low = transition.low; + } + } + if (j == end) + break; + if (j == 8) + j = 1; + else + j++; + } + } while (i-- && found_lower); + + out_pair->high = lowest_transition.high; + out_pair->low = lowest_transition.low; +} + +/* Set the baseband attenuation value on chip. */ +void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, + u16 bbatt) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 value; + + if (phy->analog == 0) { + value = (b43legacy_read16(dev, 0x03E6) & 0xFFF0); + value |= (bbatt & 0x000F); + b43legacy_write16(dev, 0x03E6, value); + return; + } + + if (phy->analog > 1) { + value = b43legacy_phy_read(dev, 0x0060) & 0xFFC3; + value |= (bbatt << 2) & 0x003C; + } else { + value = b43legacy_phy_read(dev, 0x0060) & 0xFF87; + value |= (bbatt << 3) & 0x0078; + } + b43legacy_phy_write(dev, 0x0060, value); +} + +/* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ +void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) +{ + static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 }; + const int is_initializing = (b43legacy_status(dev) + < B43legacy_STAT_STARTED); + struct b43legacy_phy *phy = &dev->phy; + u16 h; + u16 i; + u16 oldi = 0; + u16 j; + struct b43legacy_lopair control; + struct b43legacy_lopair *tmp_control; + u16 tmp; + u16 regstack[16] = { 0 }; + u8 oldchannel; + + /* XXX: What are these? 
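+ * r27 ends up shifted into PHY register 0x0812 and r31 selects bit 3
+ * of radio register 0x7A in the measurement loops below.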
*/ + u8 r27 = 0; + u16 r31; + + oldchannel = phy->channel; + /* Setup */ + if (phy->gmode) { + regstack[0] = b43legacy_phy_read(dev, B43legacy_PHY_G_CRS); + regstack[1] = b43legacy_phy_read(dev, 0x0802); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0] + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, regstack[1] & 0xFFFC); + } + regstack[3] = b43legacy_read16(dev, 0x03E2); + b43legacy_write16(dev, 0x03E2, regstack[3] | 0x8000); + regstack[4] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + regstack[5] = b43legacy_phy_read(dev, 0x15); + regstack[6] = b43legacy_phy_read(dev, 0x2A); + regstack[7] = b43legacy_phy_read(dev, 0x35); + regstack[8] = b43legacy_phy_read(dev, 0x60); + regstack[9] = b43legacy_radio_read16(dev, 0x43); + regstack[10] = b43legacy_radio_read16(dev, 0x7A); + regstack[11] = b43legacy_radio_read16(dev, 0x52); + if (phy->gmode) { + regstack[12] = b43legacy_phy_read(dev, 0x0811); + regstack[13] = b43legacy_phy_read(dev, 0x0812); + regstack[14] = b43legacy_phy_read(dev, 0x0814); + regstack[15] = b43legacy_phy_read(dev, 0x0815); + } + b43legacy_radio_selectchannel(dev, 6, 0); + if (phy->gmode) { + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0] + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, regstack[1] & 0xFFFC); + b43legacy_dummy_transmission(dev); + } + b43legacy_radio_write16(dev, 0x0043, 0x0006); + + b43legacy_phy_set_baseband_attenuation(dev, 2); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x0000); + b43legacy_phy_write(dev, 0x002E, 0x007F); + b43legacy_phy_write(dev, 0x080F, 0x0078); + b43legacy_phy_write(dev, 0x0035, regstack[7] & ~(1 << 7)); + b43legacy_radio_write16(dev, 0x007A, regstack[10] & 0xFFF0); + b43legacy_phy_write(dev, 0x002B, 0x0203); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0814, regstack[14] | 0x0003); + b43legacy_phy_write(dev, 0x0815, regstack[15] & 0xFFFC); + b43legacy_phy_write(dev, 0x0811, 0x01B3); + b43legacy_phy_write(dev, 0x0812, 0x00B2); + } + if (is_initializing) + b43legacy_phy_lo_g_measure_txctl2(dev); + b43legacy_phy_write(dev, 0x080F, 0x8078); + + /* Measure */ + control.low = 0; + control.high = 0; + for (h = 0; h < 10; h++) { + /* Loop over each possible RadioAttenuation (0-9) */ + i = pairorder[h]; + if (is_initializing) { + if (i == 3) { + control.low = 0; + control.high = 0; + } else if (((i % 2 == 1) && (oldi % 2 == 1)) || + ((i % 2 == 0) && (oldi % 2 == 0))) { + tmp_control = b43legacy_get_lopair(phy, oldi, + 0); + memcpy(&control, tmp_control, sizeof(control)); + } else { + tmp_control = b43legacy_get_lopair(phy, 3, 0); + memcpy(&control, tmp_control, sizeof(control)); + } + } + /* Loop over each possible BasebandAttenuation/2 */ + for (j = 0; j < 4; j++) { + if (is_initializing) { + tmp = i * 2 + j; + r27 = 0; + r31 = 0; + if (tmp > 14) { + r31 = 1; + if (tmp > 17) + r27 = 1; + if (tmp > 19) + r27 = 2; + } + } else { + tmp_control = b43legacy_get_lopair(phy, i, + j * 2); + if (!tmp_control->used) + continue; + memcpy(&control, tmp_control, sizeof(control)); + r27 = 3; + r31 = 0; + } + b43legacy_radio_write16(dev, 0x43, i); + b43legacy_radio_write16(dev, 0x52, phy->txctl2); + udelay(10); + b43legacy_voluntary_preempt(); + + b43legacy_phy_set_baseband_attenuation(dev, j * 2); + + tmp = (regstack[10] & 0xFFF0); + if (r31) + tmp |= 0x0008; + b43legacy_radio_write16(dev, 0x007A, tmp); + + tmp_control = b43legacy_get_lopair(phy, i, j * 2); + b43legacy_phy_lo_g_state(dev, &control, tmp_control, + r27); + } + oldi = i; + } + /* Loop over each possible 
RadioAttenuation (10-13) */ + for (i = 10; i < 14; i++) { + /* Loop over each possible BasebandAttenuation/2 */ + for (j = 0; j < 4; j++) { + if (is_initializing) { + tmp_control = b43legacy_get_lopair(phy, i - 9, + j * 2); + memcpy(&control, tmp_control, sizeof(control)); + /* FIXME: The next line is wrong, as the + * following if statement can never trigger. */ + tmp = (i - 9) * 2 + j - 5; + r27 = 0; + r31 = 0; + if (tmp > 14) { + r31 = 1; + if (tmp > 17) + r27 = 1; + if (tmp > 19) + r27 = 2; + } + } else { + tmp_control = b43legacy_get_lopair(phy, i - 9, + j * 2); + if (!tmp_control->used) + continue; + memcpy(&control, tmp_control, sizeof(control)); + r27 = 3; + r31 = 0; + } + b43legacy_radio_write16(dev, 0x43, i - 9); + /* FIXME: shouldn't txctl1 be zero in the next line + * and 3 in the loop above? */ + b43legacy_radio_write16(dev, 0x52, + phy->txctl2 + | (3/*txctl1*/ << 4)); + udelay(10); + b43legacy_voluntary_preempt(); + + b43legacy_phy_set_baseband_attenuation(dev, j * 2); + + tmp = (regstack[10] & 0xFFF0); + if (r31) + tmp |= 0x0008; + b43legacy_radio_write16(dev, 0x7A, tmp); + + tmp_control = b43legacy_get_lopair(phy, i, j * 2); + b43legacy_phy_lo_g_state(dev, &control, tmp_control, + r27); + } + } + + /* Restoration */ + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0015, 0xE300); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA0); + udelay(5); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2); + udelay(2); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3); + b43legacy_voluntary_preempt(); + } else + b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0); + b43legacy_phy_lo_adjust(dev, is_initializing); + b43legacy_phy_write(dev, 0x002E, 0x807F); + if (phy->gmode) + b43legacy_phy_write(dev, 0x002F, 0x0202); + else + b43legacy_phy_write(dev, 0x002F, 0x0101); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, regstack[4]); + b43legacy_phy_write(dev, 0x0015, regstack[5]); + b43legacy_phy_write(dev, 0x002A, regstack[6]); + b43legacy_phy_write(dev, 0x0035, regstack[7]); + b43legacy_phy_write(dev, 0x0060, regstack[8]); + b43legacy_radio_write16(dev, 0x0043, regstack[9]); + b43legacy_radio_write16(dev, 0x007A, regstack[10]); + regstack[11] &= 0x00F0; + regstack[11] |= (b43legacy_radio_read16(dev, 0x52) & 0x000F); + b43legacy_radio_write16(dev, 0x52, regstack[11]); + b43legacy_write16(dev, 0x03E2, regstack[3]); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0811, regstack[12]); + b43legacy_phy_write(dev, 0x0812, regstack[13]); + b43legacy_phy_write(dev, 0x0814, regstack[14]); + b43legacy_phy_write(dev, 0x0815, regstack[15]); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0]); + b43legacy_phy_write(dev, 0x0802, regstack[1]); + } + b43legacy_radio_selectchannel(dev, oldchannel, 1); + +#ifdef CONFIG_B43LEGACY_DEBUG + { + /* Sanity check for all lopairs. 
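+ * Every calibrated entry must stay within [-8, 8], the same limit
+ * that b43legacy_lo_write() warns about.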
*/ + for (i = 0; i < B43legacy_LO_COUNT; i++) { + tmp_control = phy->_lo_pairs + i; + if (tmp_control->low < -8 || tmp_control->low > 8 || + tmp_control->high < -8 || tmp_control->high > 8) + b43legacywarn(dev->wl, + "WARNING: Invalid LOpair (low: %d, high:" + " %d, index: %d)\n", + tmp_control->low, tmp_control->high, i); + } + } +#endif /* CONFIG_B43LEGACY_DEBUG */ +} + +static +void b43legacy_phy_lo_mark_current_used(struct b43legacy_wldev *dev) +{ + struct b43legacy_lopair *pair; + + pair = b43legacy_current_lopair(dev); + pair->used = 1; +} + +void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + struct b43legacy_lopair *pair; + int i; + + for (i = 0; i < B43legacy_LO_COUNT; i++) { + pair = phy->_lo_pairs + i; + pair->used = 0; + } +} + +/* http://bcm-specs.sipsolutions.net/EstimatePowerOut + * This function converts a TSSI value to dBm in Q5.2 + */ +static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi) +{ + struct b43legacy_phy *phy = &dev->phy; + s8 dbm = 0; + s32 tmp; + + tmp = phy->idle_tssi; + tmp += tssi; + tmp -= phy->savedpctlreg; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + tmp = limit_value(tmp, 0x00, 0x3F); + dbm = phy->tssi2dbm[tmp]; + break; + default: + B43legacy_BUG_ON(1); + } + + return dbm; +} + +/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ +void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u16 txpower; + s8 v0; + s8 v1; + s8 v2; + s8 v3; + s8 average; + int max_pwr; + s16 desired_pwr; + s16 estimated_pwr; + s16 pwr_adjust; + s16 radio_att_delta; + s16 baseband_att_delta; + s16 radio_attenuation; + s16 baseband_attenuation; + unsigned long phylock_flags; + + if (phy->savedpctlreg == 0xFFFF) + return; + if ((dev->dev->bus->boardinfo.type == 0x0416) && + is_bcm_board_vendor(dev)) + return; +#ifdef CONFIG_B43LEGACY_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + B43legacy_BUG_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0058); + v0 = (s8)(tmp & 0x00FF); + v1 = (s8)((tmp & 0xFF00) >> 8); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x005A); + v2 = (s8)(tmp & 0x00FF); + v3 = (s8)((tmp & 0xFF00) >> 8); + tmp = 0; + + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) { + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0070); + v0 = (s8)(tmp & 0x00FF); + v1 = (s8)((tmp & 0xFF00) >> 8); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0072); + v2 = (s8)(tmp & 0x00FF); + v3 = (s8)((tmp & 0xFF00) >> 8); + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) + return; + v0 = (v0 + 0x20) & 0x3F; + v1 = (v1 + 0x20) & 0x3F; + v2 = (v2 + 0x20) & 0x3F; + v3 = (v3 + 0x20) & 0x3F; + tmp = 1; + } + b43legacy_radio_clear_tssi(dev); + + average = (v0 + v1 + v2 + v3 + 2) / 4; + + if (tmp && (b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x005E) + & 0x8)) + average -= 13; + + estimated_pwr = b43legacy_phy_estimate_power_out(dev, average); + + max_pwr = dev->dev->bus->sprom.r1.maxpwr_bg; + + if ((dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_PACTRL) && + (phy->type == B43legacy_PHYTYPE_G)) + max_pwr -= 0x3; + if (unlikely(max_pwr <= 0)) { + b43legacywarn(dev->wl, "Invalid max-TX-power value in SPROM." 
+ "\n"); + max_pwr = 74; /* fake it */ + dev->dev->bus->sprom.r1.maxpwr_bg = max_pwr; + } + + /* Use regulatory information to get the maximum power. + * In the absence of such data from mac80211, we will use 20 dBm, which + * is the value for the EU, US, Canada, and most of the world. + * The regulatory maximum is reduced by the antenna gain (from sprom) + * and 1.5 dBm (a safety factor??). The result is in Q5.2 format + * which accounts for the factor of 4 */ +#define REG_MAX_PWR 20 + max_pwr = min(REG_MAX_PWR * 4 - dev->dev->bus->sprom.r1.antenna_gain_bg + - 0x6, max_pwr); + + /* find the desired power in Q5.2 - power_level is in dBm + * and limit it - max_pwr is already in Q5.2 */ + desired_pwr = limit_value(phy->power_level << 2, 0, max_pwr); + if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER)) + b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT + " dBm, Desired TX power output: " Q52_FMT + " dBm\n", Q52_ARG(estimated_pwr), + Q52_ARG(desired_pwr)); + /* Check if we need to adjust the current power. The factor of 2 is + * for damping */ + pwr_adjust = (desired_pwr - estimated_pwr) / 2; + /* RF attenuation delta + * The minus sign is because lower attenuation => more power */ + radio_att_delta = -(pwr_adjust + 7) >> 3; + /* Baseband attenuation delta */ + baseband_att_delta = -(pwr_adjust >> 1) - (4 * radio_att_delta); + /* Do we need to adjust anything? */ + if ((radio_att_delta == 0) && (baseband_att_delta == 0)) { + b43legacy_phy_lo_mark_current_used(dev); + return; + } + + /* Calculate the new attenuation values. */ + baseband_attenuation = phy->bbatt; + baseband_attenuation += baseband_att_delta; + radio_attenuation = phy->rfatt; + radio_attenuation += radio_att_delta; + + /* Get baseband and radio attenuation values into permitted ranges. + * baseband 0-11, radio 0-9. + * Radio attenuation affects power level 4 times as much as baseband. 
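+ * So every step moved between the two knobs below is traded at a 1:4
+ * ratio, e.g. dropping radio_attenuation by one is compensated by
+ * adding four to baseband_attenuation.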
+ */ + if (radio_attenuation < 0) { + baseband_attenuation -= (4 * -radio_attenuation); + radio_attenuation = 0; + } else if (radio_attenuation > 9) { + baseband_attenuation += (4 * (radio_attenuation - 9)); + radio_attenuation = 9; + } else { + while (baseband_attenuation < 0 && radio_attenuation > 0) { + baseband_attenuation += 4; + radio_attenuation--; + } + while (baseband_attenuation > 11 && radio_attenuation < 9) { + baseband_attenuation -= 4; + radio_attenuation++; + } + } + baseband_attenuation = limit_value(baseband_attenuation, 0, 11); + + txpower = phy->txctl1; + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { + if (radio_attenuation <= 1) { + if (txpower == 0) { + txpower = 3; + radio_attenuation += 2; + baseband_attenuation += 2; + } else if (dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_PACTRL) { + baseband_attenuation += 4 * + (radio_attenuation - 2); + radio_attenuation = 2; + } + } else if (radio_attenuation > 4 && txpower != 0) { + txpower = 0; + if (baseband_attenuation < 3) { + radio_attenuation -= 3; + baseband_attenuation += 2; + } else { + radio_attenuation -= 2; + baseband_attenuation -= 2; + } + } + } + /* Save the control values */ + phy->txctl1 = txpower; + baseband_attenuation = limit_value(baseband_attenuation, 0, 11); + radio_attenuation = limit_value(radio_attenuation, 0, 9); + phy->rfatt = radio_attenuation; + phy->bbatt = baseband_attenuation; + + /* Adjust the hardware */ + b43legacy_phy_lock(dev, phylock_flags); + b43legacy_radio_lock(dev); + b43legacy_radio_set_txpower_bg(dev, baseband_attenuation, + radio_attenuation, txpower); + b43legacy_phy_lo_mark_current_used(dev); + b43legacy_radio_unlock(dev); + b43legacy_phy_unlock(dev, phylock_flags); +} + +static inline +s32 b43legacy_tssi2dbm_ad(s32 num, s32 den) +{ + if (num < 0) + return num/den; + else + return (num+den/2)/den; +} + +static inline +s8 b43legacy_tssi2dbm_entry(s8 entry [], u8 index, s16 pab0, s16 pab1, s16 pab2) +{ + s32 m1; + s32 m2; + s32 f = 256; + s32 q; + s32 delta; + s8 i = 0; + + m1 = b43legacy_tssi2dbm_ad(16 * pab0 + index * pab1, 32); + m2 = max(b43legacy_tssi2dbm_ad(32768 + index * pab2, 256), 1); + do { + if (i > 15) + return -EINVAL; + q = b43legacy_tssi2dbm_ad(f * 4096 - + b43legacy_tssi2dbm_ad(m2 * f, 16) * + f, 2048); + delta = abs(q - f); + f = q; + i++; + } while (delta >= 2); + entry[index] = limit_value(b43legacy_tssi2dbm_ad(m1 * f, 8192), + -127, 128); + return 0; +} + +/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */ +int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s16 pab0; + s16 pab1; + s16 pab2; + u8 idx; + s8 *dyn_tssi2dbm; + + B43legacy_WARN_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + pab0 = (s16)(dev->dev->bus->sprom.r1.pa0b0); + pab1 = (s16)(dev->dev->bus->sprom.r1.pa0b1); + pab2 = (s16)(dev->dev->bus->sprom.r1.pa0b2); + + if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) { + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_b_table; + return 0; + } + + if (pab0 != 0 && pab1 != 0 && pab2 != 0 && + pab0 != -1 && pab1 != -1 && pab2 != -1) { + /* The pabX values are set in SPROM. Use them. 
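+ * A dynamic 64-entry TSSI-to-dBm table is generated from them below
+ * instead of relying on the static b43legacy_tssi2dbm_b/g tables.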
*/ + if ((s8)dev->dev->bus->sprom.r1.itssi_bg != 0 && + (s8)dev->dev->bus->sprom.r1.itssi_bg != -1) + phy->idle_tssi = (s8)(dev->dev->bus->sprom.r1.itssi_bg); + else + phy->idle_tssi = 62; + dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); + if (dyn_tssi2dbm == NULL) { + b43legacyerr(dev->wl, "Could not allocate memory" + "for tssi2dbm table\n"); + return -ENOMEM; + } + for (idx = 0; idx < 64; idx++) + if (b43legacy_tssi2dbm_entry(dyn_tssi2dbm, idx, pab0, + pab1, pab2)) { + phy->tssi2dbm = NULL; + b43legacyerr(dev->wl, "Could not generate " + "tssi2dBm table\n"); + kfree(dyn_tssi2dbm); + return -ENODEV; + } + phy->tssi2dbm = dyn_tssi2dbm; + phy->dyn_tssi_tbl = 1; + } else { + /* pabX values not set in SPROM. */ + switch (phy->type) { + case B43legacy_PHYTYPE_B: + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_b_table; + break; + case B43legacy_PHYTYPE_G: + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_g_table; + break; + } + } + + return 0; +} + +int b43legacy_phy_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err = -ENODEV; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + switch (phy->rev) { + case 2: + b43legacy_phy_initb2(dev); + err = 0; + break; + case 4: + b43legacy_phy_initb4(dev); + err = 0; + break; + case 5: + b43legacy_phy_initb5(dev); + err = 0; + break; + case 6: + b43legacy_phy_initb6(dev); + err = 0; + break; + } + break; + case B43legacy_PHYTYPE_G: + b43legacy_phy_initg(dev); + err = 0; + break; + } + if (err) + b43legacyerr(dev->wl, "Unknown PHYTYPE found\n"); + + return err; +} + +void b43legacy_phy_set_antenna_diversity(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 antennadiv; + u16 offset; + u16 value; + u32 ucodeflags; + + antennadiv = phy->antenna_diversity; + + if (antennadiv == 0xFFFF) + antennadiv = 3; + B43legacy_WARN_ON(antennadiv > 3); + + ucodeflags = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + ucodeflags & ~B43legacy_UCODEFLAG_AUTODIV); + + switch (phy->type) { + case B43legacy_PHYTYPE_G: + offset = 0x0400; + + if (antennadiv == 2) + value = (3/*automatic*/ << 7); + else + value = (antennadiv << 7); + b43legacy_phy_write(dev, offset + 1, + (b43legacy_phy_read(dev, offset + 1) + & 0x7E7F) | value); + + if (antennadiv >= 2) { + if (antennadiv == 2) + value = (antennadiv << 7); + else + value = (0/*force0*/ << 7); + b43legacy_phy_write(dev, offset + 0x2B, + (b43legacy_phy_read(dev, + offset + 0x2B) + & 0xFEFF) | value); + } + + if (phy->type == B43legacy_PHYTYPE_G) { + if (antennadiv >= 2) + b43legacy_phy_write(dev, 0x048C, + b43legacy_phy_read(dev, + 0x048C) | 0x2000); + else + b43legacy_phy_write(dev, 0x048C, + b43legacy_phy_read(dev, + 0x048C) & ~0x2000); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0461, + b43legacy_phy_read(dev, + 0x0461) | 0x0010); + b43legacy_phy_write(dev, 0x04AD, + (b43legacy_phy_read(dev, + 0x04AD) + & 0x00FF) | 0x0015); + if (phy->rev == 2) + b43legacy_phy_write(dev, 0x0427, + 0x0008); + else + b43legacy_phy_write(dev, 0x0427, + (b43legacy_phy_read(dev, 0x0427) + & 0x00FF) | 0x0008); + } else if (phy->rev >= 6) + b43legacy_phy_write(dev, 0x049B, 0x00DC); + } else { + if (phy->rev < 3) + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, + 0x002B) & 0x00FF) + | 0x0024); + else { + b43legacy_phy_write(dev, 0x0061, + b43legacy_phy_read(dev, + 0x0061) | 0x0010); + if (phy->rev == 3) { + b43legacy_phy_write(dev, 
0x0093, + 0x001D); + b43legacy_phy_write(dev, 0x0027, + 0x0008); + } else { + b43legacy_phy_write(dev, 0x0093, + 0x003A); + b43legacy_phy_write(dev, 0x0027, + (b43legacy_phy_read(dev, 0x0027) + & 0x00FF) | 0x0008); + } + } + } + break; + case B43legacy_PHYTYPE_B: + if (dev->dev->id.revision == 2) + value = (3/*automatic*/ << 7); + else + value = (antennadiv << 7); + b43legacy_phy_write(dev, 0x03E2, + (b43legacy_phy_read(dev, 0x03E2) + & 0xFE7F) | value); + break; + default: + B43legacy_WARN_ON(1); + } + + if (antennadiv >= 2) { + ucodeflags = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + ucodeflags | B43legacy_UCODEFLAG_AUTODIV); + } + + phy->antenna_diversity = antennadiv; +} + +/* Set the PowerSavingControlBits. + * Bitvalues: + * 0 => unset the bit + * 1 => set the bit + * -1 => calculate the bit + */ +void b43legacy_power_saving_ctl_bits(struct b43legacy_wldev *dev, + int bit25, int bit26) +{ + int i; + u32 status; + +/* FIXME: Force 25 to off and 26 to on for now: */ +bit25 = 0; +bit26 = 1; + + if (bit25 == -1) { + /* TODO: If powersave is not off and FIXME is not set and we + * are not in adhoc and thus is not an AP and we arei + * associated, set bit 25 */ + } + if (bit26 == -1) { + /* TODO: If the device is awake or this is an AP, or we are + * scanning, or FIXME, or we are associated, or FIXME, + * or the latest PS-Poll packet sent was successful, + * set bit26 */ + } + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + if (bit25) + status |= B43legacy_SBF_PS1; + else + status &= ~B43legacy_SBF_PS1; + if (bit26) + status |= B43legacy_SBF_PS2; + else + status &= ~B43legacy_SBF_PS2; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + if (bit26 && dev->dev->id.revision >= 5) { + for (i = 0; i < 100; i++) { + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + 0x0040) != 4) + break; + udelay(10); + } + } +} diff --git a/drivers/net/wireless/b43legacy/phy.h b/drivers/net/wireless/b43legacy/phy.h new file mode 100644 index 000000000000..f11b4271714c --- /dev/null +++ b/drivers/net/wireless/b43legacy/phy.h @@ -0,0 +1,219 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mbuesch@freenet.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#ifndef B43legacy_PHY_H_ +#define B43legacy_PHY_H_ + +#include <linux/types.h> + +enum { + B43legacy_ANTENNA0, /* Antenna 0 */ + B43legacy_ANTENNA1, /* Antenna 0 */ + B43legacy_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */ + B43legacy_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */ + + B43legacy_ANTENNA_AUTO = B43legacy_ANTENNA_AUTO0, + B43legacy_ANTENNA_DEFAULT = B43legacy_ANTENNA_AUTO, +}; + +enum { + B43legacy_INTERFMODE_NONE, + B43legacy_INTERFMODE_NONWLAN, + B43legacy_INTERFMODE_MANUALWLAN, + B43legacy_INTERFMODE_AUTOWLAN, +}; + +/*** PHY Registers ***/ + +/* Routing */ +#define B43legacy_PHYROUTE_OFDM_GPHY 0x400 +#define B43legacy_PHYROUTE_EXT_GPHY 0x800 + +/* Base registers. */ +#define B43legacy_PHY_BASE(reg) (reg) +/* OFDM (A) registers of a G-PHY */ +#define B43legacy_PHY_OFDM(reg) ((reg) | B43legacy_PHYROUTE_OFDM_GPHY) +/* Extended G-PHY registers */ +#define B43legacy_PHY_EXTG(reg) ((reg) | B43legacy_PHYROUTE_EXT_GPHY) + + +/* Extended G-PHY Registers */ +#define B43legacy_PHY_CLASSCTL B43legacy_PHY_EXTG(0x02) /* Classify control */ +#define B43legacy_PHY_GTABCTL B43legacy_PHY_EXTG(0x03) /* G-PHY table control (see below) */ +#define B43legacy_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */ +#define B43legacy_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */ +#define B43legacy_PHY_GTABNR_SHIFT 10 +#define B43legacy_PHY_GTABDATA B43legacy_PHY_EXTG(0x04) /* G-PHY table data */ +#define B43legacy_PHY_LO_MASK B43legacy_PHY_EXTG(0x0F) /* Local Oscillator control mask */ +#define B43legacy_PHY_LO_CTL B43legacy_PHY_EXTG(0x10) /* Local Oscillator control */ +#define B43legacy_PHY_RFOVER B43legacy_PHY_EXTG(0x11) /* RF override */ +#define B43legacy_PHY_RFOVERVAL B43legacy_PHY_EXTG(0x12) /* RF override value */ +/*** OFDM table numbers ***/ +#define B43legacy_OFDMTAB(number, offset) \ + (((number) << B43legacy_PHY_OTABLENR_SHIFT) \ + | (offset)) +#define B43legacy_OFDMTAB_AGC1 B43legacy_OFDMTAB(0x00, 0) +#define B43legacy_OFDMTAB_GAIN0 B43legacy_OFDMTAB(0x00, 0) +#define B43legacy_OFDMTAB_GAINX B43legacy_OFDMTAB(0x01, 0) +#define B43legacy_OFDMTAB_GAIN1 B43legacy_OFDMTAB(0x01, 4) +#define B43legacy_OFDMTAB_AGC3 B43legacy_OFDMTAB(0x02, 0) +#define B43legacy_OFDMTAB_GAIN2 B43legacy_OFDMTAB(0x02, 3) +#define B43legacy_OFDMTAB_LNAHPFGAIN1 B43legacy_OFDMTAB(0x03, 0) +#define B43legacy_OFDMTAB_WRSSI B43legacy_OFDMTAB(0x04, 0) +#define B43legacy_OFDMTAB_LNAHPFGAIN2 B43legacy_OFDMTAB(0x04, 0) +#define B43legacy_OFDMTAB_NOISESCALE B43legacy_OFDMTAB(0x05, 0) +#define B43legacy_OFDMTAB_AGC2 B43legacy_OFDMTAB(0x06, 0) +#define B43legacy_OFDMTAB_ROTOR B43legacy_OFDMTAB(0x08, 0) +#define B43legacy_OFDMTAB_ADVRETARD B43legacy_OFDMTAB(0x09, 0) +#define B43legacy_OFDMTAB_DAC B43legacy_OFDMTAB(0x0C, 0) +#define B43legacy_OFDMTAB_DC B43legacy_OFDMTAB(0x0E, 7) +#define B43legacy_OFDMTAB_PWRDYN2 B43legacy_OFDMTAB(0x0E, 12) +#define B43legacy_OFDMTAB_LNAGAIN B43legacy_OFDMTAB(0x0E, 13) + +#define B43legacy_OFDMTAB_LPFGAIN B43legacy_OFDMTAB(0x0F, 12) +#define B43legacy_OFDMTAB_RSSI B43legacy_OFDMTAB(0x10, 0) + +#define B43legacy_OFDMTAB_AGC1_R1 B43legacy_OFDMTAB(0x13, 0) +#define B43legacy_OFDMTAB_GAINX_R1 B43legacy_OFDMTAB(0x14, 0) +#define B43legacy_OFDMTAB_MINSIGSQ B43legacy_OFDMTAB(0x14, 1) +#define B43legacy_OFDMTAB_AGC3_R1 B43legacy_OFDMTAB(0x15, 0) +#define B43legacy_OFDMTAB_WRSSI_R1 B43legacy_OFDMTAB(0x15, 4) +#define B43legacy_OFDMTAB_TSSI B43legacy_OFDMTAB(0x15, 0) +#define B43legacy_OFDMTAB_DACRFPABB B43legacy_OFDMTAB(0x16, 0) +#define B43legacy_OFDMTAB_DACOFF 
B43legacy_OFDMTAB(0x17, 0) +#define B43legacy_OFDMTAB_DCBIAS B43legacy_OFDMTAB(0x18, 0) + +void b43legacy_put_attenuation_into_ranges(int *_bbatt, int *_rfatt); + +/* OFDM (A) PHY Registers */ +#define B43legacy_PHY_VERSION_OFDM B43legacy_PHY_OFDM(0x00) /* Versioning register for A-PHY */ +#define B43legacy_PHY_BBANDCFG B43legacy_PHY_OFDM(0x01) /* Baseband config */ +#define B43legacy_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */ +#define B43legacy_PHY_BBANDCFG_RXANT_SHIFT 7 +#define B43legacy_PHY_PWRDOWN B43legacy_PHY_OFDM(0x03) /* Powerdown */ +#define B43legacy_PHY_CRSTHRES1 B43legacy_PHY_OFDM(0x06) /* CRS Threshold 1 */ +#define B43legacy_PHY_LNAHPFCTL B43legacy_PHY_OFDM(0x1C) /* LNA/HPF control */ +#define B43legacy_PHY_ADIVRELATED B43legacy_PHY_OFDM(0x27) /* FIXME rename */ +#define B43legacy_PHY_CRS0 B43legacy_PHY_OFDM(0x29) +#define B43legacy_PHY_ANTDWELL B43legacy_PHY_OFDM(0x2B) /* Antenna dwell */ +#define B43legacy_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */ +#define B43legacy_PHY_ENCORE B43legacy_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */ +#define B43legacy_PHY_ENCORE_EN 0x0200 /* Encore enable */ +#define B43legacy_PHY_LMS B43legacy_PHY_OFDM(0x55) +#define B43legacy_PHY_OFDM61 B43legacy_PHY_OFDM(0x61) /* FIXME rename */ +#define B43legacy_PHY_OFDM61_10 0x0010 /* FIXME rename */ +#define B43legacy_PHY_IQBAL B43legacy_PHY_OFDM(0x69) /* I/Q balance */ +#define B43legacy_PHY_OTABLECTL B43legacy_PHY_OFDM(0x72) /* OFDM table control (see below) */ +#define B43legacy_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */ +#define B43legacy_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */ +#define B43legacy_PHY_OTABLENR_SHIFT 10 +#define B43legacy_PHY_OTABLEI B43legacy_PHY_OFDM(0x73) /* OFDM table data I */ +#define B43legacy_PHY_OTABLEQ B43legacy_PHY_OFDM(0x74) /* OFDM table data Q */ +#define B43legacy_PHY_HPWR_TSSICTL B43legacy_PHY_OFDM(0x78) /* Hardware power TSSI control */ +#define B43legacy_PHY_NRSSITHRES B43legacy_PHY_OFDM(0x8A) /* NRSSI threshold */ +#define B43legacy_PHY_ANTWRSETT B43legacy_PHY_OFDM(0x8C) /* Antenna WR settle */ +#define B43legacy_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */ +#define B43legacy_PHY_CLIPPWRDOWNT B43legacy_PHY_OFDM(0x93) /* Clip powerdown threshold */ +#define B43legacy_PHY_OFDM9B B43legacy_PHY_OFDM(0x9B) /* FIXME rename */ +#define B43legacy_PHY_N1P1GAIN B43legacy_PHY_OFDM(0xA0) +#define B43legacy_PHY_P1P2GAIN B43legacy_PHY_OFDM(0xA1) +#define B43legacy_PHY_N1N2GAIN B43legacy_PHY_OFDM(0xA2) +#define B43legacy_PHY_CLIPTHRES B43legacy_PHY_OFDM(0xA3) +#define B43legacy_PHY_CLIPN1P2THRES B43legacy_PHY_OFDM(0xA4) +#define B43legacy_PHY_DIVSRCHIDX B43legacy_PHY_OFDM(0xA8) /* Divider search gain/index */ +#define B43legacy_PHY_CLIPP2THRES B43legacy_PHY_OFDM(0xA9) +#define B43legacy_PHY_CLIPP3THRES B43legacy_PHY_OFDM(0xAA) +#define B43legacy_PHY_DIVP1P2GAIN B43legacy_PHY_OFDM(0xAB) +#define B43legacy_PHY_DIVSRCHGAINBACK B43legacy_PHY_OFDM(0xAD) /* Divider search gain back */ +#define B43legacy_PHY_DIVSRCHGAINCHNG B43legacy_PHY_OFDM(0xAE) /* Divider search gain change */ +#define B43legacy_PHY_CRSTHRES1_R1 B43legacy_PHY_OFDM(0xC0) /* CRS Threshold 1 (rev 1 only) */ +#define B43legacy_PHY_CRSTHRES2_R1 B43legacy_PHY_OFDM(0xC1) /* CRS Threshold 2 (rev 1 only) */ +#define B43legacy_PHY_TSSIP_LTBASE B43legacy_PHY_OFDM(0x380) /* TSSI power lookup table base */ +#define B43legacy_PHY_DC_LTBASE B43legacy_PHY_OFDM(0x3A0) /* DC lookup table base */ +#define B43legacy_PHY_GAIN_LTBASE 
B43legacy_PHY_OFDM(0x3C0) /* Gain lookup table base */ + +void b43legacy_put_attenuation_into_ranges(int *_bbatt, int *_rfatt); + +/* Masks for the different PHY versioning registers. */ +#define B43legacy_PHYVER_ANALOG 0xF000 +#define B43legacy_PHYVER_ANALOG_SHIFT 12 +#define B43legacy_PHYVER_TYPE 0x0F00 +#define B43legacy_PHYVER_TYPE_SHIFT 8 +#define B43legacy_PHYVER_VERSION 0x00FF + +struct b43legacy_wldev; + +void b43legacy_raw_phy_lock(struct b43legacy_wldev *dev); +#define b43legacy_phy_lock(bcm, flags) \ + do { \ + local_irq_save(flags); \ + b43legacy_raw_phy_lock(bcm); \ + } while (0) +void b43legacy_raw_phy_unlock(struct b43legacy_wldev *dev); +#define b43legacy_phy_unlock(bcm, flags) \ + do { \ + b43legacy_raw_phy_unlock(bcm); \ + local_irq_restore(flags); \ + } while (0) + +/* Card uses the loopback gain stuff */ +#define has_loopback_gain(phy) \ + (((phy)->rev > 1) || ((phy)->gmode)) + +u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val); + +int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev); +int b43legacy_phy_init(struct b43legacy_wldev *dev); + +void b43legacy_set_rx_antenna(struct b43legacy_wldev *dev, int antenna); + +void b43legacy_phy_set_antenna_diversity(struct b43legacy_wldev *dev); +void b43legacy_phy_calibrate(struct b43legacy_wldev *dev); +int b43legacy_phy_connect(struct b43legacy_wldev *dev, int connect); + +void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev); +void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev); +void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev); + +/* Adjust the LocalOscillator to the saved values. + * "fixed" is only set to 1 once in initialization. Set to 0 otherwise. + */ +void b43legacy_phy_lo_adjust(struct b43legacy_wldev *dev, int fixed); +void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev); + +void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, + u16 baseband_attenuation); + +void b43legacy_power_saving_ctl_bits(struct b43legacy_wldev *dev, + int bit25, int bit26); + +#endif /* B43legacy_PHY_H_ */ diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c new file mode 100644 index 000000000000..de843ac147ae --- /dev/null +++ b/drivers/net/wireless/b43legacy/pio.c @@ -0,0 +1,668 @@ +/* + + Broadcom B43legacy wireless driver + + PIO Transmission + + Copyright (c) 2005 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43legacy.h" +#include "pio.h" +#include "main.h" +#include "xmit.h" + +#include <linux/delay.h> + + +static void tx_start(struct b43legacy_pioqueue *queue) +{ + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_INIT); +} + +static void tx_octet(struct b43legacy_pioqueue *queue, + u8 octet) +{ + if (queue->need_workarounds) { + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO); + } else { + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); + } +} + +static u16 tx_get_next_word(const u8 *txhdr, + const u8 *packet, + size_t txhdr_size, + unsigned int *pos) +{ + const u8 *source; + unsigned int i = *pos; + u16 ret; + + if (i < txhdr_size) + source = txhdr; + else { + source = packet; + i -= txhdr_size; + } + ret = le16_to_cpu(*((__le16 *)(source + i))); + *pos += 2; + + return ret; +} + +static void tx_data(struct b43legacy_pioqueue *queue, + u8 *txhdr, + const u8 *packet, + unsigned int octets) +{ + u16 data; + unsigned int i = 0; + + if (queue->need_workarounds) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43legacy_txhdr_fw3), &i); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); + } + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO | + B43legacy_PIO_TXCTL_WRITEHI); + while (i < octets - 1) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43legacy_txhdr_fw3), &i); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); + } + if (octets % 2) + tx_octet(queue, packet[octets - + sizeof(struct b43legacy_txhdr_fw3) - 1]); +} + +static void tx_complete(struct b43legacy_pioqueue *queue, + struct sk_buff *skb) +{ + if (queue->need_workarounds) { + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, + skb->data[skb->len - 1]); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO | + B43legacy_PIO_TXCTL_COMPLETE); + } else + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_COMPLETE); +} + +static u16 generate_cookie(struct b43legacy_pioqueue *queue, + struct b43legacy_pio_txpacket *packet) +{ + u16 cookie = 0x0000; + int packetindex; + + /* We use the upper 4 bits for the PIO + * controller ID and the lower 12 bits + * for the packet index (in the cache). 
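+ * parse_cookie() reverses this mapping when the TX status report for
+ * the packet comes back from the device.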
+ */ + switch (queue->mmio_base) { + case B43legacy_MMIO_PIO1_BASE: + break; + case B43legacy_MMIO_PIO2_BASE: + cookie = 0x1000; + break; + case B43legacy_MMIO_PIO3_BASE: + cookie = 0x2000; + break; + case B43legacy_MMIO_PIO4_BASE: + cookie = 0x3000; + break; + default: + B43legacy_WARN_ON(1); + } + packetindex = pio_txpacket_getindex(packet); + B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000)); + cookie |= (u16)packetindex; + + return cookie; +} + +static +struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev, + u16 cookie, + struct b43legacy_pio_txpacket **packet) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue = NULL; + int packetindex; + + switch (cookie & 0xF000) { + case 0x0000: + queue = pio->queue0; + break; + case 0x1000: + queue = pio->queue1; + break; + case 0x2000: + queue = pio->queue2; + break; + case 0x3000: + queue = pio->queue3; + break; + default: + B43legacy_WARN_ON(1); + } + packetindex = (cookie & 0x0FFF); + B43legacy_WARN_ON(!(packetindex >= 0 && packetindex + < B43legacy_PIO_MAXTXPACKETS)); + *packet = &(queue->tx_packets_cache[packetindex]); + + return queue; +} + +union txhdr_union { + struct b43legacy_txhdr_fw3 txhdr_fw3; +}; + +static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue, + struct sk_buff *skb, + struct b43legacy_pio_txpacket *packet, + size_t txhdr_size) +{ + union txhdr_union txhdr_data; + u8 *txhdr = NULL; + unsigned int octets; + + txhdr = (u8 *)(&txhdr_data.txhdr_fw3); + + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); + b43legacy_generate_txhdr(queue->dev, + txhdr, skb->data, skb->len, + &packet->txstat.control, + generate_cookie(queue, packet)); + + tx_start(queue); + octets = skb->len + txhdr_size; + if (queue->need_workarounds) + octets--; + tx_data(queue, txhdr, (u8 *)skb->data, octets); + tx_complete(queue, skb); +} + +static void free_txpacket(struct b43legacy_pio_txpacket *packet, + int irq_context) +{ + struct b43legacy_pioqueue *queue = packet->queue; + + if (packet->skb) { + if (irq_context) + dev_kfree_skb_irq(packet->skb); + else + dev_kfree_skb(packet->skb); + } + list_move(&packet->list, &queue->txfree); + queue->nr_txfree++; +} + +static int pio_tx_packet(struct b43legacy_pio_txpacket *packet) +{ + struct b43legacy_pioqueue *queue = packet->queue; + struct sk_buff *skb = packet->skb; + u16 octets; + + octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3); + if (queue->tx_devq_size < octets) { + b43legacywarn(queue->dev->wl, "PIO queue too small. " + "Dropping packet.\n"); + /* Drop it silently (return success) */ + free_txpacket(packet, 1); + return 0; + } + B43legacy_WARN_ON(queue->tx_devq_packets > + B43legacy_PIO_MAXTXDEVQPACKETS); + B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); + /* Check if there is sufficient free space on the device + * TX queue. If not, return and let the TX tasklet + * retry later. + */ + if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS) + return -EBUSY; + if (queue->tx_devq_used + octets > queue->tx_devq_size) + return -EBUSY; + /* Now poke the device. */ + pio_tx_write_fragment(queue, skb, packet, + sizeof(struct b43legacy_txhdr_fw3)); + + /* Account for the packet size. + * (We must not overflow the device TX queue) + */ + queue->tx_devq_packets++; + queue->tx_devq_used += octets; + + /* Transmission started, everything ok, move the + * packet to the txrunning list. 
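+ * It stays there until the corresponding TX status report arrives and
+ * b43legacy_pio_handle_txstatus() releases it back to txfree.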
+ */ + list_move_tail(&packet->list, &queue->txrunning); + + return 0; +} + +static void tx_tasklet(unsigned long d) +{ + struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d; + struct b43legacy_wldev *dev = queue->dev; + unsigned long flags; + struct b43legacy_pio_txpacket *packet, *tmp_packet; + int err; + u16 txctl; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + if (queue->tx_frozen) + goto out_unlock; + txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL); + if (txctl & B43legacy_PIO_TXCTL_SUSPEND) + goto out_unlock; + + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { + /* Try to transmit the packet. This can fail, if + * the device queue is full. In case of failure, the + * packet is left in the txqueue. + * If transmission succeed, the packet is moved to txrunning. + * If it is impossible to transmit the packet, it + * is dropped. + */ + err = pio_tx_packet(packet); + if (err) + break; + } +out_unlock: + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void setup_txqueues(struct b43legacy_pioqueue *queue) +{ + struct b43legacy_pio_txpacket *packet; + int i; + + queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS; + for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) { + packet = &(queue->tx_packets_cache[i]); + + packet->queue = queue; + INIT_LIST_HEAD(&packet->list); + + list_add(&packet->list, &queue->txfree); + } +} + +static +struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev, + u16 pio_mmio_base) +{ + struct b43legacy_pioqueue *queue; + u32 value; + u16 qsize; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + goto out; + + queue->dev = dev; + queue->mmio_base = pio_mmio_base; + queue->need_workarounds = (dev->dev->id.revision < 3); + + INIT_LIST_HEAD(&queue->txfree); + INIT_LIST_HEAD(&queue->txqueue); + INIT_LIST_HEAD(&queue->txrunning); + tasklet_init(&queue->txtask, tx_tasklet, + (unsigned long)queue); + + value = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + value &= ~B43legacy_SBF_XFER_REG_BYTESWAP; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, value); + + qsize = b43legacy_read16(dev, queue->mmio_base + + B43legacy_PIO_TXQBUFSIZE); + if (qsize == 0) { + b43legacyerr(dev->wl, "This card does not support PIO " + "operation mode. 
Please use DMA mode " + "(module parameter pio=0).\n"); + goto err_freequeue; + } + if (qsize <= B43legacy_PIO_TXQADJUST) { + b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n", + qsize); + goto err_freequeue; + } + qsize -= B43legacy_PIO_TXQADJUST; + queue->tx_devq_size = qsize; + + setup_txqueues(queue); + +out: + return queue; + +err_freequeue: + kfree(queue); + queue = NULL; + goto out; +} + +static void cancel_transfers(struct b43legacy_pioqueue *queue) +{ + struct b43legacy_pio_txpacket *packet, *tmp_packet; + + tasklet_disable(&queue->txtask); + + list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) + free_txpacket(packet, 0); + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) + free_txpacket(packet, 0); +} + +static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue) +{ + if (!queue) + return; + + cancel_transfers(queue); + kfree(queue); +} + +void b43legacy_pio_free(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + if (!b43legacy_using_pio(dev)) + return; + pio = &dev->pio; + + b43legacy_destroy_pioqueue(pio->queue3); + pio->queue3 = NULL; + b43legacy_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + b43legacy_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + b43legacy_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; +} + +int b43legacy_pio_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue; + int err = -ENOMEM; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE); + if (!queue) + goto out; + pio->queue0 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE); + if (!queue) + goto err_destroy0; + pio->queue1 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE); + if (!queue) + goto err_destroy1; + pio->queue2 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE); + if (!queue) + goto err_destroy2; + pio->queue3 = queue; + + if (dev->dev->id.revision < 3) + dev->irq_savedstate |= B43legacy_IRQ_PIO_WORKAROUND; + + b43legacydbg(dev->wl, "PIO initialized\n"); + err = 0; +out: + return err; + +err_destroy2: + b43legacy_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; +err_destroy1: + b43legacy_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; +err_destroy0: + b43legacy_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; + goto out; +} + +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct b43legacy_pioqueue *queue = dev->pio.queue1; + struct b43legacy_pio_txpacket *packet; + + B43legacy_WARN_ON(queue->tx_suspended); + B43legacy_WARN_ON(list_empty(&queue->txfree)); + + packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket, + list); + packet->skb = skb; + + memset(&packet->txstat, 0, sizeof(packet->txstat)); + memcpy(&packet->txstat.control, ctl, sizeof(*ctl)); + + list_move_tail(&packet->list, &queue->txqueue); + queue->nr_txfree--; + queue->nr_tx_packets++; + B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS); + + tasklet_schedule(&queue->txtask); + + return 0; +} + +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + struct b43legacy_pioqueue *queue; + struct b43legacy_pio_txpacket *packet; + + queue = parse_cookie(dev, status->cookie, &packet); + B43legacy_WARN_ON(!queue); + + queue->tx_devq_packets--; + queue->tx_devq_used -= (packet->skb->len + + sizeof(struct b43legacy_txhdr_fw3)); + + 
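+ /* The device-queue space is accounted back; now report the TX status
+ * to mac80211 and recycle the packet slot. */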
if (status->acked) + packet->txstat.flags |= IEEE80211_TX_STATUS_ACK; + packet->txstat.retry_count = status->frame_count - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb, + &(packet->txstat)); + packet->skb = NULL; + + free_txpacket(packet, 1); + /* If there are packets on the txqueue, poke the tasklet + * to transmit them. + */ + if (!list_empty(&queue->txqueue)) + tasklet_schedule(&queue->txtask); +} + +void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue; + struct ieee80211_tx_queue_stats_data *data; + + queue = pio->queue1; + data = &(stats->data[0]); + data->len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree; + data->limit = B43legacy_PIO_MAXTXPACKETS; + data->count = queue->nr_tx_packets; +} + +static void pio_rx_error(struct b43legacy_pioqueue *queue, + int clear_buffers, + const char *error) +{ + int i; + + b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error); + b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, + B43legacy_PIO_RXCTL_READY); + if (clear_buffers) { + B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE); + for (i = 0; i < 15; i++) { + /* Dummy read. */ + b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + } + } +} + +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) +{ + __le16 preamble[21] = { 0 }; + struct b43legacy_rxhdr_fw3 *rxhdr; + u16 tmp; + u16 len; + u16 macstat; + int i; + int preamble_readwords; + struct sk_buff *skb; + + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); + if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE)) + return; + b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, + B43legacy_PIO_RXCTL_DATAAVAILABLE); + + for (i = 0; i < 10; i++) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); + if (tmp & B43legacy_PIO_RXCTL_READY) + goto data_ready; + udelay(10); + } + b43legacydbg(queue->dev->wl, "PIO RX timed out\n"); + return; +data_ready: + + len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + if (unlikely(len > 0x700)) { + pio_rx_error(queue, 0, "len > 0x700"); + return; + } + if (unlikely(len == 0 && queue->mmio_base != + B43legacy_MMIO_PIO4_BASE)) { + pio_rx_error(queue, 0, "len == 0"); + return; + } + preamble[0] = cpu_to_le16(len); + if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) + preamble_readwords = 14 / sizeof(u16); + else + preamble_readwords = 18 / sizeof(u16); + for (i = 0; i < preamble_readwords; i++) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + preamble[i + 1] = cpu_to_le16(tmp); + } + rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble; + macstat = le16_to_cpu(rxhdr->mac_status); + if (macstat & B43legacy_RX_MAC_FCSERR) { + pio_rx_error(queue, + (queue->mmio_base == B43legacy_MMIO_PIO1_BASE), + "Frame FCS error"); + return; + } + if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) { + /* We received an xmit status. 
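+ * This queue delivers hardware TX status reports rather than received
+ * frames, so pass the payload to the TX status handler and stop here.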
*/ + struct b43legacy_hwtxstatus *hw; + + hw = (struct b43legacy_hwtxstatus *)(preamble + 1); + b43legacy_handle_hwtxstatus(queue->dev, hw); + + return; + } + + skb = dev_alloc_skb(len); + if (unlikely(!skb)) { + pio_rx_error(queue, 1, "OOM"); + return; + } + skb_put(skb, len); + for (i = 0; i < len - 1; i += 2) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp); + } + if (len % 2) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + skb->data[len - 1] = (tmp & 0x00FF); + } + b43legacy_rx(queue->dev, skb, rxhdr); +} + +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) +{ + b43legacy_power_saving_ctl_bits(queue->dev, -1, 1); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) + | B43legacy_PIO_TXCTL_SUSPEND); +} + +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) +{ + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) + & ~B43legacy_PIO_TXCTL_SUSPEND); + b43legacy_power_saving_ctl_bits(queue->dev, -1, -1); + tasklet_schedule(&queue->txtask); +} + +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + B43legacy_WARN_ON(!b43legacy_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 1; + pio->queue1->tx_frozen = 1; + pio->queue2->tx_frozen = 1; + pio->queue3->tx_frozen = 1; +} + +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + B43legacy_WARN_ON(!b43legacy_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 0; + pio->queue1->tx_frozen = 0; + pio->queue2->tx_frozen = 0; + pio->queue3->tx_frozen = 0; + if (!list_empty(&pio->queue0->txqueue)) + tasklet_schedule(&pio->queue0->txtask); + if (!list_empty(&pio->queue1->txqueue)) + tasklet_schedule(&pio->queue1->txtask); + if (!list_empty(&pio->queue2->txqueue)) + tasklet_schedule(&pio->queue2->txtask); + if (!list_empty(&pio->queue3->txqueue)) + tasklet_schedule(&pio->queue3->txtask); +} diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h new file mode 100644 index 000000000000..5bfed0c40030 --- /dev/null +++ b/drivers/net/wireless/b43legacy/pio.h @@ -0,0 +1,172 @@ +#ifndef B43legacy_PIO_H_ +#define B43legacy_PIO_H_ + +#include "b43legacy.h" + +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/skbuff.h> + + +#define B43legacy_PIO_TXCTL 0x00 +#define B43legacy_PIO_TXDATA 0x02 +#define B43legacy_PIO_TXQBUFSIZE 0x04 +#define B43legacy_PIO_RXCTL 0x08 +#define B43legacy_PIO_RXDATA 0x0A + +#define B43legacy_PIO_TXCTL_WRITELO (1 << 0) +#define B43legacy_PIO_TXCTL_WRITEHI (1 << 1) +#define B43legacy_PIO_TXCTL_COMPLETE (1 << 2) +#define B43legacy_PIO_TXCTL_INIT (1 << 3) +#define B43legacy_PIO_TXCTL_SUSPEND (1 << 7) + +#define B43legacy_PIO_RXCTL_DATAAVAILABLE (1 << 0) +#define B43legacy_PIO_RXCTL_READY (1 << 1) + +/* PIO constants */ +#define B43legacy_PIO_MAXTXDEVQPACKETS 31 +#define B43legacy_PIO_TXQADJUST 80 + +/* PIO tuning knobs */ +#define B43legacy_PIO_MAXTXPACKETS 256 + + + +#ifdef CONFIG_B43LEGACY_PIO + + +struct b43legacy_pioqueue; +struct b43legacy_xmitstatus; + +struct b43legacy_pio_txpacket { + struct b43legacy_pioqueue *queue; + struct sk_buff *skb; + struct ieee80211_tx_status txstat; + struct list_head list; +}; + +#define pio_txpacket_getindex(packet) ((int)((packet) - \ + (packet)->queue->tx_packets_cache)) + +struct b43legacy_pioqueue { + struct b43legacy_wldev *dev; + u16 mmio_base; + 
+ bool tx_suspended; + bool tx_frozen; + bool need_workarounds; /* Workarounds needed for core.rev < 3 */ + + /* Adjusted size of the device internal TX buffer. */ + u16 tx_devq_size; + /* Used octets of the device internal TX buffer. */ + u16 tx_devq_used; + /* Used packet slots in the device internal TX buffer. */ + u8 tx_devq_packets; + /* Packets from the txfree list can + * be taken on incoming TX requests. + */ + struct list_head txfree; + unsigned int nr_txfree; + /* Packets on the txqueue are queued, + * but not completely written to the chip, yet. + */ + struct list_head txqueue; + /* Packets on the txrunning queue are completely + * posted to the device. We are waiting for the txstatus. + */ + struct list_head txrunning; + /* Total number or packets sent. + * (This counter can obviously wrap). + */ + unsigned int nr_tx_packets; + struct tasklet_struct txtask; + struct b43legacy_pio_txpacket + tx_packets_cache[B43legacy_PIO_MAXTXPACKETS]; +}; + +static inline +u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue, + u16 offset) +{ + return b43legacy_read16(queue->dev, queue->mmio_base + offset); +} + +static inline +void b43legacy_pio_write(struct b43legacy_pioqueue *queue, + u16 offset, u16 value) +{ + b43legacy_write16(queue->dev, queue->mmio_base + offset, value); + mmiowb(); +} + + +int b43legacy_pio_init(struct b43legacy_wldev *dev); +void b43legacy_pio_free(struct b43legacy_wldev *dev); + +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl); +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); +void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats); +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue); + +/* Suspend TX queue in hardware. */ +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue); +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue); +/* Suspend (freeze) the TX tasklet (software level). 
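+ * Unlike b43legacy_pio_tx_suspend() above, this does not touch the
+ * hardware; it only stops the driver from feeding further packets.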
*/ +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev); +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev); + +#else /* CONFIG_B43LEGACY_PIO */ + +static inline +int b43legacy_pio_init(struct b43legacy_wldev *dev) +{ + return 0; +} +static inline +void b43legacy_pio_free(struct b43legacy_wldev *dev) +{ +} +static inline +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ +} +static inline +void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) +{ +} +static inline +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) +{ +} + +#endif /* CONFIG_B43LEGACY_PIO */ +#endif /* B43legacy_PIO_H_ */ diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c new file mode 100644 index 000000000000..a361dee664af --- /dev/null +++ b/drivers/net/wireless/b43legacy/radio.c @@ -0,0 +1,2158 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mbuesch@freenet.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <linux/delay.h> + +#include "b43legacy.h" +#include "main.h" +#include "phy.h" +#include "radio.h" +#include "ilt.h" + + +/* Table for b43legacy_radio_calibrationvalue() */ +static const u16 rcc_table[16] = { + 0x0002, 0x0003, 0x0001, 0x000F, + 0x0006, 0x0007, 0x0005, 0x000F, + 0x000A, 0x000B, 0x0009, 0x000F, + 0x000E, 0x000F, 0x000D, 0x000F, +}; + +/* Reverse the bits of a 4bit value. + * Example: 1101 is flipped 1011 + */ +static u16 flip_4bit(u16 value) +{ + u16 flipped = 0x0000; + + B43legacy_BUG_ON(!((value & ~0x000F) == 0x0000)); + + flipped |= (value & 0x0001) << 3; + flipped |= (value & 0x0002) << 1; + flipped |= (value & 0x0004) >> 1; + flipped |= (value & 0x0008) >> 3; + + return flipped; +} + +/* Get the freq, as it has to be written to the device. 
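+ * Example: channel 6 maps to 2400 MHz + frequencies_bg[5] = 2400 + 37
+ * = 2437 MHz, so the value 37 is what gets written.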
*/ +static inline +u16 channel2freq_bg(u8 channel) +{ + /* Frequencies are given as frequencies_bg[index] + 2.4GHz + * Starting with channel 1 + */ + static const u16 frequencies_bg[14] = { + 12, 17, 22, 27, + 32, 37, 42, 47, + 52, 57, 62, 67, + 72, 84, + }; + + if (unlikely(channel < 1 || channel > 14)) { + printk(KERN_INFO "b43legacy: Channel %d is out of range\n", + channel); + dump_stack(); + return 2412; + } + + return frequencies_bg[channel - 1]; +} + +void b43legacy_radio_lock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + status |= B43legacy_SBF_RADIOREG_LOCK; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + mmiowb(); + udelay(10); +} + +void b43legacy_radio_unlock(struct b43legacy_wldev *dev) +{ + u32 status; + + b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); /* dummy read */ + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + status &= ~B43legacy_SBF_RADIOREG_LOCK; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + mmiowb(); +} + +u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset) +{ + struct b43legacy_phy *phy = &dev->phy; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + if (phy->radio_ver == 0x2053) { + if (offset < 0x70) + offset += 0x80; + else if (offset < 0x80) + offset += 0x70; + } else if (phy->radio_ver == 0x2050) + offset |= 0x80; + else + B43legacy_WARN_ON(1); + break; + case B43legacy_PHYTYPE_G: + offset |= 0x80; + break; + default: + B43legacy_BUG_ON(1); + } + + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); + return b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); +} + +void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val); +} + +static void b43legacy_set_all_gains(struct b43legacy_wldev *dev, + s16 first, s16 second, s16 third) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + u16 start = 0x08; + u16 end = 0x18; + u16 offset = 0x0400; + u16 tmp; + + if (phy->rev <= 1) { + offset = 0x5000; + start = 0x10; + end = 0x20; + } + + for (i = 0; i < 4; i++) + b43legacy_ilt_write(dev, offset + i, first); + + for (i = start; i < end; i++) + b43legacy_ilt_write(dev, offset + i, second); + + if (third != -1) { + tmp = ((u16)third << 14) | ((u16)third << 6); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) + | tmp); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) + | tmp); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) + | tmp); + } + b43legacy_dummy_transmission(dev); +} + +static void b43legacy_set_original_gains(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + u16 tmp; + u16 offset = 0x0400; + u16 start = 0x0008; + u16 end = 0x0018; + + if (phy->rev <= 1) { + offset = 0x5000; + start = 0x0010; + end = 0x0020; + } + + for (i = 0; i < 4; i++) { + tmp = (i & 0xFFFC); + tmp |= (i & 0x0001) << 1; + tmp |= (i & 0x0002) >> 1; + + b43legacy_ilt_write(dev, offset + i, tmp); + } + + for (i = start; i < end; i++) + b43legacy_ilt_write(dev, offset + i, i - start); + + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) + | 0x4040); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) + | 0x4040); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) + | 
0x4000); + b43legacy_dummy_transmission(dev); +} + +/* Synthetic PU workaround */ +static void b43legacy_synth_pu_workaround(struct b43legacy_wldev *dev, + u8 channel) +{ + struct b43legacy_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) + /* We do not need the workaround. */ + return; + + if (channel <= 10) + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel + 4)); + else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); + msleep(1); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); +} + +u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel) +{ + struct b43legacy_phy *phy = &dev->phy; + u8 ret = 0; + u16 saved; + u16 rssi; + u16 temp; + int i; + int j = 0; + + saved = b43legacy_phy_read(dev, 0x0403); + b43legacy_radio_selectchannel(dev, channel, 0); + b43legacy_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); + if (phy->aci_hw_rssi) + rssi = b43legacy_phy_read(dev, 0x048A) & 0x3F; + else + rssi = saved & 0x3F; + /* clamp temp to signed 5bit */ + if (rssi > 32) + rssi -= 64; + for (i = 0; i < 100; i++) { + temp = (b43legacy_phy_read(dev, 0x047F) >> 8) & 0x3F; + if (temp > 32) + temp -= 64; + if (temp < rssi) + j++; + if (j >= 20) + ret = 1; + } + b43legacy_phy_write(dev, 0x0403, saved); + + return ret; +} + +u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u8 ret[13]; + unsigned int channel = phy->channel; + unsigned int i; + unsigned int j; + unsigned int start; + unsigned int end; + unsigned long phylock_flags; + + if (!((phy->type == B43legacy_PHYTYPE_G) && (phy->rev > 0))) + return 0; + + b43legacy_phy_lock(dev, phylock_flags); + b43legacy_radio_lock(dev); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & 0xFFFC); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + & 0x7FFF); + b43legacy_set_all_gains(dev, 3, 8, 1); + + start = (channel - 5 > 0) ? channel - 5 : 1; + end = (channel + 5 < 14) ? channel + 5 : 13; + + for (i = start; i <= end; i++) { + if (abs(channel - i) > 2) + ret[i-1] = b43legacy_radio_aci_detect(dev, i); + } + b43legacy_radio_selectchannel(dev, channel, 0); + b43legacy_phy_write(dev, 0x0802, + (b43legacy_phy_read(dev, 0x0802) & 0xFFFC) + | 0x0003); + b43legacy_phy_write(dev, 0x0403, + b43legacy_phy_read(dev, 0x0403) & 0xFFF8); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x8000); + b43legacy_set_original_gains(dev); + for (i = 0; i < 13; i++) { + if (!ret[i]) + continue; + end = (i + 5 < 13) ? 
i + 5 : 13; + for (j = i; j < end; j++) + ret[j] = 1; + } + b43legacy_radio_unlock(dev); + b43legacy_phy_unlock(dev, phylock_flags); + + return ret[channel - 1]; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) +{ + u16 val; + + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); + val = b43legacy_phy_read(dev, B43legacy_PHY_NRSSILT_DATA); + + return (s16)val; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) +{ + u16 i; + s16 tmp; + + for (i = 0; i < 64; i++) { + tmp = b43legacy_nrssi_hw_read(dev, i); + tmp -= val; + tmp = limit_value(tmp, -32, 31); + b43legacy_nrssi_hw_write(dev, i, tmp); + } +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s16 i; + s16 delta; + s32 tmp; + + delta = 0x1F - phy->nrssi[0]; + for (i = 0; i < 64; i++) { + tmp = (i - delta) * phy->nrssislope; + tmp /= 0x10000; + tmp += 0x3A; + tmp = limit_value(tmp, 0, 0x3F); + phy->nrssi_lt[i] = tmp; + } +} + +static void b43legacy_calc_nrssi_offset(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[20] = { 0 }; + s16 v47F; + u16 i; + u16 saved = 0xFFFF; + + backup[0] = b43legacy_phy_read(dev, 0x0001); + backup[1] = b43legacy_phy_read(dev, 0x0811); + backup[2] = b43legacy_phy_read(dev, 0x0812); + backup[3] = b43legacy_phy_read(dev, 0x0814); + backup[4] = b43legacy_phy_read(dev, 0x0815); + backup[5] = b43legacy_phy_read(dev, 0x005A); + backup[6] = b43legacy_phy_read(dev, 0x0059); + backup[7] = b43legacy_phy_read(dev, 0x0058); + backup[8] = b43legacy_phy_read(dev, 0x000A); + backup[9] = b43legacy_phy_read(dev, 0x0003); + backup[10] = b43legacy_radio_read16(dev, 0x007A); + backup[11] = b43legacy_radio_read16(dev, 0x0043); + + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0x7FFF); + b43legacy_phy_write(dev, 0x0001, + (b43legacy_phy_read(dev, 0x0001) & 0x3FFF) + | 0x4000); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x000C); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) & 0xFFF3) + | 0x0004); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & ~(0x1 | 0x2)); + if (phy->rev >= 6) { + backup[12] = b43legacy_phy_read(dev, 0x002E); + backup[13] = b43legacy_phy_read(dev, 0x002F); + backup[14] = b43legacy_phy_read(dev, 0x080F); + backup[15] = b43legacy_phy_read(dev, 0x0810); + backup[16] = b43legacy_phy_read(dev, 0x0801); + backup[17] = b43legacy_phy_read(dev, 0x0060); + backup[18] = b43legacy_phy_read(dev, 0x0014); + backup[19] = b43legacy_phy_read(dev, 0x0478); + + b43legacy_phy_write(dev, 0x002E, 0); + b43legacy_phy_write(dev, 0x002F, 0); + b43legacy_phy_write(dev, 0x080F, 0); + b43legacy_phy_write(dev, 0x0810, 0); + b43legacy_phy_write(dev, 0x0478, + b43legacy_phy_read(dev, 0x0478) | 0x0100); + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, 0x0801) | 0x0040); + b43legacy_phy_write(dev, 0x0060, + b43legacy_phy_read(dev, 0x0060) | 0x0040); + b43legacy_phy_write(dev, 0x0014, + b43legacy_phy_read(dev, 0x0014) | 0x0200); + 
} + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | 0x0070); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | 0x0080); + udelay(30); + + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == 31) { + for (i = 7; i >= 4; i--) { + b43legacy_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) + & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F < 31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 4; + } else { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0001); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFE); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x000C); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) | 0x000C); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0030); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) | 0x0030); + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->analog == 0) + b43legacy_phy_write(dev, 0x0003, 0x0122); + else + b43legacy_phy_write(dev, 0x000A, + b43legacy_phy_read(dev, 0x000A) + | 0x2000); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0004); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFB); + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) & 0xFF9F) + | 0x0040); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x000F); + b43legacy_set_all_gains(dev, 3, 0, 1); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0x00F0) | 0x000F); + udelay(30); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == -32) { + for (i = 0; i < 4; i++) { + b43legacy_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> + 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F > -31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 3; + } else + saved = 0; + } + b43legacy_radio_write16(dev, 0x007B, saved); + + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x002E, backup[12]); + b43legacy_phy_write(dev, 0x002F, backup[13]); + b43legacy_phy_write(dev, 0x080F, backup[14]); + b43legacy_phy_write(dev, 0x0810, backup[15]); + } + b43legacy_phy_write(dev, 0x0814, backup[3]); + b43legacy_phy_write(dev, 0x0815, backup[4]); + b43legacy_phy_write(dev, 0x005A, backup[5]); + b43legacy_phy_write(dev, 0x0059, backup[6]); + b43legacy_phy_write(dev, 0x0058, backup[7]); + b43legacy_phy_write(dev, 0x000A, backup[8]); + b43legacy_phy_write(dev, 0x0003, backup[9]); + b43legacy_radio_write16(dev, 0x0043, backup[11]); + b43legacy_radio_write16(dev, 0x007A, backup[10]); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x1 | 0x2); + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) | 0x8000); + b43legacy_set_original_gains(dev); + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x0801, backup[16]); + b43legacy_phy_write(dev, 0x0060, backup[17]); + b43legacy_phy_write(dev, 0x0014, backup[18]); + b43legacy_phy_write(dev, 0x0478, backup[19]); + } + b43legacy_phy_write(dev, 0x0001, backup[0]); 
+ b43legacy_phy_write(dev, 0x0812, backup[2]); + b43legacy_phy_write(dev, 0x0811, backup[1]); +} + +void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[18] = { 0 }; + u16 tmp; + s16 nrssi0; + s16 nrssi1; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + backup[0] = b43legacy_radio_read16(dev, 0x007A); + backup[1] = b43legacy_radio_read16(dev, 0x0052); + backup[2] = b43legacy_radio_read16(dev, 0x0043); + backup[3] = b43legacy_phy_read(dev, 0x0030); + backup[4] = b43legacy_phy_read(dev, 0x0026); + backup[5] = b43legacy_phy_read(dev, 0x0015); + backup[6] = b43legacy_phy_read(dev, 0x002A); + backup[7] = b43legacy_phy_read(dev, 0x0020); + backup[8] = b43legacy_phy_read(dev, 0x005A); + backup[9] = b43legacy_phy_read(dev, 0x0059); + backup[10] = b43legacy_phy_read(dev, 0x0058); + backup[11] = b43legacy_read16(dev, 0x03E2); + backup[12] = b43legacy_read16(dev, 0x03E6); + backup[13] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + + tmp = b43legacy_radio_read16(dev, 0x007A); + tmp &= (phy->rev >= 5) ? 0x007F : 0x000F; + b43legacy_radio_write16(dev, 0x007A, tmp); + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x7F7F); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0015, + b43legacy_phy_read(dev, 0x0015) | 0x0020); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0080); + + nrssi0 = (s16)b43legacy_phy_read(dev, 0x0027); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + if (phy->analog >= 2) + b43legacy_write16(dev, 0x03E6, 0x0040); + else if (phy->analog == 0) + b43legacy_write16(dev, 0x03E6, 0x0122); + else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) & 0x2000); + b43legacy_phy_write(dev, 0x0020, 0x3F3F); + b43legacy_phy_write(dev, 0x0015, 0xF330); + b43legacy_radio_write16(dev, 0x005A, 0x0060); + b43legacy_radio_write16(dev, 0x0043, + b43legacy_radio_read16(dev, 0x0043) + & 0x00F0); + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + udelay(20); + + nrssi1 = (s16)b43legacy_phy_read(dev, 0x0027); + b43legacy_phy_write(dev, 0x0030, backup[3]); + b43legacy_radio_write16(dev, 0x007A, backup[0]); + b43legacy_write16(dev, 0x03E2, backup[11]); + b43legacy_phy_write(dev, 0x0026, backup[4]); + b43legacy_phy_write(dev, 0x0015, backup[5]); + b43legacy_phy_write(dev, 0x002A, backup[6]); + b43legacy_synth_pu_workaround(dev, phy->channel); + if (phy->analog != 0) + b43legacy_write16(dev, 0x03F4, backup[13]); + + b43legacy_phy_write(dev, 0x0020, backup[7]); + b43legacy_phy_write(dev, 0x005A, backup[8]); + b43legacy_phy_write(dev, 0x0059, backup[9]); + b43legacy_phy_write(dev, 0x0058, backup[10]); + b43legacy_radio_write16(dev, 0x0052, backup[1]); + b43legacy_radio_write16(dev, 0x0043, backup[2]); + + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + + if (nrssi0 <= -4) { + phy->nrssi[0] = nrssi0; + phy->nrssi[1] = nrssi1; + } + break; + case B43legacy_PHYTYPE_G: + if (phy->radio_rev >= 9) + return; + if (phy->radio_rev == 8) + b43legacy_calc_nrssi_offset(dev); + + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & 0xFFFC); + backup[7] = 
b43legacy_read16(dev, 0x03E2); + b43legacy_write16(dev, 0x03E2, + b43legacy_read16(dev, 0x03E2) | 0x8000); + backup[0] = b43legacy_radio_read16(dev, 0x007A); + backup[1] = b43legacy_radio_read16(dev, 0x0052); + backup[2] = b43legacy_radio_read16(dev, 0x0043); + backup[3] = b43legacy_phy_read(dev, 0x0015); + backup[4] = b43legacy_phy_read(dev, 0x005A); + backup[5] = b43legacy_phy_read(dev, 0x0059); + backup[6] = b43legacy_phy_read(dev, 0x0058); + backup[8] = b43legacy_read16(dev, 0x03E6); + backup[9] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + if (phy->rev >= 3) { + backup[10] = b43legacy_phy_read(dev, 0x002E); + backup[11] = b43legacy_phy_read(dev, 0x002F); + backup[12] = b43legacy_phy_read(dev, 0x080F); + backup[13] = b43legacy_phy_read(dev, + B43legacy_PHY_G_LO_CONTROL); + backup[14] = b43legacy_phy_read(dev, 0x0801); + backup[15] = b43legacy_phy_read(dev, 0x0060); + backup[16] = b43legacy_phy_read(dev, 0x0014); + backup[17] = b43legacy_phy_read(dev, 0x0478); + b43legacy_phy_write(dev, 0x002E, 0); + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, 0); + switch (phy->rev) { + case 4: case 6: case 7: + b43legacy_phy_write(dev, 0x0478, + b43legacy_phy_read(dev, + 0x0478) | 0x0100); + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, + 0x0801) | 0x0040); + break; + case 3: case 5: + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, + 0x0801) & 0xFFBF); + break; + } + b43legacy_phy_write(dev, 0x0060, + b43legacy_phy_read(dev, 0x0060) + | 0x0040); + b43legacy_phy_write(dev, 0x0014, + b43legacy_phy_read(dev, 0x0014) + | 0x0200); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0070); + b43legacy_set_all_gains(dev, 0, 8, 0); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x00F7); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0811, + (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0030); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0010); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0080); + udelay(20); + + nrssi0 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi0 >= 0x0020) + nrssi0 -= 0x0040; + + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + if (phy->analog >= 2) + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | 0x2000); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x000F); + b43legacy_phy_write(dev, 0x0015, 0xF330); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0020); + b43legacy_phy_write(dev, 0x0811, + (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0020); + } + + b43legacy_set_all_gains(dev, 3, 0, 1); + if (phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0043, 0x001F); + else { + tmp = b43legacy_radio_read16(dev, 0x0052) & 0xFF0F; + b43legacy_radio_write16(dev, 0x0052, tmp | 0x0060); + tmp = b43legacy_radio_read16(dev, 0x0043) & 0xFFF0; + b43legacy_radio_write16(dev, 0x0043, tmp | 0x0009); + } + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + udelay(20); + nrssi1 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi1 >= 0x0020) + nrssi1 -= 0x0040; + if (nrssi0 == nrssi1) + 
phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + if (nrssi0 >= -4) { + phy->nrssi[0] = nrssi1; + phy->nrssi[1] = nrssi0; + } + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x002E, backup[10]); + b43legacy_phy_write(dev, 0x002F, backup[11]); + b43legacy_phy_write(dev, 0x080F, backup[12]); + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, + backup[13]); + } + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + & 0xFFCF); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) + & 0xFFCF); + } + + b43legacy_radio_write16(dev, 0x007A, backup[0]); + b43legacy_radio_write16(dev, 0x0052, backup[1]); + b43legacy_radio_write16(dev, 0x0043, backup[2]); + b43legacy_write16(dev, 0x03E2, backup[7]); + b43legacy_write16(dev, 0x03E6, backup[8]); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[9]); + b43legacy_phy_write(dev, 0x0015, backup[3]); + b43legacy_phy_write(dev, 0x005A, backup[4]); + b43legacy_phy_write(dev, 0x0059, backup[5]); + b43legacy_phy_write(dev, 0x0058, backup[6]); + b43legacy_synth_pu_workaround(dev, phy->channel); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x0003); + b43legacy_set_original_gains(dev); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x8000); + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x0801, backup[14]); + b43legacy_phy_write(dev, 0x0060, backup[15]); + b43legacy_phy_write(dev, 0x0014, backup[16]); + b43legacy_phy_write(dev, 0x0478, backup[17]); + } + b43legacy_nrssi_mem_update(dev); + b43legacy_calc_nrssi_threshold(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s32 threshold; + s32 a; + s32 b; + s16 tmp16; + u16 tmp_u16; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: { + if (phy->radio_ver != 0x2050) + return; + if (!(dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_RSSI)) + return; + + if (phy->radio_rev >= 6) { + threshold = (phy->nrssi[1] - phy->nrssi[0]) * 32; + threshold += 20 * (phy->nrssi[0] + 1); + threshold /= 40; + } else + threshold = phy->nrssi[1] - 5; + + threshold = limit_value(threshold, 0, 0x3E); + b43legacy_phy_read(dev, 0x0020); /* dummy read */ + b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8) + | 0x001C); + + if (phy->radio_rev >= 6) { + b43legacy_phy_write(dev, 0x0087, 0x0E0D); + b43legacy_phy_write(dev, 0x0086, 0x0C0B); + b43legacy_phy_write(dev, 0x0085, 0x0A09); + b43legacy_phy_write(dev, 0x0084, 0x0808); + b43legacy_phy_write(dev, 0x0083, 0x0808); + b43legacy_phy_write(dev, 0x0082, 0x0604); + b43legacy_phy_write(dev, 0x0081, 0x0302); + b43legacy_phy_write(dev, 0x0080, 0x0100); + } + break; + } + case B43legacy_PHYTYPE_G: + if (!phy->gmode || + !(dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_RSSI)) { + tmp16 = b43legacy_nrssi_hw_read(dev, 0x20); + if (tmp16 >= 0x20) + tmp16 -= 0x40; + if (tmp16 < 3) + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, + 0x048A) & 0xF000) | 0x09EB); + else + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, + 0x048A) & 0xF000) | 0x0AED); + } else { + if (phy->interfmode == + B43legacy_RADIO_INTERFMODE_NONWLAN) { + a = 0xE; + b = 0xA; + } else if (!phy->aci_wlan_automatic && + phy->aci_enable) { + a = 0x13; + b = 0x12; + } else { + a = 0xE; + b = 0x11; + } + + a = a * (phy->nrssi[1] - phy->nrssi[0]); + a += (phy->nrssi[0] << 6); + if (a < 32) + a 
+= 31; + else + a += 32; + a = a >> 6; + a = limit_value(a, -31, 31); + + b = b * (phy->nrssi[1] - phy->nrssi[0]); + b += (phy->nrssi[0] << 6); + if (b < 32) + b += 31; + else + b += 32; + b = b >> 6; + b = limit_value(b, -31, 31); + + tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000; + tmp_u16 |= ((u32)b & 0x0000003F); + tmp_u16 |= (((u32)a & 0x0000003F) << 6); + b43legacy_phy_write(dev, 0x048A, tmp_u16); + } + break; + default: + B43legacy_BUG_ON(1); + } +} + +/* Stack implementation to save/restore values from the + * interference mitigation code. + * It is save to restore values in random order. + */ +static void _stack_save(u32 *_stackptr, size_t *stackidx, + u8 id, u16 offset, u16 value) +{ + u32 *stackptr = &(_stackptr[*stackidx]); + + B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); + B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); + *stackptr = offset; + *stackptr |= ((u32)id) << 13; + *stackptr |= ((u32)value) << 16; + (*stackidx)++; + B43legacy_WARN_ON(!(*stackidx < B43legacy_INTERFSTACK_SIZE)); +} + +static u16 _stack_restore(u32 *stackptr, + u8 id, u16 offset) +{ + size_t i; + + B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); + B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); + for (i = 0; i < B43legacy_INTERFSTACK_SIZE; i++, stackptr++) { + if ((*stackptr & 0x00001FFF) != offset) + continue; + if (((*stackptr & 0x00007000) >> 13) != id) + continue; + return ((*stackptr & 0xFFFF0000) >> 16); + } + B43legacy_BUG_ON(1); + + return 0; +} + +#define phy_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x1, (offset), \ + b43legacy_phy_read(dev, (offset))); \ + } while (0) +#define phy_stackrestore(offset) \ + do { \ + b43legacy_phy_write(dev, (offset), \ + _stack_restore(stack, 0x1, \ + (offset))); \ + } while (0) +#define radio_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x2, (offset), \ + b43legacy_radio_read16(dev, (offset))); \ + } while (0) +#define radio_stackrestore(offset) \ + do { \ + b43legacy_radio_write16(dev, (offset), \ + _stack_restore(stack, 0x2, \ + (offset))); \ + } while (0) +#define ilt_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x3, (offset), \ + b43legacy_ilt_read(dev, (offset))); \ + } while (0) +#define ilt_stackrestore(offset) \ + do { \ + b43legacy_ilt_write(dev, (offset), \ + _stack_restore(stack, 0x3, \ + (offset))); \ + } while (0) + +static void +b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u16 flipped; + u32 tmp32; + size_t stackidx = 0; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43legacy_RADIO_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + | 0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & ~0x4000); + break; + } + radio_stacksave(0x0078); + tmp = (b43legacy_radio_read16(dev, 0x0078) & 0x001E); + flipped = flip_4bit(tmp); + if (flipped < 10 && flipped >= 8) + flipped = 7; + else if (flipped >= 10) + flipped -= 3; + flipped = flip_4bit(flipped); + flipped = (flipped << 1) | 0x0020; + b43legacy_radio_write16(dev, 0x0078, flipped); + + b43legacy_calc_nrssi_threshold(dev); + + phy_stacksave(0x0406); + b43legacy_phy_write(dev, 0x0406, 0x7E28); + + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) | 0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) | 0x1000); + + phy_stacksave(0x04A0); 
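+	/* Each *_stacksave() packs the register offset (bits 0-12), a type id
+	 * (bits 13-15) and the current 16-bit register value (bits 16-31) into
+	 * one u32 slot of phy->interfstack; e.g. if PHY register 0x04A0 happens
+	 * to read 0x1234, the saved entry is 0x123424A0. See _stack_save() and
+	 * _stack_restore() above. */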
+ b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xC0C0) + | 0x0008); + phy_stacksave(0x04A1); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xC0C0) + | 0x0605); + phy_stacksave(0x04A2); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xC0C0) + | 0x0204); + phy_stacksave(0x04A8); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) & 0xC0C0) + | 0x0803); + phy_stacksave(0x04AB); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) & 0xC0C0) + | 0x0605); + + phy_stacksave(0x04A7); + b43legacy_phy_write(dev, 0x04A7, 0x0002); + phy_stacksave(0x04A3); + b43legacy_phy_write(dev, 0x04A3, 0x287A); + phy_stacksave(0x04A9); + b43legacy_phy_write(dev, 0x04A9, 0x2027); + phy_stacksave(0x0493); + b43legacy_phy_write(dev, 0x0493, 0x32F5); + phy_stacksave(0x04AA); + b43legacy_phy_write(dev, 0x04AA, 0x2027); + phy_stacksave(0x04AC); + b43legacy_phy_write(dev, 0x04AC, 0x32F5); + break; + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + if (b43legacy_phy_read(dev, 0x0033) & 0x0800) + break; + + phy->aci_enable = 1; + + phy_stacksave(B43legacy_PHY_RADIO_BITFIELD); + phy_stacksave(B43legacy_PHY_G_CRS); + if (phy->rev < 2) + phy_stacksave(0x0406); + else { + phy_stacksave(0x04C0); + phy_stacksave(0x04C1); + } + phy_stacksave(0x0033); + phy_stacksave(0x04A7); + phy_stacksave(0x04A3); + phy_stacksave(0x04A9); + phy_stacksave(0x04AA); + phy_stacksave(0x04AC); + phy_stacksave(0x0493); + phy_stacksave(0x04A1); + phy_stacksave(0x04A0); + phy_stacksave(0x04A2); + phy_stacksave(0x048A); + phy_stacksave(0x04A8); + phy_stacksave(0x04AB); + if (phy->rev == 2) { + phy_stacksave(0x04AD); + phy_stacksave(0x04AE); + } else if (phy->rev >= 3) { + phy_stacksave(0x04AD); + phy_stacksave(0x0415); + phy_stacksave(0x0416); + phy_stacksave(0x0417); + ilt_stacksave(0x1A00 + 0x2); + ilt_stacksave(0x1A00 + 0x3); + } + phy_stacksave(0x042B); + phy_stacksave(0x048C); + + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) & ~0x1000); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) + & 0xFFFC) | 0x0002); + + b43legacy_phy_write(dev, 0x0033, 0x0800); + b43legacy_phy_write(dev, 0x04A3, 0x2027); + b43legacy_phy_write(dev, 0x04A9, 0x1CA8); + b43legacy_phy_write(dev, 0x0493, 0x287A); + b43legacy_phy_write(dev, 0x04AA, 0x1CA8); + b43legacy_phy_write(dev, 0x04AC, 0x287A); + + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) + & 0xFFC0) | 0x001A); + b43legacy_phy_write(dev, 0x04A7, 0x000D); + + if (phy->rev < 2) + b43legacy_phy_write(dev, 0x0406, 0xFF0D); + else if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04C0, 0xFFFF); + b43legacy_phy_write(dev, 0x04C1, 0x00A9); + } else { + b43legacy_phy_write(dev, 0x04C0, 0x00C1); + b43legacy_phy_write(dev, 0x04C1, 0x0059); + } + + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) + & 0xC0FF) | 0x1800); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) + & 0xFFC0) | 0x0015); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xCFFF) | 0x1000); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xF0FF) | 0x0A00); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xCFFF) | 0x1000); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0800); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xFFCF) | 0x0010); + 
b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xFFF0) | 0x0005); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xFFCF) | 0x0010); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xFFF0) | 0x0006); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0800); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0500); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x000B); + + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x048A, + b43legacy_phy_read(dev, 0x048A) + & ~0x8000); + b43legacy_phy_write(dev, 0x0415, + (b43legacy_phy_read(dev, 0x0415) + & 0x8000) | 0x36D8); + b43legacy_phy_write(dev, 0x0416, + (b43legacy_phy_read(dev, 0x0416) + & 0x8000) | 0x36D8); + b43legacy_phy_write(dev, 0x0417, + (b43legacy_phy_read(dev, 0x0417) + & 0xFE00) | 0x016D); + } else { + b43legacy_phy_write(dev, 0x048A, + b43legacy_phy_read(dev, 0x048A) + | 0x1000); + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, 0x048A) + & 0x9FFF) | 0x2000); + tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + if (!(tmp32 & 0x800)) { + tmp32 |= 0x800; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + tmp32); + } + } + if (phy->rev >= 2) + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + | 0x0800); + b43legacy_phy_write(dev, 0x048C, + (b43legacy_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0200); + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04AE, + (b43legacy_phy_read(dev, 0x04AE) + & 0xFF00) | 0x007F); + b43legacy_phy_write(dev, 0x04AD, + (b43legacy_phy_read(dev, 0x04AD) + & 0x00FF) | 0x1300); + } else if (phy->rev >= 6) { + b43legacy_ilt_write(dev, 0x1A00 + 0x3, 0x007F); + b43legacy_ilt_write(dev, 0x1A00 + 0x2, 0x007F); + b43legacy_phy_write(dev, 0x04AD, + b43legacy_phy_read(dev, 0x04AD) + & 0x00FF); + } + b43legacy_calc_nrssi_slope(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +static void +b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 tmp32; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43legacy_RADIO_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + & ~0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) | 0x4000); + break; + } + phy_stackrestore(0x0078); + b43legacy_calc_nrssi_threshold(dev); + phy_stackrestore(0x0406); + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) & ~0x0800); + if (!dev->bad_frames_preempt) + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) + & ~(1 << 11)); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x4000); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A7); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + break; + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800)) + break; + + phy->aci_enable = 0; + + phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD); + phy_stackrestore(B43legacy_PHY_G_CRS); + 
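+		/* These restores undo the *_stacksave() calls from the enable
+		 * path; _stack_restore() looks entries up by id and offset, so
+		 * the restore order does not need to match the save order. */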
phy_stackrestore(0x0033); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A7); + if (phy->rev >= 2) { + phy_stackrestore(0x04C0); + phy_stackrestore(0x04C1); + } else + phy_stackrestore(0x0406); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A8); + if (phy->rev == 2) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x04AE); + } else if (phy->rev >= 3) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x0415); + phy_stackrestore(0x0416); + phy_stackrestore(0x0417); + ilt_stackrestore(0x1A00 + 0x2); + ilt_stackrestore(0x1A00 + 0x3); + } + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x042B); + phy_stackrestore(0x048C); + tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + if (tmp32 & 0x800) { + tmp32 &= ~0x800; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + tmp32); + } + b43legacy_calc_nrssi_slope(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +#undef phy_stacksave +#undef phy_stackrestore +#undef radio_stacksave +#undef radio_stackrestore +#undef ilt_stacksave +#undef ilt_stackrestore + +int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + int currentmode; + + if ((phy->type != B43legacy_PHYTYPE_G) || + (phy->rev == 0) || (!phy->gmode)) + return -ENODEV; + + phy->aci_wlan_automatic = 0; + switch (mode) { + case B43legacy_RADIO_INTERFMODE_AUTOWLAN: + phy->aci_wlan_automatic = 1; + if (phy->aci_enable) + mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN; + else + mode = B43legacy_RADIO_INTERFMODE_NONE; + break; + case B43legacy_RADIO_INTERFMODE_NONE: + case B43legacy_RADIO_INTERFMODE_NONWLAN: + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + break; + default: + return -EINVAL; + } + + currentmode = phy->interfmode; + if (currentmode == mode) + return 0; + if (currentmode != B43legacy_RADIO_INTERFMODE_NONE) + b43legacy_radio_interference_mitigation_disable(dev, + currentmode); + + if (mode == B43legacy_RADIO_INTERFMODE_NONE) { + phy->aci_enable = 0; + phy->aci_hw_rssi = 0; + } else + b43legacy_radio_interference_mitigation_enable(dev, mode); + phy->interfmode = mode; + + return 0; +} + +u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev) +{ + u16 reg; + u16 index; + u16 ret; + + reg = b43legacy_radio_read16(dev, 0x0060); + index = (reg & 0x001E) >> 1; + ret = rcc_table[index] << 1; + ret |= (reg & 0x0001); + ret |= 0x0020; + + return ret; +} + +#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) +static u16 b43legacy_get_812_value(struct b43legacy_wldev *dev, u8 lpd) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 loop_or = 0; + u16 adj_loopback_gain = phy->loopback_gain[0]; + u8 loop; + u16 extern_lna_control; + + if (!phy->gmode) + return 0; + if (!has_loopback_gain(phy)) { + if (phy->rev < 7 || !(dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_EXTLNA)) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0FB2; + case LPD(0, 0, 1): + return 0x00B2; + case LPD(1, 0, 1): + return 0x30B2; + case LPD(1, 0, 0): + return 0x30B3; + default: + B43legacy_BUG_ON(1); + } + } else { + switch (lpd) { + case LPD(0, 1, 1): + return 0x8FB2; + case LPD(0, 0, 1): + return 0x80B2; + case LPD(1, 0, 1): + return 0x20B2; + case LPD(1, 0, 0): + return 0x20B3; + default: + B43legacy_BUG_ON(1); + } + } + } else { + if 
(phy->radio_rev == 8) + adj_loopback_gain += 0x003E; + else + adj_loopback_gain += 0x0026; + if (adj_loopback_gain >= 0x46) { + adj_loopback_gain -= 0x46; + extern_lna_control = 0x3000; + } else if (adj_loopback_gain >= 0x3A) { + adj_loopback_gain -= 0x3A; + extern_lna_control = 0x2000; + } else if (adj_loopback_gain >= 0x2E) { + adj_loopback_gain -= 0x2E; + extern_lna_control = 0x1000; + } else { + adj_loopback_gain -= 0x10; + extern_lna_control = 0x0000; + } + for (loop = 0; loop < 16; loop++) { + u16 tmp = adj_loopback_gain - 6 * loop; + if (tmp < 6) + break; + } + + loop_or = (loop << 8) | extern_lna_control; + if (phy->rev >= 7 && dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_EXTLNA) { + if (extern_lna_control) + loop_or |= 0x8000; + switch (lpd) { + case LPD(0, 1, 1): + return 0x8F92; + case LPD(0, 0, 1): + return (0x8092 | loop_or); + case LPD(1, 0, 1): + return (0x2092 | loop_or); + case LPD(1, 0, 0): + return (0x2093 | loop_or); + default: + B43legacy_BUG_ON(1); + } + } else { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0F92; + case LPD(0, 0, 1): + case LPD(1, 0, 1): + return (0x0092 | loop_or); + case LPD(1, 0, 0): + return (0x0093 | loop_or); + default: + B43legacy_BUG_ON(1); + } + } + } + return 0; +} + +u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[21] = { 0 }; + u16 ret; + u16 i; + u16 j; + u32 tmp1 = 0; + u32 tmp2 = 0; + + backup[0] = b43legacy_radio_read16(dev, 0x0043); + backup[14] = b43legacy_radio_read16(dev, 0x0051); + backup[15] = b43legacy_radio_read16(dev, 0x0052); + backup[1] = b43legacy_phy_read(dev, 0x0015); + backup[16] = b43legacy_phy_read(dev, 0x005A); + backup[17] = b43legacy_phy_read(dev, 0x0059); + backup[18] = b43legacy_phy_read(dev, 0x0058); + if (phy->type == B43legacy_PHYTYPE_B) { + backup[2] = b43legacy_phy_read(dev, 0x0030); + backup[3] = b43legacy_read16(dev, 0x03EC); + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x3F3F); + } else { + if (phy->gmode) { + backup[4] = b43legacy_phy_read(dev, 0x0811); + backup[5] = b43legacy_phy_read(dev, 0x0812); + backup[6] = b43legacy_phy_read(dev, 0x0814); + backup[7] = b43legacy_phy_read(dev, 0x0815); + backup[8] = b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS); + backup[9] = b43legacy_phy_read(dev, 0x0802); + b43legacy_phy_write(dev, 0x0814, + (b43legacy_phy_read(dev, 0x0814) + | 0x0003)); + b43legacy_phy_write(dev, 0x0815, + (b43legacy_phy_read(dev, 0x0815) + & 0xFFFC)); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & 0x7FFF)); + b43legacy_phy_write(dev, 0x0802, + (b43legacy_phy_read(dev, 0x0802) + & 0xFFFC)); + if (phy->rev > 1) { /* loopback gain enabled */ + backup[19] = b43legacy_phy_read(dev, 0x080F); + backup[20] = b43legacy_phy_read(dev, 0x0810); + if (phy->rev >= 3) + b43legacy_phy_write(dev, 0x080F, + 0xC020); + else + b43legacy_phy_write(dev, 0x080F, + 0x8020); + b43legacy_phy_write(dev, 0x0810, 0x0000); + } + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 1, 1))); + if (phy->rev < 7 || + !(dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_EXTLNA)) + b43legacy_phy_write(dev, 0x0811, 0x01B3); + else + b43legacy_phy_write(dev, 0x0811, 0x09B3); + } + } + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, + (b43legacy_read16(dev, B43legacy_MMIO_PHY_RADIO) + | 0x8000)); + backup[10] = b43legacy_phy_read(dev, 0x0035); + b43legacy_phy_write(dev, 0x0035, + (b43legacy_phy_read(dev, 0x0035) & 0xFF7F)); + backup[11] = 
b43legacy_read16(dev, 0x03E6); + backup[12] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + + /* Initialization */ + if (phy->analog == 0) + b43legacy_write16(dev, 0x03E6, 0x0122); + else { + if (phy->analog >= 2) + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFFBF) | 0x0040); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + (b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | 0x2000)); + } + + ret = b43legacy_radio_calibrationvalue(dev); + + if (phy->type == B43legacy_PHYTYPE_B) + b43legacy_radio_write16(dev, 0x0078, 0x0026); + + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 1, 1))); + b43legacy_phy_write(dev, 0x0015, 0xBFAF); + b43legacy_phy_write(dev, 0x002B, 0x1403); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xBFA0); + b43legacy_radio_write16(dev, 0x0051, + (b43legacy_radio_read16(dev, 0x0051) + | 0x0004)); + if (phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0043, 0x001F); + else { + b43legacy_radio_write16(dev, 0x0052, 0x0000); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0xFFF0) | 0x0009); + } + b43legacy_phy_write(dev, 0x0058, 0x0000); + + for (i = 0; i < 16; i++) { + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xEFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 0))); + b43legacy_phy_write(dev, 0x0015, 0xFFF0); + udelay(20); + tmp1 += b43legacy_phy_read(dev, 0x002D); + b43legacy_phy_write(dev, 0x0058, 0x0000); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + } + + tmp1++; + tmp1 >>= 9; + udelay(10); + b43legacy_phy_write(dev, 0x0058, 0x0000); + + for (i = 0; i < 16; i++) { + b43legacy_radio_write16(dev, 0x0078, (flip_4bit(i) << 1) + | 0x0020); + backup[13] = b43legacy_radio_read16(dev, 0x0078); + udelay(10); + for (j = 0; j < 16; j++) { + b43legacy_phy_write(dev, 0x005A, 0x0D80); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xEFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 0))); + b43legacy_phy_write(dev, 0x0015, 0xFFF0); + udelay(10); + tmp2 += b43legacy_phy_read(dev, 0x002D); + b43legacy_phy_write(dev, 0x0058, 0x0000); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + } + tmp2++; + tmp2 >>= 8; + if (tmp1 < tmp2) + break; + } + + /* Restore the registers */ + b43legacy_phy_write(dev, 0x0015, backup[1]); + b43legacy_radio_write16(dev, 0x0051, backup[14]); + b43legacy_radio_write16(dev, 0x0052, backup[15]); + 
b43legacy_radio_write16(dev, 0x0043, backup[0]); + b43legacy_phy_write(dev, 0x005A, backup[16]); + b43legacy_phy_write(dev, 0x0059, backup[17]); + b43legacy_phy_write(dev, 0x0058, backup[18]); + b43legacy_write16(dev, 0x03E6, backup[11]); + if (phy->analog != 0) + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[12]); + b43legacy_phy_write(dev, 0x0035, backup[10]); + b43legacy_radio_selectchannel(dev, phy->channel, 1); + if (phy->type == B43legacy_PHYTYPE_B) { + b43legacy_phy_write(dev, 0x0030, backup[2]); + b43legacy_write16(dev, 0x03EC, backup[3]); + } else { + if (phy->gmode) { + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, + (b43legacy_read16(dev, + B43legacy_MMIO_PHY_RADIO) & 0x7FFF)); + b43legacy_phy_write(dev, 0x0811, backup[4]); + b43legacy_phy_write(dev, 0x0812, backup[5]); + b43legacy_phy_write(dev, 0x0814, backup[6]); + b43legacy_phy_write(dev, 0x0815, backup[7]); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + backup[8]); + b43legacy_phy_write(dev, 0x0802, backup[9]); + if (phy->rev > 1) { + b43legacy_phy_write(dev, 0x080F, backup[19]); + b43legacy_phy_write(dev, 0x0810, backup[20]); + } + } + } + if (i >= 15) + ret = backup[13]; + + return ret; +} + +static inline +u16 freq_r3A_value(u16 frequency) +{ + u16 value; + + if (frequency < 5091) + value = 0x0040; + else if (frequency < 5321) + value = 0x0000; + else if (frequency < 5806) + value = 0x0080; + else + value = 0x0040; + + return value; +} + +void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev) +{ + static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; + static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; + u16 tmp = b43legacy_radio_read16(dev, 0x001E); + int i; + int j; + + for (i = 0; i < 5; i++) { + for (j = 0; j < 5; j++) { + if (tmp == (data_high[i] | data_low[j])) { + b43legacy_phy_write(dev, 0x0069, (i - j) << 8 | + 0x00C0); + return; + } + } + } +} + +int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, + u8 channel, + int synthetic_pu_workaround) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (channel == 0xFF) { + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + channel = B43legacy_RADIO_DEFAULT_CHANNEL_BG; + break; + default: + B43legacy_WARN_ON(1); + } + } + +/* TODO: Check if channel is valid - return -EINVAL if not */ + if (synthetic_pu_workaround) + b43legacy_synth_pu_workaround(dev, channel); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); + + if (channel == 14) { + if (dev->dev->bus->sprom.r1.country_code == 5) /* JAPAN) */ + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + & ~(1 << 7)); + else + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + | (1 << 7)); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | (1 << 11)); + } else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) & 0xF7BF); + + phy->channel = channel; + /*XXX: Using the longer of 2 timeouts (8000 vs 2000 usecs). Specs states + * that 2000 usecs might suffice. 
*/ + msleep(8); + + return 0; +} + +void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val) +{ + u16 tmp; + + val <<= 8; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0022) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0022, tmp | val); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x03A8) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x03A8, tmp | val); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0054) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0054, tmp | val); +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */ +static u16 b43legacy_get_txgain_base_band(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = 2; + else if (txpower >= 49) + ret = 4; + else if (txpower >= 44) + ret = 5; + else + ret = 6; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */ +static u16 b43legacy_get_txgain_freq_power_amp(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 32) + ret = 0; + else if (txpower >= 25) + ret = 1; + else if (txpower >= 20) + ret = 2; + else if (txpower >= 12) + ret = 3; + else + ret = 4; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */ +static u16 b43legacy_get_txgain_dac(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = txpower - 53; + else if (txpower >= 49) + ret = txpower - 42; + else if (txpower >= 44) + ret = txpower - 37; + else if (txpower >= 32) + ret = txpower - 32; + else if (txpower >= 25) + ret = txpower - 20; + else if (txpower >= 20) + ret = txpower - 13; + else if (txpower >= 12) + ret = txpower - 8; + else + ret = txpower; + + return ret; +} + +void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 pamp; + u16 base; + u16 dac; + u16 ilt; + + txpower = limit_value(txpower, 0, 63); + + pamp = b43legacy_get_txgain_freq_power_amp(txpower); + pamp <<= 5; + pamp &= 0x00E0; + b43legacy_phy_write(dev, 0x0019, pamp); + + base = b43legacy_get_txgain_base_band(txpower); + base &= 0x000F; + b43legacy_phy_write(dev, 0x0017, base | 0x0020); + + ilt = b43legacy_ilt_read(dev, 0x3001); + ilt &= 0x0007; + + dac = b43legacy_get_txgain_dac(txpower); + dac <<= 3; + dac |= ilt; + + b43legacy_ilt_write(dev, 0x3001, dac); + + phy->txpwr_offset = txpower; + + /* TODO: FuncPlaceholder (Adjust BB loft cancel) */ +} + +void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, + u16 baseband_attenuation, + u16 radio_attenuation, + u16 txpower) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (baseband_attenuation == 0xFFFF) + baseband_attenuation = phy->bbatt; + if (radio_attenuation == 0xFFFF) + radio_attenuation = phy->rfatt; + if (txpower == 0xFFFF) + txpower = phy->txctl1; + phy->bbatt = baseband_attenuation; + phy->rfatt = radio_attenuation; + phy->txctl1 = txpower; + + B43legacy_WARN_ON(baseband_attenuation > 11); + if (phy->radio_rev < 6) + B43legacy_WARN_ON(radio_attenuation > 9); + else + B43legacy_WARN_ON(radio_attenuation > 31); + B43legacy_WARN_ON(txpower > 7); + + b43legacy_phy_set_baseband_attenuation(dev, baseband_attenuation); + b43legacy_radio_write16(dev, 0x0043, radio_attenuation); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0064, + radio_attenuation); + if (phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, 0x0052) + & 
~0x0070) | ((txpower << 4) & 0x0070)); + /* FIXME: The spec is very weird and unclear here. */ + if (phy->type == B43legacy_PHYTYPE_G) + b43legacy_phy_lo_adjust(dev, 0); +} + +u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) + return 0; + return 2; +} + +u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 att = 0xFFFF; + + switch (phy->radio_ver) { + case 0x2053: + switch (phy->radio_rev) { + case 1: + att = 6; + break; + } + break; + case 0x2050: + switch (phy->radio_rev) { + case 0: + att = 5; + break; + case 1: + if (phy->type == B43legacy_PHYTYPE_G) { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 3; + else if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x416) + att = 3; + else + att = 1; + } else { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 7; + else + att = 6; + } + break; + case 2: + if (phy->type == B43legacy_PHYTYPE_G) { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 3; + else if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == + 0x416) + att = 5; + else if (dev->dev->bus->chip_id == 0x4320) + att = 4; + else + att = 3; + } else + att = 6; + break; + case 3: + att = 5; + break; + case 4: + case 5: + att = 1; + break; + case 6: + case 7: + att = 5; + break; + case 8: + att = 0x1A; + break; + case 9: + default: + att = 5; + } + } + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421) { + if (dev->dev->bus->boardinfo.rev < 0x43) + att = 2; + else if (dev->dev->bus->boardinfo.rev < 0x51) + att = 3; + } + if (att == 0xFFFF) + att = 5; + + return att; +} + +u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->radio_ver != 0x2050) + return 0; + if (phy->radio_rev == 1) + return 3; + if (phy->radio_rev < 6) + return 2; + if (phy->radio_rev == 8) + return 1; + return 0; +} + +void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err; + u8 channel; + + might_sleep(); + + if (phy->radio_on) + return; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + b43legacy_phy_write(dev, 0x0015, 0x8000); + b43legacy_phy_write(dev, 0x0015, 0xCC00); + b43legacy_phy_write(dev, 0x0015, + (phy->gmode ? 0x00C0 : 0x0000)); + if (phy->radio_off_context.valid) { + /* Restore the RFover values. 
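+			 * They were saved into phy->radio_off_context by
+			 * b43legacy_radio_turn_off() below, just before the
+			 * radio was shut down.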
*/ + b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, + phy->radio_off_context.rfover); + b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, + phy->radio_off_context.rfoverval); + phy->radio_off_context.valid = 0; + } + channel = phy->channel; + err = b43legacy_radio_selectchannel(dev, + B43legacy_RADIO_DEFAULT_CHANNEL_BG, 1); + err |= b43legacy_radio_selectchannel(dev, channel, 0); + B43legacy_WARN_ON(err); + break; + default: + B43legacy_BUG_ON(1); + } + phy->radio_on = 1; + b43legacy_leds_update(dev, 0); +} + +void b43legacy_radio_turn_off(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->type == B43legacy_PHYTYPE_G && dev->dev->id.revision >= 5) { + u16 rfover, rfoverval; + + rfover = b43legacy_phy_read(dev, B43legacy_PHY_RFOVER); + rfoverval = b43legacy_phy_read(dev, B43legacy_PHY_RFOVERVAL); + phy->radio_off_context.rfover = rfover; + phy->radio_off_context.rfoverval = rfoverval; + phy->radio_off_context.valid = 1; + b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C); + b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, + rfoverval & 0xFF73); + } else + b43legacy_phy_write(dev, 0x0015, 0xAA00); + phy->radio_on = 0; + b43legacydbg(dev->wl, "Radio initialized\n"); + b43legacy_leds_update(dev, 0); +} + +void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0058, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x005a, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0070, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0072, + 0x7F7F); + break; + } +} diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/b43legacy/radio.h new file mode 100644 index 000000000000..6c6a203439e1 --- /dev/null +++ b/drivers/net/wireless/b43legacy/radio.h @@ -0,0 +1,98 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, + Stefano Brivio <st3@riseup.net> + Michael Buesch <mbuesch@freenet.de> + Danny van Dyk <kugelfang@gentoo.org> + Andreas Jaggi <andreas.jaggi@waterwave.ch> + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_RADIO_H_ +#define B43legacy_RADIO_H_ + +#include "b43legacy.h" + + +#define B43legacy_RADIO_DEFAULT_CHANNEL_BG 6 + +/* Force antenna 0. */ +#define B43legacy_RADIO_TXANTENNA_0 0 +/* Force antenna 1. */ +#define B43legacy_RADIO_TXANTENNA_1 1 +/* Use the RX antenna, that was selected for the most recently + * received good PLCP header. 
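 * (b43legacy_radio_set_txantenna() shifts the chosen value left by 8
 * and merges it into bits 8-9 of three shared-memory control words,
 * so e.g. the LASTPLCP setting of 3 ends up as 0x0300 there.)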
+ */ +#define B43legacy_RADIO_TXANTENNA_LASTPLCP 3 +#define B43legacy_RADIO_TXANTENNA_DEFAULT B43legacy_RADIO_TXANTENNA_LASTPLCP + +#define B43legacy_RADIO_INTERFMODE_NONE 0 +#define B43legacy_RADIO_INTERFMODE_NONWLAN 1 +#define B43legacy_RADIO_INTERFMODE_MANUALWLAN 2 +#define B43legacy_RADIO_INTERFMODE_AUTOWLAN 3 + + +void b43legacy_radio_lock(struct b43legacy_wldev *dev); +void b43legacy_radio_unlock(struct b43legacy_wldev *dev); + +u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val); + +u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev); + +void b43legacy_radio_turn_on(struct b43legacy_wldev *dev); +void b43legacy_radio_turn_off(struct b43legacy_wldev *dev); + +int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, u8 channel, + int synthetic_pu_workaround); + +void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower); +void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, + u16 baseband_attenuation, u16 attenuation, + u16 txpower); + +u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev); +u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev); +u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev); + +void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val); + +void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev); + +u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel); +u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev); + +int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, + int mode); + +void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev); +void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev); +s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val); +void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val); +void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev); + +void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev); +u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev); + +#endif /* B43legacy_RADIO_H_ */ diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c new file mode 100644 index 000000000000..56c384fa9b1f --- /dev/null +++ b/drivers/net/wireless/b43legacy/sysfs.c @@ -0,0 +1,238 @@ +/* + + Broadcom B43legacy wireless driver + + SYSFS support routines + + Copyright (c) 2006 Michael Buesch <mb@bu3sch.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "sysfs.h" +#include "b43legacy.h" +#include "main.h" +#include "phy.h" +#include "radio.h" + +#include <linux/capability.h> + + +#define GENERIC_FILESIZE 64 + + +static int get_integer(const char *buf, size_t count) +{ + char tmp[10 + 1] = { 0 }; + int ret = -EINVAL; + + if (count == 0) + goto out; + count = min(count, (size_t)10); + memcpy(tmp, buf, count); + ret = simple_strtol(tmp, NULL, 10); +out: + return ret; +} + +static int get_boolean(const char *buf, size_t count) +{ + if (count != 0) { + if (buf[0] == '1') + return 1; + if (buf[0] == '0') + return 0; + if (count >= 4 && memcmp(buf, "true", 4) == 0) + return 1; + if (count >= 5 && memcmp(buf, "false", 5) == 0) + return 0; + if (count >= 3 && memcmp(buf, "yes", 3) == 0) + return 1; + if (count >= 2 && memcmp(buf, "no", 2) == 0) + return 0; + if (count >= 2 && memcmp(buf, "on", 2) == 0) + return 1; + if (count >= 3 && memcmp(buf, "off", 3) == 0) + return 0; + } + return -EINVAL; +} + +static ssize_t b43legacy_attr_interfmode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + ssize_t count = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + switch (wldev->phy.interfmode) { + case B43legacy_INTERFMODE_NONE: + count = snprintf(buf, PAGE_SIZE, "0 (No Interference" + " Mitigation)\n"); + break; + case B43legacy_INTERFMODE_NONWLAN: + count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference" + " Mitigation)\n"); + break; + case B43legacy_INTERFMODE_MANUALWLAN: + count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference" + " Mitigation)\n"); + break; + default: + B43legacy_WARN_ON(1); + } + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43legacy_attr_interfmode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + unsigned long flags; + int err; + int mode; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mode = get_integer(buf, count); + switch (mode) { + case 0: + mode = B43legacy_INTERFMODE_NONE; + break; + case 1: + mode = B43legacy_INTERFMODE_NONWLAN; + break; + case 2: + mode = B43legacy_INTERFMODE_MANUALWLAN; + break; + case 3: + mode = B43legacy_INTERFMODE_AUTOWLAN; + break; + default: + return -EINVAL; + } + + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + err = b43legacy_radio_set_interference_mitigation(wldev, mode); + if (err) + b43legacyerr(wldev->wl, "Interference Mitigation not " + "supported by device\n"); + mmiowb(); + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return err ? 
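/* Usual sysfs store() convention: report the whole write as consumed by
 * returning count on success, or pass the negative errno back to the
 * caller when the radio code rejected the requested mode. */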
err : count; +} + +static DEVICE_ATTR(interference, 0644, + b43legacy_attr_interfmode_show, + b43legacy_attr_interfmode_store); + +static ssize_t b43legacy_attr_preamble_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + ssize_t count; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + if (wldev->short_preamble) + count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble" + " enabled)\n"); + else + count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble" + " disabled)\n"); + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43legacy_attr_preamble_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + unsigned long flags; + int value; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + value = get_boolean(buf, count); + if (value < 0) + return value; + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + wldev->short_preamble = !!value; + + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static DEVICE_ATTR(shortpreamble, 0644, + b43legacy_attr_preamble_show, + b43legacy_attr_preamble_store); + +int b43legacy_sysfs_register(struct b43legacy_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + int err; + + B43legacy_WARN_ON(b43legacy_status(wldev) != + B43legacy_STAT_INITIALIZED); + + err = device_create_file(dev, &dev_attr_interference); + if (err) + goto out; + err = device_create_file(dev, &dev_attr_shortpreamble); + if (err) + goto err_remove_interfmode; + +out: + return err; +err_remove_interfmode: + device_remove_file(dev, &dev_attr_interference); + goto out; +} + +void b43legacy_sysfs_unregister(struct b43legacy_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + device_remove_file(dev, &dev_attr_shortpreamble); + device_remove_file(dev, &dev_attr_interference); +} diff --git a/drivers/net/wireless/b43legacy/sysfs.h b/drivers/net/wireless/b43legacy/sysfs.h new file mode 100644 index 000000000000..417d509803c7 --- /dev/null +++ b/drivers/net/wireless/b43legacy/sysfs.h @@ -0,0 +1,9 @@ +#ifndef B43legacy_SYSFS_H_ +#define B43legacy_SYSFS_H_ + +struct b43legacy_wldev; + +int b43legacy_sysfs_register(struct b43legacy_wldev *dev); +void b43legacy_sysfs_unregister(struct b43legacy_wldev *dev); + +#endif /* B43legacy_SYSFS_H_ */ diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c new file mode 100644 index 000000000000..fa1e65687a63 --- /dev/null +++ b/drivers/net/wireless/b43legacy/xmit.c @@ -0,0 +1,642 @@ +/* + + Broadcom B43legacy wireless driver + + Transmission (TX/RX) related functions. + + Copyright (C) 2005 Martin Langer <martin-langer@gmx.de> + Copyright (C) 2005 Stefano Brivio <st3@riseup.net> + Copyright (C) 2005, 2006 Michael Buesch <mb@bu3sch.de> + Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org> + Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> + Copyright (C) 2007 Larry Finger <Larry.Finger@lwfinger.net> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <net/dst.h> + +#include "xmit.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" + + +/* Extract the bitrate out of a CCK PLCP header. */ +static u8 b43legacy_plcp_get_bitrate_cck(struct b43legacy_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0]) { + case 0x0A: + return B43legacy_CCK_RATE_1MB; + case 0x14: + return B43legacy_CCK_RATE_2MB; + case 0x37: + return B43legacy_CCK_RATE_5MB; + case 0x6E: + return B43legacy_CCK_RATE_11MB; + } + B43legacy_BUG_ON(1); + return 0; +} + +/* Extract the bitrate out of an OFDM PLCP header. */ +static u8 b43legacy_plcp_get_bitrate_ofdm(struct b43legacy_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0] & 0xF) { + case 0xB: + return B43legacy_OFDM_RATE_6MB; + case 0xF: + return B43legacy_OFDM_RATE_9MB; + case 0xA: + return B43legacy_OFDM_RATE_12MB; + case 0xE: + return B43legacy_OFDM_RATE_18MB; + case 0x9: + return B43legacy_OFDM_RATE_24MB; + case 0xD: + return B43legacy_OFDM_RATE_36MB; + case 0x8: + return B43legacy_OFDM_RATE_48MB; + case 0xC: + return B43legacy_OFDM_RATE_54MB; + } + B43legacy_BUG_ON(1); + return 0; +} + +u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate) +{ + switch (bitrate) { + case B43legacy_CCK_RATE_1MB: + return 0x0A; + case B43legacy_CCK_RATE_2MB: + return 0x14; + case B43legacy_CCK_RATE_5MB: + return 0x37; + case B43legacy_CCK_RATE_11MB: + return 0x6E; + } + B43legacy_BUG_ON(1); + return 0; +} + +u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate) +{ + switch (bitrate) { + case B43legacy_OFDM_RATE_6MB: + return 0xB; + case B43legacy_OFDM_RATE_9MB: + return 0xF; + case B43legacy_OFDM_RATE_12MB: + return 0xA; + case B43legacy_OFDM_RATE_18MB: + return 0xE; + case B43legacy_OFDM_RATE_24MB: + return 0x9; + case B43legacy_OFDM_RATE_36MB: + return 0xD; + case B43legacy_OFDM_RATE_48MB: + return 0x8; + case B43legacy_OFDM_RATE_54MB: + return 0xC; + } + B43legacy_BUG_ON(1); + return 0; +} + +void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate) +{ + __le32 *data = &(plcp->data); + __u8 *raw = plcp->raw; + + if (b43legacy_is_ofdm_rate(bitrate)) { + u16 d; + + d = b43legacy_plcp_get_ratecode_ofdm(bitrate); + B43legacy_WARN_ON(octets & 0xF000); + d |= (octets << 5); + *data = cpu_to_le32(d); + } else { + u32 plen; + + plen = octets * 16 / bitrate; + if ((octets * 16 % bitrate) > 0) { + plen++; + if ((bitrate == B43legacy_CCK_RATE_11MB) + && ((octets * 8 % 11) < 4)) + raw[1] = 0x84; + else + raw[1] = 0x04; + } else + raw[1] = 0x04; + *data |= cpu_to_le32(plen << 16); + raw[0] = b43legacy_plcp_get_ratecode_cck(bitrate); + } +} + +static u8 b43legacy_calc_fallback_rate(u8 bitrate) +{ + switch (bitrate) { + case B43legacy_CCK_RATE_1MB: + return B43legacy_CCK_RATE_1MB; + case B43legacy_CCK_RATE_2MB: + return B43legacy_CCK_RATE_1MB; + case B43legacy_CCK_RATE_5MB: + return B43legacy_CCK_RATE_2MB; + case B43legacy_CCK_RATE_11MB: + return B43legacy_CCK_RATE_5MB; + case B43legacy_OFDM_RATE_6MB: + return B43legacy_CCK_RATE_5MB; + case B43legacy_OFDM_RATE_9MB: + return B43legacy_OFDM_RATE_6MB; + case 
B43legacy_OFDM_RATE_12MB: + return B43legacy_OFDM_RATE_9MB; + case B43legacy_OFDM_RATE_18MB: + return B43legacy_OFDM_RATE_12MB; + case B43legacy_OFDM_RATE_24MB: + return B43legacy_OFDM_RATE_18MB; + case B43legacy_OFDM_RATE_36MB: + return B43legacy_OFDM_RATE_24MB; + case B43legacy_OFDM_RATE_48MB: + return B43legacy_OFDM_RATE_36MB; + case B43legacy_OFDM_RATE_54MB: + return B43legacy_OFDM_RATE_48MB; + } + B43legacy_BUG_ON(1); + return 0; +} + +static void generate_txhdr_fw3(struct b43legacy_wldev *dev, + struct b43legacy_txhdr_fw3 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie) +{ + const struct ieee80211_hdr *wlhdr; + int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); + u16 fctl; + u8 rate; + u8 rate_fb; + int rate_ofdm; + int rate_fb_ofdm; + unsigned int plcp_fragment_len; + u32 mac_ctl = 0; + u16 phy_ctl = 0; + + wlhdr = (const struct ieee80211_hdr *)fragment_data; + fctl = le16_to_cpu(wlhdr->frame_control); + + memset(txhdr, 0, sizeof(*txhdr)); + + rate = txctl->tx_rate; + rate_ofdm = b43legacy_is_ofdm_rate(rate); + rate_fb = (txctl->alt_retry_rate == -1) ? rate : txctl->alt_retry_rate; + rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb); + + txhdr->mac_frame_ctl = wlhdr->frame_control; + memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); + + /* Calculate duration for fallback rate */ + if ((rate_fb == rate) || + (wlhdr->duration_id & cpu_to_le16(0x8000)) || + (wlhdr->duration_id == cpu_to_le16(0))) { + /* If the fallback rate equals the normal rate or the + * dur_id field contains an AID, CFP magic or 0, + * use the original dur_id field. */ + txhdr->dur_fb = wlhdr->duration_id; + } else { + int fbrate_base100kbps = B43legacy_RATE_TO_100KBPS(rate_fb); + txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + fragment_len, + fbrate_base100kbps); + } + + plcp_fragment_len = fragment_len + FCS_LEN; + if (use_encryption) { + u8 key_idx = (u16)(txctl->key_idx); + struct b43legacy_key *key; + int wlhdr_len; + size_t iv_len; + + B43legacy_WARN_ON(key_idx >= dev->max_nr_keys); + key = &(dev->key[key_idx]); + + if (key->enabled) { + /* Hardware appends ICV. 
*/ + plcp_fragment_len += txctl->icv_len; + + key_idx = b43legacy_kidx_to_fw(dev, key_idx); + mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) & + B43legacy_TX4_MAC_KEYIDX; + mac_ctl |= (key->algorithm << + B43legacy_TX4_MAC_KEYALG_SHIFT) & + B43legacy_TX4_MAC_KEYALG; + wlhdr_len = ieee80211_get_hdrlen(fctl); + iv_len = min((size_t)txctl->iv_len, + ARRAY_SIZE(txhdr->iv)); + memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); + } + } + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->plcp), plcp_fragment_len, + rate); + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->plcp_fb), plcp_fragment_len, + rate_fb); + + /* PHY TX Control word */ + if (rate_ofdm) + phy_ctl |= B43legacy_TX4_PHY_OFDM; + if (dev->short_preamble) + phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; + switch (txctl->antenna_sel_tx) { + case 0: + phy_ctl |= B43legacy_TX4_PHY_ANTLAST; + break; + case 1: + phy_ctl |= B43legacy_TX4_PHY_ANT0; + break; + case 2: + phy_ctl |= B43legacy_TX4_PHY_ANT1; + break; + default: + B43legacy_BUG_ON(1); + } + + /* MAC control */ + if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) + mac_ctl |= B43legacy_TX4_MAC_ACK; + if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) + mac_ctl |= B43legacy_TX4_MAC_HWSEQ; + if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) + mac_ctl |= B43legacy_TX4_MAC_STMSDU; + if (rate_fb_ofdm) + mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM; + + /* Generate the RTS or CTS-to-self frame */ + if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || + (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { + unsigned int len; + struct ieee80211_hdr *hdr; + int rts_rate; + int rts_rate_fb; + int rts_rate_ofdm; + int rts_rate_fb_ofdm; + + rts_rate = txctl->rts_cts_rate; + rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate); + rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate); + rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb); + if (rts_rate_fb_ofdm) + mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM; + + if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + ieee80211_ctstoself_get(dev->wl->hw, + dev->wl->if_id, + fragment_data, + fragment_len, txctl, + (struct ieee80211_cts *) + (txhdr->rts_frame)); + mac_ctl |= B43legacy_TX4_MAC_SENDCTS; + len = sizeof(struct ieee80211_cts); + } else { + ieee80211_rts_get(dev->wl->hw, + dev->wl->if_id, + fragment_data, fragment_len, txctl, + (struct ieee80211_rts *) + (txhdr->rts_frame)); + mac_ctl |= B43legacy_TX4_MAC_SENDRTS; + len = sizeof(struct ieee80211_rts); + } + len += FCS_LEN; + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->rts_plcp), + len, rts_rate); + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->rts_plcp_fb), + len, rts_rate_fb); + hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); + txhdr->rts_dur_fb = hdr->duration_id; + mac_ctl |= B43legacy_TX4_MAC_LONGFRAME; + } + + /* Magic cookie */ + txhdr->cookie = cpu_to_le16(cookie); + + /* Apply the bitfields */ + txhdr->mac_ctl = cpu_to_le32(mac_ctl); + txhdr->phy_ctl = cpu_to_le16(phy_ctl); +} + +void b43legacy_generate_txhdr(struct b43legacy_wldev *dev, + u8 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie) +{ + generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, + fragment_data, fragment_len, + txctl, cookie); +} + +static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev, + u8 in_rssi, int ofdm, + int adjust_2053, int adjust_2050) 
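/* Worked example for the 0x2050 OFDM branch below (illustrative numbers,
 * C division truncating toward zero): in_rssi = 200 with adjust_2050 == 0
 * gives tmp = 200 - 256 = -56, then -56 * 73 / 64 = -63, then -63 - 3 = -66,
 * i.e. the raw hardware byte is reported as a signal level of about -66. */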
+{ + struct b43legacy_phy *phy = &dev->phy; + s32 tmp; + + switch (phy->radio_ver) { + case 0x2050: + if (ofdm) { + tmp = in_rssi; + if (tmp > 127) + tmp -= 256; + tmp *= 73; + tmp /= 64; + if (adjust_2050) + tmp += 25; + else + tmp -= 3; + } else { + if (dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_RSSI) { + if (in_rssi > 63) + in_rssi = 63; + tmp = phy->nrssi_lt[in_rssi]; + tmp = 31 - tmp; + tmp *= -131; + tmp /= 128; + tmp -= 57; + } else { + tmp = in_rssi; + tmp = 31 - tmp; + tmp *= -149; + tmp /= 128; + tmp -= 68; + } + if (phy->type == B43legacy_PHYTYPE_G && + adjust_2050) + tmp += 25; + } + break; + case 0x2060: + if (in_rssi > 127) + tmp = in_rssi - 256; + else + tmp = in_rssi; + break; + default: + tmp = in_rssi; + tmp -= 11; + tmp *= 103; + tmp /= 64; + if (adjust_2053) + tmp -= 109; + else + tmp -= 83; + } + + return (s8)tmp; +} + +void b43legacy_rx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + const void *_rxhdr) +{ + struct ieee80211_rx_status status; + struct b43legacy_plcp_hdr6 *plcp; + struct ieee80211_hdr *wlhdr; + const struct b43legacy_rxhdr_fw3 *rxhdr = _rxhdr; + u16 fctl; + u16 phystat0; + u16 phystat3; + u16 chanstat; + u16 mactime; + u32 macstat; + u16 chanid; + u8 jssi; + int padding; + + memset(&status, 0, sizeof(status)); + + /* Get metadata about the frame from the header. */ + phystat0 = le16_to_cpu(rxhdr->phy_status0); + phystat3 = le16_to_cpu(rxhdr->phy_status3); + jssi = rxhdr->jssi; + macstat = le16_to_cpu(rxhdr->mac_status); + mactime = le16_to_cpu(rxhdr->mac_time); + chanstat = le16_to_cpu(rxhdr->channel); + + if (macstat & B43legacy_RX_MAC_FCSERR) + dev->wl->ieee_stats.dot11FCSErrorCount++; + + /* Skip PLCP and padding */ + padding = (macstat & B43legacy_RX_MAC_PADDING) ? 2 : 0; + if (unlikely(skb->len < (sizeof(struct b43legacy_plcp_hdr6) + + padding))) { + b43legacydbg(dev->wl, "RX: Packet size underrun (1)\n"); + goto drop; + } + plcp = (struct b43legacy_plcp_hdr6 *)(skb->data + padding); + skb_pull(skb, sizeof(struct b43legacy_plcp_hdr6) + padding); + /* The skb contains the Wireless Header + payload data now */ + if (unlikely(skb->len < (2+2+6/*minimum hdr*/ + FCS_LEN))) { + b43legacydbg(dev->wl, "RX: Packet size underrun (2)\n"); + goto drop; + } + wlhdr = (struct ieee80211_hdr *)(skb->data); + fctl = le16_to_cpu(wlhdr->frame_control); + + if ((macstat & B43legacy_RX_MAC_DEC) && + !(macstat & B43legacy_RX_MAC_DECERR)) { + unsigned int keyidx; + int wlhdr_len; + int iv_len; + int icv_len; + + keyidx = ((macstat & B43legacy_RX_MAC_KEYIDX) + >> B43legacy_RX_MAC_KEYIDX_SHIFT); + /* We must adjust the key index here. We want the "physical" + * key index, but the ucode passed it slightly different. + */ + keyidx = b43legacy_kidx_to_raw(dev, keyidx); + B43legacy_WARN_ON(keyidx >= dev->max_nr_keys); + + if (dev->key[keyidx].algorithm != B43legacy_SEC_ALGO_NONE) { + /* Remove PROTECTED flag to mark it as decrypted. */ + B43legacy_WARN_ON(!(fctl & IEEE80211_FCTL_PROTECTED)); + fctl &= ~IEEE80211_FCTL_PROTECTED; + wlhdr->frame_control = cpu_to_le16(fctl); + + wlhdr_len = ieee80211_get_hdrlen(fctl); + if (unlikely(skb->len < (wlhdr_len + 3))) { + b43legacydbg(dev->wl, "RX: Packet size" + " underrun3\n"); + goto drop; + } + if (skb->data[wlhdr_len + 3] & (1 << 5)) { + /* The Ext-IV Bit is set in the "KeyID" + * octet of the IV. 
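 * With the Extended IV bit set (TKIP/CCMP-style keys) the security
 * header is 8 octets and an 8-octet trailer is stripped further down;
 * without it the frame is handled as plain WEP with a 4-octet IV and
 * a 4-octet ICV.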
+ */ + iv_len = 8; + icv_len = 8; + } else { + iv_len = 4; + icv_len = 4; + } + if (unlikely(skb->len < (wlhdr_len + iv_len + + icv_len))) { + b43legacydbg(dev->wl, "RX: Packet size" + " underrun4\n"); + goto drop; + } + /* Remove the IV */ + memmove(skb->data + iv_len, skb->data, wlhdr_len); + skb_pull(skb, iv_len); + /* Remove the ICV */ + skb_trim(skb, skb->len - icv_len); + + status.flag |= RX_FLAG_DECRYPTED; + } + } + + status.ssi = b43legacy_rssi_postprocess(dev, jssi, + (phystat0 & B43legacy_RX_PHYST0_OFDM), + (phystat0 & B43legacy_RX_PHYST0_GAINCTL), + (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); + status.noise = dev->stats.link_noise; + status.signal = (jssi * 100) / B43legacy_RX_MAX_SSI; + if (phystat0 & B43legacy_RX_PHYST0_OFDM) + status.rate = b43legacy_plcp_get_bitrate_ofdm(plcp); + else + status.rate = b43legacy_plcp_get_bitrate_cck(plcp); + status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT); + status.mactime = mactime; + + chanid = (chanstat & B43legacy_RX_CHAN_ID) >> + B43legacy_RX_CHAN_ID_SHIFT; + switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) { + case B43legacy_PHYTYPE_B: + status.phymode = MODE_IEEE80211B; + status.freq = chanid + 2400; + status.channel = b43legacy_freq_to_channel_bg(chanid + 2400); + break; + case B43legacy_PHYTYPE_G: + status.phymode = MODE_IEEE80211G; + status.freq = chanid + 2400; + status.channel = b43legacy_freq_to_channel_bg(chanid + 2400); + break; + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); + } + + dev->stats.last_rx = jiffies; + ieee80211_rx_irqsafe(dev->wl->hw, skb, &status); + + return; +drop: + b43legacydbg(dev->wl, "RX: Packet dropped\n"); + dev_kfree_skb_any(skb); +} + +void b43legacy_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + b43legacy_debugfs_log_txstat(dev, status); + + if (status->intermediate) + return; + if (status->for_ampdu) + return; + if (!status->acked) + dev->wl->ieee_stats.dot11ACKFailureCount++; + if (status->rts_count) { + if (status->rts_count == 0xF) /* FIXME */ + dev->wl->ieee_stats.dot11RTSFailureCount++; + else + dev->wl->ieee_stats.dot11RTSSuccessCount++; + } + + if (b43legacy_using_pio(dev)) + b43legacy_pio_handle_txstatus(dev, status); + else + b43legacy_dma_handle_txstatus(dev, status); +} + +/* Handle TX status report as received through DMA/PIO queues */ +void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev, + const struct b43legacy_hwtxstatus *hw) +{ + struct b43legacy_txstatus status; + u8 tmp; + + status.cookie = le16_to_cpu(hw->cookie); + status.seq = le16_to_cpu(hw->seq); + status.phy_stat = hw->phy_stat; + tmp = hw->count; + status.frame_count = (tmp >> 4); + status.rts_count = (tmp & 0x0F); + tmp = hw->flags; + status.supp_reason = ((tmp & 0x1C) >> 2); + status.pm_indicated = !!(tmp & 0x80); + status.intermediate = !!(tmp & 0x40); + status.for_ampdu = !!(tmp & 0x20); + status.acked = !!(tmp & 0x02); + + b43legacy_handle_txstatus(dev, &status); +} + +/* Stop any TX operation on the device (suspend the hardware queues) */ +void b43legacy_tx_suspend(struct b43legacy_wldev *dev) +{ + if (b43legacy_using_pio(dev)) + b43legacy_pio_freeze_txqueues(dev); + else + b43legacy_dma_tx_suspend(dev); +} + +/* Resume any TX operation on the device (resume the hardware queues) */ +void b43legacy_tx_resume(struct b43legacy_wldev *dev) +{ + if (b43legacy_using_pio(dev)) + b43legacy_pio_thaw_txqueues(dev); + else + b43legacy_dma_tx_resume(dev); +} + +/* Initialize the QoS parameters */ +void b43legacy_qos_init(struct 
b43legacy_wldev *dev) +{ + /* FIXME: This function must probably be called from the mac80211 + * config callback. */ +return; + + b43legacy_hf_write(dev, b43legacy_hf_read(dev) | B43legacy_HF_EDCF); + /* FIXME kill magic */ + b43legacy_write16(dev, 0x688, + b43legacy_read16(dev, 0x688) | 0x4); + + + /*TODO: We might need some stack support here to get the values. */ +} diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h new file mode 100644 index 000000000000..8a155d0a5d1f --- /dev/null +++ b/drivers/net/wireless/b43legacy/xmit.h @@ -0,0 +1,259 @@ +#ifndef B43legacy_XMIT_H_ +#define B43legacy_XMIT_H_ + +#include "main.h" + + +#define _b43legacy_declare_plcp_hdr(size) \ + struct b43legacy_plcp_hdr##size { \ + union { \ + __le32 data; \ + __u8 raw[size]; \ + } __attribute__((__packed__)); \ + } __attribute__((__packed__)) + +/* struct b43legacy_plcp_hdr4 */ +_b43legacy_declare_plcp_hdr(4); +/* struct b43legacy_plcp_hdr6 */ +_b43legacy_declare_plcp_hdr(6); + +#undef _b43legacy_declare_plcp_hdr + + +/* TX header for v3 firmware */ +struct b43legacy_txhdr_fw3 { + __le32 mac_ctl; /* MAC TX control */ + __le16 mac_frame_ctl; /* Copy of the FrameControl */ + __le16 tx_fes_time_norm; /* TX FES Time Normal */ + __le16 phy_ctl; /* PHY TX control */ + __u8 iv[16]; /* Encryption IV */ + __u8 tx_receiver[6]; /* TX Frame Receiver address */ + __le16 tx_fes_time_fb; /* TX FES Time Fallback */ + struct b43legacy_plcp_hdr4 rts_plcp_fb; /* RTS fallback PLCP */ + __le16 rts_dur_fb; /* RTS fallback duration */ + struct b43legacy_plcp_hdr4 plcp_fb; /* Fallback PLCP */ + __le16 dur_fb; /* Fallback duration */ + PAD_BYTES(2); + __le16 cookie; + __le16 unknown_scb_stuff; + struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ + __u8 rts_frame[18]; /* The RTS frame (if used) */ + struct b43legacy_plcp_hdr6 plcp; +} __attribute__((__packed__)); + +/* MAC TX control */ +#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ +#define B43legacy_TX4_MAC_KEYIDX_SHIFT 20 +#define B43legacy_TX4_MAC_KEYALG 0x00070000 /* Security key algorithm */ +#define B43legacy_TX4_MAC_KEYALG_SHIFT 16 +#define B43legacy_TX4_MAC_LIFETIME 0x00001000 +#define B43legacy_TX4_MAC_FRAMEBURST 0x00000800 +#define B43legacy_TX4_MAC_SENDCTS 0x00000400 +#define B43legacy_TX4_MAC_AMPDU 0x00000300 +#define B43legacy_TX4_MAC_AMPDU_SHIFT 8 +#define B43legacy_TX4_MAC_CTSFALLBACKOFDM 0x00000200 +#define B43legacy_TX4_MAC_FALLBACKOFDM 0x00000100 +#define B43legacy_TX4_MAC_5GHZ 0x00000080 +#define B43legacy_TX4_MAC_IGNPMQ 0x00000020 +#define B43legacy_TX4_MAC_HWSEQ 0x00000010 /* Use Hardware Seq No */ +#define B43legacy_TX4_MAC_STMSDU 0x00000008 /* Start MSDU */ +#define B43legacy_TX4_MAC_SENDRTS 0x00000004 +#define B43legacy_TX4_MAC_LONGFRAME 0x00000002 +#define B43legacy_TX4_MAC_ACK 0x00000001 + +/* Extra Frame Types */ +#define B43legacy_TX4_EFT_FBOFDM 0x0001 /* Data frame fb rate type */ +#define B43legacy_TX4_EFT_RTSOFDM 0x0004 /* RTS/CTS rate type */ +#define B43legacy_TX4_EFT_RTSFBOFDM 0x0010 /* RTS/CTS fallback rate type */ + +/* PHY TX control word */ +#define B43legacy_TX4_PHY_OFDM 0x0001 /* Data frame rate type */ +#define B43legacy_TX4_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ +#define B43legacy_TX4_PHY_ANT 0x03C0 /* Antenna selection */ +#define B43legacy_TX4_PHY_ANT0 0x0000 /* Use antenna 0 */ +#define B43legacy_TX4_PHY_ANT1 0x0100 /* Use antenna 1 */ +#define B43legacy_TX4_PHY_ANTLAST 0x0300 /* Use last used antenna */ + + + +void b43legacy_generate_txhdr(struct b43legacy_wldev *dev, + 
u8 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie); + + +/* Transmit Status */ +struct b43legacy_txstatus { + u16 cookie; /* The cookie from the txhdr */ + u16 seq; /* Sequence number */ + u8 phy_stat; /* PHY TX status */ + u8 frame_count; /* Frame transmit count */ + u8 rts_count; /* RTS transmit count */ + u8 supp_reason; /* Suppression reason */ + /* flags */ + u8 pm_indicated;/* PM mode indicated to AP */ + u8 intermediate;/* Intermediate status notification */ + u8 for_ampdu; /* Status is for an AMPDU (afterburner) */ + u8 acked; /* Wireless ACK received */ +}; + +/* txstatus supp_reason values */ +enum { + B43legacy_TXST_SUPP_NONE, /* Not suppressed */ + B43legacy_TXST_SUPP_PMQ, /* Suppressed due to PMQ entry */ + B43legacy_TXST_SUPP_FLUSH, /* Suppressed due to flush request */ + B43legacy_TXST_SUPP_PREV, /* Previous fragment failed */ + B43legacy_TXST_SUPP_CHAN, /* Channel mismatch */ + B43legacy_TXST_SUPP_LIFE, /* Lifetime expired */ + B43legacy_TXST_SUPP_UNDER, /* Buffer underflow */ + B43legacy_TXST_SUPP_ABNACK, /* Afterburner NACK */ +}; + +/* Transmit Status as received through DMA/PIO on old chips */ +struct b43legacy_hwtxstatus { + PAD_BYTES(4); + __le16 cookie; + u8 flags; + u8 count; + PAD_BYTES(2); + __le16 seq; + u8 phy_stat; + PAD_BYTES(1); +} __attribute__((__packed__)); + + +/* Receive header for v3 firmware. */ +struct b43legacy_rxhdr_fw3 { + __le16 frame_len; /* Frame length */ + PAD_BYTES(2); + __le16 phy_status0; /* PHY RX Status 0 */ + __u8 jssi; /* PHY RX Status 1: JSSI */ + __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ + PAD_BYTES(2); /* PHY RX Status 2 */ + __le16 phy_status3; /* PHY RX Status 3 */ + __le16 mac_status; /* MAC RX status */ + __le16 mac_time; + __le16 channel; +} __attribute__((__packed__)); + + +/* PHY RX Status 0 */ +#define B43legacy_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ +#define B43legacy_RX_PHYST0_PLCPHCF 0x0200 +#define B43legacy_RX_PHYST0_PLCPFV 0x0100 +#define B43legacy_RX_PHYST0_SHORTPRMBL 0x0080 /* Recvd with Short Preamble */ +#define B43legacy_RX_PHYST0_LCRS 0x0040 +#define B43legacy_RX_PHYST0_ANT 0x0020 /* Antenna */ +#define B43legacy_RX_PHYST0_UNSRATE 0x0010 +#define B43legacy_RX_PHYST0_CLIP 0x000C +#define B43legacy_RX_PHYST0_CLIP_SHIFT 2 +#define B43legacy_RX_PHYST0_FTYPE 0x0003 /* Frame type */ +#define B43legacy_RX_PHYST0_CCK 0x0000 /* Frame type: CCK */ +#define B43legacy_RX_PHYST0_OFDM 0x0001 /* Frame type: OFDM */ +#define B43legacy_RX_PHYST0_PRE_N 0x0002 /* Pre-standard N-PHY frame */ +#define B43legacy_RX_PHYST0_STD_N 0x0003 /* Standard N-PHY frame */ + +/* PHY RX Status 2 */ +#define B43legacy_RX_PHYST2_LNAG 0xC000 /* LNA Gain */ +#define B43legacy_RX_PHYST2_LNAG_SHIFT 14 +#define B43legacy_RX_PHYST2_PNAG 0x3C00 /* PNA Gain */ +#define B43legacy_RX_PHYST2_PNAG_SHIFT 10 +#define B43legacy_RX_PHYST2_FOFF 0x03FF /* F offset */ + +/* PHY RX Status 3 */ +#define B43legacy_RX_PHYST3_DIGG 0x1800 /* DIG Gain */ +#define B43legacy_RX_PHYST3_DIGG_SHIFT 11 +#define B43legacy_RX_PHYST3_TRSTATE 0x0400 /* TR state */ + +/* MAC RX Status */ +#define B43legacy_RX_MAC_BEACONSENT 0x00008000 /* Beacon send flag */ +#define B43legacy_RX_MAC_KEYIDX 0x000007E0 /* Key index */ +#define B43legacy_RX_MAC_KEYIDX_SHIFT 5 +#define B43legacy_RX_MAC_DECERR 0x00000010 /* Decrypt error */ +#define B43legacy_RX_MAC_DEC 0x00000008 /* Decryption attempted */ +#define B43legacy_RX_MAC_PADDING 0x00000004 /* Pad bytes present */ +#define B43legacy_RX_MAC_RESP 
0x00000002 /* Response frame xmitted */ +#define B43legacy_RX_MAC_FCSERR 0x00000001 /* FCS error */ + +/* RX channel */ +#define B43legacy_RX_CHAN_GAIN 0xFC00 /* Gain */ +#define B43legacy_RX_CHAN_GAIN_SHIFT 10 +#define B43legacy_RX_CHAN_ID 0x03FC /* Channel ID */ +#define B43legacy_RX_CHAN_ID_SHIFT 2 +#define B43legacy_RX_CHAN_PHYTYPE 0x0003 /* PHY type */ + + + +u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate); +u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate); + +void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate); + +void b43legacy_rx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + const void *_rxhdr); + +void b43legacy_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev, + const struct b43legacy_hwtxstatus *hw); + +void b43legacy_tx_suspend(struct b43legacy_wldev *dev); +void b43legacy_tx_resume(struct b43legacy_wldev *dev); + + +#define B43legacy_NR_QOSPARMS 22 +enum { + B43legacy_QOSPARM_TXOP = 0, + B43legacy_QOSPARM_CWMIN, + B43legacy_QOSPARM_CWMAX, + B43legacy_QOSPARM_CWCUR, + B43legacy_QOSPARM_AIFS, + B43legacy_QOSPARM_BSLOTS, + B43legacy_QOSPARM_REGGAP, + B43legacy_QOSPARM_STATUS, +}; + +void b43legacy_qos_init(struct b43legacy_wldev *dev); + + +/* Helper functions for converting the key-table index from "firmware-format" + * to "raw-format" and back. The firmware API changed for this at some revision. + * We need to account for that here. */ +static inline +int b43legacy_new_kidx_api(struct b43legacy_wldev *dev) +{ + /* FIXME: Not sure the change was at rev 351 */ + return (dev->fw.rev >= 351); +} +static inline +u8 b43legacy_kidx_to_fw(struct b43legacy_wldev *dev, u8 raw_kidx) +{ + u8 firmware_kidx; + if (b43legacy_new_kidx_api(dev)) + firmware_kidx = raw_kidx; + else { + if (raw_kidx >= 4) /* Is per STA key? */ + firmware_kidx = raw_kidx - 4; + else + firmware_kidx = raw_kidx; /* TX default key */ + } + return firmware_kidx; +} +static inline +u8 b43legacy_kidx_to_raw(struct b43legacy_wldev *dev, u8 firmware_kidx) +{ + u8 raw_kidx; + if (b43legacy_new_kidx_api(dev)) + raw_kidx = firmware_kidx; + else + /* RX default keys or per STA keys */ + raw_kidx = firmware_kidx + 4; + return raw_kidx; +} + +#endif /* B43legacy_XMIT_H_ */ diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index 10e07e865426..5fdbf24d2443 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h @@ -994,10 +994,4 @@ int bcm43xx_pci_write_config32(struct bcm43xx_private *bcm, int offset, u32 valu __value; \ }) -/** Helpers to print MAC addresses. */ -#define BCM43xx_MACFMT "%02x:%02x:%02x:%02x:%02x:%02x" -#define BCM43xx_MACARG(x) ((u8*)(x))[0], ((u8*)(x))[1], \ - ((u8*)(x))[2], ((u8*)(x))[3], \ - ((u8*)(x))[4], ((u8*)(x))[5] - #endif /* BCM43xx_H_ */ diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index c5d6753a55ea..45683437a98d 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -2380,7 +2380,7 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm) goto err_gpio_cleanup; bcm43xx_radio_turn_on(bcm); bcm->radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm); - dprintk(KERN_INFO PFX "Radio %s by hardware\n", + printk(KERN_INFO PFX "Radio %s by hardware\n", (bcm->radio_hw_enable == 0) ? 
"disabled" : "enabled"); bcm43xx_write16(bcm, 0x03E6, 0x0000); @@ -3129,7 +3129,7 @@ static void bcm43xx_periodic_every1sec(struct bcm43xx_private *bcm) radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm); if (unlikely(bcm->radio_hw_enable != radio_hw_enable)) { bcm->radio_hw_enable = radio_hw_enable; - dprintk(KERN_INFO PFX "Radio hardware status changed to %s\n", + printk(KERN_INFO PFX "Radio hardware status changed to %s\n", (radio_hw_enable == 0) ? "disabled" : "enabled"); bcm43xx_leds_update(bcm, 0); } @@ -3183,6 +3183,9 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work) unsigned long orig_trans_start = 0; mutex_lock(&bcm->mutex); + /* keep from doing and rearming periodic work if shutting down */ + if (bcm43xx_status(bcm) == BCM43xx_STAT_UNINIT) + goto unlock_mutex; if (unlikely(bcm->periodic_state % 60 == 0)) { /* Periodic work will take a long time, so we want it to * be preemtible. @@ -3228,14 +3231,10 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work) mmiowb(); bcm->periodic_state++; spin_unlock_irqrestore(&bcm->irq_lock, flags); +unlock_mutex: mutex_unlock(&bcm->mutex); } -void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) -{ - cancel_rearming_delayed_work(&bcm->periodic_work); -} - void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) { struct delayed_work *work = &bcm->periodic_work; @@ -3285,6 +3284,14 @@ static int bcm43xx_rng_init(struct bcm43xx_private *bcm) return err; } +void bcm43xx_cancel_work(struct bcm43xx_private *bcm) +{ + /* The system must be unlocked when this routine is entered. + * If not, the next 2 steps may deadlock */ + cancel_work_sync(&bcm->restart_work); + cancel_delayed_work_sync(&bcm->periodic_work); +} + static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm) { int ret = 0; @@ -3321,7 +3328,12 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm) { bcm43xx_rng_exit(bcm); bcm43xx_sysfs_unregister(bcm); - bcm43xx_periodic_tasks_delete(bcm); + + mutex_lock(&(bcm)->mutex); + bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT); + mutex_unlock(&(bcm)->mutex); + + bcm43xx_cancel_work(bcm); mutex_lock(&(bcm)->mutex); bcm43xx_shutdown_all_wireless_cores(bcm); @@ -4016,7 +4028,7 @@ static int bcm43xx_net_stop(struct net_device *net_dev) err = bcm43xx_disable_interrupts_sync(bcm); assert(!err); bcm43xx_free_board(bcm); - flush_scheduled_work(); + bcm43xx_cancel_work(bcm); return 0; } @@ -4080,7 +4092,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev, goto out; } /* initialize the net_device struct */ - SET_MODULE_OWNER(net_dev); SET_NETDEV_DEV(net_dev, &pdev->dev); net_dev->open = bcm43xx_net_open; @@ -4148,9 +4159,9 @@ static void bcm43xx_chip_reset(struct work_struct *work) struct bcm43xx_phyinfo *phy; int err = -ENODEV; + bcm43xx_cancel_work(bcm); mutex_lock(&(bcm)->mutex); if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { - bcm43xx_periodic_tasks_delete(bcm); phy = bcm43xx_current_phy(bcm); err = bcm43xx_select_wireless_core(bcm, phy->type); if (!err) diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h index c8f3c532bab5..14cfbeb582ef 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h @@ -122,7 +122,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy); void bcm43xx_mac_suspend(struct bcm43xx_private *bcm); void bcm43xx_mac_enable(struct bcm43xx_private *bcm); -void bcm43xx_periodic_tasks_delete(struct bcm43xx_private 
*bcm); +void bcm43xx_cancel_work(struct bcm43xx_private *bcm); void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm); void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason); diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c index d779199c30d0..b37f1e348700 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c @@ -1638,7 +1638,7 @@ void bcm43xx_phy_set_baseband_attenuation(struct bcm43xx_private *bcm, return; } - if (phy->analog == 1) { + if (phy->analog > 1) { value = bcm43xx_phy_read(bcm, 0x0060) & ~0x003C; value |= (baseband_attenuation << 2) & 0x003C; } else { diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c index 6a109f4a1b71..c605099c9baf 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c @@ -2146,7 +2146,7 @@ void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm) } else bcm43xx_phy_write(bcm, 0x0015, 0xAA00); radio->enabled = 0; - dprintk(KERN_INFO PFX "Radio turned off\n"); + dprintk(KERN_INFO PFX "Radio initialized\n"); bcm43xx_leds_update(bcm, 0); } diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c index c71b998a3694..8ab5f93d192a 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c @@ -327,7 +327,7 @@ static ssize_t bcm43xx_attr_phymode_store(struct device *dev, goto out; } - bcm43xx_periodic_tasks_delete(bcm); + bcm43xx_cancel_work(bcm); mutex_lock(&(bcm)->mutex); err = bcm43xx_select_wireless_core(bcm, phytype); if (!err) diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index d6d9413d7f23..6acfdc49dccd 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c @@ -444,7 +444,7 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev, u16 maxpower; if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) { - printk(PFX KERN_ERR "TX power not in dBm.\n"); + printk(KERN_ERR PFX "TX power not in dBm.\n"); return -EOPNOTSUPP; } diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h index ef37a75d550b..547ba84dc797 100644 --- a/drivers/net/wireless/hostap/hostap.h +++ b/drivers/net/wireless/hostap/hostap.h @@ -30,12 +30,11 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx); void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx); -int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr); -int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr); +extern const struct header_ops hostap_80211_ops; int hostap_80211_get_hdrlen(u16 fc); struct net_device_stats *hostap_get_stats(struct net_device *dev); void hostap_setup_dev(struct net_device *dev, local_info_t *local, - int main_dev); + int type); void hostap_set_multicast_list_queue(struct work_struct *work); int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index cbedc9ee740a..ef084df3d48e 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -19,6 +19,7 @@ void hostap_dump_rx_80211(const char 
*name, struct sk_buff *skb, { struct ieee80211_hdr_4addr *hdr; u16 fc; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *) skb->data; @@ -44,10 +45,11 @@ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctl)); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR, - MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3)); + printk(KERN_DEBUG " A1=%s", print_mac(mac, hdr->addr1)); + printk(" A2=%s", print_mac(mac, hdr->addr2)); + printk(" A3=%s", print_mac(mac, hdr->addr3)); if (skb->len >= 30) - printk(" A4=" MACSTR, MAC2STR(hdr->addr4)); + printk(" A4=%s", print_mac(mac, hdr->addr4)); printk("\n"); } @@ -534,6 +536,7 @@ static int hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, u16 fc, struct net_device **wds) { + DECLARE_MAC_BUF(mac); /* FIX: is this really supposed to accept WDS frames only in Master * mode? What about Repeater or Managed with WDS frames? */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) != @@ -549,10 +552,10 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) { /* RA (or BSSID) is not ours - drop */ PDEBUG(DEBUG_EXTRA, "%s: received WDS frame with " - "not own or broadcast %s=" MACSTR "\n", + "not own or broadcast %s=%s\n", local->dev->name, fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID", - MAC2STR(hdr->addr1)); + print_mac(mac, hdr->addr1)); return -1; } @@ -565,8 +568,8 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, /* require that WDS link has been registered with TA or the * frame is from current AP when using 'AP client mode' */ PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame " - "from unknown TA=" MACSTR "\n", - local->dev->name, MAC2STR(hdr->addr2)); + "from unknown TA=%s\n", + local->dev->name, print_mac(mac, hdr->addr2)); if (local->ap && local->ap->autom_ap_wds) hostap_wds_link_oper(local, hdr->addr2, WDS_ADD); return -1; @@ -632,6 +635,7 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, { struct ieee80211_hdr_4addr *hdr; int res, hdrlen; + DECLARE_MAC_BUF(mac); if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; @@ -643,8 +647,8 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, strcmp(crypt->ops->name, "TKIP") == 0) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "received packet from " MACSTR "\n", - local->dev->name, MAC2STR(hdr->addr2)); + "received packet from %s\n", + local->dev->name, print_mac(mac, hdr->addr2)); } return -1; } @@ -653,9 +657,9 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { - printk(KERN_DEBUG "%s: decryption failed (SA=" MACSTR + printk(KERN_DEBUG "%s: decryption failed (SA=%s" ") res=%d\n", - local->dev->name, MAC2STR(hdr->addr2), res); + local->dev->name, print_mac(mac, hdr->addr2), res); local->comm_tallies.rx_discards_wep_undecryptable++; return -1; } @@ -671,6 +675,7 @@ hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, { struct ieee80211_hdr_4addr *hdr; int res, hdrlen; + DECLARE_MAC_BUF(mac); if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; @@ -683,8 +688,8 @@ hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC 
verification failed" - " (SA=" MACSTR " keyidx=%d)\n", - local->dev->name, MAC2STR(hdr->addr2), keyidx); + " (SA=%s keyidx=%d)\n", + local->dev->name, print_mac(mac, hdr->addr2), keyidx); return -1; } @@ -716,6 +721,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, struct ieee80211_crypt_data *crypt = NULL; void *sta = NULL; int keyidx = 0; + DECLARE_MAC_BUF(mac); iface = netdev_priv(dev); local = iface->local; @@ -792,8 +798,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, * frames silently instead of filling system log with * these reports. */ printk(KERN_DEBUG "%s: WEP decryption failed (not set)" - " (SA=" MACSTR ")\n", - local->dev->name, MAC2STR(hdr->addr2)); + " (SA=%s)\n", + local->dev->name, print_mac(mac, hdr->addr2)); #endif local->comm_tallies.rx_discards_wep_undecryptable++; goto rx_dropped; @@ -807,8 +813,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " - "from " MACSTR "\n", dev->name, - MAC2STR(hdr->addr2)); + "from %s\n", dev->name, + print_mac(mac, hdr->addr2)); /* TODO: could inform hostapd about this so that it * could send auth failure report */ goto rx_dropped; @@ -976,8 +982,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, "unencrypted EAPOL frame\n", local->dev->name); } else { printk(KERN_DEBUG "%s: encryption configured, but RX " - "frame not encrypted (SA=" MACSTR ")\n", - local->dev->name, MAC2STR(hdr->addr2)); + "frame not encrypted (SA=%s)\n", + local->dev->name, print_mac(mac, hdr->addr2)); goto rx_dropped; } } @@ -986,8 +992,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, !hostap_is_eapol_frame(local, skb)) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: dropped unencrypted RX data " - "frame from " MACSTR " (drop_unencrypted=1)\n", - dev->name, MAC2STR(hdr->addr2)); + "frame from %s" + " (drop_unencrypted=1)\n", + dev->name, print_mac(mac, hdr->addr2)); } goto rx_dropped; } diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 3df3c60263d4..e7afc3ec3e6d 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -17,6 +17,7 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) { struct ieee80211_hdr_4addr *hdr; u16 fc; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *) skb->data; @@ -40,10 +41,11 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctl)); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR, - MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3)); + printk(KERN_DEBUG " A1=%s", print_mac(mac, hdr->addr1)); + printk(" A2=%s", print_mac(mac, hdr->addr2)); + printk(" A3=%s", print_mac(mac, hdr->addr3)); if (skb->len >= 30) - printk(" A4=" MACSTR, MAC2STR(hdr->addr4)); + printk(" A4=%s", print_mac(mac, hdr->addr4)); printk("\n"); } @@ -312,6 +314,7 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, struct ieee80211_hdr_4addr *hdr; u16 fc; int prefix_len, postfix_len, hdr_len, res; + DECLARE_MAC_BUF(mac); iface = netdev_priv(skb->dev); local = iface->local; @@ -326,8 +329,8 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, hdr = (struct ieee80211_hdr_4addr *) skb->data; if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: 
dropped " - "TX packet to " MACSTR "\n", - local->dev->name, MAC2STR(hdr->addr1)); + "TX packet to %s\n", + local->dev->name, print_mac(mac, hdr->addr1)); } kfree_skb(skb); return NULL; diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 90900525379c..6bbdb76b32df 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -94,6 +94,7 @@ static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta) static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) { struct sta_info *s; + DECLARE_MAC_BUF(mac); s = ap->sta_hash[STA_HASH(sta->addr)]; if (s == NULL) return; @@ -108,18 +109,20 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) if (s->hnext != NULL) s->hnext = s->hnext->hnext; else - printk("AP: could not remove STA " MACSTR " from hash table\n", - MAC2STR(sta->addr)); + printk("AP: could not remove STA %s" + " from hash table\n", + print_mac(mac, sta->addr)); } static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) { + DECLARE_MAC_BUF(mac); if (sta->ap && sta->local) hostap_event_expired_sta(sta->local->dev, sta); if (ap->proc != NULL) { char name[20]; - sprintf(name, MACSTR, MAC2STR(sta->addr)); + sprintf(name, "%s", print_mac(mac, sta->addr)); remove_proc_entry(name, ap->proc); } @@ -182,6 +185,7 @@ static void ap_handle_timer(unsigned long data) struct ap_data *ap; unsigned long next_time = 0; int was_assoc; + DECLARE_MAC_BUF(mac); if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) { PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n"); @@ -238,8 +242,8 @@ static void ap_handle_timer(unsigned long data) if (sta->ap) { if (ap->autom_ap_wds) { PDEBUG(DEBUG_AP, "%s: removing automatic WDS " - "connection to AP " MACSTR "\n", - local->dev->name, MAC2STR(sta->addr)); + "connection to AP %s\n", + local->dev->name, print_mac(mac, sta->addr)); hostap_wds_link_oper(local, sta->addr, WDS_DEL); } } else if (sta->timeout_next == STA_NULLFUNC) { @@ -255,11 +259,11 @@ static void ap_handle_timer(unsigned long data) } else { int deauth = sta->timeout_next == STA_DEAUTH; u16 resp; - PDEBUG(DEBUG_AP, "%s: sending %s info to STA " MACSTR + PDEBUG(DEBUG_AP, "%s: sending %s info to STA %s" "(last=%lu, jiffies=%lu)\n", local->dev->name, deauth ? "deauthentication" : "disassociation", - MAC2STR(sta->addr), sta->last_rx, jiffies); + print_mac(mac, sta->addr), sta->last_rx, jiffies); resp = cpu_to_le16(deauth ? 
WLAN_REASON_PREV_AUTH_NOT_VALID : WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); @@ -271,9 +275,10 @@ static void ap_handle_timer(unsigned long data) if (sta->timeout_next == STA_DEAUTH) { if (sta->flags & WLAN_STA_PERM) { - PDEBUG(DEBUG_AP, "%s: STA " MACSTR " would have been " - "removed, but it has 'perm' flag\n", - local->dev->name, MAC2STR(sta->addr)); + PDEBUG(DEBUG_AP, "%s: STA %s" + " would have been removed, " + "but it has 'perm' flag\n", + local->dev->name, print_mac(mac, sta->addr)); } else ap_free_sta(ap, sta); return; @@ -327,6 +332,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off, struct ap_data *ap = (struct ap_data *) data; char *policy_txt; struct mac_entry *entry; + DECLARE_MAC_BUF(mac); if (off != 0) { *eof = 1; @@ -357,7 +363,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off, break; } - p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr)); + p += sprintf(p, "%s\n", print_mac(mac, entry->addr)); } spin_unlock_bh(&ap->mac_restrictions.lock); @@ -514,6 +520,7 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off, struct ap_data *ap = (struct ap_data *) data; struct sta_info *sta; int i; + DECLARE_MAC_BUF(mac); if (off > PROC_LIMIT) { *eof = 1; @@ -526,7 +533,8 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off, if (!sta->ap) continue; - p += sprintf(p, MACSTR " %d %d %d %d '", MAC2STR(sta->addr), + p += sprintf(p, "%s %d %d %d %d '", + print_mac(mac, sta->addr), sta->u.ap.channel, sta->last_rx_signal, sta->last_rx_silence, sta->last_rx_rate); for (i = 0; i < sta->u.ap.ssid_len; i++) @@ -623,6 +631,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) u16 fc, *pos, auth_alg, auth_transaction, status; struct sta_info *sta = NULL; char *txt = NULL; + DECLARE_MAC_BUF(mac); if (ap->local->hostapd) { dev_kfree_skb(skb); @@ -674,9 +683,9 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) if (sta) atomic_dec(&sta->users); if (txt) { - PDEBUG(DEBUG_AP, "%s: " MACSTR " auth_cb - alg=%d trans#=%d " - "status=%d - %s\n", - dev->name, MAC2STR(hdr->addr1), auth_alg, + PDEBUG(DEBUG_AP, "%s: %s auth_cb - alg=%d " + "trans#=%d status=%d - %s\n", + dev->name, print_mac(mac, hdr->addr1), auth_alg, auth_transaction, status, txt); } dev_kfree_skb(skb); @@ -692,6 +701,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) u16 fc, *pos, status; struct sta_info *sta = NULL; char *txt = NULL; + DECLARE_MAC_BUF(mac); if (ap->local->hostapd) { dev_kfree_skb(skb); @@ -742,8 +752,8 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) if (sta) atomic_dec(&sta->users); if (txt) { - PDEBUG(DEBUG_AP, "%s: " MACSTR " assoc_cb - %s\n", - dev->name, MAC2STR(hdr->addr1), txt); + PDEBUG(DEBUG_AP, "%s: %s assoc_cb - %s\n", + dev->name, print_mac(mac, hdr->addr1), txt); } dev_kfree_skb(skb); } @@ -755,6 +765,7 @@ static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) struct ap_data *ap = data; struct ieee80211_hdr_4addr *hdr; struct sta_info *sta; + DECLARE_MAC_BUF(mac); if (skb->len < 24) goto fail; @@ -766,9 +777,9 @@ static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) sta->flags &= ~WLAN_STA_PENDING_POLL; spin_unlock(&ap->sta_table_lock); } else { - PDEBUG(DEBUG_AP, "%s: STA " MACSTR " did not ACK activity " - "poll frame\n", ap->local->dev->name, - MAC2STR(hdr->addr1)); + PDEBUG(DEBUG_AP, "%s: STA %s" + " did not ACK activity poll frame\n", + ap->local->dev->name, print_mac(mac, hdr->addr1)); } 
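/* Note on the MAC-address printing conversion running through these
 * hostap hunks: DECLARE_MAC_BUF(mac) sets aside a small scratch string
 * and print_mac(mac, addr) formats the six address octets into it as
 * "xx:xx:xx:xx:xx:xx" and returns that buffer. Since every call reuses
 * the same buffer, printks that used to show several addresses via
 * MACSTR/MAC2STR are split into one printk per address, as in
 * hostap_dump_rx_80211() earlier in this patch. */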
fail: @@ -985,6 +996,7 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, char *p = page; struct sta_info *sta = (struct sta_info *) data; int i; + DECLARE_MAC_BUF(mac); /* FIX: possible race condition.. the STA data could have just expired, * but proc entry was still here so that the read could have started; @@ -995,11 +1007,11 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, return 0; } - p += sprintf(p, "%s=" MACSTR "\nusers=%d\naid=%d\n" + p += sprintf(p, "%s=%s\nusers=%d\naid=%d\n" "flags=0x%04x%s%s%s%s%s%s%s\n" "capability=0x%02x\nlisten_interval=%d\nsupported_rates=", sta->ap ? "AP" : "STA", - MAC2STR(sta->addr), atomic_read(&sta->users), sta->aid, + print_mac(mac, sta->addr), atomic_read(&sta->users), sta->aid, sta->flags, sta->flags & WLAN_STA_AUTH ? " AUTH" : "", sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "", @@ -1060,6 +1072,7 @@ static void handle_add_proc_queue(struct work_struct *work) struct sta_info *sta; char name[20]; struct add_sta_proc_data *entry, *prev; + DECLARE_MAC_BUF(mac); entry = ap->add_sta_proc_entries; ap->add_sta_proc_entries = NULL; @@ -1072,7 +1085,7 @@ static void handle_add_proc_queue(struct work_struct *work) spin_unlock_bh(&ap->sta_table_lock); if (sta) { - sprintf(name, MACSTR, MAC2STR(sta->addr)); + sprintf(name, "%s", print_mac(mac, sta->addr)); sta->proc = create_proc_read_entry( name, 0, ap->proc, prism2_sta_proc_read, sta); @@ -1290,6 +1303,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, struct sta_info *sta = NULL; struct ieee80211_crypt_data *crypt; char *txt = ""; + DECLARE_MAC_BUF(mac); len = skb->len - IEEE80211_MGMT_HDR_LEN; @@ -1298,8 +1312,8 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, if (len < 6) { PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload " - "(len=%d) from " MACSTR "\n", dev->name, len, - MAC2STR(hdr->addr2)); + "(len=%d) from %s\n", dev->name, len, + print_mac(mac, hdr->addr2)); return; } @@ -1364,8 +1378,8 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, if (time_after(jiffies, sta->u.ap.last_beacon + (10 * sta->listen_interval * HZ) / 1024)) { PDEBUG(DEBUG_AP, "%s: no beacons received for a while," - " assuming AP " MACSTR " is now STA\n", - dev->name, MAC2STR(sta->addr)); + " assuming AP %s is now STA\n", + dev->name, print_mac(mac, sta->addr)); sta->ap = 0; sta->flags = 0; sta->u.sta.challenge = NULL; @@ -1480,9 +1494,9 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, } if (resp) { - PDEBUG(DEBUG_AP, "%s: " MACSTR " auth (alg=%d trans#=%d " - "stat=%d len=%d fc=%04x) ==> %d (%s)\n", - dev->name, MAC2STR(hdr->addr2), auth_alg, + PDEBUG(DEBUG_AP, "%s: %s auth (alg=%d " + "trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n", + dev->name, print_mac(mac, hdr->addr2), auth_alg, auth_transaction, status_code, len, fc, resp, txt); } } @@ -1502,13 +1516,14 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, int send_deauth = 0; char *txt = ""; u8 prev_ap[ETH_ALEN]; + DECLARE_MAC_BUF(mac); left = len = skb->len - IEEE80211_MGMT_HDR_LEN; if (len < (reassoc ? 
10 : 4)) { PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload " - "(len=%d, reassoc=%d) from " MACSTR "\n", - dev->name, len, reassoc, MAC2STR(hdr->addr2)); + "(len=%d, reassoc=%d) from %s\n", + dev->name, len, reassoc, print_mac(mac, hdr->addr2)); return; } @@ -1585,9 +1600,9 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, } if (left > 0) { - PDEBUG(DEBUG_AP, "%s: assoc from " MACSTR " with extra" - " data (%d bytes) [", - dev->name, MAC2STR(hdr->addr2), left); + PDEBUG(DEBUG_AP, "%s: assoc from %s" + " with extra data (%d bytes) [", + dev->name, print_mac(mac, hdr->addr2), left); while (left > 0) { PDEBUG2(DEBUG_AP, "<%02x>", *u); u++; left--; @@ -1687,10 +1702,10 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, } #if 0 - PDEBUG(DEBUG_AP, "%s: " MACSTR " %sassoc (len=%d prev_ap=" MACSTR - ") => %d(%d) (%s)\n", - dev->name, MAC2STR(hdr->addr2), reassoc ? "re" : "", len, - MAC2STR(prev_ap), resp, send_deauth, txt); + PDEBUG(DEBUG_AP, "%s: %s %sassoc (len=%d " + "prev_ap=%s) => %d(%d) (%s)\n", + dev->name, print_mac(mac, hdr->addr2), reassoc ? "re" : "", len, + print_mac(mac, prev_ap), resp, send_deauth, txt); #endif } @@ -1705,6 +1720,7 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, int len; u16 reason_code, *pos; struct sta_info *sta = NULL; + DECLARE_MAC_BUF(mac); len = skb->len - IEEE80211_MGMT_HDR_LEN; @@ -1716,8 +1732,8 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, pos = (u16 *) body; reason_code = __le16_to_cpu(*pos); - PDEBUG(DEBUG_AP, "%s: deauthentication: " MACSTR " len=%d, " - "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len, + PDEBUG(DEBUG_AP, "%s: deauthentication: %s len=%d, " + "reason_code=%d\n", dev->name, print_mac(mac, hdr->addr2), len, reason_code); spin_lock_bh(&local->ap->sta_table_lock); @@ -1729,9 +1745,9 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, } spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { - printk("%s: deauthentication from " MACSTR ", " + printk("%s: deauthentication from %s, " "reason_code=%d, but STA not authenticated\n", dev->name, - MAC2STR(hdr->addr2), reason_code); + print_mac(mac, hdr->addr2), reason_code); } } @@ -1746,6 +1762,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, int len; u16 reason_code, *pos; struct sta_info *sta = NULL; + DECLARE_MAC_BUF(mac); len = skb->len - IEEE80211_MGMT_HDR_LEN; @@ -1757,8 +1774,8 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, pos = (u16 *) body; reason_code = __le16_to_cpu(*pos); - PDEBUG(DEBUG_AP, "%s: disassociation: " MACSTR " len=%d, " - "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len, + PDEBUG(DEBUG_AP, "%s: disassociation: %s len=%d, " + "reason_code=%d\n", dev->name, print_mac(mac, hdr->addr2), len, reason_code); spin_lock_bh(&local->ap->sta_table_lock); @@ -1770,9 +1787,9 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, } spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { - printk("%s: disassociation from " MACSTR ", " + printk("%s: disassociation from %s, " "reason_code=%d, but STA not authenticated\n", - dev->name, MAC2STR(hdr->addr2), reason_code); + dev->name, print_mac(mac, hdr->addr2), reason_code); } } @@ -1862,15 +1879,16 @@ static void handle_pspoll(local_info_t *local, struct sta_info *sta; u16 aid; struct sk_buff *skb; + DECLARE_MAC_BUF(mac); - PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=" MACSTR ", TA=" MACSTR - " PWRMGT=%d\n", - MAC2STR(hdr->addr1), 
MAC2STR(hdr->addr2), + PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%s" + ", TA=%s PWRMGT=%d\n", + print_mac(mac, hdr->addr1), print_mac(mac, hdr->addr2), !!(le16_to_cpu(hdr->frame_ctl) & IEEE80211_FCTL_PM)); if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { - PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=" MACSTR - " not own MAC\n", MAC2STR(hdr->addr1)); + PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=%s" + " not own MAC\n", print_mac(mac, hdr->addr1)); return; } @@ -1948,6 +1966,7 @@ static void handle_wds_oper_queue(struct work_struct *work) wds_oper_queue); local_info_t *local = ap->local; struct wds_oper_data *entry, *prev; + DECLARE_MAC_BUF(mac); spin_lock_bh(&local->lock); entry = local->ap->wds_oper_entries; @@ -1956,10 +1975,10 @@ static void handle_wds_oper_queue(struct work_struct *work) while (entry) { PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection " - "to AP " MACSTR "\n", + "to AP %s\n", local->dev->name, entry->type == WDS_ADD ? "adding" : "removing", - MAC2STR(entry->addr)); + print_mac(mac, entry->addr)); if (entry->type == WDS_ADD) prism2_wds_add(local, entry->addr, 0); else if (entry->type == WDS_DEL) @@ -2135,6 +2154,7 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ u16 fc, type, stype; struct ieee80211_hdr_4addr *hdr; + DECLARE_MAC_BUF(mac); /* FIX: should give skb->len to handler functions and check that the * buffer is long enough */ @@ -2163,8 +2183,8 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=" - MACSTR " not own MAC\n", - MAC2STR(hdr->addr1)); + "%s not own MAC\n", + print_mac(mac, hdr->addr1)); goto done; } @@ -2200,14 +2220,14 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, } if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { - PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=" MACSTR - " not own MAC\n", MAC2STR(hdr->addr1)); + PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%s" + " not own MAC\n", print_mac(mac, hdr->addr1)); goto done; } if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) { - PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=" MACSTR - " not own MAC\n", MAC2STR(hdr->addr3)); + PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%s" + " not own MAC\n", print_mac(mac, hdr->addr3)); goto done; } @@ -2288,6 +2308,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) struct sk_buff *skb; struct ieee80211_hdr_4addr *hdr; struct hostap_80211_rx_status rx_stats; + DECLARE_MAC_BUF(mac); if (skb_queue_empty(&sta->tx_buf)) return; @@ -2308,8 +2329,8 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) memcpy(hdr->addr2, sta->addr, ETH_ALEN); hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14)); - PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for " - "STA " MACSTR "\n", local->dev->name, MAC2STR(sta->addr)); + PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for STA " + "%s\n", local->dev->name, print_mac(mac, sta->addr)); skb->dev = local->dev; @@ -2636,6 +2657,7 @@ static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev) int ret = sta->tx_rate; struct hostap_interface *iface; local_info_t *local; + DECLARE_MAC_BUF(mac); iface = netdev_priv(dev); local = iface->local; @@ -2663,9 +2685,9 @@ static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev) case 3: sta->tx_rate = 110; break; default: sta->tx_rate = 0; break; } - 
PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate raised to" - " %d\n", dev->name, MAC2STR(sta->addr), - sta->tx_rate); + PDEBUG(DEBUG_AP, "%s: STA %s" + " TX rate raised to %d\n", + dev->name, print_mac(mac, sta->addr), sta->tx_rate); } sta->tx_since_last_failure = 0; } @@ -2683,6 +2705,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) int set_tim, ret; struct ieee80211_hdr_4addr *hdr; struct hostap_skb_tx_data *meta; + DECLARE_MAC_BUF(mac); meta = (struct hostap_skb_tx_data *) skb->cb; ret = AP_TX_CONTINUE; @@ -2718,7 +2741,8 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) * print out any errors here. */ if (net_ratelimit()) { printk(KERN_DEBUG "AP: drop packet to non-associated " - "STA " MACSTR "\n", MAC2STR(hdr->addr1)); + "STA %s\n", + print_mac(mac, hdr->addr1)); } #endif local->ap->tx_drop_nonassoc++; @@ -2756,8 +2780,9 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) } if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) { - PDEBUG(DEBUG_PS, "%s: No more space in STA (" MACSTR ")'s PS " - "mode buffer\n", local->dev->name, MAC2STR(sta->addr)); + PDEBUG(DEBUG_PS, "%s: No more space in STA (%s" + ")'s PS mode buffer\n", + local->dev->name, print_mac(mac, sta->addr)); /* Make sure that TIM is set for the station (it might not be * after AP wlan hw reset). */ /* FIX: should fix hw reset to restore bits based on STA @@ -2821,6 +2846,7 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) struct sta_info *sta; struct ieee80211_hdr_4addr *hdr; struct hostap_skb_tx_data *meta; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *) skb->data; meta = (struct hostap_skb_tx_data *) skb->cb; @@ -2829,9 +2855,9 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) sta = ap_get_sta(local->ap, hdr->addr1); if (!sta) { spin_unlock(&local->ap->sta_table_lock); - PDEBUG(DEBUG_AP, "%s: Could not find STA " MACSTR " for this " - "TX error (@%lu)\n", - local->dev->name, MAC2STR(hdr->addr1), jiffies); + PDEBUG(DEBUG_AP, "%s: Could not find STA %s" + " for this TX error (@%lu)\n", + local->dev->name, print_mac(mac, hdr->addr1), jiffies); return; } @@ -2858,8 +2884,9 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) case 3: sta->tx_rate = 110; break; default: sta->tx_rate = 0; break; } - PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate lowered " - "to %d\n", local->dev->name, MAC2STR(sta->addr), + PDEBUG(DEBUG_AP, "%s: STA %s" + " TX rate lowered to %d\n", + local->dev->name, print_mac(mac, sta->addr), sta->tx_rate); } sta->tx_consecutive_exc = 0; @@ -2871,16 +2898,17 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta, int pwrmgt, int type, int stype) { + DECLARE_MAC_BUF(mac); if (pwrmgt && !(sta->flags & WLAN_STA_PS)) { sta->flags |= WLAN_STA_PS; - PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to use PS " + PDEBUG(DEBUG_PS2, "STA %s changed to use PS " "mode (type=0x%02X, stype=0x%02X)\n", - MAC2STR(sta->addr), type >> 2, stype >> 4); + print_mac(mac, sta->addr), type >> 2, stype >> 4); } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) { sta->flags &= ~WLAN_STA_PS; - PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to not use " + PDEBUG(DEBUG_PS2, "STA %s changed to not use " "PS mode (type=0x%02X, stype=0x%02X)\n", - MAC2STR(sta->addr), type >> 2, stype >> 4); + print_mac(mac, sta->addr), type >> 2, stype >> 4); if (type != IEEE80211_FTYPE_CTL || stype 
!= IEEE80211_STYPE_PSPOLL) schedule_packet_send(local, sta); @@ -2924,6 +2952,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, struct sta_info *sta; u16 fc, type, stype; struct ieee80211_hdr_4addr *hdr; + DECLARE_MAC_BUF(mac); if (local->ap == NULL) return AP_RX_CONTINUE; @@ -2954,9 +2983,10 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT } else { printk(KERN_DEBUG "%s: dropped received packet" - " from non-associated STA " MACSTR + " from non-associated STA " + "%s" " (type=0x%02x, subtype=0x%02x)\n", - dev->name, MAC2STR(hdr->addr2), + dev->name, print_mac(mac, hdr->addr2), type >> 2, stype >> 4); hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ @@ -2991,8 +3021,8 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, * being associated. */ printk(KERN_DEBUG "%s: rejected received nullfunc " "frame without ToDS from not associated STA " - MACSTR "\n", - dev->name, MAC2STR(hdr->addr2)); + "%s\n", + dev->name, print_mac(mac, hdr->addr2)); hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ } @@ -3009,9 +3039,9 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, * If BSSID is own, report the dropping of this frame. */ if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { printk(KERN_DEBUG "%s: dropped received packet from " - MACSTR " with no ToDS flag (type=0x%02x, " - "subtype=0x%02x)\n", dev->name, - MAC2STR(hdr->addr2), type >> 2, stype >> 4); + "%s with no ToDS flag " + "(type=0x%02x, subtype=0x%02x)\n", dev->name, + print_mac(mac, hdr->addr2), type >> 2, stype >> 4); hostap_dump_rx_80211(dev->name, skb, rx_stats); } ret = AP_RX_DROP; diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h index b31e6a05f23c..ceb7f1e5e9e0 100644 --- a/drivers/net/wireless/hostap/hostap_common.h +++ b/drivers/net/wireless/hostap/hostap_common.h @@ -6,9 +6,6 @@ #define BIT(x) (1 << (x)) -#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] -#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" - /* IEEE 802.11 defines */ diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 30e723f65979..877d3bdd37a0 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -272,7 +272,7 @@ static int sandisk_enable_wireless(struct net_device *dev) { int res, ret = 0; conf_reg_t reg; - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; tuple_t tuple; cisparse_t *parse = NULL; @@ -822,6 +822,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x3301), PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), @@ -889,6 +890,10 @@ static struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_PROD_ID123( "corega", "WL PCCL-11", "ISL37300P", 0xa21501a, 0x59868926, 0xc9049a39), + PCMCIA_DEVICE_PROD_ID1234( + "The Linksys Group, Inc.", "Wireless Network CF Card", "ISL37300P", + "RevA", + 0xa5f472c2, 0x9c05598d, 0xc9049a39, 0x57a66194), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); diff --git a/drivers/net/wireless/hostap/hostap_hw.c 
b/drivers/net/wireless/hostap/hostap_hw.c index 959887b70ca7..c592641e914e 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -825,7 +825,7 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, local->hw_downloading) return -ENODEV; - res = down_interruptible(&local->rid_bap_sem); + res = mutex_lock_interruptible(&local->rid_bap_mtx); if (res) return res; @@ -834,7 +834,7 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed " "(res=%d, rid=%04x, len=%d)\n", dev->name, res, rid, len); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); return res; } @@ -861,7 +861,7 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, res = hfa384x_from_bap(dev, BAP0, buf, len); spin_unlock_bh(&local->baplock); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); if (res) { if (res != -ENODATA) @@ -902,7 +902,7 @@ static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len) /* RID len in words and +1 for rec.rid */ rec.len = cpu_to_le16(len / 2 + len % 2 + 1); - res = down_interruptible(&local->rid_bap_sem); + res = mutex_lock_interruptible(&local->rid_bap_mtx); if (res) return res; @@ -917,12 +917,12 @@ static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len) if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - " "failed - res=%d\n", dev->name, rid, len, res); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); return res; } res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE " @@ -2335,6 +2335,10 @@ static void prism2_txexc(local_info_t *local) int show_dump, res; char *payload = NULL; struct hfa384x_tx_frame txdesc; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR; local->stats.tx_errors++; @@ -2400,10 +2404,9 @@ static void prism2_txexc(local_info_t *local) WLAN_FC_GET_STYPE(fc) >> 4, fc & IEEE80211_FCTL_TODS ? " ToDS" : "", fc & IEEE80211_FCTL_FROMDS ? 
" FromDS" : ""); - PDEBUG(DEBUG_EXTRA, " A1=" MACSTR " A2=" MACSTR " A3=" - MACSTR " A4=" MACSTR "\n", - MAC2STR(txdesc.addr1), MAC2STR(txdesc.addr2), - MAC2STR(txdesc.addr3), MAC2STR(txdesc.addr4)); + PDEBUG(DEBUG_EXTRA, " A1=%s A2=%s A3=%s A4=%s\n", + print_mac(mac, txdesc.addr1), print_mac(mac2, txdesc.addr2), + print_mac(mac3, txdesc.addr3), print_mac(mac4, txdesc.addr4)); } @@ -3171,7 +3174,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, spin_lock_init(&local->cmdlock); spin_lock_init(&local->baplock); spin_lock_init(&local->lock); - init_MUTEX(&local->rid_bap_sem); + mutex_init(&local->rid_bap_mtx); if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES) card_idx = 0; @@ -3254,12 +3257,11 @@ while (0) INIT_LIST_HEAD(&local->bss_list); - hostap_setup_dev(dev, local, 1); - local->saved_eth_header_parse = dev->hard_header_parse; + hostap_setup_dev(dev, local, HOSTAP_INTERFACE_MASTER); dev->hard_start_xmit = hostap_master_start_xmit; dev->type = ARPHRD_IEEE80211; - dev->hard_header_parse = hostap_80211_header_parse; + dev->header_ops = &hostap_80211_ops; rtnl_lock(); ret = dev_alloc_name(dev, "wifi%d"); @@ -3424,7 +3426,7 @@ static void prism2_suspend(struct net_device *dev) struct local_info *local; union iwreq_data wrqu; - iface = dev->priv; + iface = netdev_priv(dev); local = iface->local; /* Send disconnect event, e.g., to trigger reassociation after resume diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index b6a02a02da74..636f4b2382ea 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c @@ -166,6 +166,7 @@ static void prism2_host_roaming(local_info_t *local) struct hfa384x_hostscan_result *selected, *entry; int i; unsigned long flags; + DECLARE_MAC_BUF(mac); if (local->last_join_time && time_before(jiffies, local->last_join_time + 10 * HZ)) { @@ -198,8 +199,9 @@ static void prism2_host_roaming(local_info_t *local) local->preferred_ap[2] || local->preferred_ap[3] || local->preferred_ap[4] || local->preferred_ap[5]) { /* Try to find preferred AP */ - PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " MACSTR "\n", - dev->name, MAC2STR(local->preferred_ap)); + PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " + "%s\n", + dev->name, print_mac(mac, local->preferred_ap)); for (i = 0; i < local->last_scan_results_count; i++) { entry = &local->last_scan_results[i]; if (memcmp(local->preferred_ap, entry->bssid, 6) == 0) @@ -216,8 +218,9 @@ static void prism2_host_roaming(local_info_t *local) req.channel = selected->chid; spin_unlock_irqrestore(&local->lock, flags); - PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=" MACSTR " channel=%d\n", - dev->name, MAC2STR(req.bssid), le16_to_cpu(req.channel)); + PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%s" + " channel=%d\n", + dev->name, print_mac(mac, req.bssid), le16_to_cpu(req.channel)); if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, sizeof(req))) { printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name); @@ -409,6 +412,7 @@ static void handle_info_queue_linkstatus(local_info_t *local) int val = local->prev_link_status; int connected; union iwreq_data wrqu; + DECLARE_MAC_BUF(mac); connected = val == HFA384X_LINKSTATUS_CONNECTED || @@ -420,9 +424,10 @@ static void handle_info_queue_linkstatus(local_info_t *local) printk(KERN_DEBUG "%s: could not read CURRENTBSSID after " "LinkStatus event\n", local->dev->name); } else { - PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=" MACSTR "\n", + PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: 
BSSID=" + "%s\n", local->dev->name, - MAC2STR((unsigned char *) local->bssid)); + print_mac(mac, (unsigned char *) local->bssid)); if (local->wds_type & HOSTAP_WDS_AP_CLIENT) hostap_add_sta(local->ap, local->bssid); } diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index 8c71077d653c..40f516d42c5e 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -664,6 +664,7 @@ static int hostap_join_ap(struct net_device *dev) unsigned long flags; int i; struct hfa384x_hostscan_result *entry; + DECLARE_MAC_BUF(mac); iface = netdev_priv(dev); local = iface->local; @@ -685,14 +686,14 @@ static int hostap_join_ap(struct net_device *dev) if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, sizeof(req))) { - printk(KERN_DEBUG "%s: JoinRequest " MACSTR + printk(KERN_DEBUG "%s: JoinRequest %s" " failed\n", - dev->name, MAC2STR(local->preferred_ap)); + dev->name, print_mac(mac, local->preferred_ap)); return -1; } - printk(KERN_DEBUG "%s: Trying to join BSSID " MACSTR "\n", - dev->name, MAC2STR(local->preferred_ap)); + printk(KERN_DEBUG "%s: Trying to join BSSID %s\n", + dev->name, print_mac(mac, local->preferred_ap)); return 0; } @@ -896,11 +897,8 @@ static void hostap_monitor_set_type(local_info_t *local) if (local->monitor_type == PRISM2_MONITOR_PRISM || local->monitor_type == PRISM2_MONITOR_CAPHDR) { dev->type = ARPHRD_IEEE80211_PRISM; - dev->hard_header_parse = - hostap_80211_prism_header_parse; } else { dev->type = ARPHRD_IEEE80211; - dev->hard_header_parse = hostap_80211_header_parse; } } @@ -1140,7 +1138,7 @@ static int hostap_monitor_mode_disable(local_info_t *local) printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name); dev->type = ARPHRD_ETHER; - dev->hard_header_parse = local->saved_eth_header_parse; + if (local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_STOP << 8), 0, NULL, NULL)) @@ -3088,7 +3086,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p) static int prism2_set_genericelement(struct net_device *dev, u8 *elem, size_t len) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; u8 *buf; @@ -3116,7 +3114,7 @@ static int prism2_ioctl_siwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; switch (data->flags & IW_AUTH_INDEX) { @@ -3182,7 +3180,7 @@ static int prism2_ioctl_giwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; switch (data->flags & IW_AUTH_INDEX) { @@ -3221,7 +3219,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; int i, ret = 0; @@ -3395,7 +3393,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; 
struct ieee80211_crypt_data **crypt; void *sta_ptr; @@ -3697,8 +3695,10 @@ static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { - printk(KERN_DEBUG "%ssta: associated as client with AP " MACSTR "\n", - local->dev->name, MAC2STR(param->sta_addr)); + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "%ssta: associated as client with AP " + "%s\n", + local->dev->name, print_mac(mac, param->sta_addr)); memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN); return 0; } @@ -3716,7 +3716,7 @@ static int prism2_ioctl_giwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; int len = local->generic_elem_len - 2; @@ -3755,7 +3755,7 @@ static int prism2_ioctl_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_mlme *mlme = (struct iw_mlme *) extra; u16 reason; @@ -3976,9 +3976,9 @@ static const iw_handler prism2_private_handler[] = const struct iw_handler_def hostap_iw_handler_def = { - .num_standard = sizeof(prism2_handler) / sizeof(iw_handler), - .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler), - .num_private_args = sizeof(prism2_priv) / sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(prism2_handler), + .num_private = ARRAY_SIZE(prism2_private_handler), + .num_private_args = ARRAY_SIZE(prism2_priv), .standard = (iw_handler *) prism2_handler, .private = (iw_handler *) prism2_private_handler, .private_args = (struct iw_priv_args *) prism2_priv, diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 446de51bab74..17c58e9bdad5 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -24,6 +24,7 @@ #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <linux/etherdevice.h> +#include <net/net_namespace.h> #include <net/iw_handler.h> #include <net/ieee80211.h> #include <net/ieee80211_crypt.h> @@ -72,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local, dev->mem_start = mdev->mem_start; dev->mem_end = mdev->mem_end; - hostap_setup_dev(dev, local, 0); + hostap_setup_dev(dev, local, type); dev->destructor = free_netdev; sprintf(dev->name, "%s%s", prefix, name); @@ -529,6 +530,10 @@ int hostap_set_auth_algs(local_info_t *local) void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) { u16 status, fc; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); status = __le16_to_cpu(rx->status); @@ -547,13 +552,12 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? 
" [FromDS]" : ""); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4=" - MACSTR "\n", - MAC2STR(rx->addr1), MAC2STR(rx->addr2), MAC2STR(rx->addr3), - MAC2STR(rx->addr4)); + printk(KERN_DEBUG " A1=%s A2=%s A3=%s A4=%s\n", + print_mac(mac, rx->addr1), print_mac(mac2, rx->addr2), + print_mac(mac3, rx->addr3), print_mac(mac4, rx->addr4)); - printk(KERN_DEBUG " dst=" MACSTR " src=" MACSTR " len=%d\n", - MAC2STR(rx->dst_addr), MAC2STR(rx->src_addr), + printk(KERN_DEBUG " dst=%s src=%s len=%d\n", + print_mac(mac, rx->dst_addr), print_mac(mac2, rx->src_addr), __be16_to_cpu(rx->len)); } @@ -561,6 +565,10 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) { u16 fc; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d " "tx_control=0x%04x; jiffies=%ld\n", @@ -576,35 +584,37 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4=" - MACSTR "\n", - MAC2STR(tx->addr1), MAC2STR(tx->addr2), MAC2STR(tx->addr3), - MAC2STR(tx->addr4)); + printk(KERN_DEBUG " A1=%s A2=%s A3=%s A4=%s\n", + print_mac(mac, tx->addr1), print_mac(mac2, tx->addr2), + print_mac(mac3, tx->addr3), print_mac(mac4, tx->addr4)); - printk(KERN_DEBUG " dst=" MACSTR " src=" MACSTR " len=%d\n", - MAC2STR(tx->dst_addr), MAC2STR(tx->src_addr), + printk(KERN_DEBUG " dst=%s src=%s len=%d\n", + print_mac(mac, tx->dst_addr), print_mac(mac2, tx->src_addr), __be16_to_cpu(tx->len)); } -int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) +int hostap_80211_header_parse(const struct sk_buff *skb, unsigned char *haddr) { - memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ - return ETH_ALEN; -} - + struct hostap_interface *iface = netdev_priv(skb->dev); + local_info_t *local = iface->local; + + if (local->monitor_type == PRISM2_MONITOR_PRISM || + local->monitor_type == PRISM2_MONITOR_CAPHDR) { + const unsigned char *mac = skb_mac_header(skb); + + if (*(u32 *)mac == LWNG_CAP_DID_BASE) { + memcpy(haddr, + mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, + ETH_ALEN); /* addr2 */ + } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ + memcpy(haddr, + mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, + ETH_ALEN); /* addr2 */ + } + } else + memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ -int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) -{ - const unsigned char *mac = skb_mac_header(skb); - - if (*(u32 *)mac == LWNG_CAP_DID_BASE) { - memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, - ETH_ALEN); /* addr2 */ - } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ - memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, - ETH_ALEN); /* addr2 */ - } return ETH_ALEN; } @@ -836,9 +846,18 @@ static void prism2_tx_timeout(struct net_device *dev) local->func->schedule_reset(local); } +const struct header_ops hostap_80211_ops = { + .create = eth_header, + .rebuild = eth_rebuild_header, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, + + .parse = hostap_80211_header_parse, +}; +EXPORT_SYMBOL(hostap_80211_ops); void hostap_setup_dev(struct net_device *dev, local_info_t *local, - int main_dev) + int type) { struct 
hostap_interface *iface; @@ -858,15 +877,22 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, dev->do_ioctl = hostap_ioctl; dev->open = prism2_open; dev->stop = prism2_close; - dev->hard_start_xmit = hostap_data_start_xmit; dev->set_mac_address = prism2_set_mac_address; dev->set_multicast_list = hostap_set_multicast_list; dev->change_mtu = prism2_change_mtu; dev->tx_timeout = prism2_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; + if (type == HOSTAP_INTERFACE_AP) { + dev->hard_start_xmit = hostap_mgmt_start_xmit; + dev->type = ARPHRD_IEEE80211; + dev->header_ops = &hostap_80211_ops; + } else { + dev->hard_start_xmit = hostap_data_start_xmit; + } + dev->mtu = local->mtu; - if (!main_dev) { + if (type != HOSTAP_INTERFACE_MASTER) { /* use main radio device queue */ dev->tx_queue_len = 0; } @@ -876,7 +902,6 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, netif_stop_queue(dev); } - static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked) { struct net_device *dev = local->dev; @@ -892,10 +917,6 @@ static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked) if (local->apdev == NULL) return -ENOMEM; - local->apdev->hard_start_xmit = hostap_mgmt_start_xmit; - local->apdev->type = ARPHRD_IEEE80211; - local->apdev->hard_header_parse = hostap_80211_header_parse; - return 0; } @@ -1093,8 +1114,8 @@ struct proc_dir_entry *hostap_proc; static int __init hostap_init(void) { - if (proc_net != NULL) { - hostap_proc = proc_mkdir("hostap", proc_net); + if (init_net.proc_net != NULL) { + hostap_proc = proc_mkdir("hostap", init_net.proc_net); if (!hostap_proc) printk(KERN_WARNING "Failed to mkdir " "/proc/net/hostap\n"); @@ -1109,7 +1130,7 @@ static void __exit hostap_exit(void) { if (hostap_proc != NULL) { hostap_proc = NULL; - remove_proc_entry("hostap", proc_net); + remove_proc_entry("hostap", init_net.proc_net); } } diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c index d1d8ce022e63..b03536008ad9 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/hostap/hostap_proc.c @@ -106,6 +106,7 @@ static int prism2_wds_proc_read(char *page, char **start, off_t off, local_info_t *local = (local_info_t *) data; struct list_head *ptr; struct hostap_interface *iface; + DECLARE_MAC_BUF(mac); if (off > PROC_LIMIT) { *eof = 1; @@ -117,9 +118,9 @@ static int prism2_wds_proc_read(char *page, char **start, off_t off, iface = list_entry(ptr, struct hostap_interface, list); if (iface->type != HOSTAP_INTERFACE_WDS) continue; - p += sprintf(p, "%s\t" MACSTR "\n", + p += sprintf(p, "%s\t%s\n", iface->dev->name, - MAC2STR(iface->u.wds.remote_addr)); + print_mac(mac, iface->u.wds.remote_addr)); if ((p - page) > PROC_LIMIT) { printk(KERN_DEBUG "%s: wds proc did not fit\n", local->dev->name); @@ -147,6 +148,7 @@ static int prism2_bss_list_proc_read(char *page, char **start, off_t off, struct list_head *ptr; struct hostap_bss_info *bss; int i; + DECLARE_MAC_BUF(mac); if (off > PROC_LIMIT) { *eof = 1; @@ -158,8 +160,8 @@ static int prism2_bss_list_proc_read(char *page, char **start, off_t off, spin_lock_bh(&local->lock); list_for_each(ptr, &local->bss_list) { bss = list_entry(ptr, struct hostap_bss_info, list); - p += sprintf(p, MACSTR "\t%lu\t%u\t0x%x\t", - MAC2STR(bss->bssid), bss->last_update, + p += sprintf(p, "%s\t%lu\t%u\t0x%x\t", + print_mac(mac, bss->bssid), bss->last_update, bss->count, bss->capab_info); for (i = 0; i < bss->ssid_len; i++) { p += sprintf(p, "%c", @@ -312,6 +314,7 @@ 
static int prism2_scan_results_proc_read(char *page, char **start, off_t off, int entry, i, len, total = 0; struct hfa384x_hostscan_result *scanres; u8 *pos; + DECLARE_MAC_BUF(mac); p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates " "SSID\n"); @@ -329,14 +332,14 @@ static int prism2_scan_results_proc_read(char *page, char **start, off_t off, if ((p - page) > (PAGE_SIZE - 200)) break; - p += sprintf(p, "%d %d %d %d 0x%02x %d " MACSTR " %d ", + p += sprintf(p, "%d %d %d %d 0x%02x %d %s %d ", le16_to_cpu(scanres->chid), (s16) le16_to_cpu(scanres->anl), (s16) le16_to_cpu(scanres->sl), le16_to_cpu(scanres->beacon_interval), le16_to_cpu(scanres->capability), le16_to_cpu(scanres->rate), - MAC2STR(scanres->bssid), + print_mac(mac, scanres->bssid), le16_to_cpu(scanres->atim)); pos = scanres->sup_rates; diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 87a54aa6f4dd..c27b2c1c06af 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -3,6 +3,7 @@ #include <linux/wireless.h> #include <linux/netdevice.h> +#include <linux/mutex.h> #include <net/iw_handler.h> #include "hostap_config.h" @@ -641,7 +642,7 @@ struct local_info { * when removing entries from the list. * TX and RX paths can use read lock. */ spinlock_t cmdlock, baplock, lock; - struct semaphore rid_bap_sem; + struct mutex rid_bap_mtx; u16 infofid; /* MAC buffer id for info frame */ /* txfid, intransmitfid, next_txtid, and next_alloc are protected by * txfidlock */ @@ -735,8 +736,6 @@ struct local_info { PRISM2_MONITOR_80211 = 0, PRISM2_MONITOR_PRISM = 1, PRISM2_MONITOR_CAPHDR = 2 } monitor_type; - int (*saved_eth_header_parse)(struct sk_buff *skb, - unsigned char *haddr); int monitor_allow_fcserr; int hostapd; /* whether user space daemon, hostapd, is used for AP diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 072ede71e575..2d46a16c0945 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c @@ -1922,6 +1922,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) u32 chan; char *txratename; u8 bssid[ETH_ALEN]; + DECLARE_MAC_BUF(mac); /* * TBD: BSSID is usually 00:00:00:00:00:00 here and not @@ -1983,9 +1984,9 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) } IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID=" - MAC_FMT ")\n", + "%s)\n", priv->net_dev->name, escape_essid(essid, essid_len), - txratename, chan, MAC_ARG(bssid)); + txratename, chan, print_mac(mac, bssid)); /* now we copy read ssid into dev */ if (!(priv->config & CFG_STATIC_ESSID)) { @@ -2053,10 +2054,12 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) { + DECLARE_MAC_BUF(mac); + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "disassociated: '%s' " MAC_FMT " \n", + "disassociated: '%s' %s \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); @@ -4049,6 +4052,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, char *out = buf; int length; int ret; + DECLARE_MAC_BUF(mac); if (priv->status & STATUS_RF_KILL_MASK) return 0; @@ -4076,9 +4080,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, __LINE__); out += sprintf(out, "ESSID: %s\n", essid); - out += 
sprintf(out, "BSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", - bssid[0], bssid[1], bssid[2], - bssid[3], bssid[4], bssid[5]); + out += sprintf(out, "BSSID: %s\n", print_mac(mac, bssid)); out += sprintf(out, "Channel: %d\n", chan); return out - buf; @@ -4652,19 +4654,20 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv) static int ipw2100_read_mac_address(struct ipw2100_priv *priv) { u32 length = ETH_ALEN; - u8 mac[ETH_ALEN]; + u8 addr[ETH_ALEN]; + DECLARE_MAC_BUF(mac); int err; - err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, mac, &length); + err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, addr, &length); if (err) { IPW_DEBUG_INFO("MAC address read failed\n"); return -EIO; } - IPW_DEBUG_INFO("card MAC is %02X:%02X:%02X:%02X:%02X:%02X\n", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); - memcpy(priv->net_dev->dev_addr, mac, ETH_ALEN); + memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN); + IPW_DEBUG_INFO("card MAC is %s\n", + print_mac(mac, priv->net_dev->dev_addr)); return 0; } @@ -5043,10 +5046,10 @@ static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid, int err; #ifdef CONFIG_IPW2100_DEBUG + DECLARE_MAC_BUF(mac); if (bssid != NULL) - IPW_DEBUG_HC("MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n", - bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], - bssid[5]); + IPW_DEBUG_HC("MANDATORY_BSSID: %s\n", + print_mac(mac, bssid)); else IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n"); #endif @@ -6239,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, IPW_DEBUG_INFO("Attempting to register device...\n"); - SET_MODULE_OWNER(dev); - printk(KERN_INFO DRV_NAME ": Detected Intel PRO/Wireless 2100 Network Connection\n"); @@ -6894,6 +6895,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev, static const unsigned char off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + DECLARE_MAC_BUF(mac); // sanity checks if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) @@ -6919,13 +6921,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev, err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0); - IPW_DEBUG_WX("SET BSSID -> %02X:%02X:%02X:%02X:%02X:%02X\n", - wrqu->ap_addr.sa_data[0] & 0xff, - wrqu->ap_addr.sa_data[1] & 0xff, - wrqu->ap_addr.sa_data[2] & 0xff, - wrqu->ap_addr.sa_data[3] & 0xff, - wrqu->ap_addr.sa_data[4] & 0xff, - wrqu->ap_addr.sa_data[5] & 0xff); + IPW_DEBUG_WX("SET BSSID -> %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); done: mutex_unlock(&priv->action_mutex); @@ -6941,6 +6938,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev, */ struct ipw2100_priv *priv = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac); /* If we are associated, trying to associate, or have a statically * configured BSSID then return that; otherwise return ANY */ @@ -6950,8 +6948,8 @@ static int ipw2100_wx_get_wap(struct net_device *dev, } else memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); - IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", - MAC_ARG(wrqu->ap_addr.sa_data)); + IPW_DEBUG_WX("Getting WAP BSSID: %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); return 0; } @@ -7868,10 +7866,10 @@ static int ipw2100_wx_set_powermode(struct net_device *dev, goto done; } - if ((mode < 1) || (mode > POWER_MODES)) + if ((mode < 0) || (mode > POWER_MODES)) mode = IPW_POWER_AUTO; - if (priv->power_mode != mode) + if (IPW_POWER_LEVEL(priv->power_mode) != mode) err = ipw2100_set_power_mode(priv, mode); done: mutex_unlock(&priv->action_mutex); @@ -7902,7 +7900,7 @@ static int ipw2100_wx_get_powermode(struct net_device *dev, break; case IPW_POWER_AUTO: snprintf(extra, 
MAX_POWER_STRING, - "Power save level: %d (Auto)", 0); + "Power save level: %d (Auto)", level); break; default: timeout = timeout_duration[level - 1] / 1000; @@ -8279,10 +8277,9 @@ static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev) static struct iw_handler_def ipw2100_wx_handler_def = { .standard = ipw2100_wx_handlers, - .num_standard = sizeof(ipw2100_wx_handlers) / sizeof(iw_handler), - .num_private = sizeof(ipw2100_private_handler) / sizeof(iw_handler), - .num_private_args = sizeof(ipw2100_private_args) / - sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(ipw2100_wx_handlers), + .num_private = ARRAY_SIZE(ipw2100_private_handler), + .num_private_args = ARRAY_SIZE(ipw2100_private_args), .private = (iw_handler *) ipw2100_private_handler, .private_args = (struct iw_priv_args *)ipw2100_private_args, .get_wireless_stats = ipw2100_wx_wireless_stats, diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index aa32a97380ec..feb8fcbab2d5 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -70,7 +70,7 @@ #define VQ #endif -#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ +#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" #define DRV_VERSION IPW2200_VERSION @@ -1740,8 +1740,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) if (disable_radio) { priv->status |= STATUS_RF_KILL_SW; - if (priv->workqueue) + if (priv->workqueue) { cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); + } queue_work(priv->workqueue, &priv->down); } else { priv->status &= ~STATUS_RF_KILL_SW; @@ -1992,6 +1994,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) wake_up_interruptible(&priv->wait_command_queue); priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); schedule_work(&priv->link_down); queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); handled |= IPW_INTA_BIT_RF_KILL_DONE; @@ -2247,8 +2250,8 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) return -1; } - IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", - priv->net_dev->name, MAC_ARG(mac)); + IPW_DEBUG_INFO("%s: Setting MAC to %s\n", + priv->net_dev->name, print_mac(mac, mac)); return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); } @@ -2506,7 +2509,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) break; } - param = cpu_to_le32(mode); + param = cpu_to_le32(param); return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), &param); } @@ -3796,6 +3799,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) { struct ipw_station_entry entry; int i; + DECLARE_MAC_BUF(mac); for (i = 0; i < priv->num_stations; i++) { if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { @@ -3812,7 +3816,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) if (i == MAX_STATIONS) return IPW_INVALID_STATION; - IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid)); + IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid)); entry.reserved = 0; entry.support_mode = 0; @@ -3839,6 +3843,7 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) { int err; + DECLARE_MAC_BUF(mac); if (priv->status & STATUS_ASSOCIATING) {
IPW_DEBUG_ASSOC("Disassociating while associating.\n"); @@ -3851,9 +3856,9 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) return; } - IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " " + IPW_DEBUG_ASSOC("Disassocation attempt from %s " "on channel %d.\n", - MAC_ARG(priv->assoc_request.bssid), + print_mac(mac, priv->assoc_request.bssid), priv->assoc_request.channel); priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); @@ -4341,6 +4346,37 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); } +static void ipw_scan_event(struct work_struct *work) +{ + union iwreq_data wrqu; + + struct ipw_priv *priv = + container_of(work, struct ipw_priv, scan_event.work); + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); +} + +static void handle_scan_event(struct ipw_priv *priv) +{ + /* Only userspace-requested scan completion events go out immediately */ + if (!priv->user_requested_scan) { + if (!delayed_work_pending(&priv->scan_event)) + queue_delayed_work(priv->workqueue, &priv->scan_event, + round_jiffies(msecs_to_jiffies(4000))); + } else { + union iwreq_data wrqu; + + priv->user_requested_scan = 0; + cancel_delayed_work(&priv->scan_event); + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); + } +} + /** * Handle host notification packet. * Called from interrupt routine @@ -4348,6 +4384,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, static void ipw_rx_notification(struct ipw_priv *priv, struct ipw_rx_notification *notif) { + DECLARE_MAC_BUF(mac); notif->size = le16_to_cpu(notif->size); IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size); @@ -4360,11 +4397,11 @@ static void ipw_rx_notification(struct ipw_priv *priv, case CMAS_ASSOCIATED:{ IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "associated: '%s' " MAC_FMT + "associated: '%s' %s" " \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); switch (priv->ieee->iw_mode) { case IW_MODE_INFRA: @@ -4444,13 +4481,13 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DL_STATE | IPW_DL_ASSOC, "deauthenticated: '%s' " - MAC_FMT + "%s" ": (0x%04X) - %s \n", escape_essid(priv-> essid, priv-> essid_len), - MAC_ARG(priv->bssid), + print_mac(mac, priv->bssid), ntohs(auth->status), ipw_get_status_code (ntohs @@ -4467,11 +4504,11 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "authenticated: '%s' " MAC_FMT + "authenticated: '%s' %s" "\n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); break; } @@ -4496,11 +4533,11 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "disassociated: '%s' " MAC_FMT + "disassociated: '%s' %s" " \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status &= ~(STATUS_DISASSOCIATING | @@ -4535,10 +4572,10 @@ static void ipw_rx_notification(struct ipw_priv *priv, switch (auth->state) { case CMAS_AUTHENTICATED: IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, - "authenticated: '%s' " MAC_FMT " \n", + "authenticated: '%s' %s \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status |= STATUS_AUTH; break; @@ -4554,10 +4591,10 @@ 
static void ipw_rx_notification(struct ipw_priv *priv, } IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "deauthenticated: '%s' " MAC_FMT "\n", + "deauthenticated: '%s' %s\n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status &= ~(STATUS_ASSOCIATING | STATUS_AUTH | @@ -4702,14 +4739,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, * on how the scan was initiated. User space can just * sync on periodic scan to get fresh data... * Jean II */ - if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) { - union iwreq_data wrqu; - - wrqu.data.length = 0; - wrqu.data.flags = 0; - wireless_send_event(priv->net_dev, SIOCGIWSCAN, - &wrqu, NULL); - } + if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) + handle_scan_event(priv); break; } @@ -5383,25 +5414,27 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, int roaming) { struct ipw_supported_rates rates; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); /* Verify that this network's capability is compatible with the * current mode (AdHoc or Infrastructure) */ if ((priv->ieee->iw_mode == IW_MODE_ADHOC && !(network->capability & WLAN_CAPABILITY_IBSS))) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to " "capability mismatch.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* If we do not have an ESSID for this AP, we can not associate with * it */ if (network->flags & NETWORK_EMPTY_ESSID) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of hidden ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5411,11 +5444,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, if ((network->ssid_len != match->network->ssid_len) || memcmp(network->ssid, match->network->ssid, network->ssid_len)) { - IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of non-network ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } } else { @@ -5430,9 +5463,9 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, strncpy(escaped, escape_essid(network->ssid, network->ssid_len), sizeof(escaped)); - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of ESSID mismatch: '%s'.\n", - escaped, MAC_ARG(network->bssid), + escaped, print_mac(mac, network->bssid), escape_essid(priv->essid, priv->essid_len)); return 0; @@ -5459,10 +5492,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, /* Now go through and see if the requested network is valid... 
*/ if (priv->ieee->scan_age != 0 && time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of age: %ums.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network->last_scanned)); return 0; @@ -5470,10 +5503,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, if ((priv->config & CFG_STATIC_CHANNEL) && (network->channel != priv->channel)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of channel mismatch: %d != %d.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), network->channel, priv->channel); return 0; } @@ -5481,10 +5514,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, /* Verify privacy compatability */ if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of privacy mismatch: %s != %s.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), priv-> capability & CAP_PRIVACY_ON ? "on" : "off", network-> @@ -5494,40 +5527,41 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, } if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " - "because of the same BSSID match: " MAC_FMT + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " + "because of the same BSSID match: %s" ".\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), MAC_ARG(priv->bssid)); + print_mac(mac, network->bssid), + print_mac(mac2, priv->bssid)); return 0; } /* Filter out any incompatible freq / mode combinations */ if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of invalid frequency/mode " "combination.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* Ensure that the rates supported by the driver are compatible with * this AP, including verification of basic rates (mandatory) */ if (!ipw_compatible_rates(priv, network, &rates)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because configured rate mask excludes " "AP mandatory rate.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } if (rates.num_rates == 0) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of no compatible rates.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5538,9 +5572,9 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, /* Set up 'new' AP to this network */ ipw_copy_rates(&match->rates, &rates); match->network = network; - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n", + IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 1; } @@ -5594,6 
+5628,7 @@ static int ipw_best_network(struct ipw_priv *priv, struct ieee80211_network *network, int roaming) { struct ipw_supported_rates rates; + DECLARE_MAC_BUF(mac); /* Verify that this network's capability is compatible with the * current mode (AdHoc or Infrastructure) */ @@ -5601,20 +5636,20 @@ static int ipw_best_network(struct ipw_priv *priv, !(network->capability & WLAN_CAPABILITY_ESS)) || (priv->ieee->iw_mode == IW_MODE_ADHOC && !(network->capability & WLAN_CAPABILITY_IBSS))) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to " "capability mismatch.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* If we do not have an ESSID for this AP, we can not associate with * it */ if (network->flags & NETWORK_EMPTY_ESSID) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of hidden ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5624,11 +5659,11 @@ static int ipw_best_network(struct ipw_priv *priv, if ((network->ssid_len != match->network->ssid_len) || memcmp(network->ssid, match->network->ssid, network->ssid_len)) { - IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of non-network ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } } else { @@ -5642,9 +5677,9 @@ static int ipw_best_network(struct ipw_priv *priv, strncpy(escaped, escape_essid(network->ssid, network->ssid_len), sizeof(escaped)); - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of ESSID mismatch: '%s'.\n", - escaped, MAC_ARG(network->bssid), + escaped, print_mac(mac, network->bssid), escape_essid(priv->essid, priv->essid_len)); return 0; @@ -5658,12 +5693,12 @@ static int ipw_best_network(struct ipw_priv *priv, strncpy(escaped, escape_essid(network->ssid, network->ssid_len), sizeof(escaped)); - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because " - "'%s (" MAC_FMT ")' has a stronger signal.\n", - escaped, MAC_ARG(network->bssid), + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because " + "'%s (%s)' has a stronger signal.\n", + escaped, print_mac(mac, network->bssid), escape_essid(match->network->ssid, match->network->ssid_len), - MAC_ARG(match->network->bssid)); + print_mac(mac, match->network->bssid)); return 0; } @@ -5671,11 +5706,11 @@ static int ipw_best_network(struct ipw_priv *priv, * last 3 seconds, do not try and associate again... */ if (network->last_associate && time_after(network->last_associate + (HZ * 3UL), jiffies)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of storming (%ums since last " "assoc attempt).\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network->last_associate)); return 0; @@ -5684,10 +5719,10 @@ static int ipw_best_network(struct ipw_priv *priv, /* Now go through and see if the requested network is valid... 
*/ if (priv->ieee->scan_age != 0 && time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of age: %ums.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network->last_scanned)); return 0; @@ -5695,10 +5730,10 @@ static int ipw_best_network(struct ipw_priv *priv, if ((priv->config & CFG_STATIC_CHANNEL) && (network->channel != priv->channel)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of channel mismatch: %d != %d.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), network->channel, priv->channel); return 0; } @@ -5706,10 +5741,10 @@ static int ipw_best_network(struct ipw_priv *priv, /* Verify privacy compatability */ if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of privacy mismatch: %s != %s.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), priv->capability & CAP_PRIVACY_ON ? "on" : "off", network->capability & @@ -5719,48 +5754,48 @@ static int ipw_best_network(struct ipw_priv *priv, if ((priv->config & CFG_STATIC_BSSID) && memcmp(network->bssid, priv->bssid, ETH_ALEN)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " - "because of BSSID mismatch: " MAC_FMT ".\n", + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " + "because of BSSID mismatch: %s.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), MAC_ARG(priv->bssid)); + print_mac(mac, network->bssid), print_mac(mac, priv->bssid)); return 0; } /* Filter out any incompatible freq / mode combinations */ if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of invalid frequency/mode " "combination.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* Filter out invalid channel in current GEO */ if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of invalid channel in current GEO\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* Ensure that the rates supported by the driver are compatible with * this AP, including verification of basic rates (mandatory) */ if (!ipw_compatible_rates(priv, network, &rates)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because configured rate mask excludes " "AP mandatory rate.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } if (rates.num_rates == 0) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of no compatible rates.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5772,9 +5807,9 @@ static int 
ipw_best_network(struct ipw_priv *priv, ipw_copy_rates(&match->rates, &rates); match->network = network; - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n", + IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 1; } @@ -6016,6 +6051,7 @@ static void ipw_bg_adhoc_check(struct work_struct *work) static void ipw_debug_config(struct ipw_priv *priv) { + DECLARE_MAC_BUF(mac); IPW_DEBUG_INFO("Scan completed, no valid APs matched " "[CFG 0x%08X]\n", priv->config); if (priv->config & CFG_STATIC_CHANNEL) @@ -6028,8 +6064,8 @@ static void ipw_debug_config(struct ipw_priv *priv) else IPW_DEBUG_INFO("ESSID unlocked.\n"); if (priv->config & CFG_STATIC_BSSID) - IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n", - MAC_ARG(priv->bssid)); + IPW_DEBUG_INFO("BSSID locked to %s\n", + print_mac(mac, priv->bssid)); else IPW_DEBUG_INFO("BSSID unlocked.\n"); if (priv->capability & CAP_PRIVACY_ON) @@ -7221,6 +7257,7 @@ static int ipw_associate_network(struct ipw_priv *priv, struct ipw_supported_rates *rates, int roaming) { int err; + DECLARE_MAC_BUF(mac); if (priv->config & CFG_FIXED_RATE) ipw_set_fixed_rate(priv, network->mode); @@ -7388,9 +7425,9 @@ static int ipw_associate_network(struct ipw_priv *priv, return err; } - IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n", + IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); return 0; } @@ -8202,6 +8239,9 @@ static void ipw_rx(struct ipw_priv *priv) struct ieee80211_hdr_4addr *header; u32 r, w, i; u8 network_packet; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); r = ipw_read32(priv, IPW_RX_READ_INDEX); w = ipw_read32(priv, IPW_RX_WRITE_INDEX); @@ -8328,14 +8368,17 @@ static void ipw_rx(struct ipw_priv *priv) header))) { IPW_DEBUG_DROP("Dropping: " - MAC_FMT ", " - MAC_FMT ", " - MAC_FMT "\n", - MAC_ARG(header-> + "%s, " + "%s, " + "%s\n", + print_mac(mac, + header-> addr1), - MAC_ARG(header-> + print_mac(mac2, + header-> addr2), - MAC_ARG(header-> + print_mac(mac3, + header-> addr3)); break; } @@ -8867,6 +8910,7 @@ static int ipw_wx_set_wap(struct net_device *dev, union iwreq_data *wrqu, char *extra) { struct ipw_priv *priv = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac); static const unsigned char any[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff @@ -8897,8 +8941,8 @@ static int ipw_wx_set_wap(struct net_device *dev, return 0; } - IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n", - MAC_ARG(wrqu->ap_addr.sa_data)); + IPW_DEBUG_WX("Setting mandatory BSSID to %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); @@ -8916,6 +8960,8 @@ static int ipw_wx_get_wap(struct net_device *dev, union iwreq_data *wrqu, char *extra) { struct ipw_priv *priv = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac); + /* If we are associated, trying to associate, or have a statically * configured BSSID then return that; otherwise return ANY */ mutex_lock(&priv->mutex); @@ -8926,8 +8972,8 @@ static int ipw_wx_get_wap(struct net_device *dev, } else memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); - IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", - MAC_ARG(wrqu->ap_addr.sa_data)); + IPW_DEBUG_WX("Getting WAP BSSID: %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); mutex_unlock(&priv->mutex); return 0; } @@ -9472,6 +9518,10 @@ static int ipw_wx_set_scan(struct net_device *dev, struct 
ipw_priv *priv = ieee80211_priv(dev); struct iw_scan_req *req = (struct iw_scan_req *)extra; + mutex_lock(&priv->mutex); + priv->user_requested_scan = 1; + mutex_unlock(&priv->mutex); + if (wrqu->data.length == sizeof(struct iw_scan_req)) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { ipw_request_direct_scan(priv, req->essid, @@ -9568,6 +9618,7 @@ static int ipw_wx_set_power(struct net_device *dev, priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; else priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; + err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); if (err) { IPW_DEBUG_WX("failed setting power mode.\n"); @@ -9604,22 +9655,19 @@ static int ipw_wx_set_powermode(struct net_device *dev, struct ipw_priv *priv = ieee80211_priv(dev); int mode = *(int *)extra; int err; + mutex_lock(&priv->mutex); - if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { + if ((mode < 1) || (mode > IPW_POWER_LIMIT)) mode = IPW_POWER_AC; - priv->power_mode = mode; - } else { - priv->power_mode = IPW_POWER_ENABLED | mode; - } - if (priv->power_mode != mode) { + if (IPW_POWER_LEVEL(priv->power_mode) != mode) { err = ipw_send_power_mode(priv, mode); - if (err) { IPW_DEBUG_WX("failed setting power mode.\n"); mutex_unlock(&priv->mutex); return err; } + priv->power_mode = IPW_POWER_ENABLED | mode; } mutex_unlock(&priv->mutex); return 0; @@ -10135,6 +10183,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, u8 id, hdr_len, unicast; u16 remaining_bytes; int fc; + DECLARE_MAC_BUF(mac); hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); switch (priv->ieee->iw_mode) { @@ -10145,8 +10194,8 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, id = ipw_add_station(priv, hdr->addr1); if (id == IPW_INVALID_STATION) { IPW_WARNING("Attempt to send data to " - "invalid cell: " MAC_FMT "\n", - MAC_ARG(hdr->addr1)); + "invalid cell: %s\n", + print_mac(mac, hdr->addr1)); goto drop; } } @@ -10462,13 +10511,15 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p) { struct ipw_priv *priv = ieee80211_priv(dev); struct sockaddr *addr = p; + DECLARE_MAC_BUF(mac); + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; mutex_lock(&priv->mutex); priv->config |= CFG_CUSTOM_MAC; memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); - printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", - priv->net_dev->name, MAC_ARG(priv->mac_addr)); + printk(KERN_INFO "%s: Setting MAC to %s\n", + priv->net_dev->name, print_mac(mac, priv->mac_addr)); queue_work(priv->workqueue, &priv->adapter_restart); mutex_unlock(&priv->mutex); return 0; @@ -10555,7 +10606,7 @@ static irqreturn_t ipw_isr(int irq, void *data) spin_lock(&priv->irq_lock); if (!(priv->status & STATUS_INT_ENABLED)) { - /* Shared IRQ */ + /* IRQ is disabled */ goto none; } @@ -10649,6 +10700,7 @@ static void ipw_link_up(struct ipw_priv *priv) } cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); ipw_reset_stats(priv); /* Ensure the rate is updated immediately */ priv->last_rate = ipw_get_current_rate(priv); @@ -10686,7 +10738,8 @@ static void ipw_link_down(struct ipw_priv *priv) if (!(priv->status & STATUS_EXIT_PENDING)) { /* Queue up another scan... 
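The ipw_wx_set_powermode hunk above reworks the handler so that an out-of-range request falls back to IPW_POWER_AC, the firmware command is sent only when the requested level differs from the cached one, and priv->power_mode is updated only after the command succeeds. A small stand-alone sketch of that control flow, with the hardware call stubbed out and the constants reduced to placeholder values that are not the driver's:

#include <stdio.h>

#define POWER_AC      6      /* placeholder values, not the driver's */
#define POWER_LIMIT   5
#define POWER_ENABLED 0x10
#define POWER_LEVEL(x) ((x) & 0x0f)

static int send_power_mode(int level)   /* stand-in for the firmware command */
{
        printf("sending power level %d to hardware\n", level);
        return 0;                        /* 0 == success */
}

/* Mirrors the fixed handler: clamp, skip redundant commands, commit last. */
static int set_powermode(int *cached_mode, int requested)
{
        if (requested < 1 || requested > POWER_LIMIT)
                requested = POWER_AC;

        if (POWER_LEVEL(*cached_mode) != requested) {
                int err = send_power_mode(requested);
                if (err)
                        return err;      /* cache left untouched on failure */
                *cached_mode = POWER_ENABLED | requested;
        }
        return 0;
}

int main(void)
{
        int mode = POWER_ENABLED | 3;
        set_powermode(&mode, 3);   /* no-op: level unchanged */
        set_powermode(&mode, 9);   /* out of range -> falls back to POWER_AC */
        return 0;
}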
*/ queue_delayed_work(priv->workqueue, &priv->request_scan, 0); - } + } else + cancel_delayed_work(&priv->scan_event); } static void ipw_bg_link_down(struct work_struct *work) @@ -10716,6 +10769,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) INIT_WORK(&priv->up, ipw_bg_up); INIT_WORK(&priv->down, ipw_bg_down); INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); + INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); @@ -11627,7 +11681,6 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_destroy_workqueue; } - SET_MODULE_OWNER(net_dev); SET_NETDEV_DEV(net_dev, &pdev->dev); mutex_lock(&priv->mutex); @@ -11748,6 +11801,7 @@ static void ipw_pci_remove(struct pci_dev *pdev) cancel_delayed_work(&priv->adhoc_check); cancel_delayed_work(&priv->gather_stats); cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); cancel_delayed_work(&priv->rf_kill); cancel_delayed_work(&priv->scan_check); destroy_workqueue(priv->workqueue); diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index 626a240a87d8..bec8e37a1738 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h @@ -45,7 +45,6 @@ #include <linux/firmware.h> #include <linux/wireless.h> -#include <linux/dma-mapping.h> #include <linux/jiffies.h> #include <asm/io.h> @@ -1288,6 +1287,8 @@ struct ipw_priv { struct iw_public_data wireless_data; + int user_requested_scan; + struct workqueue_struct *workqueue; struct delayed_work adhoc_check; @@ -1296,6 +1297,7 @@ struct ipw_priv { struct work_struct system_config; struct work_struct rx_replenish; struct delayed_work request_scan; + struct delayed_work scan_event; struct work_struct request_passive_scan; struct work_struct adapter_restart; struct delayed_work rf_kill; diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig new file mode 100644 index 000000000000..25cfc6c32509 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -0,0 +1,128 @@ +config IWLWIFI + bool "Intel Wireless WiFi Link Drivers" + depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL + select FW_LOADER + default n + ---help--- + Select to enable drivers based on the iwlwifi project. This + project provides a common foundation for Intel's wireless + drivers designed to use the mac80211 subsystem. + + See <file:Documentation/networking/README.iwlwifi> for + information on the capabilities currently enabled in this + driver and for tips for debugging issues and problems. + +config IWLWIFI_DEBUG + bool "Enable full debugging output in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable debug tracing output for the iwlwifi + drivers. + + This will result in the kernel module being ~100k larger. You can + control which debug output is sent to the kernel log by setting the + value in + + /sys/bus/pci/drivers/${DRIVER}/debug_level + + This entry will only exist if this option is enabled. 
+ + To set a value, simply echo an 8-byte hex value to the same file: + + % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level + + You can find the list of debug mask values in: + drivers/net/wireless/mac80211/iwlwifi/iwl-debug.h + + If this is your first time using this driver, you should say Y here + as the debug information can assist others in helping you resolve + any problems you may encounter. + +config IWLWIFI_SENSITIVITY + bool "Enable Sensitivity Calibration in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable sensitivity calibration for the iwlwifi + drivers. + +config IWLWIFI_SPECTRUM_MEASUREMENT + bool "Enable Spectrum Measurement in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable spectrum measurement for the iwlwifi drivers. + +config IWLWIFI_QOS + bool "Enable Wireless QoS in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable wireless quality of service (QoS) for the + iwlwifi drivers. + +config IWLWIFI_HT + bool "Enable 802.11n HT features in iwlwifi drivers" + depends on EXPERIMENTAL + depends on IWLWIFI && MAC80211_HT + default n + ---help--- + This option enables IEEE 802.11n High Throughput features + for the iwlwifi drivers. + +config IWL4965 + tristate "Intel Wireless WiFi 4965AGN" + depends on m && IWLWIFI && EXPERIMENTAL + default m + ---help--- + Select to build the driver supporting the: + + Intel Wireless WiFi Link 4965AGN + + This driver uses the kernel's mac80211 subsystem. + + See <file:Documentation/networking/README.iwlwifi> for + information on the capabilities currently enabled in this + driver and for tips for debugging any issues or problems. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + See the above referenced README.iwlwifi for information on where + to install the microcode images. + + If you want to compile the driver as a module ( = code which can be + inserted in and remvoed from the running kernel whenever you want), + say M here and read <file:Documentation/modules.txt>. The module + will be called iwl4965.ko. + +config IWL3945 + tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" + depends on m && IWLWIFI && EXPERIMENTAL + default m + ---help--- + Select to build the driver supporting the: + + Intel PRO/Wireless 3945ABG/BG Network Connection + + This driver uses the kernel's mac80211 subsystem. + + See <file:Documentation/networking/README.iwlwifi> for + information on the capabilities currently enabled in this + driver and for tips for debugging any issues or problems. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + See the above referenced README.iwlwifi for information on where + to install the microcode images. + + If you want to compile the driver as a module ( = code which can be + inserted in and remvoed from the running kernel whenever you want), + say M here and read <file:Documentation/modules.txt>. The module + will be called iwl3945.ko. 
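The IWLWIFI_DEBUG help text above describes a runtime debug_level bitmask exposed through sysfs, where each bit enables one category of debug output. A minimal user-space illustration of that style of mask-gated logging — the category bits and macro name here are invented for the example, not taken from iwl-debug.h:

#include <stdio.h>

/* Example category bits; the real bits live in iwl-debug.h. */
#define DBG_INFO  (1u << 0)
#define DBG_RX    (1u << 1)
#define DBG_RATE  (1u << 2)

static unsigned int debug_level;     /* in the driver this comes from sysfs */

#define DEBUG_LOG(mask, ...)                          \
        do {                                          \
                if (debug_level & (mask))             \
                        fprintf(stderr, __VA_ARGS__); \
        } while (0)

int main(void)
{
        debug_level = DBG_INFO | DBG_RATE;   /* e.g. echo 0x5 > .../debug_level */

        DEBUG_LOG(DBG_INFO, "info messages are enabled\n");
        DEBUG_LOG(DBG_RX,   "rx messages are suppressed\n");   /* not printed */
        DEBUG_LOG(DBG_RATE, "rate-scaling messages are enabled\n");
        return 0;
}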
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile new file mode 100644 index 000000000000..3bbd38358d53 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_IWL3945) += iwl3945.o +iwl3945-objs = iwl3945-base.o iwl-3945.o iwl-3945-rs.o + +obj-$(CONFIG_IWL4965) += iwl4965.o +iwl4965-objs = iwl4965-base.o iwl-4965.o iwl-4965-rs.o diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h new file mode 100644 index 000000000000..fb5f0649f4f6 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h @@ -0,0 +1,118 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_3945_hw__ +#define __iwl_3945_hw__ + +#define IWL_RX_BUF_SIZE 3000 +/* card static random access memory (SRAM) for processor data and instructs */ +#define ALM_RTC_INST_UPPER_BOUND (0x014000) +#define ALM_RTC_DATA_UPPER_BOUND (0x808000) + +#define ALM_RTC_INST_SIZE (ALM_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) +#define ALM_RTC_DATA_SIZE (ALM_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) + +#define IWL_MAX_BSM_SIZE ALM_RTC_INST_SIZE +#define IWL_MAX_INST_SIZE ALM_RTC_INST_SIZE +#define IWL_MAX_DATA_SIZE ALM_RTC_DATA_SIZE +#define IWL_MAX_NUM_QUEUES 8 + +static inline int iwl_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= RTC_DATA_LOWER_BOUND) && + (addr < ALM_RTC_DATA_UPPER_BOUND); +} + +/* Base physical address of iwl_shared is provided to FH_TSSR_CBB_BASE + * and &iwl_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ +struct iwl_shared { + __le32 tx_base_ptr[8]; + __le32 rx_read_ptr[3]; +} __attribute__ ((packed)); + +struct iwl_tfd_frame_data { + __le32 addr; + __le32 len; +} __attribute__ ((packed)); + +struct iwl_tfd_frame { + __le32 control_flags; + struct iwl_tfd_frame_data pa[4]; + u8 reserved[28]; +} __attribute__ ((packed)); + +static inline u8 iwl_hw_get_rate(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags) & 0xFF; +} + +static inline u16 iwl_hw_get_rate_n_flags(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags); +} + +static inline __le16 iwl_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le16((u16)rate|flags); +} +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c new file mode 100644 index 000000000000..f4aabcf480e4 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -0,0 +1,982 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <net/ieee80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include <net/mac80211.h> +#include <linux/wireless.h> + +#define IWL 3945 + +#include "../net/mac80211/ieee80211_rate.h" + +#include "iwlwifi.h" + +#define RS_NAME "iwl-3945-rs" + +struct iwl_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl_rate_scale_priv { + spinlock_t lock; + s32 *expected_tpt; + unsigned long last_partial_flush; + unsigned long last_flush; + u32 flush_time; + u32 last_tx_packets; + u32 tx_packets; + u8 tgg; + u8 flush_pending; + u8 start_rate; + u8 ibss_sta_added; + struct timer_list rate_scale_flush; + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; +}; + +static s32 iwl_expected_tpt_g[IWL_RATE_COUNT] = { + 0, 0, 76, 104, 130, 168, 191, 202, 7, 13, 35, 58 +}; + +static s32 iwl_expected_tpt_g_prot[IWL_RATE_COUNT] = { + 0, 0, 0, 80, 93, 113, 123, 125, 7, 13, 35, 58 +}; + +static s32 iwl_expected_tpt_a[IWL_RATE_COUNT] = { + 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0, 0 +}; + +static s32 iwl_expected_tpt_b[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 0, 0, 0, 0, 7, 13, 35, 58 +}; + +struct iwl_tpt_entry { + s8 min_rssi; + u8 index; +}; + +static struct iwl_tpt_entry iwl_tpt_table_a[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-72, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-87, IWL_RATE_9M_INDEX}, + {-89, IWL_RATE_6M_INDEX} +}; + +static struct iwl_tpt_entry iwl_tpt_table_b[] = { + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} + +}; + +static struct iwl_tpt_entry iwl_tpt_table_g[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-68, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} +}; + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_FLUSH (3*HZ/10) +#define IWL_RATE_WIN_FLUSH (HZ/2) +#define IWL_RATE_HIGH_TH 11520 +#define IWL_RATE_MIN_FAILURE_TH 8 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 + +static u8 iwl_get_rate_index_by_rssi(s32 rssi, u8 mode) +{ + u32 index = 0; + u32 table_size = 0; + struct iwl_tpt_entry *tpt_table = NULL; + + if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) + rssi = IWL_MIN_RSSI_VAL; + + switch (mode) { + case MODE_IEEE80211G: + tpt_table = iwl_tpt_table_g; + table_size = ARRAY_SIZE(iwl_tpt_table_g); + break; + + case MODE_IEEE80211A: + tpt_table = iwl_tpt_table_a; + table_size = ARRAY_SIZE(iwl_tpt_table_a); + break; + + default: + case MODE_IEEE80211B: + tpt_table = iwl_tpt_table_b; + table_size = ARRAY_SIZE(iwl_tpt_table_b); + break; + } + + while ((index < table_size) && (rssi < tpt_table[index].min_rssi)) + index++; + + index = min(index, (table_size - 1)); + + return tpt_table[index].index; +} + +static void iwl_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + 
window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +/** + * iwl_rate_scale_flush_windows - flush out the rate scale windows + * + * Returns the number of windows that have gathered data but were + * not flushed. If there were any that were not flushed, then + * reschedule the rate flushing routine. + */ +static int iwl_rate_scale_flush_windows(struct iwl_rate_scale_priv *rs_priv) +{ + int unflushed = 0; + int i; + unsigned long flags; + + /* + * For each rate, if we have collected data on that rate + * and it has been more than IWL_RATE_WIN_FLUSH + * since we flushed, clear out the gathered statistics + */ + for (i = 0; i < IWL_RATE_COUNT; i++) { + if (!rs_priv->win[i].counter) + continue; + + spin_lock_irqsave(&rs_priv->lock, flags); + if (time_after(jiffies, rs_priv->win[i].stamp + + IWL_RATE_WIN_FLUSH)) { + IWL_DEBUG_RATE("flushing %d samples of rate " + "index %d\n", + rs_priv->win[i].counter, i); + iwl_clear_window(&rs_priv->win[i]); + } else + unflushed++; + spin_unlock_irqrestore(&rs_priv->lock, flags); + } + + return unflushed; +} + +#define IWL_RATE_FLUSH_MAX 5000 /* msec */ +#define IWL_RATE_FLUSH_MIN 50 /* msec */ + +static void iwl_bg_rate_scale_flush(unsigned long data) +{ + struct iwl_rate_scale_priv *rs_priv = (void *)data; + int unflushed = 0; + unsigned long flags; + u32 packet_count, duration, pps; + + IWL_DEBUG_RATE("enter\n"); + + unflushed = iwl_rate_scale_flush_windows(rs_priv); + + spin_lock_irqsave(&rs_priv->lock, flags); + + rs_priv->flush_pending = 0; + + /* Number of packets Rx'd since last time this timer ran */ + packet_count = (rs_priv->tx_packets - rs_priv->last_tx_packets) + 1; + + rs_priv->last_tx_packets = rs_priv->tx_packets + 1; + + if (unflushed) { + duration = + jiffies_to_msecs(jiffies - rs_priv->last_partial_flush); +/* duration = jiffies_to_msecs(rs_priv->flush_time); */ + + IWL_DEBUG_RATE("Tx'd %d packets in %dms\n", + packet_count, duration); + + /* Determine packets per second */ + if (duration) + pps = (packet_count * 1000) / duration; + else + pps = 0; + + if (pps) { + duration = IWL_RATE_FLUSH_MAX / pps; + if (duration < IWL_RATE_FLUSH_MIN) + duration = IWL_RATE_FLUSH_MIN; + } else + duration = IWL_RATE_FLUSH_MAX; + + rs_priv->flush_time = msecs_to_jiffies(duration); + + IWL_DEBUG_RATE("new flush period: %d msec ave %d\n", + duration, packet_count); + + mod_timer(&rs_priv->rate_scale_flush, jiffies + + rs_priv->flush_time); + + rs_priv->last_partial_flush = jiffies; + } + + /* If there weren't any unflushed entries, we don't schedule the timer + * to run again */ + + rs_priv->last_flush = jiffies; + + spin_unlock_irqrestore(&rs_priv->lock, flags); + + IWL_DEBUG_RATE("leave\n"); +} + +/** + * iwl_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 64 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. 
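iwl_bg_rate_scale_flush above rescales its own timer: it derives packets-per-second from the frames sent since the last run and shortens the flush period when traffic is heavy. A self-contained sketch of just that arithmetic (the two constants carry the same meaning as the #defines above; everything else is simplified):

#include <stdio.h>

#define RATE_FLUSH_MAX 5000   /* msec, same meaning as IWL_RATE_FLUSH_MAX */
#define RATE_FLUSH_MIN 50     /* msec, same meaning as IWL_RATE_FLUSH_MIN */

/* Given frames sent since the last flush and the elapsed time in msec,
 * return the next flush period in msec: busier link -> shorter period. */
static unsigned int next_flush_period(unsigned int packet_count,
                                      unsigned int duration_ms)
{
        unsigned int pps = duration_ms ? (packet_count * 1000) / duration_ms : 0;
        unsigned int period;

        if (pps) {
                period = RATE_FLUSH_MAX / pps;
                if (period < RATE_FLUSH_MIN)
                        period = RATE_FLUSH_MIN;
        } else {
                period = RATE_FLUSH_MAX;
        }
        return period;
}

int main(void)
{
        printf("idle link:   %u ms\n", next_flush_period(0, 1000));    /* 5000 */
        printf("10 pkt/s:    %u ms\n", next_flush_period(10, 1000));   /* 500  */
        printf("1000 pkt/s:  %u ms\n", next_flush_period(1000, 1000)); /* clamped to 50 */
        return 0;
}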
+ */ +static void iwl_collect_tx_data(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate_scale_data *window, + int success, int retries) +{ + unsigned long flags; + + if (!retries) { + IWL_DEBUG_RATE("leave: retries == 0 -- should be at least 1\n"); + return; + } + + while (retries--) { + spin_lock_irqsave(&rs_priv->lock, flags); + + /* If we have filled up the window then subtract one from the + * success counter if the high-bit is counting toward + * success */ + if (window->counter == IWL_RATE_MAX_WINDOW) { + if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) + window->success_counter--; + } else + window->counter++; + + /* Slide the window to the left one bit */ + window->data = (window->data << 1); + + /* If this packet was a success then set the low bit high */ + if (success) { + window->success_counter++; + window->data |= 1; + } + + /* window->counter can't be 0 -- it is either >0 or + * IWL_RATE_MAX_WINDOW */ + window->success_ratio = 12800 * window->success_counter / + window->counter; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + spin_unlock_irqrestore(&rs_priv->lock, flags); + } +} + +static void rs_rate_init(void *priv_rate, void *priv_sta, + struct ieee80211_local *local, struct sta_info *sta) +{ + int i; + + IWL_DEBUG_RATE("enter\n"); + + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + for (i = IWL_RATE_COUNT - 1; i >= 0; i--) { + if (sta->supp_rates & (1 << i)) { + sta->txrate = i; + break; + } + } + + sta->last_txrate = sta->txrate; + + IWL_DEBUG_RATE("leave\n"); +} + +static void *rs_alloc(struct ieee80211_local *local) +{ + return local->hw.priv; +} + +/* rate scale requires free function to be implmented */ +static void rs_free(void *priv) +{ + return; +} +static void rs_clear(void *priv) +{ + return; +} + + +static void *rs_alloc_sta(void *priv, gfp_t gfp) +{ + struct iwl_rate_scale_priv *rs_priv; + int i; + + IWL_DEBUG_RATE("enter\n"); + + rs_priv = kzalloc(sizeof(struct iwl_rate_scale_priv), gfp); + if (!rs_priv) { + IWL_DEBUG_RATE("leave: ENOMEM\n"); + return NULL; + } + + spin_lock_init(&rs_priv->lock); + + rs_priv->start_rate = IWL_RATE_INVALID; + + /* default to just 802.11b */ + rs_priv->expected_tpt = iwl_expected_tpt_b; + + rs_priv->last_partial_flush = jiffies; + rs_priv->last_flush = jiffies; + rs_priv->flush_time = IWL_RATE_FLUSH; + rs_priv->last_tx_packets = 0; + rs_priv->ibss_sta_added = 0; + + init_timer(&rs_priv->rate_scale_flush); + rs_priv->rate_scale_flush.data = (unsigned long)rs_priv; + rs_priv->rate_scale_flush.function = &iwl_bg_rate_scale_flush; + + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl_clear_window(&rs_priv->win[i]); + + IWL_DEBUG_RATE("leave\n"); + + return rs_priv; +} + +static void rs_free_sta(void *priv, void *priv_sta) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + + IWL_DEBUG_RATE("enter\n"); + del_timer_sync(&rs_priv->rate_scale_flush); + kfree(rs_priv); + IWL_DEBUG_RATE("leave\n"); +} + +/** + * rs_tx_status - Update rate control values based on Tx results + * + * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by + * the hardware for each rate. 
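iwl_collect_tx_data above maintains a per-rate sliding window: each transmission shifts the 64-bit bitmask left, sets the low bit on success, keeps success_counter consistent by dropping the bit that falls off the top once the window is full, and recomputes success_ratio scaled by 12800. A compact user-space version of the same bookkeeping, minus the locking and retry loop:

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE 62   /* same value as IWL_RATE_MAX_WINDOW */

struct tx_window {
        uint64_t data;          /* bitmask of recent outcomes, bit0 = newest */
        int counter;            /* samples currently in the window, <= WINDOW_SIZE */
        int success_counter;    /* how many of those samples were ACKed */
        int success_ratio;      /* successes/counter scaled by 12800 */
};

static void collect_tx_data(struct tx_window *w, int success)
{
        /* If the window is full, the bit about to fall off the top may
         * have been a success; keep the counter consistent. */
        if (w->counter == WINDOW_SIZE) {
                if (w->data & (1ULL << (WINDOW_SIZE - 1)))
                        w->success_counter--;
        } else {
                w->counter++;
        }

        w->data <<= 1;
        if (success) {
                w->success_counter++;
                w->data |= 1;
        }

        w->success_ratio = 12800 * w->success_counter / w->counter;
}

int main(void)
{
        struct tx_window w = { 0 };
        int i;

        for (i = 0; i < 100; i++)
                collect_tx_data(&w, i % 4 != 0);   /* roughly 75% of frames ACKed */
        printf("ratio = %d (%d%%)\n", w.success_ratio,
               w.success_ratio * 100 / 12800);
        return 0;
}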
+ */ +static void rs_tx_status(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct ieee80211_tx_status *tx_resp) +{ + u8 retries, current_count; + int scale_rate_index, first_index, last_index; + unsigned long flags; + struct sta_info *sta; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct iwl_rate_scale_priv *rs_priv; + + IWL_DEBUG_RATE("enter\n"); + + retries = tx_resp->retry_count; + + first_index = tx_resp->control.tx_rate; + if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { + IWL_DEBUG_RATE("leave: Rate out of bounds: %0x for %d\n", + tx_resp->control.tx_rate, first_index); + return; + } + + sta = sta_info_get(local, hdr->addr1); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); + return; + } + + rs_priv = (void *)sta->rate_ctrl_priv; + + rs_priv->tx_packets++; + + scale_rate_index = first_index; + last_index = first_index; + + /* + * Update the window for each rate. We determine which rates + * were Tx'd based on the total number of retries vs. the number + * of retries configured for each rate -- currently set to the + * priv value 'retry_rate' vs. rate specific + * + * On exit from this while loop last_index indicates the rate + * at which the frame was finally transmitted (or failed if no + * ACK) + */ + while (retries > 0) { + if (retries < priv->retry_rate) { + current_count = retries; + last_index = scale_rate_index; + } else { + current_count = priv->retry_rate; + last_index = iwl_get_prev_ieee_rate(scale_rate_index); + } + + /* Update this rate accounting for as many retries + * as was used for it (per current_count) */ + iwl_collect_tx_data(rs_priv, + &rs_priv->win[scale_rate_index], + 0, current_count); + IWL_DEBUG_RATE("Update rate %d for %d retries.\n", + scale_rate_index, current_count); + + retries -= current_count; + + if (retries) + scale_rate_index = + iwl_get_prev_ieee_rate(scale_rate_index); + } + + /* Update the last index window with success/failure based on ACK */ + IWL_DEBUG_RATE("Update rate %d with %s.\n", + last_index, + (tx_resp->flags & IEEE80211_TX_STATUS_ACK) ? 
+ "success" : "failure"); + iwl_collect_tx_data(rs_priv, + &rs_priv->win[last_index], + tx_resp->flags & IEEE80211_TX_STATUS_ACK, 1); + + /* We updated the rate scale window -- if its been more than + * flush_time since the last run, schedule the flush + * again */ + spin_lock_irqsave(&rs_priv->lock, flags); + + if (!rs_priv->flush_pending && + time_after(jiffies, rs_priv->last_partial_flush + + rs_priv->flush_time)) { + + rs_priv->flush_pending = 1; + mod_timer(&rs_priv->rate_scale_flush, + jiffies + rs_priv->flush_time); + } + + spin_unlock_irqrestore(&rs_priv->lock, flags); + + sta_info_put(sta); + + IWL_DEBUG_RATE("leave\n"); + + return; +} + +static struct ieee80211_rate *iwl_get_lowest_rate(struct ieee80211_local + *local) +{ + struct ieee80211_hw_mode *mode = local->oper_hw_mode; + int i; + + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *rate = &mode->rates[i]; + + if (rate->flags & IEEE80211_RATE_SUPPORTED) + return rate; + } + + return &mode->rates[0]; +} + +static u16 iwl_get_adjacent_rate(struct iwl_rate_scale_priv *rs_priv, + u8 index, u16 rate_mask, int phymode) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A walks to the next literal adjascent rate in + * the rate table */ + if (unlikely(phymode == MODE_IEEE80211A)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + if (rs_priv->tgg) + low = iwl_rates[low].prev_rs_tgg; + else + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + if (rs_priv->tgg) + high = iwl_rates[high].next_rs_tgg; + else + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +/** + * rs_get_rate - find the rate for the requested packet + * + * Returns the ieee80211_rate structure allocated by the driver. + * + * The rate control algorithm has no internal mapping between hw_mode's + * rate ordering and the rate ordering used by the rate control algorithm. + * + * The rate control algorithm uses a single table of rates that goes across + * the entire A/B/G spectrum vs. being limited to just one particular + * hw_mode. 
+ * + * As such, we can't convert the index obtained below into the hw_mode's + * rate table and must reference the driver allocated rate table + * + */ +static struct ieee80211_rate *rs_get_rate(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct rate_control_extra *extra) +{ + u8 low = IWL_RATE_INVALID; + u8 high = IWL_RATE_INVALID; + u16 high_low; + int index; + struct iwl_rate_scale_priv *rs_priv; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + unsigned long flags; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + u16 fc, rate_mask; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_RATE("enter\n"); + + memset(extra, 0, sizeof(*extra)); + + fc = le16_to_cpu(hdr->frame_control); + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || + (is_multicast_ether_addr(hdr->addr1))) { + /* Send management frames and broadcast/multicast data using + * lowest rate. */ + /* TODO: this could probably be improved.. */ + IWL_DEBUG_RATE("leave: lowest rate (not data or is " + "multicast)\n"); + + return iwl_get_lowest_rate(local); + } + + sta = sta_info_get(local, hdr->addr1); + if (!sta || !sta->rate_ctrl_priv) { + IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); + if (sta) + sta_info_put(sta); + return NULL; + } + + rate_mask = sta->supp_rates; + index = min(sta->txrate & 0xffff, IWL_RATE_COUNT - 1); + + rs_priv = (void *)sta->rate_ctrl_priv; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !rs_priv->ibss_sta_added) { + u8 sta_id = iwl_hw_find_station(priv, hdr->addr1); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE("LQ: ADD station %s\n", + print_mac(mac, hdr->addr1)); + sta_id = iwl_add_station(priv, + hdr->addr1, 0, CMD_ASYNC); + } + if (sta_id != IWL_INVALID_STATION) + rs_priv->ibss_sta_added = 1; + } + + spin_lock_irqsave(&rs_priv->lock, flags); + + if (rs_priv->start_rate != IWL_RATE_INVALID) { + index = rs_priv->start_rate; + rs_priv->start_rate = IWL_RATE_INVALID; + } + + window = &(rs_priv->win[index]); + + fail_count = window->counter - window->success_counter; + + if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { + window->average_tpt = IWL_INVALID_VALUE; + spin_unlock_irqrestore(&rs_priv->lock, flags); + + IWL_DEBUG_RATE("Invalid average_tpt on rate %d: " + "counter: %d, success_counter: %d, " + "expected_tpt is %sNULL\n", + index, + window->counter, + window->success_counter, + rs_priv->expected_tpt ? 
"not " : ""); + goto out; + + } + + window->average_tpt = ((window->success_ratio * + rs_priv->expected_tpt[index] + 64) / 128); + current_tpt = window->average_tpt; + + high_low = iwl_get_adjacent_rate(rs_priv, index, rate_mask, + local->hw.conf.phymode); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + if (low != IWL_RATE_INVALID) + low_tpt = rs_priv->win[low].average_tpt; + + if (high != IWL_RATE_INVALID) + high_tpt = rs_priv->win[high].average_tpt; + + spin_unlock_irqrestore(&rs_priv->lock, flags); + + scale_action = 1; + + if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { + IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); + scale_action = -1; + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) + scale_action = 1; + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) + && (low_tpt < current_tpt) + && (high_tpt < current_tpt)) { + IWL_DEBUG_RATE("No action -- low [%d] & high [%d] < " + "current_tpt [%d]\n", + low_tpt, high_tpt, current_tpt); + scale_action = 0; + } else { + if (high_tpt != IWL_INVALID_VALUE) { + if (high_tpt > current_tpt) + scale_action = 1; + else { + IWL_DEBUG_RATE + ("decrease rate because of high tpt\n"); + scale_action = -1; + } + } else if (low_tpt != IWL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE + ("decrease rate because of low tpt\n"); + scale_action = -1; + } else + scale_action = 1; + } + } + + if ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > window->average_tpt)) { + IWL_DEBUG_RATE("No action -- success_ratio [%d] > HIGH_TH or " + "current_tpt [%d] > average_tpt [%d]\n", + window->success_ratio, + current_tpt, window->average_tpt); + scale_action = 0; + } + + switch (scale_action) { + case -1: + if (low != IWL_RATE_INVALID) + index = low; + break; + + case 1: + if (high != IWL_RATE_INVALID) + index = high; + + break; + + case 0: + default: + break; + } + + IWL_DEBUG_RATE("Selected %d (action %d) - low %d high %d\n", + index, scale_action, low, high); + + out: + + sta->last_txrate = index; + sta->txrate = sta->last_txrate; + sta_info_put(sta); + + IWL_DEBUG_RATE("leave: %d\n", index); + + return &priv->ieee_rates[index]; +} + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init, + .clear = rs_clear, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +}; + +int iwl_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct iwl_priv *priv = hw->priv; + struct iwl_rate_scale_priv *rs_priv; + struct sta_info *sta; + unsigned long flags; + int count = 0, i; + u32 samples = 0, success = 0, good = 0; + unsigned long now = jiffies; + u32 max_time = 0; + + sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) { + sta_info_put(sta); + IWL_DEBUG_RATE("leave - no private rate data!\n"); + } else + IWL_DEBUG_RATE("leave - no station!\n"); + return sprintf(buf, "station %d not found\n", sta_id); + } + + rs_priv = (void *)sta->rate_ctrl_priv; + spin_lock_irqsave(&rs_priv->lock, flags); + i = IWL_RATE_54M_INDEX; + while (1) { + u64 mask; + int j; + + count += + sprintf(&buf[count], " %2dMbs: ", iwl_rates[i].ieee / 2); + + mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); + for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) + buf[count++] = + (rs_priv->win[i].data 
& mask) ? '1' : '0'; + + samples += rs_priv->win[i].counter; + good += rs_priv->win[i].success_counter; + success += rs_priv->win[i].success_counter * iwl_rates[i].ieee; + + if (rs_priv->win[i].stamp) { + int delta = + jiffies_to_msecs(now - rs_priv->win[i].stamp); + + if (delta > max_time) + max_time = delta; + + count += sprintf(&buf[count], "%5dms\n", delta); + } else + buf[count++] = '\n'; + + j = iwl_get_prev_ieee_rate(i); + if (j == i) + break; + i = j; + } + spin_unlock_irqrestore(&rs_priv->lock, flags); + sta_info_put(sta); + + /* Display the average rate of all samples taken. + * + * NOTE: We multiple # of samples by 2 since the IEEE measurement + * added from iwl_rates is actually 2X the rate */ + if (samples) + count += sprintf( + &buf[count], + "\nAverage rate is %3d.%02dMbs over last %4dms\n" + "%3d%% success (%d good packets over %d tries)\n", + success / (2 * samples), (success * 5 / samples) % 10, + max_time, good * 100 / samples, good, samples); + else + count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n"); + + return count; +} + +void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + s32 rssi = 0; + unsigned long flags; + struct ieee80211_local *local = hw_to_local(hw); + struct iwl_rate_scale_priv *rs_priv; + struct sta_info *sta; + + IWL_DEBUG_RATE("enter\n"); + + if (!local->rate_ctrl->ops->name || + strcmp(local->rate_ctrl->ops->name, RS_NAME)) { + IWL_WARNING("iwl-3945-rs not selected as rate control algo!\n"); + IWL_DEBUG_RATE("leave - mac80211 picked the wrong RC algo.\n"); + return; + } + + sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + IWL_DEBUG_RATE("leave - no private rate data!\n"); + return; + } + + rs_priv = (void *)sta->rate_ctrl_priv; + + spin_lock_irqsave(&rs_priv->lock, flags); + + rs_priv->tgg = 0; + switch (priv->phymode) { + case MODE_IEEE80211G: + if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) { + rs_priv->tgg = 1; + rs_priv->expected_tpt = iwl_expected_tpt_g_prot; + } else + rs_priv->expected_tpt = iwl_expected_tpt_g; + break; + + case MODE_IEEE80211A: + rs_priv->expected_tpt = iwl_expected_tpt_a; + break; + + default: + IWL_WARNING("Invalid phymode. Defaulting to 802.11b\n"); + case MODE_IEEE80211B: + rs_priv->expected_tpt = iwl_expected_tpt_b; + break; + } + + sta_info_put(sta); + spin_unlock_irqrestore(&rs_priv->lock, flags); + + rssi = priv->last_rx_rssi; + if (rssi == 0) + rssi = IWL_MIN_RSSI_VAL; + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RATE, "Network RSSI: %d\n", rssi); + + rs_priv->start_rate = iwl_get_rate_index_by_rssi(rssi, priv->phymode); + + IWL_DEBUG_RATE("leave: rssi %d assign rate index: " + "%d (plcp 0x%x)\n", rssi, rs_priv->start_rate, + iwl_rates[rs_priv->start_rate].plcp); +} + +void iwl_rate_control_register(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_register(&rs_ops); +} + +void iwl_rate_control_unregister(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + + diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h new file mode 100644 index 000000000000..b926738e0ea1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h @@ -0,0 +1,191 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_rs_h__ +#define __iwl_3945_rs_h__ + +struct iwl_rate_info { + u8 plcp; + u8 ieee; + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +enum { + IWL_RATE_6M_INDEX = 0, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_1M_INDEX, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_INVM_INDEX, + IWL_RATE_INVALID = IWL_RATE_INVM_INDEX +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1<<IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1<<IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1<<IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1<<IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1<<IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1<<IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1<<IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1<<IWL_RATE_54M_INDEX) +#define IWL_RATE_1M_MASK (1<<IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1<<IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1<<IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1<<IWL_RATE_11M_INDEX) + +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, +}; + +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_CCK_BASIC_RATES_MASK \ + (IWL_RATE_1M_MASK | \ + IWL_RATE_2M_MASK) + +#define IWL_CCK_RATES_MASK \ + (IWL_BASIC_RATES_MASK | \ + IWL_RATE_5M_MASK | \ + IWL_RATE_11M_MASK) + +#define IWL_OFDM_BASIC_RATES_MASK \ + (IWL_RATE_6M_MASK | \ + IWL_RATE_12M_MASK | \ + IWL_RATE_24M_MASK) + +#define IWL_OFDM_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_RATE_9M_MASK | \ + IWL_RATE_18M_MASK | \ + IWL_RATE_36M_MASK | \ + IWL_RATE_48M_MASK | \ + IWL_RATE_54M_MASK) + +#define IWL_BASIC_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_CCK_BASIC_RATES_MASK) + +#define IWL_RATES_MASK ((1<<IWL_RATE_COUNT)-1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + +static inline u8 iwl_get_prev_ieee_rate(u8 rate_index) +{ + u8 rate = iwl_rates[rate_index].prev_ieee; + + if (rate == IWL_RATE_INVALID) + rate = rate_index; + return rate; +} + +/** + * iwl_fill_rs_info - Fill an output text buffer with the rate representation + * + * NOTE: This is provided as a quick mechanism for a user to visualize + * the performance of the rate control alogirthm and is not meant to be + * parsed software. + */ +extern int iwl_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id); + +/** + * iwl_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific througput table used is based on the type of network + * the associated with, including A, B, G, and G w/ TGG protection + */ +extern void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. 
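The header above composes per-rate IWL_RATE_*_MASK defines into CCK, OFDM and basic-rate bitmasks so callers can test set membership with a single AND. A small stand-alone demonstration of that pattern, using the same index ordering as the enum above (the mask names, BIT macro and Mbps strings here are just for the example):

#include <stdio.h>

/* Index order follows the enum above: OFDM rates 0..7, CCK rates 8..11. */
enum { R6 = 0, R9, R12, R18, R24, R36, R48, R54, R1, R2, R5, R11, RATE_COUNT };

#define BIT(i) (1u << (i))

#define CCK_BASIC_MASK   (BIT(R1) | BIT(R2))
#define OFDM_BASIC_MASK  (BIT(R6) | BIT(R12) | BIT(R24))
#define BASIC_MASK       (CCK_BASIC_MASK | OFDM_BASIC_MASK)
#define CCK_MASK         (CCK_BASIC_MASK | BIT(R5) | BIT(R11))

static const char *names[RATE_COUNT] = {
        "6M", "9M", "12M", "18M", "24M", "36M", "48M", "54M",
        "1M", "2M", "5.5M", "11M"
};

int main(void)
{
        int i;

        for (i = 0; i < RATE_COUNT; i++)
                printf("%-5s %s %s\n", names[i],
                       (CCK_MASK & BIT(i)) ? "CCK " : "OFDM",
                       (BASIC_MASK & BIT(i)) ? "(basic/mandatory)" : "");
        return 0;
}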
This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern void iwl_rate_control_register(struct ieee80211_hw *hw); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +extern void iwl_rate_control_unregister(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c new file mode 100644 index 000000000000..acb38750535f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -0,0 +1,2300 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <net/mac80211.h> + +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#define IWL 3945 + +#include "iwlwifi.h" +#include "iwl-helpers.h" +#include "iwl-3945.h" +#include "iwl-3945-rs.h" + +#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ + 
IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ +}; + +/* 1 = enable the iwl_disable_events() function */ +#define IWL_EVT_DISABLE (0) +#define IWL_EVT_DISABLE_SIZE (1532/32) + +/** + * iwl_disable_events - Disable selected events in uCode event log + * + * Disable an event by writing "1"s into "disable" + * bitmap in SRAM. Bit position corresponds to Event # (id/type). + * Default values of 0 enable uCode events to be logged. + * Use for only special debugging. This function is just a placeholder as-is, + * you'll need to provide the special bits! ... + * ... and set IWL_EVT_DISABLE to 1. */ +void iwl_disable_events(struct iwl_priv *priv) +{ + int rc; + int i; + u32 base; /* SRAM address of event log header */ + u32 disable_ptr; /* SRAM address of event-disable bitmap array */ + u32 array_size; /* # of u32 entries in array */ + u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { + 0x00000000, /* 31 - 0 Event id numbers */ + 0x00000000, /* 63 - 32 */ + 0x00000000, /* 95 - 64 */ + 0x00000000, /* 127 - 96 */ + 0x00000000, /* 159 - 128 */ + 0x00000000, /* 191 - 160 */ + 0x00000000, /* 223 - 192 */ + 0x00000000, /* 255 - 224 */ + 0x00000000, /* 287 - 256 */ + 0x00000000, /* 319 - 288 */ + 0x00000000, /* 351 - 320 */ + 0x00000000, /* 383 - 352 */ + 0x00000000, /* 415 - 384 */ + 0x00000000, /* 447 - 416 */ + 0x00000000, /* 479 - 448 */ + 0x00000000, /* 511 - 480 */ + 0x00000000, /* 543 - 512 */ + 0x00000000, /* 575 - 544 */ + 0x00000000, /* 607 - 576 */ + 0x00000000, /* 639 - 608 */ + 0x00000000, /* 671 - 640 */ + 0x00000000, /* 703 - 672 */ + 0x00000000, /* 735 - 704 */ + 0x00000000, /* 767 - 736 */ + 0x00000000, /* 799 - 768 */ + 0x00000000, /* 831 - 800 */ + 0x00000000, /* 863 - 832 */ + 0x00000000, /* 895 - 864 */ + 0x00000000, /* 927 - 896 */ + 0x00000000, /* 959 - 928 */ + 0x00000000, /* 991 - 960 */ + 0x00000000, /* 1023 - 992 */ + 0x00000000, /* 1055 - 1024 */ + 0x00000000, /* 1087 - 1056 */ + 0x00000000, /* 1119 - 1088 */ + 0x00000000, /* 1151 - 1120 */ + 0x00000000, /* 1183 - 1152 */ + 0x00000000, /* 1215 - 1184 */ + 0x00000000, /* 1247 - 1216 */ + 0x00000000, /* 1279 - 1248 */ + 0x00000000, /* 1311 - 1280 */ + 0x00000000, /* 1343 - 1312 */ + 0x00000000, /* 1375 - 1344 */ + 0x00000000, /* 1407 - 1376 */ + 0x00000000, /* 1439 - 1408 */ + 0x00000000, /* 1471 - 1440 */ + 0x00000000, /* 1503 - 1472 */ + }; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Invalid event log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + disable_ptr = iwl_read_restricted_mem(priv, base + (4 * sizeof(u32))); + array_size = iwl_read_restricted_mem(priv, base + (5 * sizeof(u32))); + iwl_release_restricted_access(priv); + + if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { + IWL_DEBUG_INFO("Disabling selected uCode log events at 0x%x\n", + disable_ptr); + rc = iwl_grab_restricted_access(priv); + for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) + iwl_write_restricted_mem(priv, + disable_ptr + + (i * sizeof(u32)), + evt_disable[i]); + + iwl_release_restricted_access(priv); + } else { + IWL_DEBUG_INFO("Selected uCode log events may be disabled\n"); + IWL_DEBUG_INFO(" by writing \"1\"s into disable bitmap\n"); + IWL_DEBUG_INFO(" in SRAM at 0x%x, size %d u32s\n", + disable_ptr, array_size); + } + +} + +/** + * iwl3945_get_antenna_flags - Get antenna flags 
for RXON command + * @priv: eeprom and antenna fields are used to determine antenna flags + * + * priv->eeprom is used to determine if antenna AUX/MAIN are reversed + * priv->antenna specifies the antenna diversity mode: + * + * IWL_ANTENNA_DIVERISTY - NIC selects best antenna by itself + * IWL_ANTENNA_MAIN - Force MAIN antenna + * IWL_ANTENNA_AUX - Force AUX antenna + */ +__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) +{ + switch (priv->antenna) { + case IWL_ANTENNA_DIVERSITY: + return 0; + + case IWL_ANTENNA_MAIN: + if (priv->eeprom.antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + + case IWL_ANTENNA_AUX: + if (priv->eeprom.antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + } + + /* bad antenna selector value */ + IWL_ERROR("Bad antenna selector value (0x%x)\n", priv->antenna); + return 0; /* "diversity" is default if error */ +} + +/***************************************************************************** + * + * Intel PRO/Wireless 3945ABG/BG Network Connection + * + * RX handler implementations + * + * Used by iwl-base.c + * + *****************************************************************************/ + +void iwl_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len)); + + memcpy(&priv->statistics, pkt->u.raw, sizeof(priv->statistics)); + + priv->last_statistics_time = jiffies; +} + +static void iwl3945_handle_data_packet(struct iwl_priv *priv, int is_data, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats, + u16 phy_flags) +{ + struct ieee80211_hdr *hdr; + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + short len = le16_to_cpu(rx_hdr->len); + + /* We received data from the HW, so stop the watchdog */ + if (unlikely((len + IWL_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { + IWL_DEBUG_DROP("Corruption detected!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT + ("Dropping packet while interface is not open.\n"); + return; + } + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, + le32_to_cpu(rx_end->status), + stats); + iwl_handle_data_packet_monitor(priv, rxb, IWL_RX_DATA(pkt), + len, stats, phy_flags); + return; + } + + skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt); + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); + + hdr = (void *)rxb->skb->data; + + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, + le32_to_cpu(rx_end->status), stats); + + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + rxb->skb = NULL; +} + +static void iwl3945_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + struct ieee80211_hdr *header; + u16 phy_flags = le16_to_cpu(rx_hdr->phy_flags); + u16 rx_stats_sig_avg = 
le16_to_cpu(rx_stats->sig_avg); + u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); + struct ieee80211_rx_status stats = { + .mactime = le64_to_cpu(rx_end->timestamp), + .freq = ieee80211chan2mhz(le16_to_cpu(rx_hdr->channel)), + .channel = le16_to_cpu(rx_hdr->channel), + .phymode = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + MODE_IEEE80211G : MODE_IEEE80211A, + .antenna = 0, + .rate = rx_hdr->rate, + .flag = 0, + }; + u8 network_packet; + int snr; + + if ((unlikely(rx_stats->phy_count > 20))) { + IWL_DEBUG_DROP + ("dsp size out of range [0,20]: " + "%d/n", rx_stats->phy_count); + return; + } + + if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) + || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); + return; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + iwl3945_handle_data_packet(priv, 1, rxb, &stats, phy_flags); + return; + } + + /* Convert 3945's rssi indicator to dBm */ + stats.ssi = rx_stats->rssi - IWL_RSSI_OFFSET; + + /* Set default noise value to -127 */ + if (priv->last_rx_noise == 0) + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + /* 3945 provides noise info for OFDM frames only. + * sig_avg and noise_diff are measured by the 3945's digital signal + * processor (DSP), and indicate linear levels of signal level and + * distortion/noise within the packet preamble after + * automatic gain control (AGC). sig_avg should stay fairly + * constant if the radio's AGC is working well. + * Since these values are linear (not dB or dBm), linear + * signal-to-noise ratio (SNR) is (sig_avg / noise_diff). + * Convert linear SNR to dB SNR, then subtract that from rssi dBm + * to obtain noise level in dBm. + * Calculate stats.signal (quality indicator in %) based on SNR. */ + if (rx_stats_noise_diff) { + snr = rx_stats_sig_avg / rx_stats_noise_diff; + stats.noise = stats.ssi - iwl_calc_db_from_ratio(snr); + stats.signal = iwl_calc_sig_qual(stats.ssi, stats.noise); + + /* If noise info not available, calculate signal quality indicator (%) + * using just the dBm signal level. */ + } else { + stats.noise = priv->last_rx_noise; + stats.signal = iwl_calc_sig_qual(stats.ssi, 0); + } + + + IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", + stats.ssi, stats.noise, stats.signal, + rx_stats_sig_avg, rx_stats_noise_diff); + + stats.freq = ieee80211chan2mhz(stats.channel); + + /* can be covered by iwl_report_frame() in most cases */ +/* IWL_DEBUG_RX("RX status: 0x%08X\n", rx_end->status); */ + + header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + + network_packet = iwl_is_network_packet(priv, header); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_STATS && net_ratelimit()) + IWL_DEBUG_STATS + ("[%c] %d RSSI: %d Signal: %u, Noise: %u, Rate: %u\n", + network_packet ? 
'*' : ' ', + stats.channel, stats.ssi, stats.ssi, + stats.ssi, stats.rate); + + if (iwl_debug_level & (IWL_DL_RX)) + /* Set "1" to report good data frames in groups of 100 */ + iwl_report_frame(priv, pkt, header, 1); +#endif + + if (network_packet) { + priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); + priv->last_tsf = le64_to_cpu(rx_end->timestamp); + priv->last_rx_rssi = stats.ssi; + priv->last_rx_noise = stats.noise; + } + + switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_MGMT: + switch (le16_to_cpu(header->frame_control) & + IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON:{ + /* If this is a beacon or probe response for + * our network then cache the beacon + * timestamp */ + if ((((priv->iw_mode == IEEE80211_IF_TYPE_STA) + && !compare_ether_addr(header->addr2, + priv->bssid)) || + ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + && !compare_ether_addr(header->addr3, + priv->bssid)))) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)header; + __le32 *pos; + pos = + (__le32 *) & mgmt->u.beacon. + timestamp; + priv->timestamp0 = le32_to_cpu(pos[0]); + priv->timestamp1 = le32_to_cpu(pos[1]); + priv->beacon_int = le16_to_cpu( + mgmt->u.beacon.beacon_int); + if (priv->call_post_assoc_from_beacon && + (priv->iw_mode == + IEEE80211_IF_TYPE_STA)) + queue_work(priv->workqueue, + &priv->post_associate.work); + + priv->call_post_assoc_from_beacon = 0; + } + + break; + } + + case IEEE80211_STYPE_ACTION: + /* TODO: Parse 802.11h frames for CSA... */ + break; + + /* + * TODO: There is no callback function from upper + * stack to inform us when associated status. this + * work around to sniff assoc_resp management frame + * and finish the association process. + */ + case IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_STYPE_REASSOC_RESP:{ + struct ieee80211_mgmt *mgnt = + (struct ieee80211_mgmt *)header; + priv->assoc_id = (~((1 << 15) | (1 << 14)) & + le16_to_cpu(mgnt->u. 
+ assoc_resp.aid)); + priv->assoc_capability = + le16_to_cpu(mgnt->u.assoc_resp.capab_info); + if (priv->beacon_int) + queue_work(priv->workqueue, + &priv->post_associate.work); + else + priv->call_post_assoc_from_beacon = 1; + break; + } + + case IEEE80211_STYPE_PROBE_REQ:{ + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + IWL_DEBUG_DROP + ("Dropping (non network): %s" + ", %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + return; + } + } + + iwl3945_handle_data_packet(priv, 0, rxb, &stats, phy_flags); + break; + + case IEEE80211_FTYPE_CTL: + break; + + case IEEE80211_FTYPE_DATA: { + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + + if (unlikely(is_duplicate_packet(priv, header))) + IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + else + iwl3945_handle_data_packet(priv, 1, rxb, &stats, + phy_flags); + break; + } + } +} + +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, + dma_addr_t addr, u16 len) +{ + int count; + u32 pad; + struct iwl_tfd_frame *tfd = (struct iwl_tfd_frame *)ptr; + + count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags)); + + if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { + IWL_ERROR("Error can not send more than %d chunks\n", + NUM_TFD_CHUNKS); + return -EINVAL; + } + + tfd->pa[count].addr = cpu_to_le32(addr); + tfd->pa[count].len = cpu_to_le32(len); + + count++; + + tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | + TFD_CTL_PAD_SET(pad)); + + return 0; +} + +/** + * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used] + * + * Does NOT advance any indexes + */ +int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; + struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter; + + /* classify bd */ + if (txq->q.id == IWL_CMD_QUEUE_NUM) + /* nothing to cleanup after for host commands */ + return 0; + + /* sanity check */ + counter = TFD_CTL_COUNT_GET(le32_to_cpu(bd->control_flags)); + if (counter > NUM_TFD_CHUNKS) { + IWL_ERROR("Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return 0; + } + + /* unmap chunks if any */ + + for (i = 1; i < counter; i++) { + pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr), + le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE); + if (txq->txb[txq->q.last_used].skb[0]) { + struct sk_buff *skb = txq->txb[txq->q.last_used].skb[0]; + if (txq->txb[txq->q.last_used].skb[0]) { + /* Can be called from interrupt context */ + dev_kfree_skb_any(skb); + txq->txb[txq->q.last_used].skb[0] = NULL; + } + } + } + return 0; +} + +u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int ret = IWL_INVALID_STATION; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) + if ((priv->stations[i].used) && + (!compare_ether_addr + (priv->stations[i].sta.sta.addr, addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_INFO("can not find STA %s (total %d)\n", + print_mac(mac, addr), priv->num_stations); + out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +/** + 
* iwl_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: + * +*/ +void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, int sta_id, int tx_id) +{ + unsigned long flags; + u16 rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1); + u16 rate_mask; + int rate; + u8 rts_retry_limit; + u8 data_retry_limit; + __le32 tx_flags; + u16 fc = le16_to_cpu(hdr->frame_control); + + rate = iwl_rates[rate_index].plcp; + tx_flags = cmd->cmd.tx.tx_flags; + + /* We need to figure out how to get the sta->supp_rates while + * in this running context; perhaps encoding into ctrl->tx_rate? */ + rate_mask = IWL_RATES_MASK; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].current_rate.rate_n_flags = rate; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + (sta_id != IWL3945_BROADCAST_ID) && + (sta_id != IWL_MULTICAST_ID)) + priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate; + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + if (tx_id >= IWL_CMD_QUEUE_NUM) + rts_retry_limit = 3; + else + rts_retry_limit = 7; + + if (ieee80211_is_probe_response(fc)) { + data_retry_limit = 3; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + } else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + + if (priv->data_retry_limit != -1) + data_retry_limit = priv->data_retry_limit; + + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_AUTH: + case IEEE80211_STYPE_DEAUTH: + case IEEE80211_STYPE_ASSOC_REQ: + case IEEE80211_STYPE_REASSOC_REQ: + if (tx_flags & TX_CMD_FLG_RTS_MSK) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + break; + default: + break; + } + } + + cmd->cmd.tx.rts_retry_limit = rts_retry_limit; + cmd->cmd.tx.data_retry_limit = data_retry_limit; + cmd->cmd.tx.rate = rate; + cmd->cmd.tx.tx_flags = tx_flags; + + /* OFDM */ + cmd->cmd.tx.supp_rates[0] = rate_mask & IWL_OFDM_RATES_MASK; + + /* CCK */ + cmd->cmd.tx.supp_rates[1] = (rate_mask >> 8) & 0xF; + + IWL_DEBUG_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " + "cck/ofdm mask: 0x%x/0x%x\n", sta_id, + cmd->cmd.tx.rate, le32_to_cpu(cmd->cmd.tx.tx_flags), + cmd->cmd.tx.supp_rates[1], cmd->cmd.tx.supp_rates[0]); +} + +u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) +{ + unsigned long flags_spin; + struct iwl_station_entry *station; + + if (sta_id == IWL_INVALID_STATION) + return IWL_INVALID_STATION; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + station = &priv->stations[sta_id]; + + station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; + station->sta.rate_n_flags = cpu_to_le16(tx_rate); + station->current_rate.rate_n_flags = tx_rate; + station->sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + iwl_send_add_station(priv, &station->sta, flags); + IWL_DEBUG_RATE("SCALE sync station %d to rate %d\n", + sta_id, tx_rate); + return sta_id; +} + +void iwl_hw_card_show_info(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO("3945ABG HW Version %u.%u.%u\n", + ((priv->eeprom.board_revision >> 8) & 0x0F), + ((priv->eeprom.board_revision >> 8) >> 4), + (priv->eeprom.board_revision & 0x00FF)); + + IWL_DEBUG_INFO("3945ABG PBA Number %.*s\n", + (int)sizeof(priv->eeprom.board_pba_number), + priv->eeprom.board_pba_number); + + IWL_DEBUG_INFO("EEPROM_ANTENNA_SWITCH_TYPE is 0x%02X\n", + priv->eeprom.antenna_switch_type); +} + +static int 
iwl3945_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + if (!pwr_max) { + u32 val; + + rc = pci_read_config_dword(priv->pci_dev, + PCI_POWER_SOURCE, &val); + if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) { + iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + iwl_release_restricted_access(priv); + + iwl_poll_bit(priv, CSR_GPIO_IN, + CSR_GPIO_IN_VAL_VAUX_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); + } else + iwl_release_restricted_access(priv); + } else { + iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_release_restricted_access(priv); + iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ + } + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_write_restricted(priv, FH_RCSR_RBD_BASE(0), rxq->dma_addr); + iwl_write_restricted(priv, FH_RCSR_RPTR_ADDR(0), + priv->hw_setting.shared_phys + + offsetof(struct iwl_shared, rx_read_ptr[0])); + iwl_write_restricted(priv, FH_RCSR_WPTR(0), 0); + iwl_write_restricted(priv, FH_RCSR_CONFIG(0), + ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | + ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | + (RX_QUEUE_SIZE_LOG << ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | + (1 << ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); + + /* fake read to flush all prev I/O */ + iwl_read_restricted(priv, FH_RSSR_CTRL); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int iwl3945_tx_reset(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* bypass mode */ + iwl_write_restricted_reg(priv, SCD_MODE_REG, 0x2); + + /* RA 0 is active */ + iwl_write_restricted_reg(priv, SCD_ARASTAT_REG, 0x01); + + /* all 6 fifo are active */ + iwl_write_restricted_reg(priv, SCD_TXFACT_REG, 0x3f); + + iwl_write_restricted_reg(priv, SCD_SBYP_MODE_1_REG, 0x010000); + iwl_write_restricted_reg(priv, SCD_SBYP_MODE_2_REG, 0x030002); + iwl_write_restricted_reg(priv, SCD_TXF4MF_REG, 0x000004); + iwl_write_restricted_reg(priv, SCD_TXF5MF_REG, 0x000005); + + iwl_write_restricted(priv, FH_TSSR_CBB_BASE, + priv->hw_setting.shared_phys); + + iwl_write_restricted(priv, FH_TSSR_MSG_CONFIG, + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); + + iwl_release_restricted_access(priv); + 
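A minimal sketch (not part of the patch) of the NIC register-access pattern that iwl3945_tx_reset() and most other routines in this file repeat: take the spinlock, grab "restricted" (NIC-awake) access, program registers, then release in reverse order. The helper name and the callback parameter are hypothetical:

        static int example_restricted_programming(struct iwl_priv *priv,
                                                  void (*program)(struct iwl_priv *))
        {
                unsigned long flags;
                int rc;

                spin_lock_irqsave(&priv->lock, flags);
                rc = iwl_grab_restricted_access(priv);  /* keep NIC awake for CSR access */
                if (rc) {
                        spin_unlock_irqrestore(&priv->lock, flags);
                        return rc;
                }

                program(priv);                          /* the iwl_write_restricted*() calls */

                iwl_release_restricted_access(priv);    /* let the NIC sleep again */
                spin_unlock_irqrestore(&priv->lock, flags);
                return 0;
        }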
spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/** + * iwl3945_txq_ctx_reset - Reset TX queue context + * + * Destroys all DMA structures and initialize them again + */ +static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc; + int txq_id, slots_num; + + iwl_hw_txq_ctx_free(priv); + + /* Tx CMD queue */ + rc = iwl3945_tx_reset(priv); + if (rc) + goto error; + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { + slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (rc) { + IWL_ERROR("Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl_hw_txq_ctx_free(priv); + return rc; +} + +int iwl_hw_nic_init(struct iwl_priv *priv) +{ + u8 rev_id; + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + + iwl_power_init_handle(priv); + + spin_lock_irqsave(&priv->lock, flags); + iwl_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24)); + iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + rc = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("Failed to init the card\n"); + return rc; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + iwl_write_restricted_reg(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + udelay(20); + iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Determine HW type */ + rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); + if (rc) + return rc; + IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); + + iwl3945_nic_set_pwr_src(priv, 1); + spin_lock_irqsave(&priv->lock, flags); + + if (rev_id & PCI_CFG_REV_ID_BIT_RTP) + IWL_DEBUG_INFO("RTP type \n"); + else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { + IWL_DEBUG_INFO("ALM-MB type\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB); + } else { + IWL_DEBUG_INFO("ALM-MM type\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Initialize the EEPROM */ + rc = iwl_eeprom_init(priv); + if (rc) + return rc; + + spin_lock_irqsave(&priv->lock, flags); + if (EEPROM_SKU_CAP_OP_MODE_MRC == priv->eeprom.sku_cap) { + IWL_DEBUG_INFO("SKU OP mode is mrc\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC); + } else + IWL_DEBUG_INFO("SKU OP mode is basic\n"); + + if ((priv->eeprom.board_revision & 0xF0) == 0xD0) { + IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", + priv->eeprom.board_revision); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } else { + IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", + priv->eeprom.board_revision); + iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } + + if (priv->eeprom.almgor_m_version <= 1) { + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); + IWL_DEBUG_INFO("Card M type A version is 0x%X\n", + priv->eeprom.almgor_m_version); + } else { + 
IWL_DEBUG_INFO("Card M type B version is 0x%X\n", + priv->eeprom.almgor_m_version); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n"); + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n"); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_rx_queue_alloc(priv); + if (rc) { + IWL_ERROR("Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl_rx_queue_reset(priv, rxq); + + iwl_rx_replenish(priv); + + iwl3945_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + /* Look at using this instead: + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + */ + + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + iwl_write_restricted(priv, FH_RCSR_WPTR(0), rxq->write & ~7); + iwl_release_restricted_access(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + rc = iwl3945_txq_ctx_reset(priv); + if (rc) + return rc; + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) + iwl_tx_queue_free(priv, &priv->txq[txq_id]); +} + +void iwl_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + int queue; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_grab_restricted_access(priv)) { + spin_unlock_irqrestore(&priv->lock, flags); + iwl_hw_txq_ctx_free(priv); + return; + } + + /* stop SCD */ + iwl_write_restricted_reg(priv, SCD_MODE_REG, 0); + + /* reset TFD queues */ + for (queue = TFD_QUEUE_MIN; queue < TFD_QUEUE_MAX; queue++) { + iwl_write_restricted(priv, FH_TCSR_CONFIG(queue), 0x0); + iwl_poll_restricted_bit(priv, FH_TSSR_TX_STATUS, + ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(queue), + 1000); + } + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_txq_ctx_free(priv); +} + +int iwl_hw_nic_stop_master(struct iwl_priv *priv) +{ + int rc = 0; + u32 reg_val; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* set stop master bit */ + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + reg_val = iwl_read32(priv, CSR_GP_CNTRL); + + if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE == + (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE)) + IWL_DEBUG_INFO("Card in power save, master is already " + "stopped\n"); + else { + rc = iwl_poll_bit(priv, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("stop master\n"); + + return rc; +} + +int iwl_hw_nic_reset(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + iwl_hw_nic_stop_master(priv); + + spin_lock_irqsave(&priv->lock, flags); + + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + rc = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + + rc = iwl_grab_restricted_access(priv); + if (!rc) { + iwl_write_restricted_reg(priv, 
APMG_CLK_CTRL_REG, + APMG_CLK_VAL_BSM_CLK_RQT); + + udelay(10); + + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + iwl_write_restricted_reg(priv, APMG_RTC_INT_MSK_REG, 0x0); + iwl_write_restricted_reg(priv, APMG_RTC_INT_STT_REG, + 0xFFFFFFFF); + + /* enable DMA */ + iwl_write_restricted_reg(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + udelay(10); + + iwl_set_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + iwl_release_restricted_access(priv); + } + + /* Clear the 'host command active' bit... */ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + + wake_up_interruptible(&priv->wait_command_queue); + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +/** + * iwl_hw_reg_adjust_power_by_temp - return index delta into power gain settings table + */ +static int iwl_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) +{ + return (new_reading - old_reading) * (-11) / 100; +} + +/** + * iwl_hw_reg_temp_out_of_range - Keep temperature in sane range + */ +static inline int iwl_hw_reg_temp_out_of_range(int temperature) +{ + return (((temperature < -260) || (temperature > 25)) ? 1 : 0); +} + +int iwl_hw_get_temperature(struct iwl_priv *priv) +{ + return iwl_read32(priv, CSR_UCODE_DRV_GP2); +} + +/** + * iwl_hw_reg_txpower_get_temperature - get current temperature by reading from NIC + */ +static int iwl_hw_reg_txpower_get_temperature(struct iwl_priv *priv) +{ + int temperature; + + temperature = iwl_hw_get_temperature(priv); + + /* driver's okay range is -260 to +25. + * human readable okay range is 0 to +285 */ + IWL_DEBUG_INFO("Temperature: %d\n", temperature + IWL_TEMP_CONVERT); + + /* handle insane temp reading */ + if (iwl_hw_reg_temp_out_of_range(temperature)) { + IWL_ERROR("Error bad temperature value %d\n", temperature); + + /* if really really hot(?), + * substitute the 3rd band/group's temp measured at factory */ + if (priv->last_temperature > 100) + temperature = priv->eeprom.groups[2].temperature; + else /* else use most recent "sane" value from driver */ + temperature = priv->last_temperature; + } + + return temperature; /* raw, not "human readable" */ +} + +/* Adjust Txpower only if temperature variance is greater than threshold. + * + * Both are lower than older versions' 9 degrees */ +#define IWL_TEMPERATURE_LIMIT_TIMER 6 + +/** + * is_temp_calib_needed - determines if new calibration is needed + * + * records new temperature in tx_mgr->temperature. + * replaces tx_mgr->last_temperature *only* if calib needed + * (assumes caller will actually do the calibration!). */ +static int is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + priv->temperature = iwl_hw_reg_txpower_get_temperature(priv); + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER("Getting cooler, delta %d,\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER("Same temp,\n"); + else + IWL_DEBUG_POWER("Getting warmer, delta %d,\n", temp_diff); + + /* if we don't need calibration, *don't* update last_temperature */ + if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { + IWL_DEBUG_POWER("Timed thermal calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER("Timed thermal calib needed\n"); + + /* assume that caller will actually do calib ... 
+ * update the "last temperature" value */ + priv->last_temperature = priv->temperature; + return 1; +} + +#define IWL_MAX_GAIN_ENTRIES 78 +#define IWL_CCK_FROM_OFDM_POWER_DIFF -5 +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10) + +/* radio and DSP power table, each step is 1/2 dB. + * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ +static struct iwl_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { + { + {251, 127}, /* 2.4 GHz, highest power */ + {251, 127}, + {251, 127}, + {251, 127}, + {251, 125}, + {251, 110}, + {251, 105}, + {251, 98}, + {187, 125}, + {187, 115}, + {187, 108}, + {187, 99}, + {243, 119}, + {243, 111}, + {243, 105}, + {243, 97}, + {243, 92}, + {211, 106}, + {211, 100}, + {179, 120}, + {179, 113}, + {179, 107}, + {147, 125}, + {147, 119}, + {147, 112}, + {147, 106}, + {147, 101}, + {147, 97}, + {147, 91}, + {115, 107}, + {235, 121}, + {235, 115}, + {235, 109}, + {203, 127}, + {203, 121}, + {203, 115}, + {203, 108}, + {203, 102}, + {203, 96}, + {203, 92}, + {171, 110}, + {171, 104}, + {171, 98}, + {139, 116}, + {227, 125}, + {227, 119}, + {227, 113}, + {227, 107}, + {227, 101}, + {227, 96}, + {195, 113}, + {195, 106}, + {195, 102}, + {195, 95}, + {163, 113}, + {163, 106}, + {163, 102}, + {163, 95}, + {131, 113}, + {131, 106}, + {131, 102}, + {131, 95}, + {99, 113}, + {99, 106}, + {99, 102}, + {99, 95}, + {67, 113}, + {67, 106}, + {67, 102}, + {67, 95}, + {35, 113}, + {35, 106}, + {35, 102}, + {35, 95}, + {3, 113}, + {3, 106}, + {3, 102}, + {3, 95} }, /* 2.4 GHz, lowest power */ + { + {251, 127}, /* 5.x GHz, highest power */ + {251, 120}, + {251, 114}, + {219, 119}, + {219, 101}, + {187, 113}, + {187, 102}, + {155, 114}, + {155, 103}, + {123, 117}, + {123, 107}, + {123, 99}, + {123, 92}, + {91, 108}, + {59, 125}, + {59, 118}, + {59, 109}, + {59, 102}, + {59, 96}, + {59, 90}, + {27, 104}, + {27, 98}, + {27, 92}, + {115, 118}, + {115, 111}, + {115, 104}, + {83, 126}, + {83, 121}, + {83, 113}, + {83, 105}, + {83, 99}, + {51, 118}, + {51, 111}, + {51, 104}, + {51, 98}, + {19, 116}, + {19, 109}, + {19, 102}, + {19, 98}, + {19, 93}, + {171, 113}, + {171, 107}, + {171, 99}, + {139, 120}, + {139, 113}, + {139, 107}, + {139, 99}, + {107, 120}, + {107, 113}, + {107, 107}, + {107, 99}, + {75, 120}, + {75, 113}, + {75, 107}, + {75, 99}, + {43, 120}, + {43, 113}, + {43, 107}, + {43, 99}, + {11, 120}, + {11, 113}, + {11, 107}, + {11, 99}, + {131, 107}, + {131, 99}, + {99, 120}, + {99, 113}, + {99, 107}, + {99, 99}, + {67, 120}, + {67, 113}, + {67, 107}, + {67, 99}, + {35, 120}, + {35, 113}, + {35, 107}, + {35, 99}, + {3, 120} } /* 5.x GHz, lowest power */ +}; + +static inline u8 iwl_hw_reg_fix_power_index(int index) +{ + if (index < 0) + return 0; + if (index >= IWL_MAX_GAIN_ENTRIES) + return IWL_MAX_GAIN_ENTRIES - 1; + return (u8) index; +} + +/* Kick off thermal recalibration check every 60 seconds */ +#define REG_RECALIB_PERIOD (60) + +/** + * iwl_hw_reg_set_scan_power - Set Tx power for scan probe requests + * + * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) + * or 6 Mbit (OFDM) rates. 
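Worked numbers (an editorial aside, not part of the patch) for the index arithmetic used with power_gain_table[]: entries are 1/2 dB apart and index 0 is the highest power, so a power change of N dBm corresponds to 2*N table steps. The helper below is hypothetical:

        static int example_dbm_delta_to_steps(int dbm_delta)
        {
                /* e.g. requesting 3 dBm less power moves 6 entries toward the
                 * low-power (higher-index) end of power_gain_table[] */
                return 2 * dbm_delta;
        }

        /* the temperature adjustment above works in the same units: a reading
         * 30 units above the factory calibration gives
         * iwl_hw_reg_adjust_power_by_temp(): (30) * (-11) / 100 = -3 index
         * steps, i.e. roughly 1.5 dB of extra gain to compensate */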
+ */ +static void iwl_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index, + s32 rate_index, const s8 *clip_pwrs, + struct iwl_channel_info *ch_info, + int band_index) +{ + struct iwl_scan_power_info *scan_power_info; + s8 power; + u8 power_index; + + scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index]; + + /* use this channel group's 6Mbit clipping/saturation pwr, + * but cap at regulatory scan power restriction (set during init + * based on eeprom channel data) for this channel. */ + power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX]); + + /* further limit to user's max power preference. + * FIXME: Other spectrum management power limitations do not + * seem to apply?? */ + power = min(power, priv->user_txpower_limit); + scan_power_info->requested_power = power; + + /* find difference between new scan *power* and current "normal" + * Tx *power* for 6Mb. Use this difference (x2) to adjust the + * current "normal" temperature-compensated Tx power *index* for + * this rate (1Mb or 6Mb) to yield new temp-compensated scan power + * *index*. */ + power_index = ch_info->power_info[rate_index].power_table_index + - (power - ch_info->power_info + [IWL_RATE_6M_INDEX].requested_power) * 2; + + /* store reference index that we use when adjusting *all* scan + * powers. So we can accommodate user (all channel) or spectrum + * management (single channel) power changes "between" temperature + * feedback compensation procedures. + * don't force fit this reference index into gain table; it may be a + * negative number. This will help avoid errors when we're at + * the lower bounds (highest gains, for warmest temperatures) + * of the table. */ + + /* don't exceed table bounds for "real" setting */ + power_index = iwl_hw_reg_fix_power_index(power_index); + + scan_power_info->power_table_index = power_index; + scan_power_info->tpc.tx_gain = + power_gain_table[band_index][power_index].tx_gain; + scan_power_info->tpc.dsp_atten = + power_gain_table[band_index][power_index].dsp_atten; +} + +/** + * iwl_hw_reg_send_txpower - fill in Tx Power command with gain settings + * + * Configures power settings for all rates for the current channel, + * using values from channel info struct, and send to NIC + */ +int iwl_hw_reg_send_txpower(struct iwl_priv *priv) +{ + int rate_idx; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_txpowertable_cmd txpower = { + .channel = priv->active_rxon.channel, + }; + + txpower.band = (priv->phymode == MODE_IEEE80211A) ? 
0 : 1; + ch_info = iwl_get_channel_info(priv, + priv->phymode, + le16_to_cpu(priv->active_rxon.channel)); + if (!ch_info) { + IWL_ERROR + ("Failed to get channel info for channel %d [%d]\n", + le16_to_cpu(priv->active_rxon.channel), priv->phymode); + return -EINVAL; + } + + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_POWER("Not calling TX_PWR_TABLE_CMD on " + "non-Tx channel.\n"); + return 0; + } + + /* fill cmd with power settings for all rates for current channel */ + for (rate_idx = 0; rate_idx < IWL_RATE_COUNT; rate_idx++) { + txpower.power[rate_idx].tpc = ch_info->power_info[rate_idx].tpc; + txpower.power[rate_idx].rate = iwl_rates[rate_idx].plcp; + + IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[rate_idx].tpc.tx_gain, + txpower.power[rate_idx].tpc.dsp_atten, + txpower.power[rate_idx].rate); + } + + return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, + sizeof(struct iwl_txpowertable_cmd), &txpower); + +} + +/** + * iwl_hw_reg_set_new_power - Configures power tables at new levels + * @ch_info: Channel to update. Uses power_info.requested_power. + * + * Replace requested_power and base_power_index ch_info fields for + * one channel. + * + * Called if user or spectrum management changes power preferences. + * Takes into account h/w and modulation limitations (clip power). + * + * This does *not* send anything to NIC, just sets up ch_info for one channel. + * + * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to + * properly fill out the scan powers, and actual h/w gain settings, + * and send changes to NIC + */ +static int iwl_hw_reg_set_new_power(struct iwl_priv *priv, + struct iwl_channel_info *ch_info) +{ + struct iwl_channel_power_info *power_info; + int power_changed = 0; + int i; + const s8 *clip_pwrs; + int power; + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; + + /* Get this channel's rate-to-current-power settings table */ + power_info = ch_info->power_info; + + /* update OFDM Txpower settings */ + for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE; + i++, ++power_info) { + int delta_idx; + + /* limit new power to be no more than h/w capability */ + power = min(ch_info->curr_txpow, clip_pwrs[i]); + if (power == power_info->requested_power) + continue; + + /* find difference between old and new requested powers, + * update base (non-temp-compensated) power index */ + delta_idx = (power - power_info->requested_power) * 2; + power_info->base_power_index -= delta_idx; + + /* save new requested power value */ + power_info->requested_power = power; + + power_changed = 1; + } + + /* update CCK Txpower settings, based on OFDM 12M setting ... + * ... all CCK power settings for a given channel are the *same*. */ + if (power_changed) { + power = + ch_info->power_info[IWL_RATE_12M_INDEX]. + requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF; + + /* do all CCK rates' iwl_channel_power_info structures */ + for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++) { + power_info->requested_power = power; + power_info->base_power_index = + ch_info->power_info[IWL_RATE_12M_INDEX]. 
+ base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF; + ++power_info; + } + } + + return 0; +} + +/** + * iwl_hw_reg_get_ch_txpower_limit - returns new power limit for channel + * + * NOTE: Returned power limit may be less (but not more) than requested, + * based strictly on regulatory (eeprom and spectrum mgt) limitations + * (no consideration for h/w clipping limitations). + */ +static int iwl_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info) +{ + s8 max_power; + +#if 0 + /* if we're using TGd limits, use lower of TGd or EEPROM */ + if (ch_info->tgd_data.max_power != 0) + max_power = min(ch_info->tgd_data.max_power, + ch_info->eeprom.max_power_avg); + + /* else just use EEPROM limits */ + else +#endif + max_power = ch_info->eeprom.max_power_avg; + + return min(max_power, ch_info->max_power_avg); +} + +/** + * iwl_hw_reg_comp_txpower_temp - Compensate for temperature + * + * Compensate txpower settings of *all* channels for temperature. + * This only accounts for the difference between current temperature + * and the factory calibration temperatures, and bases the new settings + * on the channel's base_power_index. + * + * If RxOn is "associated", this sends the new Txpower to NIC! + */ +static int iwl_hw_reg_comp_txpower_temp(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + int delta_index; + const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ + u8 a_band; + u8 rate_index; + u8 scan_tbl_index; + u8 i; + int ref_temp; + int temperature = priv->temperature; + + /* set up new Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = is_channel_a_band(ch_info); + + /* Get this chnlgrp's factory calibration temperature */ + ref_temp = (s16)priv->eeprom.groups[ch_info->group_index]. + temperature; + + /* get power index adjustment based on curr and factory + * temps */ + delta_index = iwl_hw_reg_adjust_power_by_temp(temperature, + ref_temp); + + /* set tx power value for all rates, OFDM and CCK */ + for (rate_index = 0; rate_index < IWL_RATE_COUNT; + rate_index++) { + int power_idx = + ch_info->power_info[rate_index].base_power_index; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within table range */ + power_idx = iwl_hw_reg_fix_power_index(power_idx); + ch_info->power_info[rate_index]. + power_table_index = (u8) power_idx; + ch_info->power_info[rate_index].tpc = + power_gain_table[a_band][power_idx]; + } + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? 
+ IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX; + iwl_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, + ch_info, a_band); + } + } + + /* send Txpower command for current channel to ucode */ + return iwl_hw_reg_send_txpower(priv); +} + +int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + struct iwl_channel_info *ch_info; + s8 max_power; + u8 a_band; + u8 i; + + if (priv->user_txpower_limit == power) { + IWL_DEBUG_POWER("Requested Tx power same as current " + "limit: %ddBm.\n", power); + return 0; + } + + IWL_DEBUG_POWER("Setting upper limit clamp to %ddBm.\n", power); + priv->user_txpower_limit = power; + + /* set up new Tx powers for each and every channel, 2.4 and 5.x */ + + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = is_channel_a_band(ch_info); + + /* find minimum power of all user and regulatory constraints + * (does not consider h/w clipping limitations) */ + max_power = iwl_hw_reg_get_ch_txpower_limit(ch_info); + max_power = min(power, max_power); + if (max_power != ch_info->curr_txpow) { + ch_info->curr_txpow = max_power; + + /* this considers the h/w clipping limitations */ + iwl_hw_reg_set_new_power(priv, ch_info); + } + } + + /* update txpower settings for all channels, + * send to NIC if associated. */ + is_temp_calib_needed(priv); + iwl_hw_reg_comp_txpower_temp(priv); + + return 0; +} + +/* will add 3945 channel switch cmd handling later */ +int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + return 0; +} + +/** + * iwl3945_reg_txpower_periodic - called when time to check our temperature. + * + * -- reset periodic timer + * -- see if temp has changed enough to warrant re-calibration ... if so: + * -- correct coeffs for temp (can reset temp timer) + * -- save this temp as "last", + * -- send new set of gain settings to NIC + * NOTE: This should continue working, even when we're not associated, + * so we can keep our internal table of scan powers current. */ +void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) +{ + /* This will kick in the "brute force" + * iwl_hw_reg_comp_txpower_temp() below */ + if (!is_temp_calib_needed(priv)) + goto reschedule; + + /* Set up a new set of temp-adjusted TxPowers, send to NIC. + * This is based *only* on current temperature, + * ignoring any previous power measurements */ + iwl_hw_reg_comp_txpower_temp(priv); + + reschedule: + queue_delayed_work(priv->workqueue, + &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ); +} + +void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + thermal_periodic.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_reg_txpower_periodic(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl_hw_reg_get_ch_grp_index - find the channel-group index (0-4) + * for the channel. + * + * This function is used when initializing channel-info structs. + * + * NOTE: These channel groups do *NOT* match the bands above! + * These channel groups are based on factory-tested channels; + * on A-band, EEPROM's "group frequency" entries represent the top + * channel in each group 1-4. Group 5 All B/G channels are in group 0. 
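A condensed, self-contained reading (not part of the patch) of the lookup implemented just below, with made-up boundary channels standing in for the EEPROM "group frequency" entries:

        static int example_a_band_group(int channel)
        {
                /* hypothetical top channels for groups 1..4; the real values
                 * come from priv->eeprom.groups[1..4].group_channel */
                static const int top_channel[5] = { 0, 48, 64, 124, 161 };
                int group;

                for (group = 1; group < 5; group++)
                        if (channel <= top_channel[group])
                                return group;   /* e.g. ch 36 -> 1, ch 60 -> 2 */
                return 4;                       /* e.g. ch 165, above the last boundary */
        }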
+ */ +static u16 iwl_hw_reg_get_ch_grp_index(struct iwl_priv *priv, + const struct iwl_channel_info *ch_info) +{ + struct iwl_eeprom_txpower_group *ch_grp = &priv->eeprom.groups[0]; + u8 group; + u16 group_index = 0; /* based on factory calib frequencies */ + u8 grp_channel; + + /* Find the group index for the channel ... don't use index 1(?) */ + if (is_channel_a_band(ch_info)) { + for (group = 1; group < 5; group++) { + grp_channel = ch_grp[group].group_channel; + if (ch_info->channel <= grp_channel) { + group_index = group; + break; + } + } + /* group 4 has a few channels *above* its factory cal freq */ + if (group == 5) + group_index = 4; + } else + group_index = 0; /* 2.4 GHz, group 0 */ + + IWL_DEBUG_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, + group_index); + return group_index; +} + +/** + * iwl_hw_reg_get_matched_power_index - Interpolate to get nominal index + * + * Interpolate to get nominal (i.e. at factory calibration temperature) index + * into radio/DSP gain settings table for requested power. + */ +static int iwl_hw_reg_get_matched_power_index(struct iwl_priv *priv, + s8 requested_power, + s32 setting_index, s32 *new_index) +{ + const struct iwl_eeprom_txpower_group *chnl_grp = NULL; + s32 index0, index1; + s32 power = 2 * requested_power; + s32 i; + const struct iwl_eeprom_txpower_sample *samples; + s32 gains0, gains1; + s32 res; + s32 denominator; + + chnl_grp = &priv->eeprom.groups[setting_index]; + samples = chnl_grp->samples; + for (i = 0; i < 5; i++) { + if (power == samples[i].power) { + *new_index = samples[i].gain_index; + return 0; + } + } + + if (power > samples[1].power) { + index0 = 0; + index1 = 1; + } else if (power > samples[2].power) { + index0 = 1; + index1 = 2; + } else if (power > samples[3].power) { + index0 = 2; + index1 = 3; + } else { + index0 = 3; + index1 = 4; + } + + denominator = (s32) samples[index1].power - (s32) samples[index0].power; + if (denominator == 0) + return -EINVAL; + gains0 = (s32) samples[index0].gain_index * (1 << 19); + gains1 = (s32) samples[index1].gain_index * (1 << 19); + res = gains0 + (gains1 - gains0) * + ((s32) power - (s32) samples[index0].power) / denominator + + (1 << 18); + *new_index = res >> 19; + return 0; +} + +static void iwl_hw_reg_init_channel_groups(struct iwl_priv *priv) +{ + u32 i; + s32 rate_index; + const struct iwl_eeprom_txpower_group *group; + + IWL_DEBUG_POWER("Initializing factory calib info from EEPROM\n"); + + for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { + s8 *clip_pwrs; /* table of power levels for each rate */ + s8 satur_pwr; /* saturation power for each chnl group */ + group = &priv->eeprom.groups[i]; + + /* sanity check on factory saturation power value */ + if (group->saturation_power < 40) { + IWL_WARNING("Error: saturation power is %d, " + "less than minimum expected 40\n", + group->saturation_power); + return; + } + + /* + * Derive requested power levels for each rate, based on + * hardware capabilities (saturation power for band). + * Basic value is 3dB down from saturation, with further + * power reductions for highest 3 data rates. These + * backoffs provide headroom for high rate modulation + * power peaks, without too much distortion (clipping). 
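Worked numbers (not part of the patch) for the fixed-point interpolation in iwl_hw_reg_get_matched_power_index() above; the sample points are invented, "power" is in half-dBm units (requested power times two), and pwr0 != pwr1 is assumed, as the real code checks:

        static int example_interpolate_gain(int pwr, int pwr0, int gain0,
                                            int pwr1, int gain1)
        {
                int g0 = gain0 * (1 << 19);     /* scale gain indexes to fixed point */
                int g1 = gain1 * (1 << 19);

                /* e.g. pwr0 = 30, gain0 = 20, pwr1 = 20, gain1 = 40, pwr = 24:
                 * pwr is 60% of the way from pwr0 to pwr1, so the result is
                 * 20 + 0.6 * (40 - 20) = 32; the (1 << 18) term provides rounding */
                return (g0 + (g1 - g0) * (pwr - pwr0) / (pwr1 - pwr0) + (1 << 18)) >> 19;
        }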
+ */ + /* we'll fill in this array with h/w max power levels */ + clip_pwrs = (s8 *) priv->clip_groups[i].clip_powers; + + /* divide factory saturation power by 2 to find -3dB level */ + satur_pwr = (s8) (group->saturation_power >> 1); + + /* fill in channel group's nominal powers for each rate */ + for (rate_index = 0; + rate_index < IWL_RATE_COUNT; rate_index++, clip_pwrs++) { + switch (rate_index) { + case IWL_RATE_36M_INDEX: + if (i == 0) /* B/G */ + *clip_pwrs = satur_pwr; + else /* A */ + *clip_pwrs = satur_pwr - 5; + break; + case IWL_RATE_48M_INDEX: + if (i == 0) + *clip_pwrs = satur_pwr - 7; + else + *clip_pwrs = satur_pwr - 10; + break; + case IWL_RATE_54M_INDEX: + if (i == 0) + *clip_pwrs = satur_pwr - 9; + else + *clip_pwrs = satur_pwr - 12; + break; + default: + *clip_pwrs = satur_pwr; + break; + } + } + } +} + +/** + * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM + * + * Second pass (during init) to set up priv->channel_info + * + * Set up Tx-power settings in our channel info database for each VALID + * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values + * and current temperature. + * + * Since this is based on current temperature (at init time), these values may + * not be valid for very long, but it gives us a starting/default point, + * and allows us to active (i.e. using Tx) scan. + * + * This does *not* write values to NIC, just sets up our internal table. + */ +int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl_channel_power_info *pwr_info; + int delta_index; + u8 rate_index; + u8 scan_tbl_index; + const s8 *clip_pwrs; /* array of power levels for each rate */ + u8 gain, dsp_atten; + s8 power; + u8 pwr_index, base_pwr_index, a_band; + u8 i; + int temperature; + + /* save temperature reference, + * so we can determine next time to calibrate */ + temperature = iwl_hw_reg_txpower_get_temperature(priv); + priv->last_temperature = temperature; + + iwl_hw_reg_init_channel_groups(priv); + + /* initialize Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; + i++, ch_info++) { + a_band = is_channel_a_band(ch_info); + if (!is_channel_valid(ch_info)) + continue; + + /* find this channel's channel group (*not* "band") index */ + ch_info->group_index = + iwl_hw_reg_get_ch_grp_index(priv, ch_info); + + /* Get this chnlgrp's rate->max/clip-powers table */ + clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; + + /* calculate power index *adjustment* value according to + * diff between current temperature and factory temperature */ + delta_index = iwl_hw_reg_adjust_power_by_temp(temperature, + priv->eeprom.groups[ch_info->group_index]. + temperature); + + IWL_DEBUG_POWER("Delta index for channel %d: %d [%d]\n", + ch_info->channel, delta_index, temperature + + IWL_TEMP_CONVERT); + + /* set tx power value for all OFDM rates */ + for (rate_index = 0; rate_index < IWL_OFDM_RATES; + rate_index++) { + s32 power_idx; + int rc; + + /* use channel group's clip-power table, + * but don't exceed channel's max power */ + s8 pwr = min(ch_info->max_power_avg, + clip_pwrs[rate_index]); + + pwr_info = &ch_info->power_info[rate_index]; + + /* get base (i.e. 
at factory-measured temperature) + * power table index for this rate's power */ + rc = iwl_hw_reg_get_matched_power_index(priv, pwr, + ch_info->group_index, + &power_idx); + if (rc) { + IWL_ERROR("Invalid power index\n"); + return rc; + } + pwr_info->base_power_index = (u8) power_idx; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within range of gain table */ + power_idx = iwl_hw_reg_fix_power_index(power_idx); + + /* fill 1 OFDM rate's iwl_channel_power_info struct */ + pwr_info->requested_power = pwr; + pwr_info->power_table_index = (u8) power_idx; + pwr_info->tpc.tx_gain = + power_gain_table[a_band][power_idx].tx_gain; + pwr_info->tpc.dsp_atten = + power_gain_table[a_band][power_idx].dsp_atten; + } + + /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/ + pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX]; + power = pwr_info->requested_power + + IWL_CCK_FROM_OFDM_POWER_DIFF; + pwr_index = pwr_info->power_table_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + base_pwr_index = pwr_info->base_power_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + + /* stay within table range */ + pwr_index = iwl_hw_reg_fix_power_index(pwr_index); + gain = power_gain_table[a_band][pwr_index].tx_gain; + dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten; + + /* fill each CCK rate's iwl_channel_power_info structure + * NOTE: All CCK-rate Txpwrs are the same for a given chnl! + * NOTE: CCK rates start at end of OFDM rates! */ + for (rate_index = IWL_OFDM_RATES; + rate_index < IWL_RATE_COUNT; rate_index++) { + pwr_info = &ch_info->power_info[rate_index]; + pwr_info->requested_power = power; + pwr_info->power_table_index = pwr_index; + pwr_info->base_power_index = base_pwr_index; + pwr_info->tpc.tx_gain = gain; + pwr_info->tpc.dsp_atten = dsp_atten; + } + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? 
+ IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX; + iwl_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, ch_info, a_band); + } + } + + return 0; +} + +int iwl_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_write_restricted(priv, FH_RCSR_CONFIG(0), 0); + rc = iwl_poll_restricted_bit(priv, FH_RSSR_STATUS, (1 << 24), 1000); + if (rc < 0) + IWL_ERROR("Can't stop Rx DMA.\n"); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int rc; + unsigned long flags; + int txq_id = txq->q.id; + + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + iwl_write_restricted(priv, FH_CBCC_CTRL(txq_id), 0); + iwl_write_restricted(priv, FH_CBCC_BASE(txq_id), 0); + + iwl_write_restricted(priv, FH_TCSR_CONFIG(txq_id), + ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); + iwl_release_restricted_access(priv); + + /* fake read to flush all prev. writes */ + iwl_read32(priv, FH_TSSR_CBB_BASE); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl_hw_get_rx_read(struct iwl_priv *priv) +{ + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + return le32_to_cpu(shared_data->rx_read_ptr[0]); +} + +/** + * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table + */ +int iwl3945_init_hw_rate_table(struct iwl_priv *priv) +{ + int rc, i; + struct iwl_rate_scaling_cmd rate_cmd = { + .reserved = {0, 0, 0}, + }; + struct iwl_rate_scaling_info *table = rate_cmd.table; + + for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) { + table[i].rate_n_flags = + iwl_hw_set_rate_n_flags(iwl_rates[i].plcp, 0); + table[i].try_cnt = priv->retry_rate; + table[i].next_rate_index = iwl_get_prev_ieee_rate(i); + } + + switch (priv->phymode) { + case MODE_IEEE80211A: + IWL_DEBUG_RATE("Select A mode rate scale\n"); + /* If one of the following CCK rates is used, + * have it fall back to the 6M OFDM rate */ + for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++) + table[i].next_rate_index = IWL_FIRST_OFDM_RATE; + + /* Don't fall back to CCK rates */ + table[IWL_RATE_12M_INDEX].next_rate_index = IWL_RATE_9M_INDEX; + + /* Don't drop out of OFDM rates */ + table[IWL_FIRST_OFDM_RATE].next_rate_index = + IWL_FIRST_OFDM_RATE; + break; + + case MODE_IEEE80211B: + IWL_DEBUG_RATE("Select B mode rate scale\n"); + /* If an OFDM rate is used, have it fall back to the + * 1M CCK rates */ + for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE; i++) + table[i].next_rate_index = IWL_FIRST_CCK_RATE; + + /* CCK shouldn't fall back to OFDM... 
*/ + table[IWL_RATE_11M_INDEX].next_rate_index = IWL_RATE_5M_INDEX; + break; + + default: + IWL_DEBUG_RATE("Select G mode rate scale\n"); + break; + } + + /* Update the rate scaling for control frame Tx */ + rate_cmd.table_id = 0; + rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); + if (rc) + return rc; + + /* Update the rate scaling for data frame Tx */ + rate_cmd.table_id = 1; + return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); +} + +int iwl_hw_set_hw_setting(struct iwl_priv *priv) +{ + memset((void *)&priv->hw_setting, 0, + sizeof(struct iwl_driver_hw_info)); + + priv->hw_setting.shared_virt = + pci_alloc_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + &priv->hw_setting.shared_phys); + + if (!priv->hw_setting.shared_virt) { + IWL_ERROR("failed to allocate pci memory\n"); + mutex_unlock(&priv->mutex); + return -ENOMEM; + } + + priv->hw_setting.ac_queue_count = AC_NUM; + priv->hw_setting.rx_buffer_size = IWL_RX_BUF_SIZE; + priv->hw_setting.tx_cmd_len = sizeof(struct iwl_tx_cmd); + priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; + priv->hw_setting.cck_flag = 0; + priv->hw_setting.max_stations = IWL3945_STATION_COUNT; + priv->hw_setting.bcast_sta_id = IWL3945_BROADCAST_ID; + return 0; +} + +unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame, u8 rate) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = (struct iwl_tx_beacon_cmd *)&frame->u; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = IWL3945_BROADCAST_ID; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + BROADCAST_ADDR, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + tx_beacon_cmd->tx.rate = rate; + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK); + + /* supp_rates[0] == OFDM */ + tx_beacon_cmd->tx.supp_rates[0] = IWL_OFDM_BASIC_RATES_MASK; + + /* supp_rates[1] == CCK + * + * NOTE: IWL_*_RATES_MASK are not in the order that supp_rates + * expects so we have to shift them around. 
+ * + * supp_rates expects: + * CCK rates are bit0..3 + * + * However IWL_*_RATES_MASK has: + * CCK rates are bit8..11 + */ + tx_beacon_cmd->tx.supp_rates[1] = + (IWL_CCK_BASIC_RATES_MASK >> 8) & 0xF; + + return (sizeof(struct iwl_tx_beacon_cmd) + frame_size); +} + +void iwl_hw_rx_handler_setup(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; +} + +void iwl_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_DELAYED_WORK(&priv->thermal_periodic, + iwl3945_bg_reg_txpower_periodic); +} + +void iwl_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_delayed_work(&priv->thermal_periodic); +} + +struct pci_device_id iwl_hw_card_ids[] = { + {0x8086, 0x4222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x8086, 0x4227, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0} +}; + +inline int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv) +{ + _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + return 0; +} + +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h new file mode 100644 index 000000000000..813902e9f8c2 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h @@ -0,0 +1,41 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_h__ +#define __iwl_3945_h__ + +/* + * Forward declare iwl-3945.c functions for iwl-base.c + */ +extern int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv); +extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); +extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); +extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); +extern void iwl3945_bg_reg_txpower_periodic(struct work_struct *work); +extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); +extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, + u16 tx_rate, u8 flags); +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h new file mode 100644 index 000000000000..99a19ef4c743 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h @@ -0,0 +1,581 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_4965_hw_h__ +#define __iwl_4965_hw_h__ + +#define IWL_RX_BUF_SIZE (4 * 1024) +#define IWL_MAX_BSM_SIZE BSM_SRAM_SIZE +#define KDR_RTC_INST_UPPER_BOUND (0x018000) +#define KDR_RTC_DATA_UPPER_BOUND (0x80A000) +#define KDR_RTC_INST_SIZE (KDR_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) +#define KDR_RTC_DATA_SIZE (KDR_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) + +#define IWL_MAX_INST_SIZE KDR_RTC_INST_SIZE +#define IWL_MAX_DATA_SIZE KDR_RTC_DATA_SIZE + +static inline int iwl_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= RTC_DATA_LOWER_BOUND) && + (addr < KDR_RTC_DATA_UPPER_BOUND); +} + +/********************* START TXPOWER *****************************************/ +enum { + HT_IE_EXT_CHANNEL_NONE = 0, + HT_IE_EXT_CHANNEL_ABOVE, + HT_IE_EXT_CHANNEL_INVALID, + HT_IE_EXT_CHANNEL_BELOW, + HT_IE_EXT_CHANNEL_MAX +}; + +enum { + CALIB_CH_GROUP_1 = 0, + CALIB_CH_GROUP_2 = 1, + CALIB_CH_GROUP_3 = 2, + CALIB_CH_GROUP_4 = 3, + CALIB_CH_GROUP_5 = 4, + CALIB_CH_GROUP_MAX +}; + +/* Temperature calibration offset is 3% 0C in Kelvin */ +#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 +#define TEMPERATURE_CALIB_A_VAL 259 + +#define IWL_TX_POWER_TEMPERATURE_MIN (263) +#define IWL_TX_POWER_TEMPERATURE_MAX (410) + +#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ + (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \ + ((t) > IWL_TX_POWER_TEMPERATURE_MAX)) + +#define IWL_TX_POWER_ILLEGAL_TEMPERATURE (300) + +#define IWL_TX_POWER_TEMPERATURE_DIFFERENCE (2) + +#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) + +#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */ +#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ + +/* timeout equivalent to 3 minutes */ +#define IWL_TX_POWER_TIMELIMIT_NOCALIB 1800000000 + +#define IWL_TX_POWER_CCK_COMPENSATION (9) + +#define MIN_TX_GAIN_INDEX (0) +#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) +#define MAX_TX_GAIN_INDEX_52GHZ (98) +#define MIN_TX_GAIN_52GHZ (98) +#define MAX_TX_GAIN_INDEX_24GHZ (98) +#define MIN_TX_GAIN_24GHZ (98) +#define MAX_TX_GAIN (0) +#define MAX_TX_GAIN_52GHZ_EXT (-9) + +#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34) +#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34) +#define IWL_TX_POWER_REGULATORY_MIN (0) +#define IWL_TX_POWER_REGULATORY_MAX (34) +#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38) +#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38) +#define IWL_TX_POWER_SATURATION_MIN (20) +#define IWL_TX_POWER_SATURATION_MAX (50) + +/* dv *0.4 = dt; so that 5 degrees temperature diff equals + * 12.5 in voltage diff */ +#define IWL_TX_TEMPERATURE_UPDATE_LIMIT 9 + +#define IWL_INVALID_CHANNEL (0xffffffff) +#define IWL_TX_POWER_REGITRY_BIT (2) + +#define MIN_IWL_TX_POWER_CALIB_DUR (100) +#define IWL_CCK_FROM_OFDM_POWER_DIFF (-5) +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (9) + +/* Number of entries in the gain table */ +#define POWER_GAIN_NUM_ENTRIES 78 +#define TX_POW_MAX_SESSION_NUM 5 +/* timeout equivalent to 3 minutes */ +#define TX_IWL_TIMELIMIT_NOCALIB 1800000000 + +/* Kedron TX_CALIB_STATES */ +#define IWL_TX_CALIB_STATE_SEND_TX 0x00000001 +#define IWL_TX_CALIB_WAIT_TX_RESPONSE 0x00000002 +#define IWL_TX_CALIB_ENABLED 0x00000004 +#define IWL_TX_CALIB_XVT_ON 0x00000008 +#define IWL_TX_CALIB_TEMPERATURE_CORRECT 0x00000010 +#define IWL_TX_CALIB_WORKING_WITH_XVT 0x00000020 +#define IWL_TX_CALIB_XVT_PERIODICAL 0x00000040 + +#define NUM_IWL_TX_CALIB_SETTINS 5 /* Number of tx correction groups */ + +#define IWL_MIN_POWER_IN_VP_TABLE 1 /* 0.5dBm 
multiplied by 2 */ +#define IWL_MAX_POWER_IN_VP_TABLE 40 /* 20dBm - multiplied by 2 (because + * entries are for each 0.5dBm) */ +#define IWL_STEP_IN_VP_TABLE 1 /* 0.5dB - multiplied by 2 */ +#define IWL_NUM_POINTS_IN_VPTABLE \ + (1 + IWL_MAX_POWER_IN_VP_TABLE - IWL_MIN_POWER_IN_VP_TABLE) + +#define MIN_TX_GAIN_INDEX (0) +#define MAX_TX_GAIN_INDEX_52GHZ (98) +#define MIN_TX_GAIN_52GHZ (98) +#define MAX_TX_GAIN_INDEX_24GHZ (98) +#define MIN_TX_GAIN_24GHZ (98) +#define MAX_TX_GAIN (0) + +/* First and last channels of all groups */ +#define CALIB_IWL_TX_ATTEN_GR1_FCH 34 +#define CALIB_IWL_TX_ATTEN_GR1_LCH 43 +#define CALIB_IWL_TX_ATTEN_GR2_FCH 44 +#define CALIB_IWL_TX_ATTEN_GR2_LCH 70 +#define CALIB_IWL_TX_ATTEN_GR3_FCH 71 +#define CALIB_IWL_TX_ATTEN_GR3_LCH 124 +#define CALIB_IWL_TX_ATTEN_GR4_FCH 125 +#define CALIB_IWL_TX_ATTEN_GR4_LCH 200 +#define CALIB_IWL_TX_ATTEN_GR5_FCH 1 +#define CALIB_IWL_TX_ATTEN_GR5_LCH 20 + + +union iwl_tx_power_dual_stream { + struct { + u8 radio_tx_gain[2]; + u8 dsp_predis_atten[2]; + } s; + u32 dw; +}; + +/********************* END TXPOWER *****************************************/ + +/* HT flags */ +#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) +#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK __constant_cpu_to_le32(0x1<<22) + +#define RXON_FLG_HT_OPERATING_MODE_POS (23) + +#define RXON_FLG_HT_PROT_MSK __constant_cpu_to_le32(0x1<<23) +#define RXON_FLG_FAT_PROT_MSK __constant_cpu_to_le32(0x2<<23) + +#define RXON_FLG_CHANNEL_MODE_POS (25) +#define RXON_FLG_CHANNEL_MODE_MSK __constant_cpu_to_le32(0x3<<25) +#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK __constant_cpu_to_le32(0x1<<25) +#define RXON_FLG_CHANNEL_MODE_MIXED_MSK __constant_cpu_to_le32(0x2<<25) + +#define RXON_RX_CHAIN_DRIVER_FORCE_MSK __constant_cpu_to_le16(0x1<<0) +#define RXON_RX_CHAIN_VALID_MSK __constant_cpu_to_le16(0x7<<1) +#define RXON_RX_CHAIN_VALID_POS (1) +#define RXON_RX_CHAIN_FORCE_SEL_MSK __constant_cpu_to_le16(0x7<<4) +#define RXON_RX_CHAIN_FORCE_SEL_POS (4) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK __constant_cpu_to_le16(0x7<<7) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define RXON_RX_CHAIN_CNT_MSK __constant_cpu_to_le16(0x3<<10) +#define RXON_RX_CHAIN_CNT_POS (10) +#define RXON_RX_CHAIN_MIMO_CNT_MSK __constant_cpu_to_le16(0x3<<12) +#define RXON_RX_CHAIN_MIMO_CNT_POS (12) +#define RXON_RX_CHAIN_MIMO_FORCE_MSK __constant_cpu_to_le16(0x1<<14) +#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) + + +#define MCS_DUP_6M_PLCP 0x20 + +/* OFDM HT rate masks */ +/* ***************************************** */ +#define R_MCS_6M_MSK 0x1 +#define R_MCS_12M_MSK 0x2 +#define R_MCS_18M_MSK 0x4 +#define R_MCS_24M_MSK 0x8 +#define R_MCS_36M_MSK 0x10 +#define R_MCS_48M_MSK 0x20 +#define R_MCS_54M_MSK 0x40 +#define R_MCS_60M_MSK 0x80 +#define R_MCS_12M_DUAL_MSK 0x100 +#define R_MCS_24M_DUAL_MSK 0x200 +#define R_MCS_36M_DUAL_MSK 0x400 +#define R_MCS_48M_DUAL_MSK 0x800 + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) (((tbl) == LQ_SISO)) +#define is_mimo(tbl) (((tbl) == LQ_MIMO)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) (((tbl) == LQ_A)) +#define is_g_and(tbl) (((tbl) == LQ_G)) + +/* Flow Handler Definitions */ + +/**********************/ +/* Addresses */ +/**********************/ + +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x1EF0) + +#define IWL_FH_REGS_LOWER_BOUND (0x1000) +#define IWL_FH_REGS_UPPER_BOUND (0x2000) + +#define IWL_FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + +/* CBBC Area - Circular buffers base address cache 
pointers table */ +#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) +/* queues 0 - 15 */ +#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + +/* RSCSR Area */ +#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) +#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) + +#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) +#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) +#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) + +/* RCSR Area - Registers address map */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) + +/* RSSR Area - Rx shared ctrl & status registers */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +/* TCSR */ +#define IWL_FH_TCSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xD00) +#define IWL_FH_TCSR_UPPER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xE60) + +#define IWL_FH_TCSR_CHNL_NUM (7) +#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (IWL_FH_TCSR_LOWER_BOUND + 0x20 * _chnl) + +/* TSSR Area - Tx shared status registers */ +/* TSSR */ +#define IWL_FH_TSSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xEA0) +#define IWL_FH_TSSR_UPPER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xEC0) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG (IWL_FH_TSSR_LOWER_BOUND + 0x008) +#define IWL_FH_TSSR_TX_STATUS_REG (IWL_FH_TSSR_LOWER_BOUND + 0x010) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_64B (0x00000000) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_256B (0x00000800) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_512B (0x00000C00) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) \ + ((1 << (_chnl)) << 24) +#define IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) \ + ((1 << (_chnl)) << 16) + +#define IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \ + (IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \ + IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl)) + +/* TCSR: tx_config register values */ +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_ARC (0x00000002) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define 
IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/* RCSR: channel 0 rx_config register defines */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MASK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MASK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MASK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MASK (0x00001000) /* bit 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MASK (0x00000FF0) /* bit 4-11 */ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20) +#define FH_RCSR_RX_CONFIG_RB_SIZE_BITSHIFT (16) + +/* RCSR: rx_config register values */ +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) + +/* RCSR channel 0 config register values */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +/* RSCSR: defs used in normal mode */ +#define FH_RSCSR_CHNL0_RBDCB_WPTR_MASK (0x00000FFF) /* bits 0-11 */ + +#define SCD_WIN_SIZE 64 +#define SCD_FRAME_LIMIT 64 + +/* memory mapped registers */ +#define SCD_START_OFFSET 0xa02c00 + +#define SCD_SRAM_BASE_ADDR (SCD_START_OFFSET + 0x0) +#define SCD_EMPTY_BITS (SCD_START_OFFSET + 0x4) +#define SCD_DRAM_BASE_ADDR (SCD_START_OFFSET + 0x10) +#define SCD_AIT (SCD_START_OFFSET + 0x18) +#define SCD_TXFACT (SCD_START_OFFSET + 0x1c) +#define SCD_QUEUE_WRPTR(x) (SCD_START_OFFSET + 0x24 + (x) * 4) +#define SCD_QUEUE_RDPTR(x) (SCD_START_OFFSET + 0x64 + (x) * 4) +#define SCD_SETQUEUENUM (SCD_START_OFFSET + 0xa4) +#define SCD_SET_TXSTAT_TXED (SCD_START_OFFSET + 0xa8) +#define SCD_SET_TXSTAT_DONE (SCD_START_OFFSET + 0xac) +#define SCD_SET_TXSTAT_NOT_SCHD (SCD_START_OFFSET + 0xb0) +#define SCD_DECREASE_CREDIT (SCD_START_OFFSET + 0xb4) +#define SCD_DECREASE_SCREDIT (SCD_START_OFFSET + 0xb8) +#define SCD_LOAD_CREDIT (SCD_START_OFFSET + 0xbc) +#define SCD_LOAD_SCREDIT (SCD_START_OFFSET + 0xc0) +#define SCD_BAR (SCD_START_OFFSET + 0xc4) +#define SCD_BAR_DW0 (SCD_START_OFFSET + 0xc8) +#define SCD_BAR_DW1 (SCD_START_OFFSET + 0xcc) +#define SCD_QUEUECHAIN_SEL (SCD_START_OFFSET + 0xd0) +#define SCD_QUERY_REQ (SCD_START_OFFSET + 0xd8) +#define SCD_QUERY_RES (SCD_START_OFFSET + 0xdc) +#define SCD_PENDING_FRAMES (SCD_START_OFFSET + 0xe0) +#define SCD_INTERRUPT_MASK (SCD_START_OFFSET + 0xe4) +#define SCD_INTERRUPT_THRESHOLD (SCD_START_OFFSET + 0xe8) +#define SCD_QUERY_MIN_FRAME_SIZE (SCD_START_OFFSET + 0x100) +#define SCD_QUEUE_STATUS_BITS(x) 
(SCD_START_OFFSET + 0x104 + (x) * 4) + +/* SRAM structures */ +#define SCD_CONTEXT_DATA_OFFSET 0x380 +#define SCD_TX_STTS_BITMAP_OFFSET 0x400 +#define SCD_TRANSLATE_TBL_OFFSET 0x500 +#define SCD_CONTEXT_QUEUE_OFFSET(x) (SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) +#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) + +#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \ + ((1<<(hi))|((1<<(hi))-(1<<(lo)))) + + +#define SCD_MODE_REG_BIT_SEARCH_MODE (1<<0) +#define SCD_MODE_REG_BIT_SBYP_MODE (1<<1) + +#define SCD_TXFIFO_POS_TID (0) +#define SCD_TXFIFO_POS_RA (4) +#define SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define SCD_QUEUE_STTS_REG_POS_TXF (1) +#define SCD_QUEUE_STTS_REG_POS_WSL (5) +#define SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) +#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define SCD_QUEUE_STTS_REG_MSK (0x0007FC00) + +#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) + +#define SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) +#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +#define CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R (0x00000010) +#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) +#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) + +static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} +static inline u16 iwl_hw_get_rate_n_flags(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFFFF; +} +static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le32(flags|(u16)rate); +} + +struct iwl_tfd_frame_data { + __le32 tb1_addr; + + __le32 val1; + /* __le32 ptb1_32_35:4; */ +#define IWL_tb1_addr_hi_POS 0 +#define IWL_tb1_addr_hi_LEN 4 +#define IWL_tb1_addr_hi_SYM val1 + /* __le32 tb_len1:12; */ +#define IWL_tb1_len_POS 4 +#define IWL_tb1_len_LEN 12 +#define IWL_tb1_len_SYM val1 + /* __le32 ptb2_0_15:16; */ +#define IWL_tb2_addr_lo16_POS 16 +#define IWL_tb2_addr_lo16_LEN 16 +#define IWL_tb2_addr_lo16_SYM val1 + + __le32 val2; + /* __le32 ptb2_16_35:20; */ +#define IWL_tb2_addr_hi20_POS 0 +#define IWL_tb2_addr_hi20_LEN 20 +#define IWL_tb2_addr_hi20_SYM val2 + /* __le32 tb_len2:12; */ +#define IWL_tb2_len_POS 20 +#define IWL_tb2_len_LEN 12 +#define IWL_tb2_len_SYM val2 +} __attribute__ ((packed)); + +struct iwl_tfd_frame { + __le32 val0; + /* __le32 rsvd1:24; */ + /* __le32 num_tbs:5; */ +#define IWL_num_tbs_POS 24 +#define IWL_num_tbs_LEN 5 +#define IWL_num_tbs_SYM val0 + /* __le32 rsvd2:1; */ + /* __le32 padding:2; */ + struct iwl_tfd_frame_data pa[10]; + __le32 reserved; +} __attribute__ ((packed)); + +#define IWL4965_MAX_WIN_SIZE 64 +#define IWL4965_QUEUE_SIZE 256 +#define IWL4965_NUM_FIFOS 7 +#define IWL_MAX_NUM_QUEUES 16 + +struct iwl4965_queue_byte_cnt_entry { + __le16 val; + /* __le16 byte_cnt:12; */ +#define IWL_byte_cnt_POS 0 +#define IWL_byte_cnt_LEN 12 +#define IWL_byte_cnt_SYM val + /* __le16 rsvd:4; */ +} __attribute__ ((packed)); + +struct iwl4965_sched_queue_byte_cnt_tbl { + struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL4965_QUEUE_SIZE + + IWL4965_MAX_WIN_SIZE]; + u8 dont_care[1024 - + (IWL4965_QUEUE_SIZE + IWL4965_MAX_WIN_SIZE) * + sizeof(__le16)]; +} __attribute__ ((packed)); + +/* Base physical address of 
iwl_shared is provided to SCD_DRAM_BASE_ADDR + * and &iwl_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */ +struct iwl_shared { + struct iwl4965_sched_queue_byte_cnt_tbl + queues_byte_cnt_tbls[IWL_MAX_NUM_QUEUES]; + __le32 val0; + + /* __le32 rb_closed_stts_rb_num:12; */ +#define IWL_rb_closed_stts_rb_num_POS 0 +#define IWL_rb_closed_stts_rb_num_LEN 12 +#define IWL_rb_closed_stts_rb_num_SYM val0 + /* __le32 rsrv1:4; */ + /* __le32 rb_closed_stts_rx_frame_num:12; */ +#define IWL_rb_closed_stts_rx_frame_num_POS 16 +#define IWL_rb_closed_stts_rx_frame_num_LEN 12 +#define IWL_rb_closed_stts_rx_frame_num_SYM val0 + /* __le32 rsrv2:4; */ + + __le32 val1; + /* __le32 frame_finished_stts_rb_num:12; */ +#define IWL_frame_finished_stts_rb_num_POS 0 +#define IWL_frame_finished_stts_rb_num_LEN 12 +#define IWL_frame_finished_stts_rb_num_SYM val1 + /* __le32 rsrv3:4; */ + /* __le32 frame_finished_stts_rx_frame_num:12; */ +#define IWL_frame_finished_stts_rx_frame_num_POS 16 +#define IWL_frame_finished_stts_rx_frame_num_LEN 12 +#define IWL_frame_finished_stts_rx_frame_num_SYM val1 + /* __le32 rsrv4:4; */ + + __le32 padding1; /* so that allocation will be aligned to 16B */ + __le32 padding2; +} __attribute__ ((packed)); + +#endif /* __iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c new file mode 100644 index 000000000000..287c75705c44 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c @@ -0,0 +1,2295 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <net/ieee80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include <net/mac80211.h> +#include <linux/wireless.h> + +#define IWL 4965 + +#include "../net/mac80211/ieee80211_rate.h" + +#include "iwlwifi.h" +#include "iwl-helpers.h" + +#define RS_NAME "iwl-4965-rs" + +#define NUM_TRY_BEFORE_ANTENNA_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_HIGH_TH 10880 +#define IWL_RATE_MIN_FAILURE_TH 6 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 +#define IWL_RATE_INCREASE_TH 8960 +#define IWL_RATE_SCALE_FLUSH_INTVL (2*HZ) /*2 seconds */ + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +struct iwl_rate { + u32 rate_n_flags; +} __attribute__ ((packed)); + +struct iwl_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + enum iwl_antenna_type antenna_type; + u8 is_SGI; + u8 is_fat; + u8 is_dup; + u8 action; + s32 *expected_tpt; + struct iwl_rate current_rate; + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; +}; + +struct iwl_rate_scale_priv { + u8 active_tbl; + u8 enable_counter; + u8 stay_in_tbl; + u8 search_better_tbl; + s32 last_tpt; + u32 table_count_limit; + u32 max_failure_limit; + u32 max_success_limit; + u32 table_count; + u32 total_failed; + u32 total_success; + u32 flush_timer; + u8 action_counter; + u8 antenna; + u8 valid_antenna; + u8 is_green; + u8 is_dup; + u8 phymode; + u8 ibss_sta_added; + u32 supp_rates; + u16 active_rate; + u16 active_siso_rate; + u16 active_mimo_rate; + u16 active_rate_basic; + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct iwl_rate dbg_fixed; + struct iwl_priv *drv; +#endif +}; + +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct net_device *dev, + struct ieee80211_hdr *hdr, + struct sta_info *sta); +static void rs_fill_link_cmd(struct iwl_rate_scale_priv *lq_data, + struct iwl_rate *tx_mcs, + struct iwl_link_quality_cmd *tbl); + + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate *mcs, int index); +#else +static void rs_dbgfs_set_mcs(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate *mcs, int index) +{} +#endif +static s32 expected_tpt_A[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 +}; + +static s32 expected_tpt_G[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186 +}; + +static s32 expected_tpt_siso20MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 42, 42, 76, 102, 124, 159, 183, 193, 202 +}; + +static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211 +}; + +static s32 
expected_tpt_mimo20MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251 +}; + +static s32 expected_tpt_mimo20MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257 +}; + +static s32 expected_tpt_siso40MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 77, 77, 127, 160, 184, 220, 242, 250, 257 +}; + +static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264 +}; + +static s32 expected_tpt_mimo40MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289 +}; + +static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 +}; + +static int iwl_lq_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + /*We didn't cache the SKB; let the caller free it */ + return 1; +} + +static inline u8 iwl_rate_get_rate(u32 rate_n_flags) +{ + return (u8)(rate_n_flags & 0xFF); +} + +static int rs_send_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq, u8 flags) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + int i; +#endif + int rc = -1; + + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = sizeof(struct iwl_link_quality_cmd), + .meta.flags = flags, + .data = lq, + }; + + if ((lq->sta_id == 0xFF) && + (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) + return rc; + + if (lq->sta_id == 0xFF) + lq->sta_id = IWL_AP_ID; + + IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE("lq dta 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); +#ifdef CONFIG_IWLWIFI_DEBUG + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE("lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +#endif + + if (flags & CMD_ASYNC) + cmd.meta.u.callback = iwl_lq_sync_callback; + + if (iwl_is_associated(priv) && priv->assoc_station_added && + priv->lq_mngr.lq_ready) + rc = iwl_send_cmd(priv, &cmd); + + return rc; +} + +static int rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; + + return 0; +} + +static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, + int scale_index, s32 tpt, u32 status) +{ + int rc = 0; + struct iwl_rate_scale_data *window = NULL; + u64 mask; + u8 win_size = IWL_RATE_MAX_WINDOW; + s32 fail_count; + + if (scale_index < 0) + return -1; + + if (scale_index >= IWL_RATE_COUNT) + return -1; + + window = &(windows[scale_index]); + + if (window->counter >= win_size) { + + window->counter = win_size - 1; + mask = 1; + mask = (mask << (win_size - 1)); + if ((window->data & mask)) { + window->data &= ~mask; + window->success_counter = window->success_counter - 1; + } + } + + window->counter = window->counter + 1; + mask = window->data; + window->data = (mask << 1); + if (status != 0) { + window->success_counter = window->success_counter + 1; + window->data |= 0x1; + } + + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + window->stamp = 
jiffies; + + return rc; +} + +int static rs_mcs_from_tbl(struct iwl_rate *mcs_rate, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + int rc = 0; + + if (is_legacy(tbl->lq_type)) { + mcs_rate->rate_n_flags = iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + mcs_rate->rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_siso(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) + index = IWL_LAST_OFDM_RATE; + mcs_rate->rate_n_flags = iwl_rates[index].plcp_siso | + RATE_MCS_HT_MSK; + } else { + if (index > IWL_LAST_OFDM_RATE) + index = IWL_LAST_OFDM_RATE; + mcs_rate->rate_n_flags = iwl_rates[index].plcp_mimo | + RATE_MCS_HT_MSK; + } + + switch (tbl->antenna_type) { + case ANT_BOTH: + mcs_rate->rate_n_flags |= RATE_MCS_ANT_AB_MSK; + break; + case ANT_MAIN: + mcs_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; + break; + case ANT_AUX: + mcs_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; + break; + case ANT_NONE: + break; + } + + if (is_legacy(tbl->lq_type)) + return rc; + + if (tbl->is_fat) { + if (tbl->is_dup) + mcs_rate->rate_n_flags |= RATE_MCS_DUP_MSK; + else + mcs_rate->rate_n_flags |= RATE_MCS_FAT_MSK; + } + if (tbl->is_SGI) + mcs_rate->rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + mcs_rate->rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type)) + mcs_rate->rate_n_flags &= ~RATE_MCS_SGI_MSK; + } + return rc; +} + +static int rs_get_tbl_info_from_mcs(const struct iwl_rate *mcs_rate, + int phymode, struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + int index; + u32 ant_msk; + + index = iwl_rate_index_from_plcp(mcs_rate->rate_n_flags); + + if (index == IWL_RATE_INVALID) { + *rate_idx = -1; + return -1; + } + tbl->is_SGI = 0; + tbl->is_fat = 0; + tbl->is_dup = 0; + tbl->antenna_type = ANT_BOTH; + + if (!(mcs_rate->rate_n_flags & RATE_MCS_HT_MSK)) { + ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); + + if (ant_msk == RATE_MCS_ANT_AB_MSK) + tbl->lq_type = LQ_NONE; + else { + + if (phymode == MODE_IEEE80211A) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK) + tbl->antenna_type = ANT_MAIN; + else + tbl->antenna_type = ANT_AUX; + } + *rate_idx = index; + + } else if (iwl_rate_get_rate(mcs_rate->rate_n_flags) + <= IWL_RATE_SISO_60M_PLCP) { + tbl->lq_type = LQ_SISO; + + ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); + if (ant_msk == RATE_MCS_ANT_AB_MSK) + tbl->lq_type = LQ_NONE; + else { + if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK) + tbl->antenna_type = ANT_MAIN; + else + tbl->antenna_type = ANT_AUX; + } + if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || + (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_fat = 1; + + if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + *rate_idx = index; + } else { + tbl->lq_type = LQ_MIMO; + if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || + (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_fat = 1; + + if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + *rate_idx = index; + } + return 0; +} + +static inline void rs_toggle_antenna(struct iwl_rate *new_rate, + struct iwl_scale_tbl_info *tbl) +{ + if (tbl->antenna_type == ANT_AUX) { + tbl->antenna_type = ANT_MAIN; + new_rate->rate_n_flags &= ~RATE_MCS_ANT_B_MSK; + new_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; + } else { + tbl->antenna_type = ANT_AUX; + new_rate->rate_n_flags &= 
~RATE_MCS_ANT_A_MSK; + new_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; + } +} + +static inline s8 rs_use_green(struct iwl_priv *priv) +{ + s8 rc = 0; +#ifdef CONFIG_IWLWIFI_HT + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht) + return 0; + + if ((priv->current_assoc_ht.is_green_field) && + !(priv->current_assoc_ht.operating_mode & 0x4)) + rc = 1; +#endif /*CONFIG_IWLWIFI_HT */ + return rc; +} + +/** + * rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static void rs_get_supported_rates(struct iwl_rate_scale_priv *lq_data, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type, + u16 *data_rate) +{ + if (is_legacy(rate_type)) + *data_rate = lq_data->active_rate; + else { + if (is_siso(rate_type)) + *data_rate = lq_data->active_siso_rate; + else + *data_rate = lq_data->active_mimo_rate; + } + + if (hdr && is_multicast_ether_addr(hdr->addr1) && + lq_data->active_rate_basic) + *data_rate = lq_data->active_rate_basic; +} + +static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjascent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static int rs_get_lower_rate(struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl, u8 scale_index, + u8 ht_possible, struct iwl_rate *mcs_rate) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_data->is_green; + + /* check if we need to switch from HT to legacy rates. 
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_data->phymode == MODE_IEEE80211A) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if ((tbl->antenna_type == ANT_BOTH) || + (tbl->antenna_type == ANT_NONE)) + tbl->antenna_type = ANT_MAIN; + + tbl->is_fat = 0; + tbl->is_SGI = 0; + } + + rs_get_supported_rates(lq_data, NULL, tbl->lq_type, &rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_data->phymode == (u8) MODE_IEEE80211A) + rate_mask = (u16)(rate_mask & + (lq_data->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_data->supp_rates); + } + + /* if we did switched from HT to legacy check current rate */ + if ((switch_to_legacy) && + (rate_mask & (1 << scale_index))) { + rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); + return 0; + } + + high_low = rs_get_adjacent_rate(scale_index, rate_mask, tbl->lq_type); + low = high_low & 0xff; + + if (low != IWL_RATE_INVALID) + rs_mcs_from_tbl(mcs_rate, tbl, low, is_green); + else + rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); + + return 0; +} + +static void rs_tx_status(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct ieee80211_tx_status *tx_resp) +{ + int status; + u8 retries; + int rs_index, index = 0; + struct iwl_rate_scale_priv *lq; + struct iwl_link_quality_cmd *table; + struct sta_info *sta; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct iwl_rate_scale_data *window = NULL; + struct iwl_rate_scale_data *search_win = NULL; + struct iwl_rate tx_mcs; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *search_tbl; + u8 active_index = 0; + u16 fc = le16_to_cpu(hdr->frame_control); + s32 tpt = 0; + + IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n"); + + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) + return; + + retries = tx_resp->retry_count; + + if (retries > 15) + retries = 15; + + + sta = sta_info_get(local, hdr->addr1); + + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + return; + } + + lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + + if (!priv->lq_mngr.lq_ready) + return; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && !lq->ibss_sta_added) + return; + + table = &lq->lq; + active_index = lq->active_tbl; + + lq->antenna = (lq->valid_antenna & local->hw.conf.antenna_sel_tx); + if (!lq->antenna) + lq->antenna = lq->valid_antenna; + + lq->antenna = lq->valid_antenna; + curr_tbl = &(lq->lq_info[active_index]); + search_tbl = &(lq->lq_info[(1 - active_index)]); + window = (struct iwl_rate_scale_data *) + &(curr_tbl->win[0]); + search_win = (struct iwl_rate_scale_data *) + &(search_tbl->win[0]); + + tx_mcs.rate_n_flags = tx_resp->control.tx_rate; + + rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, + &tbl_type, &rs_index); + if ((rs_index < 0) || (rs_index >= IWL_RATE_COUNT)) { + IWL_DEBUG_RATE("bad rate index at: %d rate 0x%X\n", + rs_index, tx_mcs.rate_n_flags); + sta_info_put(sta); + return; + } + + if (retries && + (tx_mcs.rate_n_flags != + le32_to_cpu(table->rs_table[0].rate_n_flags))) { + IWL_DEBUG_RATE("initial rate does not match 0x%x 0x%x\n", + tx_mcs.rate_n_flags, + 
le32_to_cpu(table->rs_table[0].rate_n_flags)); + sta_info_put(sta); + return; + } + + while (retries) { + tx_mcs.rate_n_flags = + le32_to_cpu(table->rs_table[index].rate_n_flags); + rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, + &tbl_type, &rs_index); + + if ((tbl_type.lq_type == search_tbl->lq_type) && + (tbl_type.antenna_type == search_tbl->antenna_type) && + (tbl_type.is_SGI == search_tbl->is_SGI)) { + if (search_tbl->expected_tpt) + tpt = search_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(search_win, + rs_index, tpt, 0); + } else if ((tbl_type.lq_type == curr_tbl->lq_type) && + (tbl_type.antenna_type == curr_tbl->antenna_type) && + (tbl_type.is_SGI == curr_tbl->is_SGI)) { + if (curr_tbl->expected_tpt) + tpt = curr_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(window, rs_index, tpt, 0); + } + if (lq->stay_in_tbl) + lq->total_failed++; + --retries; + index++; + + } + + if (!tx_resp->retry_count) + tx_mcs.rate_n_flags = tx_resp->control.tx_rate; + else + tx_mcs.rate_n_flags = + le32_to_cpu(table->rs_table[index].rate_n_flags); + + rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, + &tbl_type, &rs_index); + + if (tx_resp->flags & IEEE80211_TX_STATUS_ACK) + status = 1; + else + status = 0; + + if ((tbl_type.lq_type == search_tbl->lq_type) && + (tbl_type.antenna_type == search_tbl->antenna_type) && + (tbl_type.is_SGI == search_tbl->is_SGI)) { + if (search_tbl->expected_tpt) + tpt = search_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(search_win, + rs_index, tpt, status); + } else if ((tbl_type.lq_type == curr_tbl->lq_type) && + (tbl_type.antenna_type == curr_tbl->antenna_type) && + (tbl_type.is_SGI == curr_tbl->is_SGI)) { + if (curr_tbl->expected_tpt) + tpt = curr_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(window, rs_index, tpt, status); + } + + if (lq->stay_in_tbl) { + if (status) + lq->total_success++; + else + lq->total_failed++; + } + + rs_rate_scale_perform(priv, dev, hdr, sta); + sta_info_put(sta); + return; +} + +static u8 rs_is_ant_connected(u8 valid_antenna, + enum iwl_antenna_type antenna_type) +{ + if (antenna_type == ANT_AUX) + return ((valid_antenna & 0x2) ? 1:0); + else if (antenna_type == ANT_MAIN) + return ((valid_antenna & 0x1) ? 
1:0); + else if (antenna_type == ANT_BOTH) { + if ((valid_antenna & 0x3) == 0x3) + return 1; + else + return 0; + } + + return 1; +} + +static u8 rs_is_other_ant_connected(u8 valid_antenna, + enum iwl_antenna_type antenna_type) +{ + if (antenna_type == ANT_AUX) + return (rs_is_ant_connected(valid_antenna, ANT_MAIN)); + else + return (rs_is_ant_connected(valid_antenna, ANT_AUX)); + + return 0; +} + +static void rs_set_stay_in_table(u8 is_legacy, + struct iwl_rate_scale_priv *lq_data) +{ + IWL_DEBUG_HT("we are staying in the same table\n"); + lq_data->stay_in_tbl = 1; + if (is_legacy) { + lq_data->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_data->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_data->max_success_limit = IWL_LEGACY_TABLE_COUNT; + } else { + lq_data->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_data->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_data->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_data->table_count = 0; + lq_data->total_failed = 0; + lq_data->total_success = 0; +} + +static void rs_get_expected_tpt_table(struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl) +{ + if (is_legacy(tbl->lq_type)) { + if (!is_a_band(tbl->lq_type)) + tbl->expected_tpt = expected_tpt_G; + else + tbl->expected_tpt = expected_tpt_A; + } else if (is_siso(tbl->lq_type)) { + if (tbl->is_fat && !lq_data->is_dup) + if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_siso40MHzSGI; + else + tbl->expected_tpt = expected_tpt_siso40MHz; + else if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_siso20MHzSGI; + else + tbl->expected_tpt = expected_tpt_siso20MHz; + + } else if (is_mimo(tbl->lq_type)) { + if (tbl->is_fat && !lq_data->is_dup) + if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_mimo40MHzSGI; + else + tbl->expected_tpt = expected_tpt_mimo40MHz; + else if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_mimo20MHzSGI; + else + tbl->expected_tpt = expected_tpt_mimo20MHz; + } else + tbl->expected_tpt = expected_tpt_G; +} + +#ifdef CONFIG_IWLWIFI_HT +static s32 rs_get_best_rate(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl, + u16 rate_mask, s8 index, s8 rate) +{ + struct iwl_scale_tbl_info *active_tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + s32 new_rate, high, low, start_hi; + s32 active_sr = active_tbl->win[index].success_ratio; + s32 *tpt_tbl = tbl->expected_tpt; + s32 active_tpt = active_tbl->expected_tpt[index]; + u16 high_low; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = rs_get_adjacent_rate(rate, rate_mask, tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + if ((((100 * tpt_tbl[rate]) > lq_data->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + new_rate = rate; + if (low != IWL_RATE_INVALID) + rate = low; + else + break; + } else { + if (new_rate != IWL_RATE_INVALID) + break; + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} +#endif /* CONFIG_IWLWIFI_HT */ + +static inline u8 rs_is_both_ant_supp(u8 valid_antenna) +{ + return (rs_is_ant_connected(valid_antenna, ANT_BOTH)); +} + +static int rs_switch_to_mimo(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + struct 
iwl_scale_tbl_info *tbl, int index) +{ + int rc = -1; +#ifdef CONFIG_IWLWIFI_HT + u16 rate_mask; + s32 rate; + s8 is_green = lq_data->is_green; + + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht) + return -1; + + IWL_DEBUG_HT("LQ: try to switch to MIMO\n"); + tbl->lq_type = LQ_MIMO; + rs_get_supported_rates(lq_data, NULL, tbl->lq_type, + &rate_mask); + + if (priv->current_assoc_ht.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) + return -1; + + if (!rs_is_both_ant_supp(lq_data->antenna)) + return -1; + + rc = 0; + tbl->is_dup = lq_data->is_dup; + tbl->action = 0; + if (priv->current_channel_width == IWL_CHANNEL_WIDTH_40MHZ) + tbl->is_fat = 1; + else + tbl->is_fat = 0; + + if (tbl->is_fat) { + if (priv->current_assoc_ht.sgf & HT_SHORT_GI_40MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + } else if (priv->current_assoc_ht.sgf & HT_SHORT_GI_20MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + + rs_get_expected_tpt_table(lq_data, tbl); + + rate = rs_get_best_rate(priv, lq_data, tbl, rate_mask, index, index); + + IWL_DEBUG_HT("LQ: MIMO best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) + return -1; + rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); + + IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate.rate_n_flags, is_green); + +#endif /*CONFIG_IWLWIFI_HT */ + return rc; +} + +static int rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl, int index) +{ + int rc = -1; +#ifdef CONFIG_IWLWIFI_HT + u16 rate_mask; + u8 is_green = lq_data->is_green; + s32 rate; + + IWL_DEBUG_HT("LQ: try to switch to SISO\n"); + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht) + return -1; + + rc = 0; + tbl->is_dup = lq_data->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + rs_get_supported_rates(lq_data, NULL, tbl->lq_type, + &rate_mask); + + if (priv->current_channel_width == IWL_CHANNEL_WIDTH_40MHZ) + tbl->is_fat = 1; + else + tbl->is_fat = 0; + + if (tbl->is_fat) { + if (priv->current_assoc_ht.sgf & HT_SHORT_GI_40MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + } else if (priv->current_assoc_ht.sgf & HT_SHORT_GI_20MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + + if (is_green) + tbl->is_SGI = 0; + + rs_get_expected_tpt_table(lq_data, tbl); + rate = rs_get_best_rate(priv, lq_data, tbl, rate_mask, index, index); + + IWL_DEBUG_HT("LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_HT("can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); + IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate.rate_n_flags, is_green); + +#endif /*CONFIG_IWLWIFI_HT */ + return rc; +} + +static int rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + int index) +{ + int rc = 0; + struct iwl_scale_tbl_info *tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + + for (; ;) { + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA: + IWL_DEBUG_HT("LQ Legacy switch Antenna\n"); + + search_tbl->lq_type = LQ_NONE; + 
lq_data->action_counter++; + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + if (!rs_is_other_ant_connected(lq_data->antenna, + tbl->antenna_type)) + break; + + memcpy(search_tbl, tbl, sz); + + rs_toggle_antenna(&(search_tbl->current_rate), + search_tbl); + rs_get_expected_tpt_table(lq_data, search_tbl); + lq_data->search_better_tbl = 1; + goto out; + + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_HT("LQ: Legacy switch to SISO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_SISO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + rc = rs_switch_to_siso(priv, lq_data, search_tbl, + index); + if (!rc) { + lq_data->search_better_tbl = 1; + lq_data->action_counter = 0; + } + if (!rc) + goto out; + + break; + case IWL_LEGACY_SWITCH_MIMO: + IWL_DEBUG_HT("LQ: Legacy switch MIMO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_MIMO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + search_tbl->antenna_type = ANT_BOTH; + rc = rs_switch_to_mimo(priv, lq_data, search_tbl, + index); + if (!rc) { + lq_data->search_better_tbl = 1; + lq_data->action_counter = 0; + } + if (!rc) + goto out; + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA; + + if (tbl->action == start_action) + break; + + } + return 0; + + out: + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA; + return 0; + +} + +static int rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + int index) +{ + int rc = -1; + u8 is_green = lq_data->is_green; + struct iwl_scale_tbl_info *tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + + for (;;) { + lq_data->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA: + IWL_DEBUG_HT("LQ: SISO SWITCH ANTENNA SISO\n"); + search_tbl->lq_type = LQ_NONE; + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + if (!rs_is_other_ant_connected(lq_data->antenna, + tbl->antenna_type)) + break; + + memcpy(search_tbl, tbl, sz); + search_tbl->action = IWL_SISO_SWITCH_MIMO; + rs_toggle_antenna(&(search_tbl->current_rate), + search_tbl); + lq_data->search_better_tbl = 1; + + goto out; + + case IWL_SISO_SWITCH_MIMO: + IWL_DEBUG_HT("LQ: SISO SWITCH TO MIMO FROM SISO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_MIMO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + search_tbl->antenna_type = ANT_BOTH; + rc = rs_switch_to_mimo(priv, lq_data, search_tbl, + index); + if (!rc) + lq_data->search_better_tbl = 1; + + if (!rc) + goto out; + break; + case IWL_SISO_SWITCH_GI: + IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->action = 0; + if (search_tbl->is_SGI) + search_tbl->is_SGI = 0; + else if (!is_green) + search_tbl->is_SGI = 1; + else + break; + lq_data->search_better_tbl = 1; + if ((tbl->lq_type == LQ_SISO) && + (tbl->is_SGI)) { + s32 tpt = lq_data->last_tpt / 100; + if (((!tbl->is_fat) && + (tpt >= expected_tpt_siso20MHz[index])) || + ((tbl->is_fat) && + (tpt >= expected_tpt_siso40MHz[index]))) + lq_data->search_better_tbl = 0; + } + rs_get_expected_tpt_table(lq_data, search_tbl); + rs_mcs_from_tbl(&search_tbl->current_rate, + search_tbl, index, is_green); + goto out; + 
} + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA; + + if (tbl->action == start_action) + break; + } + return 0; + + out: + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA; + return 0; +} + +static int rs_move_mimo_to_other(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + int index) +{ + int rc = -1; + s8 is_green = lq_data->is_green; + struct iwl_scale_tbl_info *tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + + for (;;) { + lq_data->action_counter++; + switch (tbl->action) { + case IWL_MIMO_SWITCH_ANTENNA_A: + case IWL_MIMO_SWITCH_ANTENNA_B: + IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_SISO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A) + search_tbl->antenna_type = ANT_MAIN; + else + search_tbl->antenna_type = ANT_AUX; + + rc = rs_switch_to_siso(priv, lq_data, search_tbl, + index); + if (!rc) { + lq_data->search_better_tbl = 1; + goto out; + } + break; + + case IWL_MIMO_SWITCH_GI: + IWL_DEBUG_HT("LQ: MIMO SWITCH TO GI\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_MIMO; + search_tbl->antenna_type = ANT_BOTH; + search_tbl->action = 0; + if (search_tbl->is_SGI) + search_tbl->is_SGI = 0; + else + search_tbl->is_SGI = 1; + lq_data->search_better_tbl = 1; + if ((tbl->lq_type == LQ_MIMO) && + (tbl->is_SGI)) { + s32 tpt = lq_data->last_tpt / 100; + if (((!tbl->is_fat) && + (tpt >= expected_tpt_mimo20MHz[index])) || + ((tbl->is_fat) && + (tpt >= expected_tpt_mimo40MHz[index]))) + lq_data->search_better_tbl = 0; + } + rs_get_expected_tpt_table(lq_data, search_tbl); + rs_mcs_from_tbl(&search_tbl->current_rate, + search_tbl, index, is_green); + goto out; + + } + tbl->action++; + if (tbl->action > IWL_MIMO_SWITCH_GI) + tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; + + if (tbl->action == start_action) + break; + } + + return 0; + out: + tbl->action++; + if (tbl->action > IWL_MIMO_SWITCH_GI) + tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; + return 0; + +} + +static void rs_stay_in_table(struct iwl_rate_scale_priv *lq_data) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + + active_tbl = lq_data->active_tbl; + + tbl = &(lq_data->lq_info[active_tbl]); + + if (lq_data->stay_in_tbl) { + + if (lq_data->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_data->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + flush_interval_passed = 0; + if ((lq_data->total_failed > lq_data->max_failure_limit) || + (lq_data->total_success > lq_data->max_success_limit) || + ((!lq_data->search_better_tbl) && (lq_data->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_HT("LQ: stay is expired %d %d %d\n:", + lq_data->total_failed, + lq_data->total_success, + flush_interval_passed); + lq_data->stay_in_tbl = 0; + lq_data->total_failed = 0; + lq_data->total_success = 0; + lq_data->flush_timer = 0; + } else if (lq_data->table_count > 0) { + lq_data->table_count++; + if (lq_data->table_count >= + lq_data->table_count_limit) { + lq_data->table_count = 0; + + IWL_DEBUG_HT("LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window( + &(tbl->win[i])); + } 
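/*
 * rs_stay_in_table() decides when the "stay in table" phase is over: the
 * search phase resumes once the accumulated failures or successes cross
 * their limits, or once the flush interval elapses with no table search in
 * progress.  The snippet below models only that predicate with a local
 * struct; the field names mirror iwl_rate_scale_priv, but nothing else is
 * taken from the driver, and the limits are just the legacy numbers from
 * iwl-4965-rs.h used as sample inputs.
 */
#include <stdbool.h>
#include <stdio.h>

struct stay_state {
	unsigned total_failed, max_failure_limit;
	unsigned total_success, max_success_limit;
	bool search_better_tbl;
	bool flush_timer_armed;
	bool flush_interval_passed;
};

static bool stay_expired(const struct stay_state *s)
{
	return s->total_failed > s->max_failure_limit ||
	       s->total_success > s->max_success_limit ||
	       (!s->search_better_tbl && s->flush_timer_armed &&
		s->flush_interval_passed);
}

int main(void)
{
	struct stay_state s = {
		.total_failed = 170, .max_failure_limit = 160, /* legacy limits */
		.total_success = 0,  .max_success_limit = 480,
	};

	printf("expired: %d\n", stay_expired(&s));  /* 1: too many failures */
	return 0;
}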
+ } + + if (!lq_data->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + } + } +} + +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct net_device *dev, + struct ieee80211_hdr *hdr, + struct sta_info *sta) +{ + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 fc, rate_mask; + u8 update_lq = 0; + struct iwl_rate_scale_priv *lq_data; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + struct iwl_rate mcs_rate; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + + IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); + + fc = le16_to_cpu(hdr->frame_control); + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) { + /* Send management frames and broadcast/multicast data using + * lowest rate. */ + /* TODO: this could probably be improved.. */ + return; + } + + if (!sta || !sta->rate_ctrl_priv) + return; + + if (!priv->lq_mngr.lq_ready) { + IWL_DEBUG_RATE("still rate scaling not ready\n"); + return; + } + lq_data = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + + if (!lq_data->search_better_tbl) + active_tbl = lq_data->active_tbl; + else + active_tbl = 1 - lq_data->active_tbl; + + tbl = &(lq_data->lq_info[active_tbl]); + is_green = lq_data->is_green; + + index = sta->last_txrate; + + IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index, + tbl->lq_type); + + rs_get_supported_rates(lq_data, hdr, tbl->lq_type, + &rate_mask); + + IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_data->phymode == (u8) MODE_IEEE80211A) + rate_scale_index_msk = (u16) (rate_mask & + (lq_data->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_data->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (index < 0 || !((1 << index) & rate_scale_index_msk)) { + index = IWL_INVALID_VALUE; + update_lq = 1; + + /* get the lowest availabe rate */ + for (i = 0; i <= IWL_RATE_COUNT; i++) { + if ((1 << i) & rate_scale_index_msk) + index = i; + } + + if (index == IWL_INVALID_VALUE) { + IWL_WARNING("Can not find a suitable rate\n"); + return; + } + } + + if (!tbl->expected_tpt) + rs_get_expected_tpt_table(lq_data, tbl); + + window = &(tbl->win[index]); + + fail_count = window->counter - window->success_counter; + if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) + || (tbl->expected_tpt == NULL)) { + IWL_DEBUG_RATE("LQ: still below TH succ %d total %d " + "for index %d\n", + window->success_counter, window->counter, index); + window->average_tpt = IWL_INVALID_VALUE; + rs_stay_in_table(lq_data); + if (update_lq) { + rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); + rs_fill_link_cmd(lq_data, &mcs_rate, &lq_data->lq); + rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC); + } + goto out; + + } else + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + + if (lq_data->search_better_tbl) { + int success_limit = IWL_RATE_SCALE_SWITCH; + + if ((window->success_ratio > success_limit) || + (window->average_tpt > lq_data->last_tpt)) { + if (!is_legacy(tbl->lq_type)) { + 
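/*
 * The window->average_tpt expression above,
 *     (success_ratio * expected_tpt[index] + 64) / 128,
 * reads naturally if success_ratio is a percentage scaled by 128, which is
 * what the constants in iwl-4965-rs.h suggest (IWL_RS_GOOD_RATIO is 12800,
 * i.e. 100 * 128): the result is the expected throughput scaled by the
 * success percentage, rounded to nearest.  A quick standalone check with a
 * made-up expected_tpt value:
 */
#include <stdio.h>

int main(void)
{
	unsigned success_ratio = 85 * 128;  /* 85 % success, scaled by 128 */
	unsigned expected_tpt  = 186;       /* hypothetical table entry    */
	unsigned average_tpt = (success_ratio * expected_tpt + 64) / 128;

	printf("%u\n", average_tpt);        /* 15810 == 85 * 186           */
	return 0;
}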
IWL_DEBUG_HT("LQ: we are switching to HT" + " rate suc %d current tpt %d" + " old tpt %d\n", + window->success_ratio, + window->average_tpt, + lq_data->last_tpt); + lq_data->enable_counter = 1; + } + lq_data->active_tbl = active_tbl; + current_tpt = window->average_tpt; + } else { + tbl->lq_type = LQ_NONE; + active_tbl = lq_data->active_tbl; + tbl = &(lq_data->lq_info[active_tbl]); + + index = iwl_rate_index_from_plcp( + tbl->current_rate.rate_n_flags); + + update_lq = 1; + current_tpt = lq_data->last_tpt; + IWL_DEBUG_HT("XXY GO BACK TO OLD TABLE\n"); + } + lq_data->search_better_tbl = 0; + done_search = 1; + goto lq_update; + } + + high_low = rs_get_adjacent_rate(index, rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + current_tpt = window->average_tpt; + + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + + scale_action = 1; + + if ((window->success_ratio <= IWL_RATE_DECREASE_TH) || + (current_tpt == 0)) { + IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); + scale_action = -1; + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) + scale_action = 1; + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + else { + if (high_tpt != IWL_INVALID_VALUE) { + if (high_tpt > current_tpt) + scale_action = 1; + else { + IWL_DEBUG_RATE + ("decrease rate because of high tpt\n"); + scale_action = -1; + } + } else if (low_tpt != IWL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE + ("decrease rate because of low tpt\n"); + scale_action = -1; + } else + scale_action = 1; + } + } + + if (scale_action == -1) { + if ((low != IWL_RATE_INVALID) && + ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + } else if ((scale_action == 1) && + (window->success_ratio < IWL_RATE_INCREASE_TH)) + scale_action = 0; + + switch (scale_action) { + case -1: + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + break; + case 1: + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + default: + break; + } + + IWL_DEBUG_HT("choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + + lq_update: + if (update_lq) { + rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); + rs_fill_link_cmd(lq_data, &mcs_rate, &lq_data->lq); + rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC); + } + rs_stay_in_table(lq_data); + + if (!update_lq && !done_search && !lq_data->stay_in_tbl) { + lq_data->last_tpt = current_tpt; + + if (is_legacy(tbl->lq_type)) + rs_move_legacy_other(priv, lq_data, index); + else if (is_siso(tbl->lq_type)) + rs_move_siso_to_other(priv, lq_data, index); + else + rs_move_mimo_to_other(priv, lq_data, index); + + if (lq_data->search_better_tbl) { + tbl = &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + + index = iwl_rate_index_from_plcp( + tbl->current_rate.rate_n_flags); + + IWL_DEBUG_HT("Switch current mcs: %X index: %d\n", + tbl->current_rate.rate_n_flags, index); + rs_fill_link_cmd(lq_data, &tbl->current_rate, + &lq_data->lq); + rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC); + } + tbl1 = &(lq_data->lq_info[lq_data->active_tbl]); + + if 
(is_legacy(tbl1->lq_type) && +#ifdef CONFIG_IWLWIFI_HT + !priv->current_assoc_ht.is_ht && +#endif + (lq_data->action_counter >= 1)) { + lq_data->action_counter = 0; + IWL_DEBUG_HT("LQ: STAY in legacy table\n"); + rs_set_stay_in_table(1, lq_data); + } + + if (lq_data->enable_counter && + (lq_data->action_counter >= IWL_ACTION_LIMIT)) { +#ifdef CONFIG_IWLWIFI_HT_AGG + if ((lq_data->last_tpt > TID_AGG_TPT_THREHOLD) && + (priv->lq_mngr.agg_ctrl.auto_agg)) { + priv->lq_mngr.agg_ctrl.tid_retry = + TID_ALL_SPECIFIED; + schedule_work(&priv->agg_work); + } +#endif /*CONFIG_IWLWIFI_HT_AGG */ + lq_data->action_counter = 0; + rs_set_stay_in_table(0, lq_data); + } + } else { + if ((!update_lq) && (!done_search) && (!lq_data->flush_timer)) + lq_data->flush_timer = jiffies; + } + +out: + rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green); + i = index; + sta->last_txrate = i; + + /* sta->txrate is an index to A mode rates which start + * at IWL_FIRST_OFDM_RATE + */ + if (lq_data->phymode == (u8) MODE_IEEE80211A) + sta->txrate = i - IWL_FIRST_OFDM_RATE; + else + sta->txrate = i; + + return; +} + + +static void rs_initialize_lq(struct iwl_priv *priv, + struct sta_info *sta) +{ + int i; + struct iwl_rate_scale_priv *lq; + struct iwl_scale_tbl_info *tbl; + u8 active_tbl = 0; + int rate_idx; + u8 use_green = rs_use_green(priv); + struct iwl_rate mcs_rate; + + if (!sta || !sta->rate_ctrl_priv) + goto out; + + lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + i = sta->last_txrate; + + if ((lq->lq.sta_id == 0xff) && + (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) + goto out; + + if (!lq->search_better_tbl) + active_tbl = lq->active_tbl; + else + active_tbl = 1 - lq->active_tbl; + + tbl = &(lq->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + mcs_rate.rate_n_flags = iwl_rates[i].plcp ; + mcs_rate.rate_n_flags |= RATE_MCS_ANT_B_MSK; + mcs_rate.rate_n_flags &= ~RATE_MCS_ANT_A_MSK; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK; + + tbl->antenna_type = ANT_AUX; + rs_get_tbl_info_from_mcs(&mcs_rate, priv->phymode, tbl, &rate_idx); + if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type)) + rs_toggle_antenna(&mcs_rate, tbl); + + rs_mcs_from_tbl(&mcs_rate, tbl, rate_idx, use_green); + tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags; + rs_get_expected_tpt_table(lq, tbl); + rs_fill_link_cmd(lq, &mcs_rate, &lq->lq); + rs_send_lq_cmd(priv, &lq->lq, CMD_ASYNC); + out: + return; +} + +static struct ieee80211_rate *rs_get_lowest_rate(struct ieee80211_local + *local) +{ + struct ieee80211_hw_mode *mode = local->oper_hw_mode; + int i; + + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *rate = &mode->rates[i]; + + if (rate->flags & IEEE80211_RATE_SUPPORTED) + return rate; + } + + return &mode->rates[0]; +} + +static struct ieee80211_rate *rs_get_rate(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct rate_control_extra + *extra) +{ + + int i; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + u16 fc; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl_rate_scale_priv *lq; + + IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); + + memset(extra, 0, sizeof(*extra)); + + fc = le16_to_cpu(hdr->frame_control); + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) { + /* Send management frames and broadcast/multicast data using + * 
lowest rate. */ + /* TODO: this could probably be improved.. */ + return rs_get_lowest_rate(local); + } + + sta = sta_info_get(local, hdr->addr1); + + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + return rs_get_lowest_rate(local); + } + + lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + i = sta->last_txrate; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && !lq->ibss_sta_added) { + u8 sta_id = iwl_hw_find_station(priv, hdr->addr1); + DECLARE_MAC_BUF(mac); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE("LQ: ADD station %s\n", + print_mac(mac, hdr->addr1)); + sta_id = iwl_add_station(priv, + hdr->addr1, 0, CMD_ASYNC); + } + if ((sta_id != IWL_INVALID_STATION)) { + lq->lq.sta_id = sta_id; + lq->lq.rs_table[0].rate_n_flags = 0; + lq->ibss_sta_added = 1; + rs_initialize_lq(priv, sta); + } + if (!lq->ibss_sta_added) + goto done; + } + + done: + sta_info_put(sta); + if ((i < 0) || (i > IWL_RATE_COUNT)) + return rs_get_lowest_rate(local); + + return &priv->ieee_rates[i]; +} + +static void *rs_alloc_sta(void *priv, gfp_t gfp) +{ + struct iwl_rate_scale_priv *crl; + int i, j; + + IWL_DEBUG_RATE("create station rate scale window\n"); + + crl = kzalloc(sizeof(struct iwl_rate_scale_priv), gfp); + + if (crl == NULL) + return NULL; + crl->lq.sta_id = 0xff; + + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(crl->lq_info[j].win[i])); + + return crl; +} + +static void rs_rate_init(void *priv_rate, void *priv_sta, + struct ieee80211_local *local, + struct sta_info *sta) +{ + int i, j; + struct ieee80211_hw_mode *mode = local->oper_hw_mode; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl_rate_scale_priv *crl = priv_sta; + + crl->flush_timer = 0; + crl->supp_rates = sta->supp_rates; + sta->txrate = 3; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(crl->lq_info[j].win[i])); + + IWL_DEBUG_RATE("rate scale global init\n"); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
*/ + + crl->ibss_sta_added = 0; + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + u8 sta_id = iwl_hw_find_station(priv, sta->addr); + DECLARE_MAC_BUF(mac); + + /* for IBSS the call are from tasklet */ + IWL_DEBUG_HT("LQ: ADD station %s\n", + print_mac(mac, sta->addr)); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE("LQ: ADD station %s\n", + print_mac(mac, sta->addr)); + sta_id = iwl_add_station(priv, + sta->addr, 0, CMD_ASYNC); + } + if ((sta_id != IWL_INVALID_STATION)) { + crl->lq.sta_id = sta_id; + crl->lq.rs_table[0].rate_n_flags = 0; + } + /* FIXME: this is w/a remove it later */ + priv->assoc_station_added = 1; + } + + for (i = 0; i < mode->num_rates; i++) { + if ((sta->supp_rates & BIT(i)) && + (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) + sta->txrate = i; + } + sta->last_txrate = sta->txrate; + /* For MODE_IEEE80211A mode cck rate are at end + * rate table + */ + if (local->hw.conf.phymode == MODE_IEEE80211A) + sta->last_txrate += IWL_FIRST_OFDM_RATE; + + crl->is_dup = priv->is_dup; + crl->valid_antenna = priv->valid_antenna; + crl->antenna = priv->antenna; + crl->is_green = rs_use_green(priv); + crl->active_rate = priv->active_rate; + crl->active_rate &= ~(0x1000); + crl->active_rate_basic = priv->active_rate_basic; + crl->phymode = priv->phymode; +#ifdef CONFIG_IWLWIFI_HT + crl->active_siso_rate = (priv->current_assoc_ht.supp_rates[0] << 1); + crl->active_siso_rate |= (priv->current_assoc_ht.supp_rates[0] & 0x1); + crl->active_siso_rate &= ~((u16)0x2); + crl->active_siso_rate = crl->active_siso_rate << IWL_FIRST_OFDM_RATE; + + crl->active_mimo_rate = (priv->current_assoc_ht.supp_rates[1] << 1); + crl->active_mimo_rate |= (priv->current_assoc_ht.supp_rates[1] & 0x1); + crl->active_mimo_rate &= ~((u16)0x2); + crl->active_mimo_rate = crl->active_mimo_rate << IWL_FIRST_OFDM_RATE; + IWL_DEBUG_HT("MIMO RATE 0x%X SISO MASK 0x%X\n", crl->active_siso_rate, + crl->active_mimo_rate); +#endif /*CONFIG_IWLWIFI_HT*/ +#ifdef CONFIG_MAC80211_DEBUGFS + crl->drv = priv; +#endif + + if (priv->assoc_station_added) + priv->lq_mngr.lq_ready = 1; + + rs_initialize_lq(priv, sta); +} + +static void rs_fill_link_cmd(struct iwl_rate_scale_priv *lq_data, + struct iwl_rate *tx_mcs, + struct iwl_link_quality_cmd *lq_cmd) +{ + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_count = 0; + u8 use_ht_possible = 1; + struct iwl_rate new_rate; + struct iwl_scale_tbl_info tbl_type = { 0 }; + + rs_dbgfs_set_mcs(lq_data, tx_mcs, index); + + rs_get_tbl_info_from_mcs(tx_mcs, lq_data->phymode, + &tbl_type, &rate_idx); + + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_count = 1; + repeat_rate = IWL_NUMBER_TRY; + } else + repeat_rate = IWL_HT_NUMBER_TRY; + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 
1 : 0; + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(tx_mcs->rate_n_flags); + new_rate.rate_n_flags = tx_mcs->rate_n_flags; + + if (is_mimo(tbl_type.lq_type) || (tbl_type.antenna_type == ANT_MAIN)) + lq_cmd->general_params.single_stream_ant_msk = 1; + else + lq_cmd->general_params.single_stream_ant_msk = 2; + + index++; + repeat_rate--; + + while (index < LINK_QUAL_MAX_RETRY_NUM) { + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_count < + NUM_TRY_BEFORE_ANTENNA_TOGGLE) + ant_toggle_count++; + else { + rs_toggle_antenna(&new_rate, &tbl_type); + ant_toggle_count = 1; + } + } + + rs_dbgfs_set_mcs(lq_data, &new_rate, index); + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate.rate_n_flags); + repeat_rate--; + index++; + } + + rs_get_tbl_info_from_mcs(&new_rate, lq_data->phymode, &tbl_type, + &rate_idx); + + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + rs_get_lower_rate(lq_data, &tbl_type, rate_idx, + use_ht_possible, &new_rate); + + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_count < NUM_TRY_BEFORE_ANTENNA_TOGGLE) + ant_toggle_count++; + else { + rs_toggle_antenna(&new_rate, &tbl_type); + ant_toggle_count = 1; + } + repeat_rate = IWL_NUMBER_TRY; + } else + repeat_rate = IWL_HT_NUMBER_TRY; + + use_ht_possible = 0; + + rs_dbgfs_set_mcs(lq_data, &new_rate, index); + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate.rate_n_flags); + + index++; + repeat_rate--; + } + + lq_cmd->general_params.dual_stream_ant_msk = 3; + lq_cmd->agg_params.agg_dis_start_th = 3; + lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000); +} + +static void *rs_alloc(struct ieee80211_local *local) +{ + return local->hw.priv; +} +/* rate scale requires free function to be implemented */ +static void rs_free(void *priv_rate) +{ + return; +} + +static void rs_clear(void *priv_rate) +{ + struct iwl_priv *priv = (struct iwl_priv *) priv_rate; + + IWL_DEBUG_RATE("enter\n"); + + priv->lq_mngr.lq_ready = 0; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + if (priv->lq_mngr.agg_ctrl.granted_ba) + iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED); +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + IWL_DEBUG_RATE("leave\n"); +} + +static void rs_free_sta(void *priv, void *priv_sta) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + + IWL_DEBUG_RATE("enter\n"); + kfree(rs_priv); + IWL_DEBUG_RATE("leave\n"); +} + + +#ifdef CONFIG_MAC80211_DEBUGFS +static int open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +static void rs_dbgfs_set_mcs(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate *mcs, int index) +{ + const u32 cck_rate = 0x820A; + if (rs_priv->dbg_fixed.rate_n_flags) { + if (index < 12) + mcs->rate_n_flags = rs_priv->dbg_fixed.rate_n_flags; + else + mcs->rate_n_flags = cck_rate; + IWL_DEBUG_RATE("Fixed rate ON\n"); + return; + } + + IWL_DEBUG_RATE("Fixed rate OFF\n"); +} + +static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_rate_scale_priv *rs_priv = file->private_data; + char buf[64]; + int buf_size; + u32 parsed_rate; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + rs_priv->dbg_fixed.rate_n_flags = parsed_rate; + else + rs_priv->dbg_fixed.rate_n_flags = 0; 
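/*
 * rs_fill_link_cmd(), a little further up, builds the retry ladder the
 * firmware walks when transmissions fail: the starting rate is repeated a
 * fixed number of times (toggling the antenna every few tries for legacy
 * rates), then a lower rate is picked for the remaining slots until all
 * LINK_QUAL_MAX_RETRY_NUM entries are filled.  The sketch below reduces
 * that to its skeleton; the plain int table, the constants and step_down()
 * are stand-ins, not the command layout or the driver's rate-stepping logic.
 */
#include <stdio.h>

#define MAX_RETRY      16   /* stand-in for LINK_QUAL_MAX_RETRY_NUM */
#define TRIES_PER_RATE 3    /* stand-in for IWL_NUMBER_TRY          */

static int step_down(int rate)
{
	return rate > 0 ? rate - 1 : 0;   /* clamp at the lowest rate */
}

int main(void)
{
	int table[MAX_RETRY];
	int rate = 11;                    /* e.g. start at index 11 (54M) */
	int index = 0;

	while (index < MAX_RETRY) {
		int repeat;

		/* repeat the current rate for a few tries... */
		for (repeat = 0; repeat < TRIES_PER_RATE && index < MAX_RETRY;
		     repeat++)
			table[index++] = rate;
		/* ...then fall back to a lower rate for later retries */
		rate = step_down(rate);
	}

	for (index = 0; index < MAX_RETRY; index++)
		printf("retry %2d -> rate index %d\n", index, table[index]);
	return 0;
}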
+ + rs_priv->active_rate = 0x0FFF; + rs_priv->active_siso_rate = 0x1FD0; + rs_priv->active_mimo_rate = 0x1FD0; + + IWL_DEBUG_RATE("sta_id %d rate 0x%X\n", + rs_priv->lq.sta_id, rs_priv->dbg_fixed.rate_n_flags); + + if (rs_priv->dbg_fixed.rate_n_flags) { + rs_fill_link_cmd(rs_priv, &rs_priv->dbg_fixed, &rs_priv->lq); + rs_send_lq_cmd(rs_priv->drv, &rs_priv->lq, CMD_ASYNC); + } + + return count; +} + +static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[1024]; + int desc = 0; + int i = 0; + + struct iwl_rate_scale_priv *rs_priv = file->private_data; + + desc += sprintf(buff+desc, "sta_id %d\n", rs_priv->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + rs_priv->total_failed, rs_priv->total_success, + rs_priv->active_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + rs_priv->dbg_fixed.rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + rs_priv->lq.general_params.flags, + rs_priv->lq.general_params.mimo_delimiter, + rs_priv->lq.general_params.single_stream_ant_msk, + rs_priv->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(rs_priv->lq.agg_params.agg_time_limit), + rs_priv->lq.agg_params.agg_dis_start_th, + rs_priv->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + rs_priv->lq.general_params.start_rate_index[0], + rs_priv->lq.general_params.start_rate_index[1], + rs_priv->lq.general_params.start_rate_index[2], + rs_priv->lq.general_params.start_rate_index[3]); + + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + desc += sprintf(buff+desc, " rate[%d] 0x%X\n", + i, le32_to_cpu(rs_priv->lq.rs_table[i].rate_n_flags)); + + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = rs_sta_dbgfs_scale_table_write, + .read = rs_sta_dbgfs_scale_table_read, + .open = open_file_generic, +}; +static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[1024]; + int desc = 0; + int i, j; + + struct iwl_rate_scale_priv *rs_priv = file->private_data; + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n" + "rate=0x%X\n", + rs_priv->active_tbl == i?"*":"x", + rs_priv->lq_info[i].lq_type, + rs_priv->lq_info[i].is_SGI, + rs_priv->lq_info[i].is_fat, + rs_priv->lq_info[i].is_dup, + rs_priv->lq_info[i].current_rate.rate_n_flags); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + rs_priv->lq_info[i].win[j].counter, + rs_priv->lq_info[i].win[j].success_counter, + rs_priv->lq_info[i].win[j].success_ratio); + } + } + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = rs_sta_dbgfs_stats_table_read, + .open = open_file_generic, +}; + +static void rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + rs_priv->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", 0600, dir, + rs_priv, &rs_sta_dbgfs_scale_table_ops); + rs_priv->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", 0600, dir, + rs_priv, 
&rs_sta_dbgfs_stats_table_ops); +} + +static void rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + debugfs_remove(rs_priv->rs_sta_dbgfs_scale_table_file); + debugfs_remove(rs_priv->rs_sta_dbgfs_stats_table_file); +} +#endif + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init, + .clear = rs_clear, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rs_add_debugfs, + .remove_sta_debugfs = rs_remove_debugfs, +#endif +}; + +int iwl_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct iwl_priv *priv = hw->priv; + struct iwl_rate_scale_priv *rs_priv; + struct sta_info *sta; + int count = 0, i; + u32 samples = 0, success = 0, good = 0; + unsigned long now = jiffies; + u32 max_time = 0; + u8 lq_type, antenna; + + sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) { + sta_info_put(sta); + IWL_DEBUG_RATE("leave - no private rate data!\n"); + } else + IWL_DEBUG_RATE("leave - no station!\n"); + return sprintf(buf, "station %d not found\n", sta_id); + } + + rs_priv = (void *)sta->rate_ctrl_priv; + + lq_type = rs_priv->lq_info[rs_priv->active_tbl].lq_type; + antenna = rs_priv->lq_info[rs_priv->active_tbl].antenna_type; + + if (is_legacy(lq_type)) + i = IWL_RATE_54M_INDEX; + else + i = IWL_RATE_60M_INDEX; + while (1) { + u64 mask; + int j; + int active = rs_priv->active_tbl; + + count += + sprintf(&buf[count], " %2dMbs: ", iwl_rates[i].ieee / 2); + + mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); + for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) + buf[count++] = + (rs_priv->lq_info[active].win[i].data & mask) + ? '1' : '0'; + + samples += rs_priv->lq_info[active].win[i].counter; + good += rs_priv->lq_info[active].win[i].success_counter; + success += rs_priv->lq_info[active].win[i].success_counter * + iwl_rates[i].ieee; + + if (rs_priv->lq_info[active].win[i].stamp) { + int delta = + jiffies_to_msecs(now - + rs_priv->lq_info[active].win[i].stamp); + + if (delta > max_time) + max_time = delta; + + count += sprintf(&buf[count], "%5dms\n", delta); + } else + buf[count++] = '\n'; + + j = iwl_get_prev_ieee_rate(i); + if (j == i) + break; + i = j; + } + + /* Display the average rate of all samples taken. 
+ * + * NOTE: We multiple # of samples by 2 since the IEEE measurement + * added from iwl_rates is actually 2X the rate */ + if (samples) + count += sprintf(&buf[count], + "\nAverage rate is %3d.%02dMbs over last %4dms\n" + "%3d%% success (%d good packets over %d tries)\n", + success / (2 * samples), (success * 5 / samples) % 10, + max_time, good * 100 / samples, good, samples); + else + count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n"); + count += sprintf(&buf[count], "\nrate scale type %d anntena %d " + "active_search %d rate index %d\n", lq_type, antenna, + rs_priv->search_better_tbl, sta->last_txrate); + + sta_info_put(sta); + return count; +} + +void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + + priv->lq_mngr.lq_ready = 1; +} + +void iwl_rate_control_register(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_register(&rs_ops); +} + +void iwl_rate_control_unregister(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h new file mode 100644 index 000000000000..c6325f72df68 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h @@ -0,0 +1,266 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_4965_rs_h__ +#define __iwl_4965_rs_h__ + +#include "iwl-4965.h" + +struct iwl_rate_info { + u8 plcp; + u8 plcp_siso; + u8 plcp_mimo; + u8 ieee; + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_INVM_INDEX +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1<<IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1<<IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1<<IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1<<IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1<<IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1<<IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1<<IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1<<IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1<<IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1<<IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1<<IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1<<IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1<<IWL_RATE_11M_INDEX) + +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3, + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, +}; + +/* OFDM HT rate plcp */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO_6M_PLCP = 0x8, + IWL_RATE_MIMO_12M_PLCP = 0x9, + IWL_RATE_MIMO_18M_PLCP = 0xa, + IWL_RATE_MIMO_24M_PLCP = 0xb, + IWL_RATE_MIMO_36M_PLCP = 0xc, + IWL_RATE_MIMO_48M_PLCP = 0xd, + IWL_RATE_MIMO_54M_PLCP = 0xe, + IWL_RATE_MIMO_60M_PLCP = 0xf, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_CCK_BASIC_RATES_MASK \ + (IWL_RATE_1M_MASK | \ + IWL_RATE_2M_MASK) + +#define IWL_CCK_RATES_MASK \ + (IWL_BASIC_RATES_MASK | \ + IWL_RATE_5M_MASK | \ + IWL_RATE_11M_MASK) + +#define IWL_OFDM_BASIC_RATES_MASK \ + (IWL_RATE_6M_MASK | \ + IWL_RATE_12M_MASK | \ + IWL_RATE_24M_MASK) + +#define IWL_OFDM_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_RATE_9M_MASK | \ + IWL_RATE_18M_MASK | \ + IWL_RATE_36M_MASK | \ + IWL_RATE_48M_MASK | \ + IWL_RATE_54M_MASK) + +#define IWL_BASIC_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_CCK_BASIC_RATES_MASK) + +#define IWL_RATES_MASK ((1<<IWL_RATE_COUNT)-1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +#define IWL_LEGACY_SWITCH_ANTENNA 0 +#define IWL_LEGACY_SWITCH_SISO 1 +#define IWL_LEGACY_SWITCH_MIMO 2 + +#define IWL_RS_GOOD_RATIO 12800 + +#define IWL_ACTION_LIMIT 3 +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +#define IWL_RATE_SCALE_SWITCH (10880) + +#define IWL_SISO_SWITCH_ANTENNA 0 +#define IWL_SISO_SWITCH_MIMO 1 +#define IWL_SISO_SWITCH_GI 2 + +#define IWL_MIMO_SWITCH_ANTENNA_A 0 +#define IWL_MIMO_SWITCH_ANTENNA_B 1 +#define IWL_MIMO_SWITCH_GI 2 + +#define LQ_SIZE 2 + +extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + +enum iwl_table_type { + LQ_NONE, + LQ_G, + LQ_A, + LQ_SISO, + LQ_MIMO, + LQ_MAX, +}; 
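/*
 * The per-rate masks above are simply (1 << rate_index), so supported-rate
 * sets can be composed and tested with ordinary bit operations.  A small
 * illustration reusing the index values from the enum (6M = 4, 12M = 6,
 * 24M = 8); the names here are local to the example:
 */
#include <stdio.h>

enum { RATE_6M_INDEX = 4, RATE_12M_INDEX = 6, RATE_24M_INDEX = 8 };

#define RATE_MASK(idx)        (1u << (idx))
#define OFDM_BASIC_RATES_MASK (RATE_MASK(RATE_6M_INDEX) | \
			       RATE_MASK(RATE_12M_INDEX) | \
			       RATE_MASK(RATE_24M_INDEX))

int main(void)
{
	unsigned supported = OFDM_BASIC_RATES_MASK;

	printf("mask = 0x%x\n", supported);                 /* 0x150 */
	printf("24M supported: %d\n",
	       !!(supported & RATE_MASK(RATE_24M_INDEX)));  /* 1 */
	return 0;
}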
+ +enum iwl_antenna_type { + ANT_NONE, + ANT_MAIN, + ANT_AUX, + ANT_BOTH, +}; + +static inline u8 iwl_get_prev_ieee_rate(u8 rate_index) +{ + u8 rate = iwl_rates[rate_index].prev_ieee; + + if (rate == IWL_RATE_INVALID) + rate = rate_index; + return rate; +} + +extern int iwl_rate_index_from_plcp(int plcp); + +/** + * iwl_fill_rs_info - Fill an output text buffer with the rate representation + * + * NOTE: This is provided as a quick mechanism for a user to visualize + * the performance of the rate control alogirthm and is not meant to be + * parsed software. + */ +extern int iwl_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id); + +/** + * iwl_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific througput table used is based on the type of network + * the associated with, including A, B, G, and G w/ TGG protection + */ +extern void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern void iwl_rate_control_register(struct ieee80211_hw *hw); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +extern void iwl_rate_control_unregister(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c new file mode 100644 index 000000000000..b50d20267c8a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -0,0 +1,4736 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#define IWL 4965 + +#include "iwlwifi.h" +#include "iwl-4965.h" +#include "iwl-helpers.h" + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO_##s##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ +}; + +static int is_fat_channel(__le32 rxon_flags) +{ + return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || + (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK); +} + +static u8 is_single_stream(struct iwl_priv *priv) +{ +#ifdef CONFIG_IWLWIFI_HT + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht || + (priv->active_rate_ht[1] == 0) || + (priv->ps_mode == IWL_MIMO_PS_STATIC)) + return 1; +#else + return 1; +#endif /*CONFIG_IWLWIFI_HT */ + return 0; +} + +/* + * Determine how many receiver/antenna chains to use. + * More provides better reception via diversity. Fewer saves power. + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv, + u8 *idle_state, u8 *rx_state) +{ + u8 is_single = is_single_stream(priv); + u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1; + + /* # of Rx chains to use when expecting MIMO. */ + if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC))) + *rx_state = 2; + else + *rx_state = 3; + + /* # Rx chains when idling and maybe trying to save power */ + switch (priv->ps_mode) { + case IWL_MIMO_PS_STATIC: + case IWL_MIMO_PS_DYNAMIC: + *idle_state = (is_cam) ? 2 : 1; + break; + case IWL_MIMO_PS_NONE: + *idle_state = (is_cam) ? 
*rx_state : 1; + break; + default: + *idle_state = 1; + break; + } + + return 0; +} + +int iwl_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* stop HW */ + iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + rc = iwl_poll_restricted_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + (1 << 24), 1000); + if (rc < 0) + IWL_ERROR("Can't stop Rx DMA.\n"); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int start = 0; + int ret = IWL_INVALID_STATION; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) || + (priv->iw_mode == IEEE80211_IF_TYPE_AP)) + start = IWL_STA_ID; + + if (is_broadcast_ether_addr(addr)) + return IWL4965_BROADCAST_ID; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = start; i < priv->hw_setting.max_stations; i++) + if ((priv->stations[i].used) && + (!compare_ether_addr + (priv->stations[i].sta.sta.addr, addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n", + print_mac(mac, addr), priv->num_stations); + + out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max) +{ + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + if (!pwr_max) { + u32 val; + + rc = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, + &val); + + if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) + iwl_set_bits_mask_restricted_reg( + priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + } else + iwl_set_bits_mask_restricted_reg( + priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* stop HW */ + iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + iwl_write_restricted(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + rxq->dma_addr >> 8); + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + (priv->hw_setting.shared_phys + + offsetof(struct iwl_shared, val0)) >> 4); + + iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | + /*0x10 << 4 | */ + (RX_QUEUE_SIZE_LOG << + FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT)); + + /* + * iwl_write32(priv,CSR_INT_COAL_REG,0); + */ + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int iwl4965_kw_init(struct iwl_priv *priv) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) + goto out; + + iwl_write_restricted(priv, IWL_FH_KW_MEM_ADDR_REG, + 
priv->kw.dma_addr >> 4); + iwl_release_restricted_access(priv); +out: + spin_unlock_irqrestore(&priv->lock, flags); + return rc; +} + +static int iwl4965_kw_alloc(struct iwl_priv *priv) +{ + struct pci_dev *dev = priv->pci_dev; + struct iwl_kw *kw = &priv->kw; + + kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */ + kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr); + if (!kw->v_addr) + return -ENOMEM; + + return 0; +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +int iwl4965_set_fat_chan_info(struct iwl_priv *priv, int phymode, u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 fat_extension_channel) +{ + struct iwl_channel_info *ch_info; + + ch_info = (struct iwl_channel_info *) + iwl_get_channel_info(priv, phymode, channel); + + if (!is_channel_valid(ch_info)) + return -1; + + IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" + " %ddBm): Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(NARROW), + CHECK_AND_PRINT(DFS), + eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? + "" : "not "); + + ch_info->fat_eeprom = *eeprom_ch; + ch_info->fat_max_power_avg = eeprom_ch->max_power_avg; + ch_info->fat_curr_txpow = eeprom_ch->max_power_avg; + ch_info->fat_min_power = 0; + ch_info->fat_scan_power = eeprom_ch->max_power_avg; + ch_info->fat_flags = eeprom_ch->flags; + ch_info->fat_extension_channel = fat_extension_channel; + + return 0; +} + +static void iwl4965_kw_free(struct iwl_priv *priv) +{ + struct pci_dev *dev = priv->pci_dev; + struct iwl_kw *kw = &priv->kw; + + if (kw->v_addr) { + pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr); + memset(kw, 0, sizeof(*kw)); + } +} + +/** + * iwl4965_txq_ctx_reset - Reset TX queue context + * Destroys all DMA structures and initialise them again + * + * @param priv + * @return error code + */ +static int iwl4965_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc = 0; + int txq_id, slots_num; + unsigned long flags; + + iwl4965_kw_free(priv); + + iwl_hw_txq_ctx_free(priv); + + /* Tx CMD queue */ + rc = iwl4965_kw_alloc(priv); + if (rc) { + IWL_ERROR("Keep Warm allocation failed"); + goto error_kw; + } + + spin_lock_irqsave(&priv->lock, flags); + + rc = iwl_grab_restricted_access(priv); + if (unlikely(rc)) { + IWL_ERROR("TX reset failed"); + spin_unlock_irqrestore(&priv->lock, flags); + goto error_reset; + } + + iwl_write_restricted_reg(priv, SCD_TXFACT, 0); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + rc = iwl4965_kw_init(priv); + if (rc) { + IWL_ERROR("kw_init failed\n"); + goto error_reset; + } + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { + slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (rc) { + IWL_ERROR("Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl_hw_txq_ctx_free(priv); + error_reset: + iwl4965_kw_free(priv); + error_kw: + return rc; +} + +int iwl_hw_nic_init(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + u8 rev_id; + u32 val; + u8 val_link; + + iwl_power_init_handle(priv); + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + + iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + rc = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("Failed to init the card\n"); + return rc; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_read_restricted_reg(priv, APMG_CLK_CTRL_REG); + + iwl_write_restricted_reg(priv, APMG_CLK_CTRL_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + iwl_read_restricted_reg(priv, APMG_CLK_CTRL_REG); + + udelay(20); + + iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + + iwl_release_restricted_access(priv); + iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Determine HW type */ + rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); + if (rc) + return rc; + + IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); + + iwl4965_nic_set_pwr_src(priv, 1); + spin_lock_irqsave(&priv->lock, flags); + + if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) { + pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val); + /* Enable No Snoop field */ + pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8, + val & ~(1 << 11)); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Read the EEPROM */ + rc = iwl_eeprom_init(priv); + if (rc) + return rc; + + if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) { + IWL_ERROR("Older EEPROM detected! 
Aborting.\n"); + return -EINVAL; + } + + pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); + + /* disable L1 entry -- workaround for pre-B1 */ + pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); + + spin_lock_irqsave(&priv->lock, flags); + + /* set CSR_HW_CONFIG_REG for uCode use */ + + iwl_set_bit(priv, CSR_SW_VER, CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R | + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + rc = iwl_grab_restricted_access(priv); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("Failed to init the card\n"); + return rc; + } + + iwl_read_restricted_reg(priv, APMG_PS_CTRL_REG); + iwl_set_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_card_show_info(priv); + + /* end nic_init */ + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_rx_queue_alloc(priv); + if (rc) { + IWL_ERROR("Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl_rx_queue_reset(priv, rxq); + + iwl_rx_replenish(priv); + + iwl4965_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + + spin_unlock_irqrestore(&priv->lock, flags); + rc = iwl4965_txq_ctx_reset(priv); + if (rc) + return rc; + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n"); + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n"); + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +int iwl_hw_nic_stop_master(struct iwl_priv *priv) +{ + int rc = 0; + u32 reg_val; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* set stop master bit */ + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + reg_val = iwl_read32(priv, CSR_GP_CNTRL); + + if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE == + (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE)) + IWL_DEBUG_INFO("Card in power save, master is already " + "stopped\n"); + else { + rc = iwl_poll_bit(priv, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("stop master\n"); + + return rc; +} + +void iwl_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + + int txq_id; + unsigned long flags; + + /* reset TFD queues */ + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { + spin_lock_irqsave(&priv->lock, flags); + if (iwl_grab_restricted_access(priv)) { + spin_unlock_irqrestore(&priv->lock, flags); + continue; + } + + iwl_write_restricted(priv, + IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), + 0x0); + iwl_poll_restricted_bit(priv, IWL_FH_TSSR_TX_STATUS_REG, + IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE + (txq_id), 200); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + } + + iwl_hw_txq_ctx_free(priv); +} + +int iwl_hw_nic_reset(struct iwl_priv *priv) +{ + int rc = 0; + unsigned long flags; + + iwl_hw_nic_stop_master(priv); + + spin_lock_irqsave(&priv->lock, flags); + + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + iwl_set_bit(priv, CSR_GP_CNTRL, 
CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + rc = iwl_poll_bit(priv, CSR_RESET, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25); + + udelay(10); + + rc = iwl_grab_restricted_access(priv); + if (!rc) { + iwl_write_restricted_reg(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + + udelay(10); + + iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + + iwl_release_restricted_access(priv); + } + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; + +} + +#define REG_RECALIB_PERIOD (60) + +/** + * iwl4965_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to queue the statistics_work + * in work_queue context (v. softirq) + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. However, + * we can't send the statistics command from softirq context (which + * is the context which timers run at) so we have to queue off the + * statistics_work to actually send the command to the hardware. + */ +static void iwl4965_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + queue_work(priv->workqueue, &priv->statistics_work); +} + +/** + * iwl4965_bg_statistics_work - Send the statistics request to the hardware. + * + * This is queued by iwl_bg_statistics_periodic. + */ +static void iwl4965_bg_statistics_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + statistics_work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_send_statistics_request(priv); + mutex_unlock(&priv->mutex); +} + +#define CT_LIMIT_CONST 259 +#define TM_CT_KILL_THRESHOLD 110 + +void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + u32 R1, R2, R3; + u32 temp_th; + u32 crit_temperature; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) { + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + } else { + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + } + + temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD); + + crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2; + cmd.critical_temperature_R = cpu_to_le32(crit_temperature); + rc = iwl_send_cmd_pdu(priv, + REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd); + if (rc) + IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n"); +} + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + +/* "false alarms" are signals that our DSP tries to lock onto, + * but then determines that they are either noise, or transmissions + * from a distant wireless network (also "noise", really) that get + * "stepped on" by stronger 
transmissions within our own network. + * This algorithm attempts to set a sensitivity level that is high + * enough to receive all of our own network traffic, but not so + * high that our DSP gets too busy trying to lock onto non-network + * activity/noise. */ +static int iwl4965_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) +{ + u32 max_nrg_cck = 0; + int i = 0; + u8 max_silence_rssi = 0; + u32 silence_ref = 0; + u8 silence_rssi_a = 0; + u8 silence_rssi_b = 0; + u8 silence_rssi_c = 0; + u32 val; + + /* "false_alarms" values below are cross-multiplications to assess the + * numbers of false alarms within the measured period of actual Rx + * (Rx is off when we're txing), vs the min/max expected false alarms + * (some should be expected if rx is sensitive enough) in a + * hypothetical listening period of 200 time units (TU), 204.8 msec: + * + * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time + * + * */ + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; + u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + + data = &(priv->sensitivity_data); + + data->nrg_auto_corr_silence_diff = 0; + + /* Find max silence rssi among all 3 receivers. + * This is background noise, which may include transmissions from other + * networks, measured during silence before our network's beacon */ + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER)>>8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER)>>8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER)>>8); + + val = max(silence_rssi_b, silence_rssi_c); + max_silence_rssi = max(silence_rssi_a, (u8) val); + + /* Store silence rssi in 20-beacon history table */ + data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; + data->nrg_silence_idx++; + if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) + data->nrg_silence_idx = 0; + + /* Find max silence rssi across 20 beacon history */ + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { + val = data->nrg_silence_rssi[i]; + silence_ref = max(silence_ref, val); + } + IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); + + /* Find max rx energy (min value!) among all 3 receivers, + * measured during beacon frame. + * Save it in 10-beacon history table. */ + i = data->nrg_energy_idx; + val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); + data->nrg_value[i] = min(rx_info->beacon_energy_a, val); + + data->nrg_energy_idx++; + if (data->nrg_energy_idx >= 10) + data->nrg_energy_idx = 0; + + /* Find min rx energy (max value) across 10 beacon history. + * This is the minimum signal level that we want to receive well. + * Add backoff (margin so we don't miss slightly lower energy frames). + * This establishes an upper bound (min value) for energy threshold. */ + max_nrg_cck = data->nrg_value[0]; + for (i = 1; i < 10; i++) + max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); + max_nrg_cck += 6; + + IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); + + /* Count number of consecutive beacons with fewer-than-desired + * false alarms. 
*/ + if (false_alarms < min_false_alarms) + data->num_in_cck_no_fa++; + else + data->num_in_cck_no_fa = 0; + IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + IWL_DEBUG_CALIB("norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB("... reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; + + if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) { + /* Store for "fewer than desired" on later beacon */ + data->nrg_silence_ref = silence_ref; + + /* increase energy threshold (reduce nrg value) + * to decrease sensitivity */ + if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK)) + data->nrg_th_cck = data->nrg_th_cck + - NRG_STEP_CCK; + } + + /* increase auto_corr values to decrease sensitivity */ + if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) + data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; + else { + val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; + data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val); + } + val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val); + + /* Else if we got fewer than desired, increase sensitivity */ + } else if (false_alarms < min_false_alarms) { + data->nrg_curr_state = IWL_FA_TOO_FEW; + + /* Compare silence level with silence level for most recent + * healthy number or too many false alarms */ + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; + + IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); + + /* Increase value to increase sensitivity, but only if: + * 1a) previous beacon did *not* have *too many* false alarms + * 1b) AND there's a significant difference in Rx levels + * from a previous beacon with too many, or healthy # FAs + * OR 2) We've seen a lot of beacons (100) with too few + * false alarms */ + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + IWL_DEBUG_CALIB("... increasing sensitivity\n"); + /* Increase nrg value to increase sensitivity */ + val = data->nrg_th_cck + NRG_STEP_CCK; + data->nrg_th_cck = min((u32)NRG_MIN_CCK, val); + + /* Decrease auto_corr values to increase sensitivity */ + val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; + data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val); + + val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + max((u32)AUTO_CORR_MIN_CCK_MRC, val); + + } else + IWL_DEBUG_CALIB("... but not changing sensitivity\n"); + + /* Else we got a healthy number of false alarms, keep status quo */ + } else { + IWL_DEBUG_CALIB(" FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; + + /* Store for use in "fewer than desired" with later beacon */ + data->nrg_silence_ref = silence_ref; + + /* If previous beacon had too many false alarms, + * give it some extra margin by reducing sensitivity again + * (but don't go below measured energy of desired Rx) */ + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB("... increasing margin\n"); + data->nrg_th_cck -= NRG_MARGIN; + } + } + + /* Make sure the energy threshold does not go above the measured + * energy of the desired Rx signals (reduced by backoff margin), + * or else we might start missing Rx frames. 
+ * Lower value is higher energy, so we use max()! + */ + data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); + IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck); + + data->nrg_prev_state = data->nrg_curr_state; + + return 0; +} + + +static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) +{ + u32 val; + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; + u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + + data = &(priv->sensitivity_data); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + + IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); + + val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + min((u32)AUTO_CORR_MAX_OFDM, val); + + val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + min((u32)AUTO_CORR_MAX_OFDM_MRC, val); + + val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + min((u32)AUTO_CORR_MAX_OFDM_X1, val); + + val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val); + } + + /* Else if we got fewer than desired, increase sensitivity */ + else if (false_alarms < min_false_alarms) { + + IWL_DEBUG_CALIB("norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); + + val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + max((u32)AUTO_CORR_MIN_OFDM, val); + + val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + max((u32)AUTO_CORR_MIN_OFDM_MRC, val); + + val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + max((u32)AUTO_CORR_MIN_OFDM_X1, val); + + val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val); + } + + else + IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); + + return 0; +} + +static int iwl_sensitivity_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + /* We didn't cache the SKB; let the caller free it */ + return 1; +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags) +{ + int rc = 0; + struct iwl_sensitivity_cmd cmd ; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_sensitivity_cmd), + .meta.flags = flags, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + 
cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + __constant_cpu_to_le16(190); + cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + __constant_cpu_to_le16(390); + cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = + __constant_cpu_to_le16(62); + + IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); + + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + if (flags & CMD_ASYNC) + cmd_out.meta.u.callback = iwl_sensitivity_callback; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); + + rc = iwl_send_cmd(priv, &cmd_out); + if (!rc) { + IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n"); + return rc; + } + + return 0; +} + +void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force) +{ + int rc = 0; + int i; + struct iwl_sensitivity_data *data = NULL; + + IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n"); + + if (force) + memset(&(priv->sensitivity_tbl[0]), 0, + sizeof(u16)*HD_TABLE_SIZE); + + /* Clear driver's sensitivity algo data */ + data = &(priv->sensitivity_data); + memset(data, 0, sizeof(struct iwl_sensitivity_data)); + + data->num_in_cck_no_fa = 0; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_silence_ref = 0; + data->nrg_silence_idx = 0; + data->nrg_energy_idx = 0; + + for (i = 0; i < 10; i++) + data->nrg_value[i] = 0; + + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) + data->nrg_silence_rssi[i] = 0; + + data->auto_corr_ofdm = 90; + data->auto_corr_ofdm_mrc = 170; + data->auto_corr_ofdm_x1 = 105; + data->auto_corr_ofdm_mrc_x1 = 220; + data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; + data->auto_corr_cck_mrc = 200; + data->nrg_th_cck = 100; + data->nrg_th_ofdm = 100; + + data->last_bad_plcp_cnt_ofdm = 0; + data->last_fa_cnt_ofdm = 0; + data->last_bad_plcp_cnt_cck = 0; + data->last_fa_cnt_cck = 0; + + /* Clear prior Sensitivity command data to force send to uCode */ + if (force) + memset(&(priv->sensitivity_tbl[0]), 0, + sizeof(u16)*HD_TABLE_SIZE); + + rc |= iwl4965_sensitivity_write(priv, flags); + IWL_DEBUG_CALIB("<<return 0x%X\n", rc); + + return; +} + + +/* Reset differential Rx gains in NIC to prepare for chain noise calibration. + * Called after every association, but this runs only once! + * ... once chain noise is calibrated the first time, it's good forever. 
*/ +void iwl4965_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = NULL; + int rc = 0; + + data = &(priv->chain_noise_data); + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { + struct iwl_calibration_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = 0; + cmd.diff_gain_b = 0; + cmd.diff_gain_c = 0; + rc = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + msleep(4); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); + } + return; +} + +/* + * Accumulate 20 beacons of signal and noise statistics for each of + * 3 receivers/antennas/rx-chains, then figure out: + * 1) Which antennas are connected. + * 2) Differential rx gain settings to balance the 3 receivers. + */ +static void iwl4965_noise_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *stat_resp) +{ + struct iwl_chain_noise_data *data = NULL; + int rc = 0; + + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_sig_a; + u32 chain_sig_b; + u32 chain_sig_c; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 max_average_sig; + u16 max_average_sig_antenna_i; + u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; + u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; + u16 i = 0; + u16 chan_num = INITIALIZATION_VALUE; + u32 band = INITIALIZATION_VALUE; + u32 active_chains = 0; + unsigned long flags; + struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); + + data = &(priv->chain_noise_data); + + /* Accumulate just the first 20 beacons after the first association, + * then we're done forever. */ + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB("Wait for noise calib reset\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(" << Interference data unavailable\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1; + chan_num = le16_to_cpu(priv->staging_rxon.channel); + + /* Make sure we accumulate data for just the associated channel + * (even if scanning). 
*/ + if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) || + ((STATISTICS_REPLY_FLG_BAND_24G_MSK == + (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) { + IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n", + chan_num, band); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Accumulate beacon statistics values across 20 beacons */ + chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & + IN_BAND_FILTER; + chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & + IN_BAND_FILTER; + chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & + IN_BAND_FILTER; + + chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; + chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; + chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; + + spin_unlock_irqrestore(&priv->lock, flags); + + data->beacon_count++; + + data->chain_noise_a = (chain_noise_a + data->chain_noise_a); + data->chain_noise_b = (chain_noise_b + data->chain_noise_b); + data->chain_noise_c = (chain_noise_c + data->chain_noise_c); + + data->chain_signal_a = (chain_sig_a + data->chain_signal_a); + data->chain_signal_b = (chain_sig_b + data->chain_signal_b); + data->chain_signal_c = (chain_sig_c + data->chain_signal_c); + + IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band, + data->beacon_count); + IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n", + chain_sig_a, chain_sig_b, chain_sig_c); + IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n", + chain_noise_a, chain_noise_b, chain_noise_c); + + /* If this is the 20th beacon, determine: + * 1) Disconnected antennas (using signal strengths) + * 2) Differential gain (using silence noise) to balance receivers */ + if (data->beacon_count == CAL_NUM_OF_BEACONS) { + + /* Analyze signal for disconnected antenna */ + average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS; + average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS; + average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS; + + if (average_sig[0] >= average_sig[1]) { + max_average_sig = average_sig[0]; + max_average_sig_antenna_i = 0; + active_chains = (1 << max_average_sig_antenna_i); + } else { + max_average_sig = average_sig[1]; + max_average_sig_antenna_i = 1; + active_chains = (1 << max_average_sig_antenna_i); + } + + if (average_sig[2] >= max_average_sig) { + max_average_sig = average_sig[2]; + max_average_sig_antenna_i = 2; + active_chains = (1 << max_average_sig_antenna_i); + } + + IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); + + /* Compare signal strengths for all 3 receivers. */ + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (i != max_average_sig_antenna_i) { + s32 rssi_delta = (max_average_sig - + average_sig[i]); + + /* If signal is very weak, compared with + * strongest, mark it as disconnected. */ + if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) + data->disconn_array[i] = 1; + else + active_chains |= (1 << i); + IWL_DEBUG_CALIB("i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); + } + } + + /*If both chains A & B are disconnected - + * connect B and leave A as is */ + if (data->disconn_array[CHAIN_A] && + data->disconn_array[CHAIN_B]) { + data->disconn_array[CHAIN_B] = 0; + active_chains |= (1 << CHAIN_B); + IWL_DEBUG_CALIB("both A & B chains are disconnected! 
" + "W/A - declare B as connected\n"); + } + + IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", + active_chains); + + /* Save for use within RXON, TX, SCAN commands, etc. */ + priv->valid_antenna = active_chains; + + /* Analyze noise for rx balance */ + average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); + average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); + average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS); + + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (!(data->disconn_array[i]) && + (average_noise[i] <= min_average_noise)) { + /* This means that chain i is active and has + * lower noise values so far: */ + min_average_noise = average_noise[i]; + min_average_noise_antenna_i = i; + } + } + + data->delta_gain_code[min_average_noise_antenna_i] = 0; + + IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n", + average_noise[0], average_noise[1], + average_noise[2]); + + IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n", + min_average_noise, min_average_noise_antenna_i); + + for (i = 0; i < NUM_RX_CHAINS; i++) { + s32 delta_g = 0; + + if (!(data->disconn_array[i]) && + (data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { + delta_g = average_noise[i] - min_average_noise; + data->delta_gain_code[i] = (u8)((delta_g * + 10) / 15); + if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE < + data->delta_gain_code[i]) + data->delta_gain_code[i] = + CHAIN_NOISE_MAX_DELTA_GAIN_CODE; + + data->delta_gain_code[i] = + (data->delta_gain_code[i] | (1 << 2)); + } else + data->delta_gain_code[i] = 0; + } + IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n", + data->delta_gain_code[0], + data->delta_gain_code[1], + data->delta_gain_code[2]); + + /* Differential gain gets sent to uCode only once */ + if (!data->radio_write) { + struct iwl_calibration_cmd cmd; + data->radio_write = 1; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = data->delta_gain_code[0]; + cmd.diff_gain_b = data->delta_gain_code[1]; + cmd.diff_gain_c = data->delta_gain_code[2]; + rc = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + if (rc) + IWL_DEBUG_CALIB("fail sending cmd " + "REPLY_PHY_CALIBRATION_CMD \n"); + + /* TODO we might want recalculate + * rx_chain in rxon cmd */ + + /* Mark so we run this algo only once! 
*/ + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + } + return; +} + +static void iwl4965_sensitivity_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *resp) +{ + int rc = 0; + u32 rx_enable_time; + u32 fa_cck; + u32 fa_ofdm; + u32 bad_plcp_cck; + u32 bad_plcp_ofdm; + u32 norm_fa_ofdm; + u32 norm_fa_cck; + struct iwl_sensitivity_data *data = NULL; + struct statistics_rx_non_phy *rx_info = &(resp->rx.general); + struct statistics_rx *statistics = &(resp->rx); + unsigned long flags; + struct statistics_general_data statis; + + data = &(priv->sensitivity_data); + + if (!iwl_is_associated(priv)) { + IWL_DEBUG_CALIB("<< - not associated\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB("<< invalid data.\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Extract Statistics: */ + rx_enable_time = le32_to_cpu(rx_info->channel_load); + fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt); + fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err); + bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err); + + statis.beacon_silence_rssi_a = + le32_to_cpu(statistics->general.beacon_silence_rssi_a); + statis.beacon_silence_rssi_b = + le32_to_cpu(statistics->general.beacon_silence_rssi_b); + statis.beacon_silence_rssi_c = + le32_to_cpu(statistics->general.beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(statistics->general.beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(statistics->general.beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(statistics->general.beacon_energy_c); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time); + + if (!rx_enable_time) { + IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n"); + return; + } + + /* These statistics increase monotonically, and do not reset + * at each beacon. Calculate difference from last value, or just + * use the new statistics value if it has reset or wrapped around. 
*/ + if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) + data->last_bad_plcp_cnt_cck = bad_plcp_cck; + else { + bad_plcp_cck -= data->last_bad_plcp_cnt_cck; + data->last_bad_plcp_cnt_cck += bad_plcp_cck; + } + + if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) + data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; + else { + bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; + data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; + } + + if (data->last_fa_cnt_ofdm > fa_ofdm) + data->last_fa_cnt_ofdm = fa_ofdm; + else { + fa_ofdm -= data->last_fa_cnt_ofdm; + data->last_fa_cnt_ofdm += fa_ofdm; + } + + if (data->last_fa_cnt_cck > fa_cck) + data->last_fa_cnt_cck = fa_cck; + else { + fa_cck -= data->last_fa_cnt_cck; + data->last_fa_cnt_cck += fa_cck; + } + + /* Total aborted signal locks */ + norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; + norm_fa_cck = fa_cck + bad_plcp_cck; + + IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + + iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC); + + return; +} + +static void iwl4965_bg_sensitivity_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + sensitivity_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl4965_noise_calibration(priv, &priv->statistics); + + if (priv->sensitivity_data.state == + IWL_SENS_CALIB_NEED_REINIT) { + iwl4965_init_sensitivity(priv, CMD_ASYNC, 0); + priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED; + } else + iwl4965_sensitivity_calibration(priv, + &priv->statistics); + } + + mutex_unlock(&priv->mutex); + return; +} +#endif /*CONFIG_IWLWIFI_SENSITIVITY*/ + +static void iwl4965_bg_txpower_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + txpower_work); + + /* If a scan happened to start before we got here + * then just return; the statistics notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + /* Regardless of whether we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + iwl_hw_reg_send_txpower(priv); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + priv->last_temperature = priv->temperature; + + mutex_unlock(&priv->mutex); +} + +/* + * Acquire priv->lock before calling this function! + */ +static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) +{ + iwl_write_restricted(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_restricted_reg(priv, SCD_QUEUE_RDPTR(txq_id), index); +} + +/* + * Acquire priv->lock before calling this function!
+ */ +static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; + + iwl_write_restricted_reg(priv, SCD_QUEUE_STATUS_BITS(txq_id), + (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + +static const u16 default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL_CMD_FIFO_NUM, + IWL_TX_FIFO_HCCA_1, + IWL_TX_FIFO_HCCA_2 +}; + +static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id) +{ + set_bit(txq_id, &priv->txq_ctx_active_msk); +} + +static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) +{ + clear_bit(txq_id, &priv->txq_ctx_active_msk); +} + +int iwl4965_alive_notify(struct iwl_priv *priv) +{ + u32 a; + int i = 0; + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); + for (i = 0; i < NUM_RX_CHAINS; i++) + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; +#endif /* CONFIG_IWLWIFI_SENSITIVITY*/ + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + priv->scd_base_addr = iwl_read_restricted_reg(priv, SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) + iwl_write_restricted_mem(priv, a, 0); + for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) + iwl_write_restricted_mem(priv, a, 0); + for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4) + iwl_write_restricted_mem(priv, a, 0); + + iwl_write_restricted_reg(priv, SCD_DRAM_BASE_ADDR, + (priv->hw_setting.shared_phys + + offsetof(struct iwl_shared, queues_byte_cnt_tbls)) >> 10); + iwl_write_restricted_reg(priv, SCD_QUEUECHAIN_SEL, 0); + + /* initiate the queues */ + for (i = 0; i < priv->hw_setting.max_txq_num; i++) { + iwl_write_restricted_reg(priv, SCD_QUEUE_RDPTR(i), 0); + iwl_write_restricted(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + iwl_write_restricted_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(i), + (SCD_WIN_SIZE << + SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + iwl_write_restricted_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + (SCD_FRAME_LIMIT << + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + } + iwl_write_restricted_reg(priv, SCD_INTERRUPT_MASK, + (1 << priv->hw_setting.max_txq_num) - 1); + + iwl_write_restricted_reg(priv, SCD_TXFACT, + SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); + + iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); + /* map qos queues to fifos one-to-one */ + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { + int ac = default_queue_to_tx_fifo[i]; + iwl4965_txq_ctx_activate(priv, i); + iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); + } + + iwl_release_restricted_access(priv); +
spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl_hw_set_hw_setting(struct iwl_priv *priv) +{ + priv->hw_setting.shared_virt = + pci_alloc_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + &priv->hw_setting.shared_phys); + + if (!priv->hw_setting.shared_virt) + return -1; + + memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl_shared)); + + priv->hw_setting.max_txq_num = iwl_param_queues_num; + priv->hw_setting.ac_queue_count = AC_NUM; + + priv->hw_setting.cck_flag = RATE_MCS_CCK_MSK; + priv->hw_setting.tx_cmd_len = sizeof(struct iwl_tx_cmd); + priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; + + priv->hw_setting.max_stations = IWL4965_STATION_COUNT; + priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID; + return 0; +} + +/** + * iwl_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) + iwl_tx_queue_free(priv, &priv->txq[txq_id]); + + iwl4965_kw_free(priv); +} + +/** + * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used] + * + * Does NOT advance any indexes + */ +int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; + struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter = 0; + int index, is_odd; + + /* classify bd */ + if (txq->q.id == IWL_CMD_QUEUE_NUM) + /* nothing to cleanup after for host commands */ + return 0; + + /* sanity check */ + counter = IWL_GET_BITS(*bd, num_tbs); + if (counter > MAX_NUM_OF_TBS) { + IWL_ERROR("Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return 0; + } + + /* unmap chunks if any */ + + for (i = 0; i < counter; i++) { + index = i / 2; + is_odd = i & 0x1; + + if (is_odd) + pci_unmap_single( + dev, + IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) | + (IWL_GET_BITS(bd->pa[index], + tb2_addr_hi20) << 16), + IWL_GET_BITS(bd->pa[index], tb2_len), + PCI_DMA_TODEVICE); + + else if (i > 0) + pci_unmap_single(dev, + le32_to_cpu(bd->pa[index].tb1_addr), + IWL_GET_BITS(bd->pa[index], tb1_len), + PCI_DMA_TODEVICE); + + if (txq->txb[txq->q.last_used].skb[i]) { + struct sk_buff *skb = txq->txb[txq->q.last_used].skb[i]; + + dev_kfree_skb(skb); + txq->txb[txq->q.last_used].skb[i] = NULL; + } + } + return 0; +} + +int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + IWL_ERROR("TODO: Implement iwl_hw_reg_set_txpower!\n"); + return -EINVAL; +} + +static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) +{ + s32 sign = 1; + + if (num < 0) { + sign = -sign; + num = -num; + } + if (denom < 0) { + sign = -sign; + denom = -denom; + } + *res = 1; + *res = ((num * 2 + denom) / (denom * 2)) * sign; + + return 1; +} + +static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, + s32 current_voltage) +{ + s32 comp = 0; + + if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || + (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) + return 0; + + iwl4965_math_div_round(current_voltage - eeprom_voltage, + TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); + + if (current_voltage > eeprom_voltage) + comp *= 2; + if ((comp < -2) || (comp > 2)) + comp = 0; + + return comp; +} + +static const struct iwl_channel_info * +iwl4965_get_channel_txpower_info(struct iwl_priv *priv, u8 phymode, 
u16 channel) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + + if (!is_channel_valid(ch_info)) + return NULL; + + return ch_info; +} + +static s32 iwl4965_get_tx_atten_grp(u16 channel) +{ + if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) + return CALIB_CH_GROUP_5; + + if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) + return CALIB_CH_GROUP_1; + + if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) + return CALIB_CH_GROUP_2; + + if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) + return CALIB_CH_GROUP_3; + + if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) + return CALIB_CH_GROUP_4; + + IWL_ERROR("Can't find txatten group for channel %d.\n", channel); + return -1; +} + +static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) +{ + s32 b = -1; + + for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { + if (priv->eeprom.calib_info.band_info[b].ch_from == 0) + continue; + + if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) + && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) + break; + } + + return b; +} + +static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + s32 val; + + if (x2 == x1) + return y1; + else { + iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); + return val + y2; + } +} + +static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, + struct iwl_eeprom_calib_ch_info *chan_info) +{ + s32 s = -1; + u32 c; + u32 m; + const struct iwl_eeprom_calib_measure *m1; + const struct iwl_eeprom_calib_measure *m2; + struct iwl_eeprom_calib_measure *omeas; + u32 ch_i1; + u32 ch_i2; + + s = iwl4965_get_sub_band(priv, channel); + if (s >= EEPROM_TX_POWER_BANDS) { + IWL_ERROR("Tx Power can not find channel %d ", channel); + return -1; + } + + ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; + ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; + chan_info->ch_num = (u8) channel; + + IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", + channel, s, ch_i1, ch_i2); + + for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { + for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { + m1 = &(priv->eeprom.calib_info.band_info[s].ch1. + measurements[c][m]); + m2 = &(priv->eeprom.calib_info.band_info[s].ch2. 
+ measurements[c][m]); + omeas = &(chan_info->measurements[c][m]); + + omeas->actual_pow = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->actual_pow, + ch_i2, + m2->actual_pow); + omeas->gain_idx = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->gain_idx, ch_i2, + m2->gain_idx); + omeas->temperature = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->temperature, + ch_i2, + m2->temperature); + omeas->pa_det = + (s8) iwl4965_interpolate_value(channel, ch_i1, + m1->pa_det, ch_i2, + m2->pa_det); + + IWL_DEBUG_TXPOWER + ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, + m1->actual_pow, m2->actual_pow, omeas->actual_pow); + IWL_DEBUG_TXPOWER + ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, + m1->gain_idx, m2->gain_idx, omeas->gain_idx); + IWL_DEBUG_TXPOWER + ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, + m1->pa_det, m2->pa_det, omeas->pa_det); + IWL_DEBUG_TXPOWER + ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m, + m1->temperature, m2->temperature, + omeas->temperature); + } + } + + return 0; +} + +/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, + * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ +static s32 back_off_table[] = { + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ + 10 /* CCK */ +}; + +/* Thermal compensation values for txpower for various frequency ranges ... + * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ +static struct iwl_txpower_comp_entry { + s32 degrees_per_05db_a; + s32 degrees_per_05db_a_denom; +} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { + {9, 2}, /* group 0 5.2, ch 34-43 */ + {4, 1}, /* group 1 5.2, ch 44-70 */ + {4, 1}, /* group 2 5.2, ch 71-124 */ + {4, 1}, /* group 3 5.2, ch 125-200 */ + {3, 1} /* group 4 2.4, ch all */ +}; + +static s32 get_min_power_index(s32 rate_power_index, u32 band) +{ + if (!band) { + if ((rate_power_index & 7) <= 4) + return MIN_TX_GAIN_INDEX_52GHZ_EXT; + } + return MIN_TX_GAIN_INDEX; +} + +struct gain_entry { + u8 dsp; + u8 radio; +}; + +static const struct gain_entry gain_table[2][108] = { + /* 5.2GHz power gain index table */ + { + {123, 0x3F}, /* highest txpower */ + {117, 0x3F}, + {110, 0x3F}, + {104, 0x3F}, + {98, 0x3F}, + {110, 0x3E}, + {104, 0x3E}, + {98, 0x3E}, + {110, 0x3D}, + {104, 0x3D}, + {98, 0x3D}, + {110, 0x3C}, + {104, 0x3C}, + {98, 0x3C}, + {110, 0x3B}, + {104, 0x3B}, + {98, 0x3B}, + {110, 0x3A}, + {104, 0x3A}, + {98, 0x3A}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x25}, + {104, 0x25}, + {98, 0x25}, + {110, 0x24}, + {104, 0x24}, + {98, 0x24}, + {110, 0x23}, + {104, 0x23}, + {98, 0x23}, + {110, 0x22}, + {104, 0x18}, + {98, 0x18}, + {110, 0x17}, + {104, 0x17}, + {98, 0x17}, + {110, 0x16}, + {104, 0x16}, + {98, 0x16}, + {110, 0x15}, + {104, 0x15}, + {98, 0x15}, + {110, 0x14}, + {104, 0x14}, + {98, 0x14}, + {110, 0x13}, + {104, 0x13}, + {98, 0x13}, + {110, 0x12}, + {104, 0x08}, + {98, 0x08}, + {110, 0x07}, + {104, 0x07}, + {98, 0x07}, + {110, 0x06}, + {104, 
0x06}, + {98, 0x06}, + {110, 0x05}, + {104, 0x05}, + {98, 0x05}, + {110, 0x04}, + {104, 0x04}, + {98, 0x04}, + {110, 0x03}, + {104, 0x03}, + {98, 0x03}, + {110, 0x02}, + {104, 0x02}, + {98, 0x02}, + {110, 0x01}, + {104, 0x01}, + {98, 0x01}, + {110, 0x00}, + {104, 0x00}, + {98, 0x00}, + {93, 0x00}, + {88, 0x00}, + {83, 0x00}, + {78, 0x00}, + }, + /* 2.4GHz power gain index table */ + { + {110, 0x3f}, /* highest txpower */ + {104, 0x3f}, + {98, 0x3f}, + {110, 0x3e}, + {104, 0x3e}, + {98, 0x3e}, + {110, 0x3d}, + {104, 0x3d}, + {98, 0x3d}, + {110, 0x3c}, + {104, 0x3c}, + {98, 0x3c}, + {110, 0x3b}, + {104, 0x3b}, + {98, 0x3b}, + {110, 0x3a}, + {104, 0x3a}, + {98, 0x3a}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x6}, + {104, 0x6}, + {98, 0x6}, + {110, 0x5}, + {104, 0x5}, + {98, 0x5}, + {110, 0x4}, + {104, 0x4}, + {98, 0x4}, + {110, 0x3}, + {104, 0x3}, + {98, 0x3}, + {110, 0x2}, + {104, 0x2}, + {98, 0x2}, + {110, 0x1}, + {104, 0x1}, + {98, 0x1}, + {110, 0x0}, + {104, 0x0}, + {98, 0x0}, + {97, 0}, + {96, 0}, + {95, 0}, + {94, 0}, + {93, 0}, + {92, 0}, + {91, 0}, + {90, 0}, + {89, 0}, + {88, 0}, + {87, 0}, + {86, 0}, + {85, 0}, + {84, 0}, + {83, 0}, + {82, 0}, + {81, 0}, + {80, 0}, + {79, 0}, + {78, 0}, + {77, 0}, + {76, 0}, + {75, 0}, + {74, 0}, + {73, 0}, + {72, 0}, + {71, 0}, + {70, 0}, + {69, 0}, + {68, 0}, + {67, 0}, + {66, 0}, + {65, 0}, + {64, 0}, + {63, 0}, + {62, 0}, + {61, 0}, + {60, 0}, + {59, 0}, + } +}; + +static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, + u8 is_fat, u8 ctrl_chan_high, + struct iwl_tx_power_db *tx_power_tbl) +{ + u8 saturation_power; + s32 target_power; + s32 user_target_power; + s32 power_limit; + s32 current_temp; + s32 reg_limit; + s32 current_regulatory; + s32 txatten_grp = CALIB_CH_GROUP_MAX; + int i; + int c; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_eeprom_calib_ch_info ch_eeprom_info; + const struct iwl_eeprom_calib_measure *measurement; + s16 voltage; + s32 init_voltage; + s32 voltage_compensation; + s32 degrees_per_05db_num; + s32 degrees_per_05db_denom; + s32 factory_temp; + s32 temperature_comp[2]; + s32 factory_gain_index[2]; + s32 factory_actual_pwr[2]; + s32 power_index; + + /* Sanity check requested level (dBm) */ + if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) { + IWL_WARNING("Requested user TXPOWER %d below limit.\n", + priv->user_txpower_limit); + return -EINVAL; + } + if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) { + IWL_WARNING("Requested user TXPOWER %d above limit.\n", + priv->user_txpower_limit); + return -EINVAL; + } + + /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units + * are used for indexing into txpower table) */ + user_target_power = 2 * priv->user_txpower_limit; + + /* Get current (RXON) channel, band, width */ + ch_info = + iwl4965_get_channel_txpower_info(priv, priv->phymode, channel); + + IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, + is_fat); + + if (!ch_info) + return -EINVAL; + + /* get txatten group, used to select 1) thermal txpower adjustment + * and 2) mimo txpower balance between Tx chains. 
*/ + txatten_grp = iwl4965_get_tx_atten_grp(channel); + if (txatten_grp < 0) + return -EINVAL; + + IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n", + channel, txatten_grp); + + if (is_fat) { + if (ctrl_chan_high) + channel -= 2; + else + channel += 2; + } + + /* hardware txpower limits ... + * saturation (clipping distortion) txpowers are in half-dBm */ + if (band) + saturation_power = priv->eeprom.calib_info.saturation_power24; + else + saturation_power = priv->eeprom.calib_info.saturation_power52; + + if (saturation_power < IWL_TX_POWER_SATURATION_MIN || + saturation_power > IWL_TX_POWER_SATURATION_MAX) { + if (band) + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24; + else + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52; + } + + /* regulatory txpower limits ... reg_limit values are in half-dBm, + * max_power_avg values are in dBm, convert * 2 */ + if (is_fat) + reg_limit = ch_info->fat_max_power_avg * 2; + else + reg_limit = ch_info->max_power_avg * 2; + + if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) || + (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) { + if (band) + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24; + else + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52; + } + + /* Interpolate txpower calibration values for this channel, + * based on factory calibration tests on spaced channels. */ + iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); + + /* calculate tx gain adjustment based on power supply voltage */ + voltage = priv->eeprom.calib_info.voltage; + init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); + voltage_compensation = + iwl4965_get_voltage_compensation(voltage, init_voltage); + + IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", + init_voltage, + voltage, voltage_compensation); + + /* get current temperature (Celsius) */ + current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN); + current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX); + current_temp = KELVIN_TO_CELSIUS(current_temp); + + /* select thermal txpower adjustment params, based on channel group + * (same frequency group used for mimo txatten adjustment) */ + degrees_per_05db_num = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; + degrees_per_05db_denom = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; + + /* get per-chain txpower values from factory measurements */ + for (c = 0; c < 2; c++) { + measurement = &ch_eeprom_info.measurements[c][1]; + + /* txgain adjustment (in half-dB steps) based on difference + * between factory and current temperature */ + factory_temp = measurement->temperature; + iwl4965_math_div_round((current_temp - factory_temp) * + degrees_per_05db_denom, + degrees_per_05db_num, + &temperature_comp[c]); + + factory_gain_index[c] = measurement->gain_idx; + factory_actual_pwr[c] = measurement->actual_pow; + + IWL_DEBUG_TXPOWER("chain = %d\n", c); + IWL_DEBUG_TXPOWER("fctry tmp %d, " + "curr tmp %d, comp %d steps\n", + factory_temp, current_temp, + temperature_comp[c]); + + IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n", + factory_gain_index[c], + factory_actual_pwr[c]); + } + + /* for each of 33 bit-rates (including 1 for CCK) */ + for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) { + u8 is_mimo_rate; + union iwl_tx_power_dual_stream tx_power; + + /* for mimo, reduce each chain's txpower by half + * (3dB, 6 steps), so total output power is regulatory + * compliant. 
*/ + if (i & 0x8) { + current_regulatory = reg_limit - + IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; + is_mimo_rate = 1; + } else { + current_regulatory = reg_limit; + is_mimo_rate = 0; + } + + /* find txpower limit, either hardware or regulatory */ + power_limit = saturation_power - back_off_table[i]; + if (power_limit > current_regulatory) + power_limit = current_regulatory; + + /* reduce user's txpower request if necessary + * for this rate on this channel */ + target_power = user_target_power; + if (target_power > power_limit) + target_power = power_limit; + + IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", + i, saturation_power - back_off_table[i], + current_regulatory, user_target_power, + target_power); + + /* for each of 2 Tx chains (radio transmitters) */ + for (c = 0; c < 2; c++) { + s32 atten_value; + + if (is_mimo_rate) + atten_value = + (s32)le32_to_cpu(priv->card_alive_init. + tx_atten[txatten_grp][c]); + else + atten_value = 0; + + /* calculate index; higher index means lower txpower */ + power_index = (u8) (factory_gain_index[c] - + (target_power - + factory_actual_pwr[c]) - + temperature_comp[c] - + voltage_compensation + + atten_value); + +/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n", + power_index); */ + + if (power_index < get_min_power_index(i, band)) + power_index = get_min_power_index(i, band); + + /* adjust 5 GHz index to support negative indexes */ + if (!band) + power_index += 9; + + /* CCK, rate 32, reduce txpower for CCK */ + if (i == POWER_TABLE_CCK_ENTRY) + power_index += + IWL_TX_POWER_CCK_COMPENSATION_C_STEP; + + /* stay within the table! */ + if (power_index > 107) { + IWL_WARNING("txpower index %d > 107\n", + power_index); + power_index = 107; + } + if (power_index < 0) { + IWL_WARNING("txpower index %d < 0\n", + power_index); + power_index = 0; + } + + /* fill txpower command for this rate/chain */ + tx_power.s.radio_tx_gain[c] = + gain_table[band][power_index].radio; + tx_power.s.dsp_predis_atten[c] = + gain_table[band][power_index].dsp; + + IWL_DEBUG_TXPOWER("chain %d mimo %d index %d " + "gain 0x%02x dsp %d\n", + c, atten_value, power_index, + tx_power.s.radio_tx_gain[c], + tx_power.s.dsp_predis_atten[c]); + }/* for each chain */ + + tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); + + }/* for each rate */ + + return 0; +} + +/** + * iwl_hw_reg_send_txpower - Configure the TXPOWER level user limit + * + * Uses the active RXON for channel, band, and characteristics (fat, high) + * The power limit is taken from priv->user_txpower_limit. + */ +int iwl_hw_reg_send_txpower(struct iwl_priv *priv) +{ + struct iwl_txpowertable_cmd cmd = { 0 }; + int rc = 0; + u8 band = 0; + u8 is_fat = 0; + u8 ctrl_chan_high = 0; + + if (test_bit(STATUS_SCANNING, &priv->status)) { + /* If this gets hit a lot, switch it to a BUG() and catch + * the stack trace to find out who is calling this during + * a scan. 
*/ + IWL_WARNING("TX Power requested while scanning!\n"); + return -EAGAIN; + } + + band = ((priv->phymode == MODE_IEEE80211B) || + (priv->phymode == MODE_IEEE80211G)); + + is_fat = is_fat_channel(priv->active_rxon.flags); + + if (is_fat && + (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.channel = priv->active_rxon.channel; + + rc = iwl4965_fill_txpower_tbl(priv, band, + le16_to_cpu(priv->active_rxon.channel), + is_fat, ctrl_chan_high, &cmd.tx_power); + if (rc) + return rc; + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); + return rc; +} + +int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + int rc; + u8 band = 0; + u8 is_fat = 0; + u8 ctrl_chan_high = 0; + struct iwl_channel_switch_cmd cmd = { 0 }; + const struct iwl_channel_info *ch_info; + + band = ((priv->phymode == MODE_IEEE80211B) || + (priv->phymode == MODE_IEEE80211G)); + + ch_info = iwl_get_channel_info(priv, priv->phymode, channel); + + is_fat = is_fat_channel(priv->staging_rxon.flags); + + if (is_fat && + (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.expect_beacon = 0; + cmd.channel = cpu_to_le16(channel); + cmd.rxon_flags = priv->active_rxon.flags; + cmd.rxon_filter_flags = priv->active_rxon.filter_flags; + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else + cmd.expect_beacon = 1; + + rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat, + ctrl_chan_high, &cmd.tx_power); + if (rc) { + IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc); + return rc; + } + + rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); + return rc; +} + +#define RTS_HCCA_RETRY_LIMIT 3 +#define RTS_DFAULT_RETRY_LIMIT 60 + +void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, int sta_id, + int is_hcca) +{ + u8 rate; + u8 rts_retry_limit = 0; + u8 data_retry_limit = 0; + __le32 tx_flags; + u16 fc = le16_to_cpu(hdr->frame_control); + + tx_flags = cmd->cmd.tx.tx_flags; + + rate = iwl_rates[ctrl->tx_rate].plcp; + + rts_retry_limit = (is_hcca) ? 
+ RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT; + + if (ieee80211_is_probe_response(fc)) { + data_retry_limit = 3; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + } else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + + if (priv->data_retry_limit != -1) + data_retry_limit = priv->data_retry_limit; + + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_AUTH: + case IEEE80211_STYPE_DEAUTH: + case IEEE80211_STYPE_ASSOC_REQ: + case IEEE80211_STYPE_REASSOC_REQ: + if (tx_flags & TX_CMD_FLG_RTS_MSK) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + break; + default: + break; + } + } + + cmd->cmd.tx.rts_retry_limit = rts_retry_limit; + cmd->cmd.tx.data_retry_limit = data_retry_limit; + cmd->cmd.tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, 0); + cmd->cmd.tx.tx_flags = tx_flags; +} + +int iwl_hw_get_rx_read(struct iwl_priv *priv) +{ + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num); +} + +int iwl_hw_get_temperature(struct iwl_priv *priv) +{ + return priv->temperature; +} + +unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame, u8 rate) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + BROADCAST_ADDR, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) + tx_beacon_cmd->tx.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); + else + tx_beacon_cmd->tx.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, 0); + + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK); + return (sizeof(*tx_beacon_cmd) + frame_size); +} + +int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int rc; + unsigned long flags; + int txq_id = txq->q.id; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_write_restricted(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + iwl_write_restricted( + priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), + IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static inline u8 iwl4965_get_dma_hi_address(dma_addr_t addr) +{ + return sizeof(addr) > sizeof(u32) ? 
(addr >> 16) >> 16 : 0; +} + +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, + dma_addr_t addr, u16 len) +{ + int index, is_odd; + struct iwl_tfd_frame *tfd = ptr; + u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); + + if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { + IWL_ERROR("Error can not send more than %d chunks\n", + MAX_NUM_OF_TBS); + return -EINVAL; + } + + index = num_tbs / 2; + is_odd = num_tbs & 0x1; + + if (!is_odd) { + tfd->pa[index].tb1_addr = cpu_to_le32(addr); + IWL_SET_BITS(tfd->pa[index], tb1_addr_hi, + iwl4965_get_dma_hi_address(addr)); + IWL_SET_BITS(tfd->pa[index], tb1_len, len); + } else { + IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16, + (u32) (addr & 0xffff)); + IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16); + IWL_SET_BITS(tfd->pa[index], tb2_len, len); + } + + IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); + + return 0; +} + +void iwl_hw_card_show_info(struct iwl_priv *priv) +{ + u16 hw_version = priv->eeprom.board_revision_4965; + + IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", + ((hw_version >> 8) & 0x0F), + ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); + + IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n", + priv->eeprom.board_pba_number_4965); +} + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u16 byte_cnt) +{ + int len; + int txq_id = txq->q.id; + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + if (txq->need_update == 0) + return 0; + + len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + + IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. + tfd_offset[txq->q.first_empty], byte_cnt, len); + + if (txq->q.first_empty < IWL4965_MAX_WIN_SIZE) + IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. + tfd_offset[IWL4965_QUEUE_SIZE + txq->q.first_empty], + byte_cnt, len); + + return 0; +} + +/* Set up Rx receiver/antenna/chain usage in "staging" RXON image. + * This should not be used for scan command ... it puts data in wrong place. */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv) +{ + u8 is_single = is_single_stream(priv); + u8 idle_state, rx_state; + + priv->staging_rxon.rx_chain = 0; + rx_state = idle_state = 3; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl4965_noise_calibration() + * checks which antennas actually *are* connected. */ + priv->staging_rxon.rx_chain |= + cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS); + + /* How many receivers should we use? 
*/ + iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state); + priv->staging_rxon.rx_chain |= + cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS); + priv->staging_rxon.rx_chain |= + cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS); + + if (!is_single && (rx_state >= 2) && + !test_bit(STATUS_POWER_PMI, &priv->status)) + priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); +} + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG +/* + get the traffic load value for tid +*/ +static u32 iwl4965_tl_get_load(struct iwl_priv *priv, u8 tid) +{ + u32 load = 0; + u32 current_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + unsigned long flags; + struct iwl_traffic_load *tid_ptr = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]); + + current_time -= current_time % TID_ROUND_VALUE; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (!(tid_ptr->queue_count)) + goto out; + + time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + if (index >= TID_QUEUE_MAX_SIZE) { + u32 oldest_time = current_time - TID_MAX_TIME_DIFF; + + while (tid_ptr->queue_count && + (tid_ptr->time_stamp < oldest_time)) { + tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head]; + tid_ptr->packet_count[tid_ptr->head] = 0; + tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING; + tid_ptr->queue_count--; + tid_ptr->head++; + if (tid_ptr->head >= TID_QUEUE_MAX_SIZE) + tid_ptr->head = 0; + } + } + load = tid_ptr->total; + + out: + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + return load; +} + +/* + increment traffic load value for tid and also remove + any old values if passed the certian time period +*/ +static void iwl4965_tl_add_packet(struct iwl_priv *priv, u8 tid) +{ + u32 current_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + unsigned long flags; + struct iwl_traffic_load *tid_ptr = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return; + + tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]); + + current_time -= current_time % TID_ROUND_VALUE; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (!(tid_ptr->queue_count)) { + tid_ptr->total = 1; + tid_ptr->time_stamp = current_time; + tid_ptr->queue_count = 1; + tid_ptr->head = 0; + tid_ptr->packet_count[0] = 1; + goto out; + } + + time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + if (index >= TID_QUEUE_MAX_SIZE) { + u32 oldest_time = current_time - TID_MAX_TIME_DIFF; + + while (tid_ptr->queue_count && + (tid_ptr->time_stamp < oldest_time)) { + tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head]; + tid_ptr->packet_count[tid_ptr->head] = 0; + tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING; + tid_ptr->queue_count--; + tid_ptr->head++; + if (tid_ptr->head >= TID_QUEUE_MAX_SIZE) + tid_ptr->head = 0; + } + } + + index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE; + tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1; + tid_ptr->total = tid_ptr->total + 1; + + if ((index + 1) > tid_ptr->queue_count) + tid_ptr->queue_count = index + 1; + out: + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + +} + +#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7 +enum HT_STATUS { + BA_STATUS_FAILURE = 0, + BA_STATUS_INITIATOR_DELBA, + BA_STATUS_RECIPIENT_DELBA, + BA_STATUS_RENEW_ADDBA_REQUEST, + 
BA_STATUS_ACTIVE, +}; + +static u8 iwl4964_tl_ba_avail(struct iwl_priv *priv) +{ + int i; + struct iwl_lq_mngr *lq; + u8 count = 0; + u16 msk; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) { + msk = 1 << i; + if ((lq->agg_ctrl.granted_ba & msk) || + (lq->agg_ctrl.wait_for_agg_status & msk)) + count++; + } + + if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS) + return 1; + + return 0; +} + +static void iwl4965_ba_status(struct iwl_priv *priv, + u8 tid, enum HT_STATUS status); + +static int iwl4965_perform_addba(struct iwl_priv *priv, u8 tid, u32 length, + u32 ba_timeout) +{ + int rc; + + rc = ieee80211_start_BA_session(priv->hw, priv->bssid, tid); + if (rc) + iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE); + + return rc; +} + +static int iwl4965_perform_delba(struct iwl_priv *priv, u8 tid) +{ + int rc; + + rc = ieee80211_stop_BA_session(priv->hw, priv->bssid, tid); + if (rc) + iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE); + + return rc; +} + +static void iwl4965_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_mngr *lq, + u8 auto_agg, u8 tid) +{ + u32 tid_msk = (1 << tid); + unsigned long flags; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); +/* + if ((auto_agg) && (!lq->enable_counter)){ + lq->agg_ctrl.next_retry = 0; + lq->agg_ctrl.tid_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + return; + } +*/ + if (!(lq->agg_ctrl.granted_ba & tid_msk) && + (lq->agg_ctrl.requested_ba & tid_msk)) { + u8 available_queues; + u32 load; + + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + available_queues = iwl4964_tl_ba_avail(priv); + load = iwl4965_tl_get_load(priv, tid); + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (!available_queues) { + if (auto_agg) + lq->agg_ctrl.tid_retry |= tid_msk; + else { + lq->agg_ctrl.requested_ba &= ~tid_msk; + lq->agg_ctrl.wait_for_agg_status &= ~tid_msk; + } + } else if ((auto_agg) && + ((load <= lq->agg_ctrl.tid_traffic_load_threshold) || + ((lq->agg_ctrl.wait_for_agg_status & tid_msk)))) + lq->agg_ctrl.tid_retry |= tid_msk; + else { + lq->agg_ctrl.wait_for_agg_status |= tid_msk; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + iwl4965_perform_addba(priv, tid, 0x40, + lq->agg_ctrl.ba_timeout); + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + } + } + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); +} + +static void iwl4965_turn_on_agg(struct iwl_priv *priv, u8 tid) +{ + struct iwl_lq_mngr *lq; + unsigned long flags; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + if ((tid < TID_MAX_LOAD_COUNT)) + iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg, + tid); + else if (tid == TID_ALL_SPECIFIED) { + if (lq->agg_ctrl.requested_ba) { + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) + iwl4965_turn_on_agg_for_tid(priv, lq, + lq->agg_ctrl.auto_agg, tid); + } else { + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + lq->agg_ctrl.tid_retry = 0; + lq->agg_ctrl.next_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + } + } + +} + +void iwl4965_turn_off_agg(struct iwl_priv *priv, u8 tid) +{ + u32 tid_msk; + struct iwl_lq_mngr *lq; + unsigned long flags; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + if ((tid < TID_MAX_LOAD_COUNT)) { + tid_msk = 1 << tid; + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + lq->agg_ctrl.wait_for_agg_status |= tid_msk; + lq->agg_ctrl.requested_ba &= ~tid_msk; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + iwl4965_perform_delba(priv, tid); + } else if (tid == TID_ALL_SPECIFIED) { + 
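+		/* Tear down aggregation on every TID: flag each TID as
+		 * waiting for agg status and issue a DELBA for it, dropping
+		 * the lq_mngr lock around each DELBA request, then clear
+		 * all requested-BA bits. */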
spin_lock_irqsave(&priv->lq_mngr.lock, flags); + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) { + tid_msk = 1 << tid; + lq->agg_ctrl.wait_for_agg_status |= tid_msk; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + iwl4965_perform_delba(priv, tid); + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + } + lq->agg_ctrl.requested_ba = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + } +} + +static void iwl4965_ba_status(struct iwl_priv *priv, + u8 tid, enum HT_STATUS status) +{ + struct iwl_lq_mngr *lq; + u32 tid_msk = (1 << tid); + unsigned long flags; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + if ((tid >= TID_MAX_LOAD_COUNT)) + goto out; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + switch (status) { + case BA_STATUS_ACTIVE: + if (!(lq->agg_ctrl.granted_ba & tid_msk)) + lq->agg_ctrl.granted_ba |= tid_msk; + break; + default: + if ((lq->agg_ctrl.granted_ba & tid_msk)) + lq->agg_ctrl.granted_ba &= ~tid_msk; + break; + } + + lq->agg_ctrl.wait_for_agg_status &= ~tid_msk; + if (status != BA_STATUS_ACTIVE) { + if (lq->agg_ctrl.auto_agg) { + lq->agg_ctrl.tid_retry |= tid_msk; + lq->agg_ctrl.next_retry = + jiffies + msecs_to_jiffies(500); + } else + lq->agg_ctrl.requested_ba &= ~tid_msk; + } + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + out: + return; +} + +static void iwl4965_bg_agg_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + agg_work); + + u32 tid; + u32 retry_tid; + u32 tid_msk; + unsigned long flags; + struct iwl_lq_mngr *lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + retry_tid = lq->agg_ctrl.tid_retry; + lq->agg_ctrl.tid_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + + if (retry_tid == TID_ALL_SPECIFIED) + iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED); + else { + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) { + tid_msk = (1 << tid); + if (retry_tid & tid_msk) + iwl4965_turn_on_agg(priv, tid); + } + } + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (lq->agg_ctrl.tid_retry) + lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500); + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + return; +} +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + +int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd, + u8 sta_id, dma_addr_t txcmd_phys, + struct ieee80211_hdr *hdr, u8 hdr_len, + struct ieee80211_tx_control *ctrl, void *sta_in) +{ + struct iwl_tx_cmd cmd; + struct iwl_tx_cmd *tx = (struct iwl_tx_cmd *)&out_cmd->cmd.payload[0]; + dma_addr_t scratch_phys; + u8 unicast = 0; + u8 is_data = 1; + u16 fc; + u16 rate_flags; + int rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1); +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + __le16 *qc; +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + unicast = !is_multicast_ether_addr(hdr->addr1); + + fc = le16_to_cpu(hdr->frame_control); + if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) + is_data = 0; + + memcpy(&cmd, &(out_cmd->cmd.tx), sizeof(struct iwl_tx_cmd)); + memset(tx, 0, sizeof(struct iwl_tx_cmd)); + memcpy(tx->hdr, hdr, hdr_len); + + tx->len = cmd.len; + tx->driver_txop = cmd.driver_txop; + tx->stop_time.life_time = cmd.stop_time.life_time; + tx->tx_flags = cmd.tx_flags; + tx->sta_id = cmd.sta_id; + tx->tid_tspec = cmd.tid_tspec; + tx->timeout.pm_frame_timeout = cmd.timeout.pm_frame_timeout; + tx->next_frame_len = cmd.next_frame_len; + + tx->sec_ctl = cmd.sec_ctl; + memcpy(&(tx->key[0]), 
&(cmd.key[0]), 16); + tx->tx_flags = cmd.tx_flags; + + tx->rts_retry_limit = cmd.rts_retry_limit; + tx->data_retry_limit = cmd.data_retry_limit; + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + tx->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx->dram_msb_ptr = iwl4965_get_dma_hi_address(scratch_phys); + + /* Hard coded to start at the highest retry fallback position + * until the 4965 specific rate control algorithm is tied in */ + tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1; + + /* Alternate between antenna A and B for successive frames */ + if (priv->use_ant_b_for_management_frame) { + priv->use_ant_b_for_management_frame = 0; + rate_flags = RATE_MCS_ANT_B_MSK; + } else { + priv->use_ant_b_for_management_frame = 1; + rate_flags = RATE_MCS_ANT_A_MSK; + } + + if (!unicast || !is_data) { + if ((rate_index >= IWL_FIRST_CCK_RATE) && + (rate_index <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + } else { + tx->initial_rate_index = 0; + tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + } + + tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp, + rate_flags); + + if (ieee80211_is_probe_request(fc)) + tx->tx_flags |= TX_CMD_FLG_TSF_MSK; + else if (ieee80211_is_back_request(fc)) + tx->tx_flags |= TX_CMD_FLG_ACK_MSK | + TX_CMD_FLG_IMM_BA_RSP_MASK; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + qc = ieee80211_get_qos_ctrl(hdr); + if (qc && + (priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) { + u8 tid = 0; + tid = (u8) (le16_to_cpu(*qc) & 0xF); + if (tid < TID_MAX_LOAD_COUNT) + iwl4965_tl_add_packet(priv, tid); + } + + if (priv->lq_mngr.agg_ctrl.next_retry && + (time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) { + unsigned long flags; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + priv->lq_mngr.agg_ctrl.next_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + schedule_work(&priv->agg_work); + } +#endif +#endif + return 0; +} + +/** + * sign_extend - Sign extend a value using specified bit as sign-bit + * + * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 + * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7. 
+ * + * @param oper value to sign extend + * @param index 0 based bit index (0<=index<32) to sign bit + */ +static s32 sign_extend(u32 oper, int index) +{ + u8 shift = 31 - index; + + return (s32)(oper << shift) >> shift; +} + +/** + * iwl4965_get_temperature - return the calibrated temperature (in Kelvin) + * @statistics: Provides the temperature reading from the uCode + * + * A return of <0 indicates bogus data in the statistics + */ +int iwl4965_get_temperature(const struct iwl_priv *priv) +{ + s32 temperature; + s32 vt; + s32 R1, R2, R3; + u32 R4; + + if (test_bit(STATUS_TEMPERATURE, &priv->status) && + (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) { + IWL_DEBUG_TEMP("Running FAT temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); + } else { + IWL_DEBUG_TEMP("Running temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); + } + + /* + * Temperature is only 23 bits so sign extend out to 32 + * + * NOTE If we haven't received a statistics notification yet + * with an updated temperature, use R4 provided to us in the + * ALIVE response. */ + if (!test_bit(STATUS_TEMPERATURE, &priv->status)) + vt = sign_extend(R4, 23); + else + vt = sign_extend( + le32_to_cpu(priv->statistics.general.temperature), 23); + + IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", + R1, R2, R3, vt); + + if (R3 == R1) { + IWL_ERROR("Calibration conflict R1 == R3\n"); + return -1; + } + + /* Calculate temperature in degrees Kelvin, adjust by 97%. + * Add offset to center the adjustment around 0 degrees Centigrade. */ + temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); + temperature /= (R3 - R1); + temperature = (temperature * 97) / 100 + + TEMPERATURE_CALIB_KELVIN_OFFSET; + + IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature, + KELVIN_TO_CELSIUS(temperature)); + + return temperature; +} + +/* Adjust Txpower only if temperature variance is greater than threshold. */ +#define IWL_TEMPERATURE_THRESHOLD 3 + +/** + * iwl4965_is_temp_calib_needed - determines if new calibration is needed + * + * If the temperature changed has changed sufficiently, then a recalibration + * is needed. + * + * Assumes caller will replace priv->last_temperature once calibration + * executed. + */ +static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + if (!test_bit(STATUS_STATISTICS, &priv->status)) { + IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n"); + return 0; + } + + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER("Same temp, \n"); + else + IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff); + + if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { + IWL_DEBUG_POWER("Thermal txpower calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER("Thermal txpower calib needed\n"); + + return 1; +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. 
This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. */ +static void iwl4965_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info + = &(priv->statistics.rx.general); + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + int bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + int bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + priv->last_rx_noise = (total_silence / num_active_rx) - 107; + else + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + priv->last_rx_noise); +} + +void iwl_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + int change; + s32 temp; + + IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", + (int)sizeof(priv->statistics), pkt->len); + + change = ((priv->statistics.general.temperature != + pkt->u.stats.general.temperature) || + ((priv->statistics.flag & + STATISTICS_REPLY_FLG_FAT_MODE_MSK) != + (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK))); + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl4965_rx_calc_noise(priv); +#ifdef CONFIG_IWLWIFI_SENSITIVITY + queue_work(priv->workqueue, &priv->sensitivity_work); +#endif + } + + /* If the hardware hasn't reported a change in + * temperature then don't bother computing a + * calibrated temperature value */ + if (!change) + return; + + temp = iwl4965_get_temperature(priv); + if (temp < 0) + return; + + if (priv->temperature != temp) { + if (priv->temperature) + IWL_DEBUG_TEMP("Temperature changed " + "from %dC to %dC\n", + KELVIN_TO_CELSIUS(priv->temperature), + KELVIN_TO_CELSIUS(temp)); + else + IWL_DEBUG_TEMP("Temperature " + "initialized to %dC\n", + KELVIN_TO_CELSIUS(temp)); + } + + priv->temperature = temp; + set_bit(STATUS_TEMPERATURE, &priv->status); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + iwl4965_is_temp_calib_needed(priv)) + queue_work(priv->workqueue, &priv->txpower_work); +} + +static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, + int include_phy, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 
+ (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; + struct ieee80211_hdr *hdr; + u16 len; + __le32 *rx_end; + unsigned int skblen; + u32 ampdu_status; + + if (!include_phy && priv->last_phy_res[0]) + rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; + + if (!rx_start) { + IWL_ERROR("MPDU frame without a PHY data\n"); + return; + } + if (include_phy) { + hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] + + rx_start->cfg_phy_cnt); + + len = le16_to_cpu(rx_start->byte_count); + + rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] + + sizeof(struct iwl4965_rx_phy_res) + + rx_start->cfg_phy_cnt + len); + + } else { + struct iwl4965_rx_mpdu_res_start *amsdu = + (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; + + hdr = (struct ieee80211_hdr *)(pkt->u.raw + + sizeof(struct iwl4965_rx_mpdu_res_start)); + len = le16_to_cpu(amsdu->byte_count); + rx_start->byte_count = amsdu->byte_count; + rx_end = (__le32 *) (((u8 *) hdr) + len); + } + if (len > 2342 || len < 16) { + IWL_DEBUG_DROP("byte count out of range [16,2342]" + " : %d\n", len); + return; + } + + ampdu_status = le32_to_cpu(*rx_end); + skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32); + + /* start from MAC */ + skb_reserve(rxb->skb, (void *)hdr - (void *)pkt); + skb_put(rxb->skb, len); /* end where data ends */ + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT + ("Dropping packet while interface is not open.\n"); + return; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, + ampdu_status, stats); + iwl_handle_data_packet_monitor(priv, rxb, hdr, len, stats, 0); + return; + } + + stats->flag = 0; + hdr = (struct ieee80211_hdr *)rxb->skb->data; + + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats); + + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + priv->alloc_rxb_skb--; + rxb->skb = NULL; +#ifdef LED + priv->led_packets += len; + iwl_setup_activity_timer(priv); +#endif +} + +/* Calc max signal level (dBm) among 3 possible receivers */ +static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. */ + struct iwl4965_rx_non_cfg_phy *ncphy = + (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy; + u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK) + >> IWL_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) + >> RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
*/ + return (max_rssi - agc - IWL_RSSI_OFFSET); +} + +#ifdef CONFIG_IWLWIFI_HT + +/* Parsed Information Elements */ +struct ieee802_11_elems { + u8 *ds_params; + u8 ds_params_len; + u8 *tim; + u8 tim_len; + u8 *ibss_params; + u8 ibss_params_len; + u8 *erp_info; + u8 erp_info_len; + u8 *ht_cap_param; + u8 ht_cap_param_len; + u8 *ht_extra_param; + u8 ht_extra_param_len; +}; + +static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems) +{ + size_t left = len; + u8 *pos = start; + int unknown = 0; + + memset(elems, 0, sizeof(*elems)); + + while (left >= 2) { + u8 id, elen; + + id = *pos++; + elen = *pos++; + left -= 2; + + if (elen > left) + return -1; + + switch (id) { + case WLAN_EID_DS_PARAMS: + elems->ds_params = pos; + elems->ds_params_len = elen; + break; + case WLAN_EID_TIM: + elems->tim = pos; + elems->tim_len = elen; + break; + case WLAN_EID_IBSS_PARAMS: + elems->ibss_params = pos; + elems->ibss_params_len = elen; + break; + case WLAN_EID_ERP_INFO: + elems->erp_info = pos; + elems->erp_info_len = elen; + break; + case WLAN_EID_HT_CAPABILITY: + elems->ht_cap_param = pos; + elems->ht_cap_param_len = elen; + break; + case WLAN_EID_HT_EXTRA_INFO: + elems->ht_extra_param = pos; + elems->ht_extra_param_len = elen; + break; + default: + unknown++; + break; + } + + left -= elen; + pos += elen; + } + + return 0; +} +#endif /* CONFIG_IWLWIFI_HT */ + +static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = 0; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + +static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) +{ + /* FIXME: need locking over ps_status ??? */ + u8 sta_id = iwl_hw_find_station(priv, addr); + + if (sta_id != IWL_INVALID_STATION) { + u8 sta_awake = priv->stations[sta_id]. + ps_status == STA_PS_STATUS_WAKE; + + if (sta_awake && ps_bit) + priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP; + else if (!sta_awake && !ps_bit) { + iwl4965_sta_modify_ps_wake(priv, sta_id); + priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE; + } + } +} + +/* Called for REPLY_4965_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ +static void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + /* Use phy data (Rx signal strength, etc.) contained within + * this rx packet for legacy frames, + * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ + int include_phy = (pkt->hdr.cmd == REPLY_4965_RX); + struct iwl4965_rx_phy_res *rx_start = (include_phy) ? + (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : + (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; + __le32 *rx_end; + unsigned int len = 0; + struct ieee80211_hdr *header; + u16 fc; + struct ieee80211_rx_status stats = { + .mactime = le64_to_cpu(rx_start->timestamp), + .channel = le16_to_cpu(rx_start->channel), + .phymode = + (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
+ MODE_IEEE80211G : MODE_IEEE80211A, + .antenna = 0, + .rate = iwl_hw_get_rate(rx_start->rate_n_flags), + .flag = 0, +#ifdef CONFIG_IWLWIFI_HT_AGG + .ordered = 0 +#endif /* CONFIG_IWLWIFI_HT_AGG */ + }; + u8 network_packet; + + if ((unlikely(rx_start->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP + ("dsp size out of range [0,20]: " + "%d/n", rx_start->cfg_phy_cnt); + return; + } + if (!include_phy) { + if (priv->last_phy_res[0]) + rx_start = (struct iwl4965_rx_phy_res *) + &priv->last_phy_res[1]; + else + rx_start = NULL; + } + + if (!rx_start) { + IWL_ERROR("MPDU frame without a PHY data\n"); + return; + } + + if (include_phy) { + header = (struct ieee80211_hdr *)((u8 *) & rx_start[1] + + rx_start->cfg_phy_cnt); + + len = le16_to_cpu(rx_start->byte_count); + rx_end = (__le32 *) (pkt->u.raw + rx_start->cfg_phy_cnt + + sizeof(struct iwl4965_rx_phy_res) + len); + } else { + struct iwl4965_rx_mpdu_res_start *amsdu = + (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; + + header = (void *)(pkt->u.raw + + sizeof(struct iwl4965_rx_mpdu_res_start)); + len = le16_to_cpu(amsdu->byte_count); + rx_end = (__le32 *) (pkt->u.raw + + sizeof(struct iwl4965_rx_mpdu_res_start) + len); + } + + if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) || + !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(*rx_end)); + return; + } + + priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp); + + stats.freq = ieee80211chan2mhz(stats.channel); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + stats.ssi = iwl4965_calc_rssi(rx_start); + + /* Meaningful noise values are available only from beacon statistics, + * which are gathered only when associated, and indicate noise + * only for the associated network channel ... + * Ignore these noise values while scanning (other channels) */ + if (iwl_is_associated(priv) && + !test_bit(STATUS_SCANNING, &priv->status)) { + stats.noise = priv->last_rx_noise; + stats.signal = iwl_calc_sig_qual(stats.ssi, stats.noise); + } else { + stats.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + stats.signal = iwl_calc_sig_qual(stats.ssi, 0); + } + + /* Reset beacon noise level if not associated. 
*/ + if (!iwl_is_associated(priv)) + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + +#ifdef CONFIG_IWLWIFI_DEBUG + /* TODO: Parts of iwl_report_frame are broken for 4965 */ + if (iwl_debug_level & (IWL_DL_RX)) + /* Set "1" to report good data frames in groups of 100 */ + iwl_report_frame(priv, pkt, header, 1); + + if (iwl_debug_level & (IWL_DL_RX | IWL_DL_STATS)) + IWL_DEBUG_RX("Rssi %d, noise %d, qual %d, TSF %lu\n", + stats.ssi, stats.noise, stats.signal, + (long unsigned int)le64_to_cpu(rx_start->timestamp)); +#endif + + network_packet = iwl_is_network_packet(priv, header); + if (network_packet) { + priv->last_rx_rssi = stats.ssi; + priv->last_beacon_time = priv->ucode_beacon_time; + priv->last_tsf = le64_to_cpu(rx_start->timestamp); + } + + fc = le16_to_cpu(header->frame_control); + switch (fc & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_MGMT: + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, + header->addr2); + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA && + !compare_ether_addr(header->addr2, priv->bssid)) || + (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && + !compare_ether_addr(header->addr3, priv->bssid))) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)header; + u64 timestamp = + le64_to_cpu(mgmt->u.beacon.timestamp); + + priv->timestamp0 = timestamp & 0xFFFFFFFF; + priv->timestamp1 = + (timestamp >> 32) & 0xFFFFFFFF; + priv->beacon_int = le16_to_cpu( + mgmt->u.beacon.beacon_int); + if (priv->call_post_assoc_from_beacon && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { + priv->call_post_assoc_from_beacon = 0; + queue_work(priv->workqueue, + &priv->post_associate.work); + } + } + break; + + case IEEE80211_STYPE_ACTION: + break; + + /* + * TODO: There is no callback function from upper + * stack to inform us when associated status. this + * work around to sniff assoc_resp management frame + * and finish the association process. 
+ */ + case IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_STYPE_REASSOC_RESP: + if (network_packet && iwl_is_associated(priv)) { +#ifdef CONFIG_IWLWIFI_HT + u8 *pos = NULL; + struct ieee802_11_elems elems; +#endif /*CONFIG_IWLWIFI_HT */ + struct ieee80211_mgmt *mgnt = + (struct ieee80211_mgmt *)header; + + priv->assoc_id = (~((1 << 15) | (1 << 14)) + & le16_to_cpu(mgnt->u.assoc_resp.aid)); + priv->assoc_capability = + le16_to_cpu( + mgnt->u.assoc_resp.capab_info); +#ifdef CONFIG_IWLWIFI_HT + pos = mgnt->u.assoc_resp.variable; + if (!parse_elems(pos, + len - (pos - (u8 *) mgnt), + &elems)) { + if (elems.ht_extra_param && + elems.ht_cap_param) + break; + } +#endif /*CONFIG_IWLWIFI_HT */ + /* assoc_id is 0 no association */ + if (!priv->assoc_id) + break; + if (priv->beacon_int) + queue_work(priv->workqueue, + &priv->post_associate.work); + else + priv->call_post_assoc_from_beacon = 1; + } + + break; + + case IEEE80211_STYPE_PROBE_REQ: + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !iwl_is_associated(priv)) { + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + + IWL_DEBUG_DROP("Dropping (non network): " + "%s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + return; + } + } + iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &stats); + break; + + case IEEE80211_FTYPE_CTL: +#ifdef CONFIG_IWLWIFI_HT_AGG + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_BACK_REQ: + IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n"); + iwl4965_handle_data_packet(priv, 0, include_phy, + rxb, &stats); + break; + default: + break; + } +#endif + + break; + + case IEEE80211_FTYPE_DATA: { + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, + header->addr2); + + if (unlikely(!network_packet)) + IWL_DEBUG_DROP("Dropping (non network): " + "%s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + else if (unlikely(is_duplicate_packet(priv, header))) + IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + else + iwl4965_handle_data_packet(priv, 1, include_phy, rxb, + &stats); + break; + } + default: + break; + + } +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. 
*/ +static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + priv->last_phy_res[0] = 1; + memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), + sizeof(struct iwl4965_rx_phy_res)); +} + +static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ +#ifdef CONFIG_IWLWIFI_SENSITIVITY + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) { + IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consequtive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status))) + queue_work(priv->workqueue, &priv->sensitivity_work); + } +#endif /*CONFIG_IWLWIFI_SENSITIVITY*/ +} + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + +static void iwl4965_set_tx_status(struct iwl_priv *priv, int txq_id, int idx, + u32 status, u32 retry_count, u32 rate) +{ + struct ieee80211_tx_status *tx_status = + &(priv->txq[txq_id].txb[idx].status); + + tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0; + tx_status->retry_count += retry_count; + tx_status->control.tx_rate = rate; +} + + +static void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, + int sta_id, int tid) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + + +static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_compressed_ba_resp* + ba_resp) + +{ + int i, sh, ack; + u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl); + u32 bitmap0, bitmap1; + u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0); + u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1); + + if (unlikely(!agg->wait_for_ba)) { + IWL_ERROR("Received BA when not expected\n"); + return -EINVAL; + } + agg->wait_for_ba = 0; + IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl); + sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl>>4); + if (sh < 0) /* tbw something is wrong with indeces */ + sh += 0x100; + + /* don't use 64 bits for now */ + bitmap0 = resp_bitmap0 >> sh; + bitmap1 = resp_bitmap1 >> sh; + bitmap0 |= (resp_bitmap1 & ((1<<sh)|((1<<sh)-1))) << (32 - sh); + + if (agg->frame_count > (64 - sh)) { + IWL_DEBUG_TX_REPLY("more frames than bitmap size"); + return -1; + } + + /* check for success or failure according to the + * transmitted bitmap and back bitmap */ + bitmap0 &= agg->bitmap0; + bitmap1 &= agg->bitmap1; + + for (i = 0; i < agg->frame_count ; i++) { + int idx = (agg->start_idx + i) & 0xff; + ack = bitmap0 & (1 << i); + IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", + ack? 
"ACK":"NACK", i, idx, agg->start_idx + i); + iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0, + agg->rate_n_flags); + + } + + IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1); + + return 0; +} + +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return (index == 0) ? n_bd - 1 : index - 1; +} + +static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + int index; + struct iwl_tx_queue *txq = NULL; + struct iwl_ht_agg *agg; + u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow); + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) { + IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); + return; + } + + txq = &priv->txq[ba_resp_scd_flow]; + agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; + index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + /* TODO: Need to get this copy more sefely - now good for debug */ +/* + { + DECLARE_MAC_BUF(mac); + IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " + "sta_id = %d\n", + agg->wait_for_ba, + print_mac(mac, (u8*) &ba_resp->sta_addr_lo32), + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = " + "%d, scd_ssn = %d\n", + ba_resp->tid, + ba_resp->ba_seq_ctl, + ba_resp->ba_bitmap1, + ba_resp->ba_bitmap0, + ba_resp->scd_flow, + ba_resp->scd_ssn); + IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n", + agg->start_idx, + agg->bitmap1, + agg->bitmap0); + } +*/ + iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); + /* releases all the TFDs until the SSN */ + if (txq->q.last_used != (ba_resp_scd_ssn & 0xff)) + iwl_tx_queue_reclaim(priv, ba_resp_scd_flow, index); + +} + + +static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) +{ + iwl_write_restricted_reg(priv, + SCD_QUEUE_STATUS_BITS(txq_id), + (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_read_restricted_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_write_restricted_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} + +/** + * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID + */ +static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, + u16 ssn_idx) +{ + unsigned long flags; + int rc; + u16 ra_tid; + + if (IWL_BACK_QUEUE_FIRST_ID > txq_id) + IWL_WARNING("queue number too small: %d, must be > %d\n", + txq_id, IWL_BACK_QUEUE_FIRST_ID); + + ra_tid = BUILD_RAxTID(sta_id, tid); + + iwl_sta_modify_enable_tid_tx(priv, sta_id, tid); + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + + iwl_set_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id)); + + priv->txq[txq_id].q.last_used = (ssn_idx & 0xff); + 
priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff); + + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_write_restricted_mem(priv, + priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + iwl_write_restricted_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) + & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + iwl_set_bits_restricted_reg(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); + + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/** + * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID + */ +static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + unsigned long flags; + int rc; + + if (IWL_BACK_QUEUE_FIRST_ID > txq_id) { + IWL_WARNING("queue number too small: %d, must be > %d\n", + txq_id, IWL_BACK_QUEUE_FIRST_ID); + return -EINVAL; + } + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl_clear_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + priv->txq[txq_id].q.last_used = (ssn_idx & 0xff); + priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_clear_bits_restricted_reg(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl4965_txq_ctx_deactivate(priv, txq_id); + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +#endif/* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ +/* + * RATE SCALE CODE + */ +int iwl4965_init_hw_rates(struct iwl_priv *priv, struct ieee80211_rate *rates) +{ + return 0; +} + + +/** + * iwl4965_add_station - Initialize a station's hardware rate table + * + * The uCode contains a table of fallback rates and retries per rate + * for automatic fallback during transmission. + * + * NOTE: This initializes the table for a single retry per data rate + * which is not optimal. Setting up an intelligent retry per rate + * requires feedback from transmission, which isn't exposed through + * rc80211_simple which is what this driver is currently using. 
+ * + */ +void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap) +{ + int i, r; + struct iwl_link_quality_cmd link_cmd = { + .reserved1 = 0, + }; + u16 rate_flags; + + /* Set up the rate scaling to start at 54M and fallback + * all the way to 1M in IEEE order and then spin on IEEE */ + if (is_ap) + r = IWL_RATE_54M_INDEX; + else if (priv->phymode == MODE_IEEE80211A) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + rate_flags = 0; + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= RATE_MCS_ANT_B_MSK; + rate_flags &= ~RATE_MCS_ANT_A_MSK; + link_cmd.rs_table[i].rate_n_flags = + iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + r = iwl_get_prev_ieee_rate(r); + } + + link_cmd.general_params.single_stream_ant_msk = 2; + link_cmd.general_params.dual_stream_ant_msk = 3; + link_cmd.agg_params.agg_dis_start_th = 3; + link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000); + + /* Update the rate scaling for control frame Tx to AP */ + link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID; + + iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd), + &link_cmd); +} + +#ifdef CONFIG_IWLWIFI_HT + +static u8 iwl_is_channel_extension(struct iwl_priv *priv, int phymode, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + if (!is_channel_valid(ch_info)) + return 0; + + if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO) + return 0; + + if ((ch_info->fat_extension_channel == extension_chan_offset) || + (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX)) + return 1; + + return 0; +} + +static u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, + const struct sta_ht_info *ht_info) +{ + + if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) + return 0; + + if (ht_info->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) + return 0; + + if (ht_info->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO) + return 0; + + /* no fat tx allowed on 2.4GHZ */ + if (priv->phymode != MODE_IEEE80211A) + return 0; + return (iwl_is_channel_extension(priv, priv->phymode, + ht_info->control_channel, + ht_info->extension_chan_offset)); +} + +void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct sta_ht_info *ht_info) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + u32 val; + + if (!ht_info->is_ht) + return; + + if (iwl_is_fat_tx_allowed(priv, ht_info)) + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK; + else + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | + RXON_FLG_CHANNEL_MODE_PURE_40_MSK); + + if (le16_to_cpu(rxon->channel) != ht_info->control_channel) { + IWL_DEBUG_ASSOC("control diff than current %d %d\n", + le16_to_cpu(rxon->channel), + ht_info->control_channel); + rxon->channel = cpu_to_le16(ht_info->control_channel); + return; + } + + /* Note: control channel is oposit to extension channel */ + switch (ht_info->extension_chan_offset) { + case IWL_EXT_CHANNEL_OFFSET_ABOVE: + rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + break; + case IWL_EXT_CHANNEL_OFFSET_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IWL_EXT_CHANNEL_OFFSET_AUTO: + rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; + break; + default: + rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; + break; + } + + val = ht_info->operating_mode; + + rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS); + + priv->active_rate_ht[0] = 
ht_info->supp_rates[0]; + priv->active_rate_ht[1] = ht_info->supp_rates[1]; + iwl4965_set_rxon_chain(priv); + + IWL_DEBUG_ASSOC("supported HT rate 0x%X %X " + "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x " + "control chan %d\n", + priv->active_rate_ht[0], priv->active_rate_ht[1], + le32_to_cpu(rxon->flags), ht_info->operating_mode, + ht_info->extension_chan_offset, + ht_info->control_channel); + return; +} + +void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index) +{ + __le32 sta_flags; + struct sta_ht_info *ht_info = &priv->current_assoc_ht; + + priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ; + if (!ht_info->is_ht) + goto done; + + sta_flags = priv->stations[index].sta.station_flags; + + if (ht_info->tx_mimo_ps_mode == IWL_MIMO_PS_DYNAMIC) + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + else + sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK; + + sta_flags |= cpu_to_le32( + (u32)ht_info->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= cpu_to_le32( + (u32)ht_info->mpdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + sta_flags &= (~STA_FLG_FAT_EN_MSK); + ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_20MHZ; + ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_20MHZ; + + if (iwl_is_fat_tx_allowed(priv, ht_info)) { + sta_flags |= STA_FLG_FAT_EN_MSK; + ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_40MHZ; + if (ht_info->supported_chan_width == IWL_CHANNEL_WIDTH_40MHZ) + ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_40MHZ; + } + priv->current_channel_width = ht_info->tx_chan_width; + priv->stations[index].sta.station_flags = sta_flags; + done: + return; +} + +#ifdef CONFIG_IWLWIFI_HT_AGG + +static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv, + int sta_id, int tid, u16 ssn) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + +static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, + int sta_id, int tid) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + +static const u16 default_tid_to_tx_fifo[] = { + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL_TX_FIFO_AC0, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_AC3 +}; + +static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) +{ + int txq_id; + + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +int iwl_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid, + u16 *start_seq_num) +{ + + struct iwl_priv *priv = hw->priv; + int 
sta_id; + int tx_fifo; + int txq_id; + int ssn = -1; + unsigned long flags; + struct iwl_tid_data *tid_data; + DECLARE_MAC_BUF(mac); + + if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) + tx_fifo = default_tid_to_tx_fifo[tid]; + else + return -EINVAL; + + IWL_WARNING("iwl-AGG iwl_mac_ht_tx_agg_start on da=%s" + " tid=%d\n", print_mac(mac, da), tid); + + sta_id = iwl_hw_find_station(priv, da); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + txq_id = iwl_txq_ctx_activate_free(priv); + if (txq_id == -1) + return -ENXIO; + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + *start_seq_num = ssn; + iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE); + return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, + sta_id, tid, ssn); +} + + +int iwl_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid, + int generator) +{ + + struct iwl_priv *priv = hw->priv; + int tx_fifo_id, txq_id, sta_id, ssn = -1; + struct iwl_tid_data *tid_data; + int rc; + DECLARE_MAC_BUF(mac); + + if (!da) { + IWL_ERROR("%s: da = NULL\n", __func__); + return -EINVAL; + } + + if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) + tx_fifo_id = default_tid_to_tx_fifo[tid]; + else + return -EINVAL; + + sta_id = iwl_hw_find_station(priv, da); + + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + + rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); + /* FIXME: need more safe way to handle error condition */ + if (rc) + return rc; + + iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA); + IWL_DEBUG_INFO("iwl_mac_ht_tx_agg_stop on da=%s tid=%d\n", + print_mac(mac, da), tid); + + return 0; +} + +int iwl_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da, + u16 tid, u16 start_seq_num) +{ + struct iwl_priv *priv = hw->priv; + int sta_id; + DECLARE_MAC_BUF(mac); + + IWL_WARNING("iwl-AGG iwl_mac_ht_rx_agg_start on da=%s" + " tid=%d\n", print_mac(mac, da), tid); + sta_id = iwl_hw_find_station(priv, da); + iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, start_seq_num); + return 0; +} + +int iwl_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da, + u16 tid, int generator) +{ + struct iwl_priv *priv = hw->priv; + int sta_id; + DECLARE_MAC_BUF(mac); + + IWL_WARNING("iwl-AGG iwl_mac_ht_rx_agg_stop on da=%s tid=%d\n", + print_mac(mac, da), tid); + sta_id = iwl_hw_find_station(priv, da); + iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); + return 0; +} + +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + +/* Set up 4965-specific Rx frame reply handlers */ +void iwl_hw_rx_handler_setup(struct iwl_priv *priv) +{ + /* Legacy Rx frames */ + priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx; + + /* High-throughput (HT) Rx frames */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl4965_rx_missed_beacon_notif; + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; +#endif /* CONFIG_IWLWIFI_AGG */ +#endif /* CONFIG_IWLWIFI */ +} + +void iwl_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); + 
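+	/* Remaining deferred work: statistics refresh, optional sensitivity
+	 * calibration and aggregation retry handling, plus the periodic
+	 * statistics timer that keeps thermal data current even when the
+	 * uCode does not send a statistics notification on its own. */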
INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work); +#ifdef CONFIG_IWLWIFI_SENSITIVITY + INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work); +#endif +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work); +#endif /* CONFIG_IWLWIFI_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; +} + +void iwl_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + del_timer_sync(&priv->statistics_periodic); + + cancel_delayed_work(&priv->init_alive_start); +} + +struct pci_device_id iwl_hw_card_ids[] = { + {0x8086, 0x4229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x8086, 0x4230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0} +}; + +int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv) +{ + u16 count; + int rc; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + rc = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (rc >= 0) { + IWL_DEBUG_IO("Aqcuired semaphore after %d tries.\n", + count+1); + return rc; + } + } + + return rc; +} + +inline void iwl_eeprom_release_semaphore(struct iwl_priv *priv) +{ + iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); +} + + +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h new file mode 100644 index 000000000000..4c700812b45b --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965.h @@ -0,0 +1,341 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_4965_h__ +#define __iwl_4965_h__ + +struct iwl_priv; +struct sta_ht_info; + +/* + * Forward declare iwl-4965.c functions for iwl-base.c + */ +extern int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv); +extern void iwl_eeprom_release_semaphore(struct iwl_priv *priv); + +extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); +extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, + int is_ap); +extern void iwl4965_set_rxon_ht(struct iwl_priv *priv, + struct sta_ht_info *ht_info); + +extern void iwl4965_set_rxon_chain(struct iwl_priv *priv); +extern int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd, + u8 sta_id, dma_addr_t txcmd_phys, + struct ieee80211_hdr *hdr, u8 hdr_len, + struct ieee80211_tx_control *ctrl, void *sta_in); +extern int iwl4965_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates); +extern int iwl4965_alive_notify(struct iwl_priv *priv); +extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode); +extern void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index); + +extern void iwl4965_chain_noise_reset(struct iwl_priv *priv); +extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, + u8 force); +extern int iwl4965_set_fat_chan_info(struct iwl_priv *priv, int phymode, + u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 fat_extension_channel); +extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG +extern int iwl_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, + u16 tid, u16 *start_seq_num); +extern int iwl_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da, + u16 tid, u16 start_seq_num); +extern int iwl_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da, + u16 tid, int generator); +extern int iwl_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, + u16 tid, int generator); +extern void iwl4965_turn_off_agg(struct iwl_priv *priv, u8 tid); +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /*CONFIG_IWLWIFI_HT */ +/* Structures, enum, and defines specific to the 4965 */ + +#define IWL4965_KW_SIZE 0x1000 /*4k */ + +struct iwl_kw { + dma_addr_t dma_addr; + void *v_addr; + size_t size; +}; + +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) + +#define TID_ALL_ENABLED 0x7f +#define TID_ALL_SPECIFIED 0xff +#define TID_AGG_TPT_THREHOLD 0x0 + +#define IWL_CHANNEL_WIDTH_20MHZ 0 +#define IWL_CHANNEL_WIDTH_40MHZ 1 + +#define IWL_MIMO_PS_STATIC 0 +#define IWL_MIMO_PS_NONE 3 +#define IWL_MIMO_PS_DYNAMIC 1 +#define IWL_MIMO_PS_INVALID 2 + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define IWL_EXT_CHANNEL_OFFSET_AUTO 0 +#define IWL_EXT_CHANNEL_OFFSET_ABOVE 1 +#define IWL_EXT_CHANNEL_OFFSET_ 2 +#define IWL_EXT_CHANNEL_OFFSET_BELOW 3 +#define IWL_EXT_CHANNEL_OFFSET_MAX 4 + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS (3) + +#define TX_POWER_IWL_ILLEGAL_VDET -100000 +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 +#define TX_POWER_IWL_CLOSED_LOOP_MIN_POWER 18 +#define TX_POWER_IWL_CLOSED_LOOP_MAX_POWER 34 +#define TX_POWER_IWL_VDET_SLOPE_BELOW_NOMINAL 17 +#define TX_POWER_IWL_VDET_SLOPE_ABOVE_NOMINAL 20 +#define TX_POWER_IWL_NOMINAL_POWER 26 +#define TX_POWER_IWL_CLOSED_LOOP_ITERATION_LIMIT 1 +#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V 7 +#define TX_POWER_IWL_DEGREES_PER_VDET_CODE 11 +#define IWL_TX_POWER_MAX_NUM_PA_MEASUREMENTS 1 +#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9) +#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5) + +struct iwl_traffic_load { + unsigned long time_stamp; + u32 packet_count[TID_QUEUE_MAX_SIZE]; + u8 queue_count; + u8 head; + u32 total; +}; + +#ifdef CONFIG_IWLWIFI_HT_AGG +struct iwl_agg_control { + unsigned long next_retry; + u32 wait_for_agg_status; + u32 tid_retry; + u32 requested_ba; + u32 granted_ba; + u8 auto_agg; + u32 tid_traffic_load_threshold; + u32 ba_timeout; + struct iwl_traffic_load traffic_load[TID_MAX_LOAD_COUNT]; +}; +#endif /*CONFIG_IWLWIFI_HT_AGG */ + +struct iwl_lq_mngr { +#ifdef CONFIG_IWLWIFI_HT_AGG + struct iwl_agg_control agg_ctrl; +#endif + spinlock_t lock; + s32 max_window_size; + s32 *expected_tpt; + u8 *next_higher_rate; + u8 *next_lower_rate; + unsigned long stamp; + unsigned long stamp_last; + u32 flush_time; + u32 tx_packets; + u8 lq_ready; +}; + + +/* Sensitivity and chain noise calibration */ +#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1) +#define INITIALIZATION_VALUE 0xFFFF +#define CAL_NUM_OF_BEACONS 20 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +/* Param table within SENSITIVITY_CMD */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1) + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define NRG_MIN_CCK 97 +#define NRG_MAX_CCK 0 + +#define AUTO_CORR_MIN_OFDM 85 +#define AUTO_CORR_MIN_OFDM_MRC 170 +#define AUTO_CORR_MIN_OFDM_X1 105 +#define AUTO_CORR_MIN_OFDM_MRC_X1 220 +#define AUTO_CORR_MAX_OFDM 120 +#define AUTO_CORR_MAX_OFDM_MRC 210 +#define AUTO_CORR_MAX_OFDM_X1 140 +#define AUTO_CORR_MAX_OFDM_MRC_X1 270 +#define AUTO_CORR_STEP_OFDM 
1 + +#define AUTO_CORR_MIN_CCK (125) +#define AUTO_CORR_MAX_CCK (200) +#define AUTO_CORR_MIN_CCK_MRC 200 +#define AUTO_CORR_MAX_CCK_MRC 400 +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_ALG 0 +#define AUTO_CORR_ALG 1 +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +enum iwl_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwl_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE = 1, + IWL_CHAIN_NOISE_CALIBRATED = 2, +}; + +enum iwl_sensitivity_state { + IWL_SENS_CALIB_ALLOWED = 0, + IWL_SENS_CALIB_NEED_REINIT = 1, +}; + +enum iwl_calib_enabled_state { + IWL_CALIB_DISABLED = 0, /* must be 0 */ + IWL_CALIB_ENABLED = 1, +}; + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u8 state; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u8 state; + u16 beacon_count; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; +}; + +/* IWL4965 */ +#define RATE_MCS_CODE_MSK 0x7 +#define RATE_MCS_MIMO_POS 3 +#define RATE_MCS_MIMO_MSK 0x8 +#define RATE_MCS_HT_DUP_POS 5 +#define RATE_MCS_HT_DUP_MSK 0x20 +#define RATE_MCS_FLAGS_POS 8 +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK 0x100 +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK 0x200 +#define RATE_MCS_GF_POS 10 +#define RATE_MCS_GF_MSK 0x400 + +#define RATE_MCS_FAT_POS 11 +#define RATE_MCS_FAT_MSK 0x800 +#define RATE_MCS_DUP_POS 12 +#define RATE_MCS_DUP_MSK 0x1000 +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK 0x2000 + +#define EEPROM_SEM_TIMEOUT 10 +#define EEPROM_SEM_RETRY_LIMIT 1000 + +#endif /* __iwl_4965_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-channel.h b/drivers/net/wireless/iwlwifi/iwl-channel.h new file mode 100644 index 000000000000..023c3f240cea --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-channel.h @@ -0,0 +1,161 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_channel_h__ +#define __iwl_channel_h__ + +#define IWL_NUM_SCAN_RATES (2) + +struct iwl_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct iwl_channel_tgh_info { + s64 last_radar_time; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. iwconfig) + * when requested power is set, base power index must also be set. */ +struct iwl_channel_power_info { + struct iwl_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 base_power_index; /* gain index for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct iwl_scan_power_info { + struct iwl_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* Channel unlock period is 15 seconds. If no beacon or probe response + * has been received within 15 seconds on a locked channel then the channel + * remains locked. */ +#define TX_UNLOCK_PERIOD 15 + +/* CSA lock period is 15 seconds. If a CSA has been received on a channel in + * the last 15 seconds, the channel is locked */ +#define CSA_LOCK_PERIOD 15 +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! + */ +#define IWL4965_MAX_RATE (33) + +struct iwl_channel_info { + struct iwl_channel_tgd_info tgd; + struct iwl_channel_tgh_info tgh; + struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct iwl_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for + * FAT channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ + u8 phymode; /* MODE_IEEE80211{A,B,G} */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (indexes). 
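A rough illustration of how the independent limits listed above for requested_power (regulatory EEPROM, hardware clip-powers, spectrum management, user preference) combine; the helper below is hypothetical and not part of this header — the effective request is simply the most restrictive of the caps.

static s8 effective_requested_power(s8 regulatory_dbm, s8 clip_dbm, s8 user_dbm)
{
	s8 p = regulatory_dbm;

	if (clip_dbm < p)
		p = clip_dbm;		/* hardware clip-power limit */
	if (user_dbm < p)
		p = user_dbm;		/* user preference, e.g. iwconfig */
	return p;
}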
*/ + struct iwl_channel_power_info power_info[IWL4965_MAX_RATE]; + +#if IWL == 4965 + /* FAT channel info */ + s8 fat_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 fat_curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */ + s8 fat_min_power; /* always 0 */ + s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */ + u8 fat_flags; /* flags copied from EEPROM */ + u8 fat_extension_channel; +#endif + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct iwl_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; +}; + +struct iwl_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IWL_MAX_RATES]; +}; + +static inline int is_channel_valid(const struct iwl_channel_info *ch_info) +{ + if (ch_info == NULL) + return 0; + return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; +} + +static inline int is_channel_narrow(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0; +} + +static inline int is_channel_radar(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; +} + +static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info) +{ + return ch_info->phymode == MODE_IEEE80211A; +} + +static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info) +{ + return ((ch_info->phymode == MODE_IEEE80211B) || + (ch_info->phymode == MODE_IEEE80211G)); +} + +static inline int is_channel_passive(const struct iwl_channel_info *ch) +{ + return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; +} + +static inline int is_channel_ibss(const struct iwl_channel_info *ch) +{ + return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; +} + +extern const struct iwl_channel_info *iwl_get_channel_info( + const struct iwl_priv *priv, int phymode, u16 channel); + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h new file mode 100644 index 000000000000..9de8d7f6efa3 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -0,0 +1,1734 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_commands_h__ +#define __iwl_commands_h__ + +enum { + REPLY_ALIVE = 0x1, + REPLY_ERROR = 0x2, + + /* RXON and QOS commands */ + REPLY_RXON = 0x10, + REPLY_RXON_ASSOC = 0x11, + REPLY_QOS_PARAM = 0x13, + REPLY_RXON_TIMING = 0x14, + + /* Multi-Station support */ + REPLY_ADD_STA = 0x18, + REPLY_REMOVE_STA = 0x19, /* not used */ + REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ + + /* RX, TX, LEDs */ +#if IWL == 3945 + REPLY_3945_RX = 0x1b, /* 3945 only */ +#endif + REPLY_TX = 0x1c, + REPLY_RATE_SCALE = 0x47, /* 3945 only */ + REPLY_LEDS_CMD = 0x48, + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ + + /* 802.11h related */ + RADAR_NOTIFICATION = 0x70, /* not used */ + REPLY_QUIET_CMD = 0x71, /* not used */ + REPLY_CHANNEL_SWITCH = 0x72, + CHANNEL_SWITCH_NOTIFICATION = 0x73, + REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, + SPECTRUM_MEASURE_NOTIFICATION = 0x75, + + /* Power Management */ + POWER_TABLE_CMD = 0x77, + PM_SLEEP_NOTIFICATION = 0x7A, + PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, + + /* Scan commands and notifications */ + REPLY_SCAN_CMD = 0x80, + REPLY_SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, + + /* IBSS/AP commands */ + BEACON_NOTIFICATION = 0x90, + REPLY_TX_BEACON = 0x91, + WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */ + + /* Miscellaneous commands */ + QUIET_NOTIFICATION = 0x96, /* not used */ + REPLY_TX_PWR_TABLE_CMD = 0x97, + MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ + + /* BT config command */ + REPLY_BT_CONFIG = 0x9b, + + /* 4965 Statistics */ + REPLY_STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, + + /* RF-KILL commands and notifications */ + REPLY_CARD_STATE_CMD = 0xa0, + CARD_STATE_NOTIFICATION = 0xa1, + + /* Missed beacons notification */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + +#if IWL == 4965 + REPLY_CT_KILL_CONFIG_CMD = 0xa4, + SENSITIVITY_CMD = 0xa8, + 
REPLY_PHY_CALIBRATION_CMD = 0xb0, + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + REPLY_4965_RX = 0xc3, + REPLY_COMPRESSED_BA = 0xc5, +#endif + REPLY_MAX = 0xff +}; + +/****************************************************************************** + * (0) + * Header + * + *****************************************************************************/ + +#define IWL_CMD_FAILED_MSK 0x40 + +struct iwl_cmd_header { + u8 cmd; + u8 flags; + /* We have 15 LSB to use as we please (MSB indicates + * a frame Rx'd from the HW). We encode the following + * information into the sequence field: + * + * 0:7 index in fifo + * 8:13 fifo selection + * 14:14 bit indicating if this packet references the 'extra' + * storage at the end of the memory queue + * 15:15 (Rx indication) + * + */ + __le16 sequence; + + /* command data follows immediately */ + u8 data[0]; +} __attribute__ ((packed)); + +/****************************************************************************** + * (0a) + * Alive and Error Commands & Responses: + * + *****************************************************************************/ + +#define UCODE_VALID_OK __constant_cpu_to_le32(0x1) +#define INITIALIZE_SUBTYPE (9) + +/* + * REPLY_ALIVE = 0x1 (response only, not a command) + */ +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; +} __attribute__ ((packed)); + +struct iwl_init_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; + +#if IWL == 4965 + /* calibration values from "initialize" uCode */ + __le32 voltage; /* signed */ + __le32 therm_r1[2]; /* signed 1st for normal, 2nd for FAT channel */ + __le32 therm_r2[2]; /* signed */ + __le32 therm_r3[2]; /* signed */ + __le32 therm_r4[2]; /* signed */ + __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, + * 2 Tx chains */ +#endif +} __attribute__ ((packed)); + +union tsf { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; +}; + +/* + * REPLY_ERROR = 0x2 (response only, not a command) + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; +#if IWL == 3945 + __le16 reserved2; +#endif + __le32 error_info; + union tsf timestamp; +} __attribute__ ((packed)); + +/****************************************************************************** + * (1) + * RXON Commands & Responses: + * + *****************************************************************************/ + +/* + * Rx config defines & structure + */ +/* rx_config device types */ +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, +}; + +/* rx_config flags */ +/* band & modulation selection */ +#define RXON_FLG_BAND_24G_MSK __constant_cpu_to_le32(1 << 0) +#define RXON_FLG_CCK_MSK __constant_cpu_to_le32(1 << 1) +/* auto detection enable */ +#define RXON_FLG_AUTO_DETECT_MSK __constant_cpu_to_le32(1 << 2) +/* TGg protection when tx */ +#define RXON_FLG_TGG_PROTECT_MSK __constant_cpu_to_le32(1 << 3) +/* cck short slot & preamble */ +#define RXON_FLG_SHORT_SLOT_MSK __constant_cpu_to_le32(1 << 4) +#define RXON_FLG_SHORT_PREAMBLE_MSK __constant_cpu_to_le32(1 << 5) +/* antenna selection */ +#define RXON_FLG_DIS_DIV_MSK 
__constant_cpu_to_le32(1 << 7) +#define RXON_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0x0f00) +#define RXON_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) +#define RXON_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) +/* radar detection enable */ +#define RXON_FLG_RADAR_DETECT_MSK __constant_cpu_to_le32(1 << 12) +#define RXON_FLG_TGJ_NARROW_BAND_MSK __constant_cpu_to_le32(1 << 13) +/* rx response to host with 8-byte TSF +* (according to ON_AIR deassertion) */ +#define RXON_FLG_TSF2HOST_MSK __constant_cpu_to_le32(1 << 15) + +/* rx_config filter flags */ +/* accept all data frames */ +#define RXON_FILTER_PROMISC_MSK __constant_cpu_to_le32(1 << 0) +/* pass control & management to host */ +#define RXON_FILTER_CTL2HOST_MSK __constant_cpu_to_le32(1 << 1) +/* accept multi-cast */ +#define RXON_FILTER_ACCEPT_GRP_MSK __constant_cpu_to_le32(1 << 2) +/* don't decrypt uni-cast frames */ +#define RXON_FILTER_DIS_DECRYPT_MSK __constant_cpu_to_le32(1 << 3) +/* don't decrypt multi-cast frames */ +#define RXON_FILTER_DIS_GRP_DECRYPT_MSK __constant_cpu_to_le32(1 << 4) +/* STA is associated */ +#define RXON_FILTER_ASSOC_MSK __constant_cpu_to_le32(1 << 5) +/* transfer to host non bssid beacons in associated state */ +#define RXON_FILTER_BCON_AWARE_MSK __constant_cpu_to_le32(1 << 6) + +/* + * REPLY_RXON = 0x10 (command, has simple generic response) + */ +struct iwl_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; +#if IWL == 3945 + __le16 reserved4; +#elif IWL == 4965 + __le16 rx_chain; +#endif + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; +#if IWL == 3945 + __le16 reserved5; +#elif IWL == 4965 + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; +#endif +} __attribute__ ((packed)); + +/* + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + */ +struct iwl_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; +#if IWL == 4965 + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + __le16 rx_chain_select_flags; +#endif + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + */ +struct iwl_rxon_time_cmd { + union tsf timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwl_tx_power { + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ +} __attribute__ ((packed)); + +#if IWL == 3945 +struct iwl_power_per_rate { + u8 rate; /* plcp */ + struct iwl_tx_power tpc; + u8 reserved; +} __attribute__ ((packed)); + +#elif IWL == 4965 +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 +struct tx_power_dual_stream { + __le32 dw; +} __attribute__ ((packed)); + +struct iwl_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; +} __attribute__ ((packed)); +#endif + +/* + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + */ +struct iwl_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; +#if IWL == 3945 + struct iwl_power_per_rate power[IWL_MAX_RATES]; +#elif IWL == 4965 + struct iwl_tx_power_db tx_power; +#endif +} __attribute__ 
((packed)); + +/* + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + */ +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; /* 0 - OK, 1 - fail */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (2) + * Quality-of-Service (QOS) Commands & Responses: + * + *****************************************************************************/ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +} __attribute__ ((packed)); + +/* QoS flags defines */ +#define QOS_PARAM_FLG_UPDATE_EDCA_MSK __constant_cpu_to_le32(0x01) +#define QOS_PARAM_FLG_TGN_MSK __constant_cpu_to_le32(0x02) +#define QOS_PARAM_FLG_TXOP_TYPE_MSK __constant_cpu_to_le32(0x10) + +/* + * TXFIFO Queue number defines + */ +/* number of Access categories (AC) (EDCA), queues 0..3 */ +#define AC_NUM 4 + +/* + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + */ +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM]; +} __attribute__ ((packed)); + +/****************************************************************************** + * (3) + * Add/Modify Stations Commands & Responses: + * + *****************************************************************************/ +/* + * Multi station support + */ +#define IWL_AP_ID 0 +#define IWL_MULTICAST_ID 1 +#define IWL_STA_ID 2 + +#define IWL3945_BROADCAST_ID 24 +#define IWL3945_STATION_COUNT 25 + +#define IWL4965_BROADCAST_ID 31 +#define IWL4965_STATION_COUNT 32 + +#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ +#define IWL_INVALID_STATION 255 + +#if IWL == 3945 +#define STA_FLG_TX_RATE_MSK __constant_cpu_to_le32(1<<2); +#endif +#define STA_FLG_PWR_SAVE_MSK __constant_cpu_to_le32(1<<8); + +#define STA_CONTROL_MODIFY_MSK 0x01 + +/* key flags __le16*/ +#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x7) +#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0) +#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x1) +#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x2) +#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x3) + +#define STA_KEY_FLG_KEYID_POS 8 +#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800) + +/* modify flags */ +#define STA_MODIFY_KEY_MASK 0x01 +#define STA_MODIFY_TID_DISABLE_TX 0x02 +#define STA_MODIFY_TX_RATE_MSK 0x04 +#define STA_MODIFY_ADDBA_TID_MSK 0x08 +#define STA_MODIFY_DELBA_TID_MSK 0x10 +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +/* + * Antenna masks: + * bit14:15 01 B inactive, A active + * 10 B active, A inactive + * 11 Both active + */ +#define RATE_MCS_ANT_A_POS 14 +#define RATE_MCS_ANT_B_POS 15 +#define RATE_MCS_ANT_A_MSK 0x4000 +#define RATE_MCS_ANT_B_MSK 0x8000 +#define RATE_MCS_ANT_AB_MSK 0xc000 + +struct iwl_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + __le16 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ +} __attribute__ ((packed)); + +struct sta_id_modify { + u8 addr[ETH_ALEN]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +} __attribute__ ((packed)); + +/* + * REPLY_ADD_STA = 0x18 (command) + */ +struct iwl_addsta_cmd { + u8 mode; + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl_keyinfo key; + __le32 station_flags; + __le32 station_flags_msk; + __le16 tid_disable_tx; +#if IWL == 3945 + __le16 rate_n_flags; +#else + __le16 reserved1; +#endif + u8 
add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; +#if IWL == 4965 + __le32 reserved2; +#endif +} __attribute__ ((packed)); + +/* + * REPLY_ADD_STA = 0x18 (response) + */ +struct iwl_add_sta_resp { + u8 status; +} __attribute__ ((packed)); + +#define ADD_STA_SUCCESS_MSK 0x1 + +/****************************************************************************** + * (4) + * Rx Responses: + * + *****************************************************************************/ + +struct iwl_rx_frame_stats { + u8 phy_count; + u8 id; + u8 rssi; + u8 agc; + __le16 sig_avg; + __le16 noise_diff; + u8 payload[0]; +} __attribute__ ((packed)); + +struct iwl_rx_frame_hdr { + __le16 channel; + __le16 phy_flags; + u8 reserved1; + u8 rate; + __le16 len; + u8 payload[0]; +} __attribute__ ((packed)); + +#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0) +#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1) + +#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0) +#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1) +#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2) +#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0) + +#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) +#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) +#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) +#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) +#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) + +#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) +#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) +#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) +#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) +#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) + +struct iwl_rx_frame_end { + __le32 status; + __le64 timestamp; + __le32 beacon_timestamp; +} __attribute__ ((packed)); + +/* + * REPLY_3945_RX = 0x1b (response only, not a command) + * + * NOTE: DO NOT dereference from casts to this structure + * It is provided only for calculating minimum data set size. + * The actual offsets of the hdr and end are dynamic based on + * stats.phy_count + */ +struct iwl_rx_frame { + struct iwl_rx_frame_stats stats; + struct iwl_rx_frame_hdr hdr; + struct iwl_rx_frame_end end; +} __attribute__ ((packed)); + +/* Fixed (non-configurable) rx data from phy */ +#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IWL_AGC_DB_POS (7) +struct iwl4965_rx_non_cfg_phy { + __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ + __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ + u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ + u8 pad[0]; +} __attribute__ ((packed)); + +/* + * REPLY_4965_RX = 0xc3 (response only, not a command) + * Used only for legacy (non 11n) frames. + */ +#define RX_RES_PHY_CNT 14 +struct iwl4965_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ + u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ + u8 stat_id; /* configurable DSP phy data set ID */ + u8 reserved1; + __le64 timestamp; /* TSF at on air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le16 phy_flags; /* general phy flags: band, modulation, ... 
*/ + __le16 channel; /* channel number */ + __le16 non_cfg_phy[RX_RES_PHY_CNT]; /* upto 14 phy entries */ + __le32 reserved2; + __le32 rate_n_flags; + __le16 byte_count; /* frame's byte-count */ + __le16 reserved3; +} __attribute__ ((packed)); + +struct iwl4965_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (5) + * Tx Commands & Responses: + * + *****************************************************************************/ + +/* Tx flags */ +#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1) +#define TX_CMD_FLG_CTS_MSK __constant_cpu_to_le32(1 << 2) +#define TX_CMD_FLG_ACK_MSK __constant_cpu_to_le32(1 << 3) +#define TX_CMD_FLG_STA_RATE_MSK __constant_cpu_to_le32(1 << 4) +#define TX_CMD_FLG_IMM_BA_RSP_MASK __constant_cpu_to_le32(1 << 6) +#define TX_CMD_FLG_FULL_TXOP_PROT_MSK __constant_cpu_to_le32(1 << 7) +#define TX_CMD_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0xf00) +#define TX_CMD_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) +#define TX_CMD_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) + +/* ucode ignores BT priority for this frame */ +#define TX_CMD_FLG_BT_DIS_MSK __constant_cpu_to_le32(1 << 12) + +/* ucode overrides sequence control */ +#define TX_CMD_FLG_SEQ_CTL_MSK __constant_cpu_to_le32(1 << 13) + +/* signal that this frame is non-last MPDU */ +#define TX_CMD_FLG_MORE_FRAG_MSK __constant_cpu_to_le32(1 << 14) + +/* calculate TSF in outgoing frame */ +#define TX_CMD_FLG_TSF_MSK __constant_cpu_to_le32(1 << 16) + +/* activate TX calibration. */ +#define TX_CMD_FLG_CALIB_MSK __constant_cpu_to_le32(1 << 17) + +/* signals that 2 bytes pad was inserted + after the MAC header */ +#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20) + +/* HCCA-AP - disable duration overwriting. */ +#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25) + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x03 +#define TX_CMD_SEC_SHIFT 6 +#define TX_CMD_SEC_KEY128 0x08 + +/* + * TX command Frame life time + */ + +struct iwl_dram_scratch { + u8 try_cnt; + u8 bt_kill_cnt; + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_TX = 0x1c (command) + */ +struct iwl_tx_cmd { + __le16 len; + __le16 next_frame_len; + __le32 tx_flags; +#if IWL == 3945 + u8 rate; + u8 sta_id; + u8 tid_tspec; +#elif IWL == 4965 + struct iwl_dram_scratch scratch; + __le32 rate_n_flags; + u8 sta_id; +#endif + u8 sec_ctl; +#if IWL == 4965 + u8 initial_rate_index; + u8 reserved; +#endif + u8 key[16]; +#if IWL == 3945 + union { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; + } tkip_mic; + __le32 next_frame_info; +#elif IWL == 4965 + __le16 next_frame_flags; + __le16 reserved2; +#endif + union { + __le32 life_time; + __le32 attempt; + } stop_time; +#if IWL == 3945 + u8 supp_rates[2]; +#elif IWL == 4965 + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; +#endif + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ +#if IWL == 4965 + u8 tid_tspec; +#endif + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + __le16 driver_txop; + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __attribute__ ((packed)); + +/* TX command response is sent after *all* transmission attempts. 
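For orientation, a minimal check of the per-frame outcome using the TX_STATUS_* codes listed in the NOTES below; the helper name and its use are hypothetical, not taken from this patch.

static int iwl_tx_frame_acked(__le32 status)
{
	u32 s = le32_to_cpu(status) & TX_STATUS_MSK;	/* low byte holds the final status */

	return s == TX_STATUS_SUCCESS || s == TX_STATUS_DIRECT_DONE;
}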
+ * + * NOTES: + * + * TX_STATUS_FAIL_NEXT_FRAG + * + * If the fragment flag in the MAC header for the frame being transmitted + * is set and there is insufficient time to transmit the next frame, the + * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'. + * + * TX_STATUS_FIFO_UNDERRUN + * + * Indicates the host did not provide bytes to the FIFO fast enough while + * a TX was in progress. + * + * TX_STATUS_FAIL_MGMNT_ABORT + * + * This status is only possible if the ABORT ON MGMT RX parameter was + * set to true with the TX command. + * + * If the MSB of the status parameter is set then an abort sequence is + * required. This sequence consists of the host activating the TX Abort + * control line, and then waiting for the TX Abort command response. This + * indicates that a the device is no longer in a transmit state, and that the + * command FIFO has been cleared. The host must then deactivate the TX Abort + * control line. Receiving is still allowed in this case. + */ +enum { + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_STATUS_FAIL_MGMNT_ABORT = 0x85, + TX_STATUS_FAIL_NEXT_FRAG = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_STATUS_FAIL_TX_LOCKED = 0x90, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +#define TX_PACKET_MODE_REGULAR 0x0000 +#define TX_PACKET_MODE_BURST_SEQ 0x0100 +#define TX_PACKET_MODE_BURST_FIRST 0x0200 + +enum { + TX_POWER_PA_NOT_ACTIVE = 0x0, +}; + +enum { + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_DELAY_MSK = 0x00000040, + TX_STATUS_ABORT_MSK = 0x00000080, + TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ + TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ + TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ +}; + +/* ******************************* + * TX aggregation state + ******************************* */ + +enum { + AGG_TX_STATE_TRANSMITTED = 0x00, + AGG_TX_STATE_UNDERRUN_MSK = 0x01, + AGG_TX_STATE_BT_PRIO_MSK = 0x02, + AGG_TX_STATE_FEW_BYTES_MSK = 0x04, + AGG_TX_STATE_ABORT_MSK = 0x08, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40, + AGG_TX_STATE_SCD_QUERY_MSK = 0x80, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, + AGG_TX_STATE_RESPONSE_MSK = 0x1ff, + AGG_TX_STATE_DUMP_TX_MSK = 0x200, + AGG_TX_STATE_DELAY_TX_MSK = 0x400 +}; + +#define AGG_TX_STATE_LAST_SENT_MSK \ +(AGG_TX_STATE_LAST_SENT_TTL_MSK | \ + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) + +#define AGG_TX_STATE_TRY_CNT_POS 12 +#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 + +#define AGG_TX_STATE_SEQ_NUM_POS 16 +#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 + +/* + * REPLY_TX = 0x1c (response) + */ +#if IWL == 4965 +struct iwl_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 rate_n_flags; + __le16 wireless_media_time; + __le16 reserved; + __le32 pa_power1; + __le32 pa_power2; + __le32 status; /* TX status (for aggregation status of 1st frame) */ +} __attribute__ 
((packed)); + +#elif IWL == 3945 +struct iwl_tx_resp { + u8 failure_rts; + u8 failure_frame; + u8 bt_kill_count; + u8 rate; + __le32 wireless_media_time; + __le32 status; /* TX status (for aggregation status of 1st frame) */ +} __attribute__ ((packed)); +#endif + +/* + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + */ +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + u8 sta_id; + u8 tid; + __le16 ba_seq_ctl; + __le32 ba_bitmap0; + __le32 ba_bitmap1; + __le16 scd_flow; + __le16 scd_ssn; +} __attribute__ ((packed)); + +/* + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + */ +struct iwl_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; +#if IWL == 3945 + struct iwl_power_per_rate power[IWL_MAX_RATES]; +#elif IWL == 4965 + struct iwl_tx_power_db tx_power; +#endif +} __attribute__ ((packed)); + +#if IWL == 3945 +struct iwl_rate_scaling_info { + __le16 rate_n_flags; + u8 try_cnt; + u8 next_rate_index; +} __attribute__ ((packed)); + +/** + * struct iwl_rate_scaling_cmd - Rate Scaling Command & Response + * + * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) + * + * NOTE: The table of rates passed to the uCode via the + * RATE_SCALE command sets up the corresponding order of + * rates used for all related commands, including rate + * masks, etc. + * + * For example, if you set 9MB (PLCP 0x0f) as the first + * rate in the rate table, the bit mask for that rate + * when passed through ofdm_basic_rates on the REPLY_RXON + * command would be bit 0 (1<<0) + */ +struct iwl_rate_scaling_cmd { + u8 table_id; + u8 reserved[3]; + struct iwl_rate_scaling_info table[IWL_MAX_RATES]; +} __attribute__ ((packed)); + +#elif IWL == 4965 + +/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ +#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1<<0) + +#define LINK_QUAL_AC_NUM AC_NUM +#define LINK_QUAL_MAX_RETRY_NUM 16 + +#define LINK_QUAL_ANT_A_MSK (1<<0) +#define LINK_QUAL_ANT_B_MSK (1<<1) +#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + +struct iwl_link_qual_general_params { + u8 flags; + u8 mimo_delimiter; + u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 start_rate_index[LINK_QUAL_AC_NUM]; +} __attribute__ ((packed)); + +struct iwl_link_qual_agg_params { + __le16 agg_time_limit; + u8 agg_dis_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + */ +struct iwl_link_quality_cmd { + u8 sta_id; + u8 reserved1; + __le16 control; + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + struct { + __le32 rate_n_flags; + } rs_table[LINK_QUAL_MAX_RETRY_NUM]; + __le32 reserved2; +} __attribute__ ((packed)); +#endif + +/* + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + */ +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +} __attribute__ ((packed)); + +/****************************************************************************** + * (6) + * Spectrum Management (802.11h) Commands, Responses, Notifications: + * + *****************************************************************************/ + +/* + * Spectrum Management + */ +#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ + RXON_FILTER_CTL2HOST_MSK | \ + RXON_FILTER_ACCEPT_GRP_MSK | \ + RXON_FILTER_DIS_DECRYPT_MSK | \ + 
RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ + RXON_FILTER_ASSOC_MSK | \ + RXON_FILTER_BCON_AWARE_MSK) + +struct iwl_measure_channel { + __le32 duration; /* measurement duration in extended beacon + * format */ + u8 channel; /* channel to measure */ + u8 type; /* see enum iwl_measure_type */ + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + */ +struct iwl_spectrum_cmd { + __le16 len; /* number of bytes starting from token */ + u8 token; /* token id */ + u8 id; /* measurement id -- 0 or 1 */ + u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ + u8 periodic; /* 1 = periodic */ + __le16 path_loss_timeout; + __le32 start_time; /* start time in extended beacon format */ + __le32 reserved2; + __le32 flags; /* rxon flags */ + __le32 filter_flags; /* rxon filter flags */ + __le16 channel_count; /* minimum 1, maximum 10 */ + __le16 reserved3; + struct iwl_measure_channel channels[10]; +} __attribute__ ((packed)); + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + */ +struct iwl_spectrum_resp { + u8 token; + u8 id; /* id of the prior command replaced, or 0xff */ + __le16 status; /* 0 - command will be handled + * 1 - cannot handle (conflicts with another + * measurement) */ +} __attribute__ ((packed)); + +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, +}; + +enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, + /* 4-5 reserved */ + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, +}; + +#define NUM_ELEMENTS_IN_HISTOGRAM 8 + +struct iwl_measurement_histogram { + __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ + __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ +} __attribute__ ((packed)); + +/* clear channel availability counters */ +struct iwl_measurement_cca_counters { + __le32 ofdm; + __le32 cck; +} __attribute__ ((packed)); + +enum iwl_measure_type { + IWL_MEASURE_BASIC = (1 << 0), + IWL_MEASURE_CHANNEL_LOAD = (1 << 1), + IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IWL_MEASURE_FRAME = (1 << 4), + /* bits 5:6 are reserved */ + IWL_MEASURE_IDLE = (1 << 7), +}; + +/* + * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) + */ +struct iwl_spectrum_notification { + u8 id; /* measurement id -- 0 or 1 */ + u8 token; + u8 channel_index; /* index in measurement channel list */ + u8 state; /* 0 - start, 1 - stop */ + __le32 start_time; /* lower 32-bits of TSF */ + u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ + u8 channel; + u8 type; /* see enum iwl_measurement_type */ + u8 reserved1; + /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only + * valid if applicable for measurement type requested. 
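A sketch of the validity rule in the note above (hypothetical helper): histogram data is only meaningful when a histogram-type measurement was requested.

static int iwl_measure_has_histogram(u8 type)
{
	return (type & (IWL_MEASURE_HISTOGRAM_RPI |
			IWL_MEASURE_HISTOGRAM_NOISE)) != 0;
}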
*/ + __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */ + __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */ + __le32 cca_time; /* channel load time in usecs */ + u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - + * unidentified */ + u8 reserved2[3]; + struct iwl_measurement_histogram histogram; + __le32 stop_time; /* lower 32-bits of TSF */ + __le32 status; /* see iwl_measurement_status */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (7) + * Power Management Commands, Responses, Notifications: + * + *****************************************************************************/ + +/** + * struct iwl_powertable_cmd - Power Table Command + * @flags: See below: + * + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * PM allow: + * bit 0 - '0' Driver not allow power management + * '1' Driver allow PM (use rest of parameters) + * uCode send sleep notifications: + * bit 1 - '0' Don't send sleep notification + * '1' send sleep notification (SEND_PM_NOTIFICATION) + * Sleep over DTIM + * bit 2 - '0' PM have to walk up every DTIM + * '1' PM could sleep over DTIM till listen Interval. + * PCI power managed + * bit 3 - '0' (PCI_LINK_CTRL & 0x1) + * '1' !(PCI_LINK_CTRL & 0x1) + * Force sleep Modes + * bit 31/30- '00' use both mac/xtal sleeps + * '01' force Mac sleep + * '10' force xtal sleep + * '11' Illegal set + * + * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then + * ucode assume sleep over DTIM is allowed and we don't need to wakeup + * for every DTIM. + */ +#define IWL_POWER_VEC_SIZE 5 + + +#if IWL == 3945 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le32(1<<0) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le32(1<<2) +#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le32(1<<3) +struct iwl_powertable_cmd { + __le32 flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; +} __attribute__((packed)); + +#elif IWL == 4965 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1<<0) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1<<2) +#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1<<3) + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; + u8 debug_flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 keep_alive_beacons; +} __attribute__ ((packed)); +#endif + +/* + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * 3945 and 4965 identical. + */ +struct iwl_sleep_notification { + u8 pm_sleep_mode; + u8 pm_wakeup_src; + __le16 reserved; + __le32 sleep_time; + __le32 tsf_low; + __le32 bcon_timer; +} __attribute__ ((packed)); + +/* Sleep states. 3945 and 4965 identical. 
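A hypothetical illustration of composing the POWER_TABLE_CMD flag bits documented above (bit 0 allow PM, bit 2 sleep over DTIM, bit 3 PCI power managed); the helper is not part of this patch.

static void iwl_power_allow_sleep(struct iwl_powertable_cmd *cmd, int pci_pm)
{
	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		     IWL_POWER_SLEEP_OVER_DTIM_MSK;
	if (pci_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;
}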
*/ +enum { + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, + /* 3 reserved */ + IWL_PM_NUM_OF_MODES = 12, +}; + +/* + * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response) + */ +#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */ +#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */ +#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ +struct iwl_card_state_cmd { + __le32 status; /* CARD_STATE_CMD_* request new power state */ +} __attribute__ ((packed)); + +/* + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + */ +struct iwl_card_state_notif { + __le32 flags; +} __attribute__ ((packed)); + +#define HW_CARD_DISABLED 0x01 +#define SW_CARD_DISABLED 0x02 +#define RF_CARD_DISABLED 0x04 +#define RXON_CARD_DISABLED 0x10 + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __attribute__ ((packed)); + +/****************************************************************************** + * (8) + * Scan Commands, Responses, Notifications: + * + *****************************************************************************/ + +struct iwl_scan_channel { + /* type is defined as: + * 0:0 active (0 - passive) + * 1:4 SSID direct + * If 1 is set then corresponding SSID IE is transmitted in probe + * 5:7 reserved + */ + u8 type; + u8 channel; + struct iwl_tx_power tpc; + __le16 active_dwell; + __le16 passive_dwell; +} __attribute__ ((packed)); + +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +} __attribute__ ((packed)); + +#define PROBE_OPTION_MAX 0x4 +#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF) +#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1) +#define IWL_MAX_SCAN_SIZE 1024 + +/* + * REPLY_SCAN_CMD = 0x80 (command) + */ +struct iwl_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; + __le16 quiet_time; /* dwell only this long on quiet chnl + * (active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ +#if IWL == 3945 + __le16 reserved1; +#elif IWL == 4965 + __le16 rx_chain; +#endif + __le32 max_out_time; /* max usec to be out of associated (service) + * chnl */ + __le32 suspend_time; /* pause scan this long when returning to svc + * chnl. + * 3945 -- 31:24 # beacons, 19:0 additional usec, + * 4965 -- 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; + __le32 filter_flags; + + struct iwl_tx_cmd tx_cmd; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + + u8 data[0]; + /* + * The channels start after the probe request payload and are of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * can not mix 2.4GHz channels and 5.2GHz channels and must + * request a scan multiple times (not concurrently) + * + */ +} __attribute__ ((packed)); + +/* Can abort will notify by complete notification with abort status. 
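A rough sketch of the variable-length layout described for REPLY_SCAN_CMD above (hypothetical helper): the buffer handed to the device is the fixed command, then the probe request payload, then the channel list, and the total must stay within IWL_MAX_SCAN_SIZE.

static size_t iwl_scan_cmd_total_len(size_t probe_req_len, u8 channel_count)
{
	return sizeof(struct iwl_scan_cmd) +
	       probe_req_len +
	       channel_count * sizeof(struct iwl_scan_channel);
}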
*/ +#define CAN_ABORT_STATUS __constant_cpu_to_le32(0x1) +/* complete notification statuses */ +#define ABORT_STATUS 0x2 + +/* + * REPLY_SCAN_CMD = 0x80 (response) + */ +struct iwl_scanreq_notification { + __le32 status; /* 1: okay, 2: cannot fulfill request */ +} __attribute__ ((packed)); + +/* + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + */ +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __attribute__ ((packed)); + +#define SCAN_OWNER_STATUS 0x1; +#define MEASURE_OWNER_STATUS 0x2; + +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +/* + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + */ +struct iwl_scanresults_notification { + u8 channel; + u8 band; + u8 reserved[2]; + __le32 tsf_low; + __le32 tsf_high; + __le32 statistics[NUMBER_OF_STATISTICS]; +} __attribute__ ((packed)); + +/* + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + */ +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 reserved; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (9) + * IBSS/AP Commands and Notifications: + * + *****************************************************************************/ + +/* + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + */ +struct iwl_beacon_notif { + struct iwl_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __attribute__ ((packed)); + +/* + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + */ +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (10) + * Statistics Commands and Notifications: + * + *****************************************************************************/ + +#define IWL_TEMP_CONVERT 260 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/* Used for passing to driver number of successes and failures per rate */ +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __attribute__ ((packed)); + +/* statistics command response */ + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; +#if IWL == 4965 + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +#endif +} __attribute__ ((packed)); + +#if IWL == 4965 +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 
agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 reserved2; +} __attribute__ ((packed)); +#endif + +struct statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ +#if IWL == 4965 + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. */ + __le32 channel_load; /* counts RX Enable time */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +#endif +} __attribute__ ((packed)); + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; +#if IWL == 4965 + struct statistics_rx_ht_phy ofdm_ht; +#endif +} __attribute__ ((packed)); + +#if IWL == 4965 +struct statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __le32 reserved2; + __le32 reserved3; +} __attribute__ ((packed)); +#endif + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; +#if IWL == 4965 + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; +#endif +} __attribute__ ((packed)); + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 reserved[4]; +} __attribute__ ((packed)); + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; +#if IWL == 4965 + __le32 reserved1; + __le32 reserved2; +#endif +} __attribute__ ((packed)); + +struct statistics_general { + __le32 temperature; +#if IWL == 4965 + __le32 temperature_m; +#endif + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; +#if IWL == 4965 + __le32 rx_enable_counter; + __le32 reserved1; + __le32 reserved2; + __le32 reserved3; +#endif +} __attribute__ ((packed)); + +/* + * REPLY_STATISTICS_CMD = 0x9c, + * 3945 and 4965 identical. + * + * This command triggers an immediate response containing uCode statistics. 
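The statistics blocks above are what this command (and the notification described below) carries; for illustration, the 4965-only beacon energy/silence fields are the kind of raw input the sensitivity and chain-noise calibration declared in iwl-4965.h accumulates across beacons. The helper is hypothetical and applies to a 4965 build only.

static void iwl_get_beacon_energies(const struct statistics_rx_non_phy *g,
				    u32 *a, u32 *b, u32 *c)
{
	*a = le32_to_cpu(g->beacon_energy_a);
	*b = le32_to_cpu(g->beacon_energy_b);
	*c = le32_to_cpu(g->beacon_energy_c);
}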
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * + * If the CLEAR_STATS configuration flag is set, uCode will clear its + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * + * If the DISABLE_NOTIF configuration flag is set, uCode will not issue + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. + */ +#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */ +#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */ +struct iwl_statistics_cmd { + __le32 configuration_flags; /* IWL_STATS_CONF_* */ +} __attribute__ ((packed)); + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. + */ +#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2) +#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8) +struct iwl_notif_statistics { + __le32 flag; + struct statistics_rx rx; + struct statistics_tx tx; + struct statistics_general general; +} __attribute__ ((packed)); + + +/* + * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) + */ +/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row, + * then this notification will be sent. */ +#define CONSECUTIVE_MISSED_BCONS_TH 20 + +struct iwl_missed_beacon_notif { + __le32 consequtive_missed_beacons; + __le32 total_missed_becons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __attribute__ ((packed)); + +/****************************************************************************** + * (11) + * Rx Calibration Commands: + * + *****************************************************************************/ + +#define PHY_CALIBRATE_DIFF_GAIN_CMD (7) +#define HD_TABLE_SIZE (11) + +struct iwl_sensitivity_cmd { + __le16 control; + __le16 table[HD_TABLE_SIZE]; +} __attribute__ ((packed)); + +struct iwl_calibration_cmd { + u8 opCode; + u8 flags; + __le16 reserved; + s8 diff_gain_a; + s8 diff_gain_b; + s8 diff_gain_c; + u8 reserved1; +} __attribute__ ((packed)); + +/****************************************************************************** + * (12) + * Miscellaneous Commands: + * + *****************************************************************************/ + +/* + * LEDs Command & Response + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * + * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), + * this command turns it on or off, or sets up a periodic blinking cycle. 
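+ *
+ * As a purely illustrative sketch (the values below are hypothetical, not
+ * taken from firmware documentation): to blink the Link LED roughly twice
+ * per second a driver could fill the command as
+ *   { .interval = cpu_to_le32(250000), .id = 2, .on = 1, .off = 1 },
+ * i.e. 250 ms on, 250 ms off.  Per the field descriptions below, .on = 0
+ * turns the LED off regardless of .off, and .off = 0 with .on > 0 leaves
+ * the LED on solid.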
+ */ +struct iwl_led_cmd { + __le32 interval; /* "interval" in uSec */ + u8 id; /* 1: Activity, 2: Link, 3: Tech */ + u8 off; /* # intervals off while blinking; + * "0", with >0 "on" value, turns LED on */ + u8 on; /* # intervals on while blinking; + * "0", regardless of "off", turns LED off */ + u8 reserved; +} __attribute__ ((packed)); + +/****************************************************************************** + * (13) + * Union of all expected notifications/responses: + * + *****************************************************************************/ + +struct iwl_rx_packet { + __le32 len; + struct iwl_cmd_header hdr; + union { + struct iwl_alive_resp alive_frame; + struct iwl_rx_frame rx_frame; + struct iwl_tx_resp tx_resp; + struct iwl_spectrum_notification spectrum_notif; + struct iwl_csa_notification csa_notif; + struct iwl_error_resp err_resp; + struct iwl_card_state_notif card_state_notif; + struct iwl_beacon_notif beacon_status; + struct iwl_add_sta_resp add_sta; + struct iwl_sleep_notification sleep_notif; + struct iwl_spectrum_resp spectrum; + struct iwl_notif_statistics stats; +#if IWL == 4965 + struct iwl_compressed_ba_resp compressed_ba; + struct iwl_missed_beacon_notif missed_beacon; +#endif + __le32 status; + u8 raw[0]; + } u; +} __attribute__ ((packed)); + +#define IWL_RX_FRAME_SIZE (4 + sizeof(struct iwl_rx_frame)) + +#endif /* __iwl_commands_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h new file mode 100644 index 000000000000..72318d78957e --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_debug_h__ +#define __iwl_debug_h__ + +#ifdef CONFIG_IWLWIFI_DEBUG +extern u32 iwl_debug_level; +#define IWL_DEBUG(level, fmt, args...) \ +do { if (iwl_debug_level & (level)) \ + printk(KERN_ERR DRV_NAME": %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) + +#define IWL_DEBUG_LIMIT(level, fmt, args...) \ +do { if ((iwl_debug_level & (level)) && net_ratelimit()) \ + printk(KERN_ERR DRV_NAME": %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) +#else +static inline void IWL_DEBUG(int level, const char *fmt, ...) +{ +} +static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...) 
+{ +} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +/* + * To use the debug system; + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of: + * + * #define IWL_DL_xxxx VALUE + * + * shifting value to the left one bit from the previous entry. xxxx should be + * the name of the classification (for example, WEP) + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * To add your debug level to the list of levels seen when you perform + * + * % cat /proc/net/iwl/debug_level + * + * you simply need to add your entry to the iwl_debug_levels array. + * + * If you do not see debug_level in /proc/net/iwl then you do not have + * CONFIG_IWLWIFI_DEBUG defined in your kernel configuration + * + */ + +#define IWL_DL_INFO (1<<0) +#define IWL_DL_MAC80211 (1<<1) +#define IWL_DL_HOST_COMMAND (1<<2) +#define IWL_DL_STATE (1<<3) + +#define IWL_DL_RADIO (1<<7) +#define IWL_DL_POWER (1<<8) +#define IWL_DL_TEMP (1<<9) + +#define IWL_DL_NOTIF (1<<10) +#define IWL_DL_SCAN (1<<11) +#define IWL_DL_ASSOC (1<<12) +#define IWL_DL_DROP (1<<13) + +#define IWL_DL_TXPOWER (1<<14) + +#define IWL_DL_AP (1<<15) + +#define IWL_DL_FW (1<<16) +#define IWL_DL_RF_KILL (1<<17) +#define IWL_DL_FW_ERRORS (1<<18) + +#define IWL_DL_LED (1<<19) + +#define IWL_DL_RATE (1<<20) + +#define IWL_DL_CALIB (1<<21) +#define IWL_DL_WEP (1<<22) +#define IWL_DL_TX (1<<23) +#define IWL_DL_RX (1<<24) +#define IWL_DL_ISR (1<<25) +#define IWL_DL_HT (1<<26) +#define IWL_DL_IO (1<<27) +#define IWL_DL_11H (1<<28) + +#define IWL_DL_STATS (1<<29) +#define IWL_DL_TX_REPLY (1<<30) +#define IWL_DL_QOS (1<<31) + +#define IWL_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a) +#define IWL_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a) +#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a) + +#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(f, a...) IWL_DEBUG(IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a) +#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_DROP(f, a...) IWL_DEBUG(IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_AP(f, a...) IWL_DEBUG(IWL_DL_AP, f, ## a) +#define IWL_DEBUG_TXPOWER(f, a...) IWL_DEBUG(IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_IO(f, a...) IWL_DEBUG(IWL_DL_IO, f, ## a) +#define IWL_DEBUG_RATE(f, a...) IWL_DEBUG(IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_NOTIF(f, a...) IWL_DEBUG(IWL_DL_NOTIF, f, ## a) +#define IWL_DEBUG_ASSOC(f, a...) IWL_DEBUG(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(f, a...) \ + IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(f, a...) 
IWL_DEBUG(IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(f, a...) IWL_DEBUG(IWL_DL_11H, f, ## a) + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h new file mode 100644 index 000000000000..e473c97e3f4f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -0,0 +1,336 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_eeprom_h__ +#define __iwl_eeprom_h__ + +/* + * This file defines EEPROM related constants, enums, and inline functions. + * + */ + +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ +#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */ +/* EEPROM field values */ +#define ANTENNA_SWITCH_NORMAL 0 +#define ANTENNA_SWITCH_INVERSE 1 + +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), + EEPROM_CHANNEL_NARROW = (1 << 6), + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* EEPROM field lengths */ +#define EEPROM_BOARD_PBA_NUMBER_LENGTH 11 + +/* EEPROM field lengths */ +#define EEPROM_BOARD_PBA_NUMBER_LENGTH 11 +#define EEPROM_REGULATORY_SKU_ID_LENGTH 4 +#define EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH 14 +#define EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH 13 +#define EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH 12 +#define EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH 11 +#define EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH 6 + +#if IWL == 3945 +#define EEPROM_REGULATORY_CHANNELS_LENGTH ( \ + EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH) +#elif IWL == 4965 +#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS_LENGTH 7 +#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS_LENGTH 11 +#define EEPROM_REGULATORY_CHANNELS_LENGTH ( \ + EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND_24_FAT_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND_52_FAT_CHANNELS_LENGTH) +#endif + +#define EEPROM_REGULATORY_NUMBER_OF_BANDS 5 + +/* SKU Capabilities */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) +#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) + +/* *regulatory* channel data from eeprom, one for each channel */ +struct iwl_eeprom_channel { + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __attribute__ ((packed)); + +/* + * Mapping of a Tx power level, at factory calibration temperature, + * to a radio/DSP gain table index. + * One for each of 5 "sample" power levels in each band. + * v_det is measured at the factory, using the 3945's built-in power amplifier + * (PA) output voltage detector. This same detector is used during Tx of + * long packets in normal operation to provide feedback as to proper output + * level. 
+ * Data copied from EEPROM. + */ +struct iwl_eeprom_txpower_sample { + u8 gain_index; /* index into power (gain) setup table ... */ + s8 power; /* ... for this pwr level for this chnl group */ + u16 v_det; /* PA output voltage */ +} __attribute__ ((packed)); + +/* + * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. + * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). + * Tx power setup code interpolates between the 5 "sample" power levels + * to determine the nominal setup for a requested power level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl_eeprom_txpower_group { + struct iwl_eeprom_txpower_sample samples[5]; /* 5 power levels */ + s32 a, b, c, d, e; /* coefficients for voltage->power + * formula (signed) */ + s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on + * frequency (signed) */ + s8 saturation_power; /* highest power possible by h/w in this + * band */ + u8 group_channel; /* "representative" channel # in this band */ + s16 temperature; /* h/w temperature at factory calib this band + * (signed) */ +} __attribute__ ((packed)); + +/* + * Temperature-based Tx-power compensation data, not band-specific. + * These coefficients are use to modify a/b/c/d/e coeffs based on + * difference between current temperature and factory calib temperature. + * Data copied from EEPROM. + */ +struct iwl_eeprom_temperature_corr { + u32 Ta; + u32 Tb; + u32 Tc; + u32 Td; + u32 Te; +} __attribute__ ((packed)); + +#if IWL == 4965 +#define EEPROM_TX_POWER_TX_CHAINS (2) +#define EEPROM_TX_POWER_BANDS (8) +#define EEPROM_TX_POWER_MEASUREMENTS (3) +#define EEPROM_TX_POWER_VERSION (2) +#define EEPROM_TX_POWER_VERSION_NEW (5) + +struct iwl_eeprom_calib_measure { + u8 temperature; + u8 gain_idx; + u8 actual_pow; + s8 pa_det; +} __attribute__ ((packed)); + +struct iwl_eeprom_calib_ch_info { + u8 ch_num; + struct iwl_eeprom_calib_measure measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __attribute__ ((packed)); + +struct iwl_eeprom_calib_subband_info { + u8 ch_from; + u8 ch_to; + struct iwl_eeprom_calib_ch_info ch1; + struct iwl_eeprom_calib_ch_info ch2; +} __attribute__ ((packed)); + +struct iwl_eeprom_calib_info { + u8 saturation_power24; + u8 saturation_power52; + s16 voltage; /* signed */ + struct iwl_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS]; +} __attribute__ ((packed)); + +#endif + +struct iwl_eeprom { + u8 reserved0[16]; +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ + u16 device_id; /* abs.ofs: 16 */ + u8 reserved1[2]; +#define EEPROM_PMC (2*0x0A) /* 2 bytes */ + u16 pmc; /* abs.ofs: 20 */ + u8 reserved2[20]; +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ + u8 mac_address[6]; /* abs.ofs: 42 */ + u8 reserved3[58]; +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ + u16 board_revision; /* abs.ofs: 106 */ + u8 reserved4[11]; +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ + u8 board_pba_number[9]; /* abs.ofs: 119 */ + u8 reserved5[8]; +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ + u16 version; /* abs.ofs: 136 */ +#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ + u8 sku_cap; /* abs.ofs: 138 */ +#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */ + u8 leds_mode; /* abs.ofs: 139 */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ + u16 oem_mode; +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ + u16 wowlan_mode; /* abs.ofs: 142 */ +#define EEPROM_LEDS_TIME_INTERVAL (2*0x48) /* 2 bytes */ + u16 leds_time_interval; /* abs.ofs: 144 */ +#define EEPROM_LEDS_OFF_TIME (2*0x49) 
/* 1 bytes */ + u8 leds_off_time; /* abs.ofs: 146 */ +#define EEPROM_LEDS_ON_TIME (2*0x49+1) /* 1 bytes */ + u8 leds_on_time; /* abs.ofs: 147 */ +#define EEPROM_ALMGOR_M_VERSION (2*0x4A) /* 1 bytes */ + u8 almgor_m_version; /* abs.ofs: 148 */ +#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */ + u8 antenna_switch_type; /* abs.ofs: 149 */ +#if IWL == 3945 + u8 reserved6[42]; +#else + u8 reserved6[8]; +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ + u16 board_revision_4965; /* abs.ofs: 158 */ + u8 reserved7[13]; +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + u8 board_pba_number_4965[9]; /* abs.ofs: 173 */ + u8 reserved8[10]; +#endif +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ + u8 sku_id[4]; /* abs.ofs: 192 */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ + u16 band_1_count; /* abs.ofs: 196 */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ + u16 band_2_count; /* abs.ofs: 226 */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ + u16 band_3_count; /* abs.ofs: 254 */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ + u16 band_4_count; /* abs.ofs: 280 */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ + u16 band_5_count; /* abs.ofs: 304 */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ + +/* From here on out the EEPROM diverges between the 4965 and the 3945 */ +#if IWL == 3945 + + u8 reserved9[194]; + +#define EEPROM_TXPOWER_CALIB_GROUP0 0x200 +#define EEPROM_TXPOWER_CALIB_GROUP1 0x240 +#define EEPROM_TXPOWER_CALIB_GROUP2 0x280 +#define EEPROM_TXPOWER_CALIB_GROUP3 0x2c0 +#define EEPROM_TXPOWER_CALIB_GROUP4 0x300 +#define IWL_NUM_TX_CALIB_GROUPS 5 + struct iwl_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; +/* abs.ofs: 512 */ +#define EEPROM_CALIB_TEMPERATURE_CORRECT 0x340 + struct iwl_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ + u8 reserved16[172]; /* fill out to full 1024 byte block */ + +/* 4965AGN adds fat channel support */ +#elif IWL == 4965 + + u8 reserved10[2]; +#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */ + struct iwl_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */ + u8 reserved11[2]; +#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */ + struct iwl_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */ + u8 reserved12[6]; +#define EEPROM_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ + u16 calib_version; /* abs.ofs: 364 */ + u8 reserved13[2]; +#define EEPROM_SATURATION_POWER_OFFSET (2*0xB8) /* 2 bytes */ + u16 satruation_power; /* abs.ofs: 368 */ + u8 reserved14[94]; +#define EEPROM_IWL_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ + struct iwl_eeprom_calib_info calib_info; /* abs.ofs: 464 */ + + u8 reserved16[140]; /* fill out to full 1024 byte block */ + +#endif + +} __attribute__ ((packed)); + +#define IWL_EEPROM_IMAGE_SIZE 1024 + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h 
b/drivers/net/wireless/iwlwifi/iwl-helpers.h new file mode 100644 index 000000000000..e2a8d95ad9cd --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -0,0 +1,255 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_helpers_h__ +#define __iwl_helpers_h__ + +#include <linux/ctype.h> + +/* + * The structures defined by the hardware/uCode interface + * have bit-wise operations. For each bit-field there is + * a data symbol in the structure, the start bit position + * and the length of the bit-field. + * + * iwl_get_bits and iwl_set_bits will return or set the + * appropriate bits on a 32-bit value. + * + * IWL_GET_BITS and IWL_SET_BITS use symbol expansion to + * expand out to the appropriate call to iwl_get_bits + * and iwl_set_bits without having to reference all of the + * numerical constants and defines provided in the hardware + * definition + */ + +/** + * iwl_get_bits - Extract a hardware bit-field value + * @src: source hardware value (__le32) + * @pos: bit-position (0-based) of first bit of value + * @len: length of bit-field + * + * iwl_get_bits will return the bit-field in cpu endian ordering. + * + * NOTE: If used from IWL_GET_BITS then pos and len are compile-constants and + * will collapse to minimal code by the compiler. + */ +static inline u32 iwl_get_bits(__le32 src, u8 pos, u8 len) +{ + u32 tmp = le32_to_cpu(src); + + tmp >>= pos; + tmp &= (1UL << len) - 1; + return tmp; +} + +/** + * iwl_set_bits - Set a hardware bit-field value + * @dst: Address of __le32 hardware value + * @pos: bit-position (0-based) of first bit of value + * @len: length of bit-field + * @val: cpu endian value to encode into the bit-field + * + * iwl_set_bits will encode val into dst, masked to be len bits long at bit + * position pos. + * + * NOTE: If used IWL_SET_BITS pos and len will be compile-constants and + * will collapse to minimal code by the compiler. 
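+ *
+ * As a sketch of the masking performed below: iwl_set_bits(&reg, 8, 4, 0xA)
+ * on a register holding CPU value 0x0000FF00 clears bits 11:8 and ORs in
+ * 0xA at that position, leaving 0x0000FA00 (stored back little-endian).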
+ */ +static inline void iwl_set_bits(__le32 *dst, u8 pos, u8 len, int val) +{ + u32 tmp = le32_to_cpu(*dst); + + tmp &= ~(((1UL << len) - 1) << pos); + tmp |= (val & ((1UL << len) - 1)) << pos; + *dst = cpu_to_le32(tmp); +} + +static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val) +{ + u16 tmp = le16_to_cpu(*dst); + + tmp &= ~((1UL << (pos + len)) - (1UL << pos)); + tmp |= (val & ((1UL << len) - 1)) << pos; + *dst = cpu_to_le16(tmp); +} + +/* + * The bit-field definitions in iwl-xxxx-hw.h are in the form of: + * + * struct example { + * __le32 val1; + * #define IWL_name_POS 8 + * #define IWL_name_LEN 4 + * #define IWL_name_SYM val1 + * }; + * + * The IWL_SET_BITS and IWL_GET_BITS macros are provided to allow the driver + * to call: + * + * struct example bar; + * u32 val = IWL_GET_BITS(bar, name); + * val = val * 2; + * IWL_SET_BITS(bar, name, val); + * + * All cpu / host ordering, masking, and shifts are performed by the macros + * and iwl_{get,set}_bits. + * + */ +#define IWL_SET_BITS(s, sym, v) \ + iwl_set_bits(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \ + IWL_ ## sym ## _LEN, (v)) + +#define IWL_SET_BITS16(s, sym, v) \ + iwl_set_bits16(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \ + IWL_ ## sym ## _LEN, (v)) + +#define IWL_GET_BITS(s, sym) \ + iwl_get_bits((s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \ + IWL_ ## sym ## _LEN) + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + +#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010 + +static inline struct ieee80211_conf *ieee80211_get_hw_conf( + struct ieee80211_hw *hw) +{ + return &hw->conf; +} + +#define QOS_CONTROL_LEN 2 + +#define IEEE80211_STYPE_BACK_REQ 0x0080 +#define IEEE80211_STYPE_BACK 0x0090 + + +static inline int ieee80211_is_management(u16 fc) +{ + return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT; +} + +static inline int ieee80211_is_control(u16 fc) +{ + return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL; +} + +static inline int ieee80211_is_data(u16 fc) +{ + return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA; +} + +static inline int ieee80211_is_back_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ); +} + +static inline int ieee80211_is_probe_response(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP); +} + +static inline int ieee80211_is_probe_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_REQ); +} + +static inline int ieee80211_is_beacon(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON); +} + +static inline int ieee80211_is_atim(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ATIM); +} + +static inline int ieee80211_is_assoc_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_assoc_response(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_RESP); +} + +static inline int ieee80211_is_auth(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == 
IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_deauth(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_disassoc(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_reassoc_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ); +} + +static inline int ieee80211_is_reassoc_response(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP); +} + +static inline int iwl_check_bits(unsigned long field, unsigned long mask) +{ + return ((field & mask) == mask) ? 1 : 0; +} + +static inline unsigned long elapsed_jiffies(unsigned long start, + unsigned long end) +{ + if (end > start) + return end - start; + + return end + (MAX_JIFFY_OFFSET - start); +} + +#endif /* __iwl_helpers_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-hw.h b/drivers/net/wireless/iwlwifi/iwl-hw.h new file mode 100644 index 000000000000..1aa6fcd39a5e --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-hw.h @@ -0,0 +1,537 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwlwifi_hw_h__ +#define __iwlwifi_hw_h__ + +/* + * This file defines hardware constants common to 3945 and 4965. + * + * Device-specific constants are defined in iwl-3945-hw.h and iwl-4965-hw.h, + * although this file contains a few definitions for which the .c + * implementation is the same for 3945 and 4965, except for the value of + * a constant. + * + * uCode API constants are defined in iwl-commands.h. + * + * NOTE: DO NOT PUT OS IMPLEMENTATION-SPECIFIC DECLARATIONS HERE + * + * The iwl-*hw.h (and files they include) files should remain OS/driver + * implementation independent, declaring only the hardware interface. + */ + +/* uCode queue management definitions */ +#define IWL_CMD_QUEUE_NUM 4 +#define IWL_CMD_FIFO_NUM 4 +#define IWL_BACK_QUEUE_FIRST_ID 7 + +/* Tx rates */ +#define IWL_CCK_RATES 4 +#define IWL_OFDM_RATES 8 + +#if IWL == 3945 +#define IWL_HT_RATES 0 +#elif IWL == 4965 +#define IWL_HT_RATES 16 +#endif + +#define IWL_MAX_RATES (IWL_CCK_RATES+IWL_OFDM_RATES+IWL_HT_RATES) + +/* Time constants */ +#define SHORT_SLOT_TIME 9 +#define LONG_SLOT_TIME 20 + +/* RSSI to dBm */ +#if IWL == 3945 +#define IWL_RSSI_OFFSET 95 +#elif IWL == 4965 +#define IWL_RSSI_OFFSET 44 +#endif + +#include "iwl-eeprom.h" +#include "iwl-commands.h" + +#define PCI_LINK_CTRL 0x0F0 +#define PCI_POWER_SOURCE 0x0C8 +#define PCI_REG_WUM8 0x0E8 +#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) + +/*=== CSR (control and status registers) ===*/ +#define CSR_BASE (0x000) + +#define CSR_SW_VER (CSR_BASE+0x000) +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_GP_CNTRL (CSR_BASE+0x024) +#define CSR_HW_REV (CSR_BASE+0x028) +#define CSR_EEPROM_REG (CSR_BASE+0x02c) +#define CSR_EEPROM_GP (CSR_BASE+0x030) +#define CSR_GP_UCODE (CSR_BASE+0x044) +#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) +#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) +#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) +#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) +#define CSR_LED_REG (CSR_BASE+0x094) +#define CSR_DRAM_INT_TBL_CTL (CSR_BASE+0x0A0) +#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) +#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) +#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) + +/* HW I/F configuration */ +#define 
CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM (0x00000200) +#define CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400) +#define CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800) +#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) +#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) +#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) + +/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), + * acknowledged (reset) by host writing "1" to flagged bits. */ +#define CSR_INT_BIT_FH_RX (1<<31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1<<29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_DNLD (1<<28) /* uCode Download */ +#define CSR_INT_BIT_FH_TX (1<<27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_MAC_CLK_ACTV (1<<26) /* NIC controller's clock toggled on/off */ +#define CSR_INT_BIT_SW_ERR (1<<25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1<<7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1<<6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1<<3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1<<1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1<<0) /* uCode interrupts once it initializes */ + +#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ + CSR_INT_BIT_HW_ERR | \ + CSR_INT_BIT_FH_TX | \ + CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_RF_KILL | \ + CSR_INT_BIT_SW_RX | \ + CSR_INT_BIT_WAKEUP | \ + CSR_INT_BIT_ALIVE) + +/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ +#define CSR_FH_INT_BIT_ERR (1<<31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1<<30) /* High priority Rx, bypass coalescing */ +#define CSR_FH_INT_BIT_RX_CHNL2 (1<<18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1<<17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1<<16) /* Rx channel 0 */ +#define CSR_FH_INT_BIT_TX_CHNL6 (1<<6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1<<1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1<<0) /* Tx channel 0 */ + +#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR_FH_INT_BIT_RX_CHNL2 | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + +#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL6 | \ + CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0 ) + + +/* RESET */ +#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) +#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) +#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) +#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) +#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) + +/* GP (general purpose) CONTROL */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) +#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) +#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) + +#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) + +#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) +#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + + +/* EEPROM REG */ +#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) +#define CSR_EEPROM_REG_BIT_CMD (0x00000002) + +/* EEPROM GP */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000006) +#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000) +#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) + +/* UCODE DRV GP */ +#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) +#define 
CSR_UCODE_SW_BIT_RFKILL (0x00000002) +#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) +#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) + +/* GPIO */ +#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) +#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) +#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER + +/* GI Chicken Bits */ +#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) +#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) + +/* CSR_ANA_PLL_CFG */ +#define CSR_ANA_PLL_CFG_SH (0x00880300) + +#define CSR_LED_REG_TRUN_ON (0x00000078) +#define CSR_LED_REG_TRUN_OFF (0x00000038) +#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) + +/* DRAM_INT_TBL_CTRL */ +#define CSR_DRAM_INT_TBL_CTRL_EN (1<<31) +#define CSR_DRAM_INT_TBL_CTRL_WRAP_CHK (1<<27) + +/*=== HBUS (Host-side Bus) ===*/ +#define HBUS_BASE (0x400) + +#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) +#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) +#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) +#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) +#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) +#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) +#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) +#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) +#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) + +#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) + + +/* SCD (Scheduler) */ +#define SCD_BASE (CSR_BASE + 0x2E00) + +#define SCD_MODE_REG (SCD_BASE + 0x000) +#define SCD_ARASTAT_REG (SCD_BASE + 0x004) +#define SCD_TXFACT_REG (SCD_BASE + 0x010) +#define SCD_TXF4MF_REG (SCD_BASE + 0x014) +#define SCD_TXF5MF_REG (SCD_BASE + 0x020) +#define SCD_SBYP_MODE_1_REG (SCD_BASE + 0x02C) +#define SCD_SBYP_MODE_2_REG (SCD_BASE + 0x030) + +/*=== FH (data Flow Handler) ===*/ +#define FH_BASE (0x800) + +#define FH_CBCC_TABLE (FH_BASE+0x140) +#define FH_TFDB_TABLE (FH_BASE+0x180) +#define FH_RCSR_TABLE (FH_BASE+0x400) +#define FH_RSSR_TABLE (FH_BASE+0x4c0) +#define FH_TCSR_TABLE (FH_BASE+0x500) +#define FH_TSSR_TABLE (FH_BASE+0x680) + +/* TFDB (Transmit Frame Buffer Descriptor) */ +#define FH_TFDB(_channel, buf) \ + (FH_TFDB_TABLE+((_channel)*2+(buf))*0x28) +#define ALM_FH_TFDB_CHNL_BUF_CTRL_REG(_channel) \ + (FH_TFDB_TABLE + 0x50 * _channel) +/* CBCC _channel is [0,2] */ +#define FH_CBCC(_channel) (FH_CBCC_TABLE+(_channel)*0x8) +#define FH_CBCC_CTRL(_channel) (FH_CBCC(_channel)+0x00) +#define FH_CBCC_BASE(_channel) (FH_CBCC(_channel)+0x04) + +/* RCSR _channel is [0,2] */ +#define FH_RCSR(_channel) (FH_RCSR_TABLE+(_channel)*0x40) +#define FH_RCSR_CONFIG(_channel) (FH_RCSR(_channel)+0x00) +#define FH_RCSR_RBD_BASE(_channel) (FH_RCSR(_channel)+0x04) +#define FH_RCSR_WPTR(_channel) (FH_RCSR(_channel)+0x20) +#define FH_RCSR_RPTR_ADDR(_channel) (FH_RCSR(_channel)+0x24) + +#if IWL == 3945 +#define FH_RSCSR_CHNL0_WPTR (FH_RCSR_WPTR(0)) +#elif IWL == 4965 +#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) +#endif + +/* RSSR */ +#define FH_RSSR_CTRL (FH_RSSR_TABLE+0x000) +#define FH_RSSR_STATUS (FH_RSSR_TABLE+0x004) +/* TCSR */ +#define FH_TCSR(_channel) (FH_TCSR_TABLE+(_channel)*0x20) +#define FH_TCSR_CONFIG(_channel) (FH_TCSR(_channel)+0x00) +#define FH_TCSR_CREDIT(_channel) (FH_TCSR(_channel)+0x04) +#define FH_TCSR_BUFF_STTS(_channel) (FH_TCSR(_channel)+0x08) +/* TSSR */ +#define FH_TSSR_CBB_BASE (FH_TSSR_TABLE+0x000) +#define FH_TSSR_MSG_CONFIG (FH_TSSR_TABLE+0x008) +#define FH_TSSR_TX_STATUS (FH_TSSR_TABLE+0x010) +/* 18 - reserved */ + +/* card static random access memory (SRAM) for processor data and instructs */ +#define 
RTC_INST_LOWER_BOUND (0x000000) +#define RTC_DATA_LOWER_BOUND (0x800000) + + +/* DBM */ + +#define ALM_FH_SRVC_CHNL (6) + +#define ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) +#define ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) + +#define ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) + +#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define ALM_TB_MAX_BYTES_COUNT (0xFFF0) + +#define ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) \ + ((1LU << _channel) << 24) +#define ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel) \ + ((1LU << _channel) << 16) + +#define ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_channel) \ + (ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) | \ + ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel)) +#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ +#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ + +#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) + +#define TFD_QUEUE_MIN 0 +#define TFD_QUEUE_MAX 6 +#define TFD_QUEUE_SIZE_MAX (256) + +/* spectrum and channel data structures */ +#define IWL_NUM_SCAN_RATES (2) + +#define IWL_SCAN_FLAG_24GHZ (1<<0) +#define IWL_SCAN_FLAG_52GHZ (1<<1) +#define IWL_SCAN_FLAG_ACTIVE (1<<2) +#define IWL_SCAN_FLAG_DIRECT (1<<3) + +#define IWL_MAX_CMD_SIZE 1024 + +#define IWL_DEFAULT_TX_RETRY 15 +#define IWL_MAX_TX_RETRY 16 + +/*********************************************/ + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 4 + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/* QoS definitions */ + +#define CW_MIN_OFDM 15 +#define CW_MAX_OFDM 1023 +#define CW_MIN_CCK 31 +#define CW_MAX_CCK 1023 + +#define QOS_TX0_CW_MIN_OFDM CW_MIN_OFDM +#define QOS_TX1_CW_MIN_OFDM CW_MIN_OFDM +#define QOS_TX2_CW_MIN_OFDM ((CW_MIN_OFDM + 1) / 2 - 1) +#define QOS_TX3_CW_MIN_OFDM ((CW_MIN_OFDM + 1) / 4 - 1) + +#define QOS_TX0_CW_MIN_CCK CW_MIN_CCK +#define QOS_TX1_CW_MIN_CCK CW_MIN_CCK +#define QOS_TX2_CW_MIN_CCK ((CW_MIN_CCK + 1) / 2 - 1) +#define QOS_TX3_CW_MIN_CCK ((CW_MIN_CCK + 1) / 4 - 1) + +#define 
QOS_TX0_CW_MAX_OFDM CW_MAX_OFDM +#define QOS_TX1_CW_MAX_OFDM CW_MAX_OFDM +#define QOS_TX2_CW_MAX_OFDM CW_MIN_OFDM +#define QOS_TX3_CW_MAX_OFDM ((CW_MIN_OFDM + 1) / 2 - 1) + +#define QOS_TX0_CW_MAX_CCK CW_MAX_CCK +#define QOS_TX1_CW_MAX_CCK CW_MAX_CCK +#define QOS_TX2_CW_MAX_CCK CW_MIN_CCK +#define QOS_TX3_CW_MAX_CCK ((CW_MIN_CCK + 1) / 2 - 1) + +#define QOS_TX0_AIFS 3 +#define QOS_TX1_AIFS 7 +#define QOS_TX2_AIFS 2 +#define QOS_TX3_AIFS 2 + +#define QOS_TX0_ACM 0 +#define QOS_TX1_ACM 0 +#define QOS_TX2_ACM 0 +#define QOS_TX3_ACM 0 + +#define QOS_TX0_TXOP_LIMIT_CCK 0 +#define QOS_TX1_TXOP_LIMIT_CCK 0 +#define QOS_TX2_TXOP_LIMIT_CCK 6016 +#define QOS_TX3_TXOP_LIMIT_CCK 3264 + +#define QOS_TX0_TXOP_LIMIT_OFDM 0 +#define QOS_TX1_TXOP_LIMIT_OFDM 0 +#define QOS_TX2_TXOP_LIMIT_OFDM 3008 +#define QOS_TX3_TXOP_LIMIT_OFDM 1504 + +#define DEF_TX0_CW_MIN_OFDM CW_MIN_OFDM +#define DEF_TX1_CW_MIN_OFDM CW_MIN_OFDM +#define DEF_TX2_CW_MIN_OFDM CW_MIN_OFDM +#define DEF_TX3_CW_MIN_OFDM CW_MIN_OFDM + +#define DEF_TX0_CW_MIN_CCK CW_MIN_CCK +#define DEF_TX1_CW_MIN_CCK CW_MIN_CCK +#define DEF_TX2_CW_MIN_CCK CW_MIN_CCK +#define DEF_TX3_CW_MIN_CCK CW_MIN_CCK + +#define DEF_TX0_CW_MAX_OFDM CW_MAX_OFDM +#define DEF_TX1_CW_MAX_OFDM CW_MAX_OFDM +#define DEF_TX2_CW_MAX_OFDM CW_MAX_OFDM +#define DEF_TX3_CW_MAX_OFDM CW_MAX_OFDM + +#define DEF_TX0_CW_MAX_CCK CW_MAX_CCK +#define DEF_TX1_CW_MAX_CCK CW_MAX_CCK +#define DEF_TX2_CW_MAX_CCK CW_MAX_CCK +#define DEF_TX3_CW_MAX_CCK CW_MAX_CCK + +#define DEF_TX0_AIFS (2) +#define DEF_TX1_AIFS (2) +#define DEF_TX2_AIFS (2) +#define DEF_TX3_AIFS (2) + +#define DEF_TX0_ACM 0 +#define DEF_TX1_ACM 0 +#define DEF_TX2_ACM 0 +#define DEF_TX3_ACM 0 + +#define DEF_TX0_TXOP_LIMIT_CCK 0 +#define DEF_TX1_TXOP_LIMIT_CCK 0 +#define DEF_TX2_TXOP_LIMIT_CCK 0 +#define DEF_TX3_TXOP_LIMIT_CCK 0 + +#define DEF_TX0_TXOP_LIMIT_OFDM 0 +#define DEF_TX1_TXOP_LIMIT_OFDM 0 +#define DEF_TX2_TXOP_LIMIT_OFDM 0 +#define DEF_TX3_TXOP_LIMIT_OFDM 0 + +#define QOS_QOS_SETS 3 +#define QOS_PARAM_SET_ACTIVE 0 +#define QOS_PARAM_SET_DEF_CCK 1 +#define QOS_PARAM_SET_DEF_OFDM 2 + +#define CTRL_QOS_NO_ACK (0x0020) +#define DCT_FLAG_EXT_QOS_ENABLED (0x10) + +#define U32_PAD(n) ((4-(n))&0x3) + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +#define TFD_CTL_COUNT_SET(n) (n<<24) +#define TFD_CTL_COUNT_GET(ctl) ((ctl>>24) & 7) +#define TFD_CTL_PAD_SET(n) (n<<28) +#define TFD_CTL_PAD_GET(ctl) (ctl>>28) + +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \ + sizeof(struct iwl_cmd_meta)) + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +#endif /* __iwlwifi_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h new file mode 100644 index 000000000000..8a8b96fcf48d --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -0,0 +1,470 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_io_h__ +#define __iwl_io_h__ + +#include <linux/io.h> + +#include "iwl-debug.h" + +/* + * IO, register, and NIC memory access functions + * + * NOTE on naming convention and macro usage for these + * + * A single _ prefix before a an access function means that no state + * check or debug information is printed when that function is called. + * + * A double __ prefix before an access function means that state is checked + * (in the case of *restricted calls) and the current line number is printed + * in addition to any other debug output. + * + * The non-prefixed name is the #define that maps the caller into a + * #define that provides the caller's __LINE__ to the double prefix version. + * + * If you wish to call the function without any debug or state checking, + * you should use the single _ prefix version (as is used by dependent IO + * routines, for example _iwl_read_restricted calls the non-check version of + * _iwl_read32.) + * + * These declarations are *extremely* useful in quickly isolating code deltas + * which result in misconfiguring of the hardware I/O. In combination with + * git-bisect and the IO debug level you can quickly determine the specific + * commit which breaks the IO sequence to the hardware. 
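+ *
+ * As a concrete illustration of the convention, using the write path
+ * defined below: a call to iwl_write32() expands, when
+ * CONFIG_IWLWIFI_DEBUG is set, to __iwl_write32(__FILE__, __LINE__, ...),
+ * which prints the access via IWL_DEBUG_IO() and then calls the unchecked
+ * _iwl_write32(), i.e. a plain writel() to the mapped register.  Without
+ * CONFIG_IWLWIFI_DEBUG, iwl_write32() maps straight to _iwl_write32().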
+ * + */ + +#define _iwl_write32(iwl, ofs, val) writel((val), (iwl)->hw_base + (ofs)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *iwl, + u32 ofs, u32 val) +{ + IWL_DEBUG_IO("write_direct32(0x%08X, 0x%08X) - %s %d\n", + (u32) (ofs), (u32) (val), f, l); + _iwl_write32(iwl, ofs, val); +} +#define iwl_write32(iwl, ofs, val) \ + __iwl_write32(__FILE__, __LINE__, iwl, ofs, val) +#else +#define iwl_write32(iwl, ofs, val) _iwl_write32(iwl, ofs, val) +#endif + +#define _iwl_read32(iwl, ofs) readl((iwl)->hw_base + (ofs)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *iwl, u32 ofs) +{ + IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l); + return _iwl_read32(iwl, ofs); +} +#define iwl_read32(iwl, ofs) __iwl_read32(__FILE__, __LINE__, iwl, ofs) +#else +#define iwl_read32(p, o) _iwl_read32(p, o) +#endif + +static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int i = 0; + + do { + if ((_iwl_read32(priv, addr) & mask) == (bits & mask)) + return i; + mdelay(10); + i += 10; + } while (i < timeout); + + return -ETIMEDOUT; +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_poll_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int rc = _iwl_poll_bit(priv, addr, bits, mask, timeout); + if (unlikely(rc == -ETIMEDOUT)) + IWL_DEBUG_IO + ("poll_bit(0x%08X, 0x%08X, 0x%08X) - timedout - %s %d\n", + addr, bits, mask, f, l); + else + IWL_DEBUG_IO + ("poll_bit(0x%08X, 0x%08X, 0x%08X) = 0x%08X - %s %d\n", + addr, bits, mask, rc, f, l); + return rc; +} +#define iwl_poll_bit(iwl, addr, bits, mask, timeout) \ + __iwl_poll_bit(__FILE__, __LINE__, iwl, addr, bits, mask, timeout) +#else +#define iwl_poll_bit(p, a, b, m, t) _iwl_poll_bit(p, a, b, m, t) +#endif + +static inline void _iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_write32(priv, reg, _iwl_read32(priv, reg) | mask); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read32(priv, reg) | mask; + IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_write32(priv, reg, val); +} +#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m) +#else +#define iwl_set_bit(p, r, m) _iwl_set_bit(p, r, m) +#endif + +static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_write32(priv, reg, _iwl_read32(priv, reg) & ~mask); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_clear_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read32(priv, reg) & ~mask; + IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_write32(priv, reg, val); +} +#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m) +#else +#define iwl_clear_bit(p, r, m) _iwl_clear_bit(p, r, m) +#endif + +static inline int _iwl_grab_restricted_access(struct iwl_priv *priv) +{ + int rc; + u32 gp_ctl; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (atomic_read(&priv->restrict_refcnt)) + return 0; +#endif + if (test_bit(STATUS_RF_KILL_HW, &priv->status) || + test_bit(STATUS_RF_KILL_SW, &priv->status)) { + IWL_WARNING("WARNING: Requesting MAC access during RFKILL " + "wakes up NIC\n"); + + /* 10 msec allows time for NIC to complete its data save */ + gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL); + if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) { + 
IWL_DEBUG_RF_KILL("Wait for complete power-down, " + "gpctl = 0x%08x\n", gp_ctl); + mdelay(10); + } else + IWL_DEBUG_RF_KILL("power-down complete, " + "gpctl = 0x%08x\n", gp_ctl); + } + + /* this bit wakes up the NIC */ + _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + rc = _iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50); + if (rc < 0) { + IWL_ERROR("MAC is in deep sleep!\n"); + return -EIO; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + atomic_inc(&priv->restrict_refcnt); +#endif + return 0; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_grab_restricted_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + if (atomic_read(&priv->restrict_refcnt)) + IWL_DEBUG_INFO("Grabbing access while already held at " + "line %d.\n", l); + + IWL_DEBUG_IO("grabbing restricted access - %s %d\n", f, l); + + return _iwl_grab_restricted_access(priv); +} +#define iwl_grab_restricted_access(priv) \ + __iwl_grab_restricted_access(__FILE__, __LINE__, priv) +#else +#define iwl_grab_restricted_access(priv) \ + _iwl_grab_restricted_access(priv) +#endif + +static inline void _iwl_release_restricted_access(struct iwl_priv *priv) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + if (atomic_dec_and_test(&priv->restrict_refcnt)) +#endif + _iwl_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_release_restricted_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + if (atomic_read(&priv->restrict_refcnt) <= 0) + IWL_ERROR("Release unheld restricted access at line %d.\n", l); + + IWL_DEBUG_IO("releasing restricted access - %s %d\n", f, l); + _iwl_release_restricted_access(priv); +} +#define iwl_release_restricted_access(priv) \ + __iwl_release_restricted_access(__FILE__, __LINE__, priv) +#else +#define iwl_release_restricted_access(priv) \ + _iwl_release_restricted_access(priv) +#endif + +static inline u32 _iwl_read_restricted(struct iwl_priv *priv, u32 reg) +{ + return _iwl_read32(priv, reg); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read_restricted(const char *f, u32 l, + struct iwl_priv *priv, u32 reg) +{ + u32 value = _iwl_read_restricted(priv, reg); + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from %s %d\n", f, l); + IWL_DEBUG_IO("read_restricted(0x%4X) = 0x%08x - %s %d \n", reg, value, + f, l); + return value; +} +#define iwl_read_restricted(priv, reg) \ + __iwl_read_restricted(__FILE__, __LINE__, priv, reg) +#else +#define iwl_read_restricted _iwl_read_restricted +#endif + +static inline void _iwl_write_restricted(struct iwl_priv *priv, + u32 reg, u32 value) +{ + _iwl_write32(priv, reg, value); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static void __iwl_write_restricted(u32 line, + struct iwl_priv *priv, u32 reg, u32 value) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_write_restricted(priv, reg, value); +} +#define iwl_write_restricted(priv, reg, value) \ + __iwl_write_restricted(__LINE__, priv, reg, value) +#else +#define iwl_write_restricted _iwl_write_restricted +#endif + +static inline void iwl_write_buffer_restricted(struct iwl_priv *priv, + u32 reg, u32 len, u32 *values) +{ + u32 count = sizeof(u32); + + if ((priv != NULL) && (values != NULL)) { + for (; 0 < len; len -= count, reg += count, values++) + _iwl_write_restricted(priv, reg, *values); + } +} + +static inline int 
_iwl_poll_restricted_bit(struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int i = 0; + + do { + if ((_iwl_read_restricted(priv, addr) & mask) == mask) + return i; + mdelay(10); + i += 10; + } while (i < timeout); + + return -ETIMEDOUT; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_poll_restricted_bit(const char *f, u32 l, + struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int rc = _iwl_poll_restricted_bit(priv, addr, mask, timeout); + + if (unlikely(rc == -ETIMEDOUT)) + IWL_DEBUG_IO("poll_restricted_bit(0x%08X, 0x%08X) - " + "timedout - %s %d\n", addr, mask, f, l); + else + IWL_DEBUG_IO("poll_restricted_bit(0x%08X, 0x%08X) = 0x%08X " + "- %s %d\n", addr, mask, rc, f, l); + return rc; +} +#define iwl_poll_restricted_bit(iwl, addr, mask, timeout) \ + __iwl_poll_restricted_bit(__FILE__, __LINE__, iwl, addr, mask, timeout) +#else +#define iwl_poll_restricted_bit _iwl_poll_restricted_bit +#endif + +static inline u32 _iwl_read_restricted_reg(struct iwl_priv *priv, u32 reg) +{ + _iwl_write_restricted(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + return _iwl_read_restricted(priv, HBUS_TARG_PRPH_RDAT); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read_restricted_reg(u32 line, + struct iwl_priv *priv, u32 reg) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + return _iwl_read_restricted_reg(priv, reg); +} + +#define iwl_read_restricted_reg(priv, reg) \ + __iwl_read_restricted_reg(__LINE__, priv, reg) +#else +#define iwl_read_restricted_reg _iwl_read_restricted_reg +#endif + +static inline void _iwl_write_restricted_reg(struct iwl_priv *priv, + u32 addr, u32 val) +{ + _iwl_write_restricted(priv, HBUS_TARG_PRPH_WADDR, + ((addr & 0x0000FFFF) | (3 << 24))); + _iwl_write_restricted(priv, HBUS_TARG_PRPH_WDAT, val); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_write_restricted_reg(u32 line, + struct iwl_priv *priv, + u32 addr, u32 val) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_write_restricted_reg(priv, addr, val); +} + +#define iwl_write_restricted_reg(priv, addr, val) \ + __iwl_write_restricted_reg(__LINE__, priv, addr, val); +#else +#define iwl_write_restricted_reg _iwl_write_restricted_reg +#endif + +#define _iwl_set_bits_restricted_reg(priv, reg, mask) \ + _iwl_write_restricted_reg(priv, reg, \ + (_iwl_read_restricted_reg(priv, reg) | mask)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bits_restricted_reg(u32 line, struct iwl_priv + *priv, u32 reg, u32 mask) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_set_bits_restricted_reg(priv, reg, mask); +} +#define iwl_set_bits_restricted_reg(priv, reg, mask) \ + __iwl_set_bits_restricted_reg(__LINE__, priv, reg, mask) +#else +#define iwl_set_bits_restricted_reg _iwl_set_bits_restricted_reg +#endif + +#define _iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask) \ + _iwl_write_restricted_reg( \ + priv, reg, ((_iwl_read_restricted_reg(priv, reg) & mask) | bits)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bits_mask_restricted_reg(u32 line, + struct iwl_priv *priv, u32 reg, u32 bits, u32 mask) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask); +} + +#define iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask) \ + __iwl_set_bits_mask_restricted_reg(__LINE__, priv, 
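/*
 * Illustrative sketch (not taken from the driver) of the indirection used by
 * the "restricted reg" helpers above: peripheral (PRPH) registers are not
 * mapped directly, so the target address is first written to a window
 * register (HBUS_TARG_PRPH_RADDR/WADDR) and the data then moves through a
 * companion data register (HBUS_TARG_PRPH_RDAT/WDAT).  demo_* names are
 * hypothetical; only the two-register-window idea is shown.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_WADDR 0x0              /* window: latch write address      */
#define DEMO_WDAT  0x4              /* window: forward write data       */

static uint32_t demo_prph[256];     /* the indirectly reachable space   */
static uint32_t demo_waddr;         /* currently latched target address */

static void demo_direct_write(uint32_t ofs, uint32_t val)
{
	if (ofs == DEMO_WADDR)
		demo_waddr = val & 0xFFFF;            /* latch target     */
	else if (ofs == DEMO_WDAT)
		demo_prph[demo_waddr >> 2] = val;     /* forward the data */
}

static void demo_write_prph(uint32_t addr, uint32_t val)
{
	demo_direct_write(DEMO_WADDR, addr);  /* like HBUS_TARG_PRPH_WADDR */
	demo_direct_write(DEMO_WDAT, val);    /* like HBUS_TARG_PRPH_WDAT  */
}

int main(void)
{
	demo_write_prph(0x40, 0xdeadbeef);
	printf("prph[0x40] = 0x%08x\n", (unsigned)demo_prph[0x40 >> 2]);
	return 0;
}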
reg, bits, mask) +#else +#define iwl_set_bits_mask_restricted_reg _iwl_set_bits_mask_restricted_reg +#endif + +static inline void iwl_clear_bits_restricted_reg(struct iwl_priv + *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read_restricted_reg(priv, reg); + _iwl_write_restricted_reg(priv, reg, (val & ~mask)); +} + +static inline u32 iwl_read_restricted_mem(struct iwl_priv *priv, u32 addr) +{ + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, addr); + return iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); +} + +static inline void iwl_write_restricted_mem(struct iwl_priv *priv, u32 addr, + u32 val) +{ + iwl_write_restricted(priv, HBUS_TARG_MEM_WADDR, addr); + iwl_write_restricted(priv, HBUS_TARG_MEM_WDAT, val); +} + +static inline void iwl_write_restricted_mems(struct iwl_priv *priv, u32 addr, + u32 len, u32 *values) +{ + iwl_write_restricted(priv, HBUS_TARG_MEM_WADDR, addr); + for (; 0 < len; len -= sizeof(u32), values++) + iwl_write_restricted(priv, HBUS_TARG_MEM_WDAT, *values); +} + +static inline void iwl_write_restricted_regs(struct iwl_priv *priv, u32 reg, + u32 len, u8 *values) +{ + u32 reg_offset = reg; + u32 aligment = reg & 0x3; + + /* write any non-dword-aligned stuff at the beginning */ + if (len < sizeof(u32)) { + if ((aligment + len) <= sizeof(u32)) { + u8 size; + u32 value = 0; + size = len - 1; + memcpy(&value, values, len); + reg_offset = (reg_offset & 0x0000FFFF); + + _iwl_write_restricted(priv, + HBUS_TARG_PRPH_WADDR, + (reg_offset | (size << 24))); + _iwl_write_restricted(priv, HBUS_TARG_PRPH_WDAT, + value); + } + + return; + } + + /* now write all the dword-aligned stuff */ + for (; reg_offset < (reg + len); + reg_offset += sizeof(u32), values += sizeof(u32)) + _iwl_write_restricted_reg(priv, reg_offset, *((u32 *) values)); +} + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-priv.h b/drivers/net/wireless/iwlwifi/iwl-priv.h new file mode 100644 index 000000000000..6b490d08fea9 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-priv.h @@ -0,0 +1,308 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_priv_h__ +#define __iwl_priv_h__ + +#include <linux/workqueue.h> + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +#endif + +struct iwl_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + u8 phymode; + int alloc_rxb_skb; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + + const struct ieee80211_hw_mode *modes; + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; +#endif + /* ucode beacon time */ + u32 ucode_beacon_time; + + /* we allocate array of iwl_channel_info for NIC's valid channels. + * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* each calibration channel group in the EEPROM has a derived + * clip setting for each rate. */ + const struct iwl_clip_group clip_groups[5]; + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* Scan related variables */ + unsigned long last_scan_jiffies; + unsigned long scan_start; + unsigned long scan_pass_start; + unsigned long scan_start_tsf; + int scan_bands; + int one_direct_scan; + u8 direct_ssid_len; + u8 direct_ssid[IW_ESSID_MAX_SIZE]; + struct iwl_scan_cmd *scan; + u8 only_active_channel; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + struct mutex mutex; + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + + /* uCode images, save to reload in case of failure */ + struct fw_image_desc ucode_code; /* runtime inst */ + struct fw_image_desc ucode_data; /* runtime data original */ + struct fw_image_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_image_desc ucode_init; /* initialization inst */ + struct fw_image_desc ucode_init_data; /* initialization data */ + struct fw_image_desc ucode_boot; /* bootstrap inst */ + + + struct iwl_rxon_time_cmd rxon_timing; + + /* We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware */ + const struct iwl_rxon_cmd active_rxon; + struct iwl_rxon_cmd staging_rxon; + + int error_recovering; + struct iwl_rxon_cmd recovery_rxon; + + /* 1st responses from initialize and runtime uCode images. + * 4965's initialize alive response contains some calibration data. 
*/ + struct iwl_init_alive_resp card_alive_init; + struct iwl_alive_resp card_alive; + +#ifdef LED + /* LED related variables */ + struct iwl_activity_blink activity; + unsigned long led_packets; + int led_state; +#endif + + u16 active_rate; + u16 active_rate_basic; + + u8 call_post_assoc_from_beacon; + u8 assoc_station_added; +#if IWL == 4965 + u8 use_ant_b_for_management_frame; /* Tx antenna selection */ + /* HT variables */ + u8 is_dup; + u8 is_ht_enabled; + u8 channel_width; /* 0=20MHZ, 1=40MHZ */ + u8 current_channel_width; + u8 valid_antenna; /* Bit mask of antennas actually connected */ +#ifdef CONFIG_IWLWIFI_SENSITIVITY + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + u8 start_calib; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; +#endif /*CONFIG_IWLWIFI_SENSITIVITY*/ + +#ifdef CONFIG_IWLWIFI_HT + struct sta_ht_info current_assoc_ht; +#endif + u8 active_rate_ht[2]; + u8 last_phy_res[100]; + + /* Rate scaling data */ + struct iwl_lq_mngr lq_mngr; +#endif + + /* Rate scaling data */ + s8 data_retry_limit; + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct iwl_rx_queue rxq; + struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES]; +#if IWL == 4965 + unsigned long txq_ctx_active_msk; + struct iwl_kw kw; /* keep warm address */ + u32 scd_base_addr; /* scheduler sram base address */ +#endif + + unsigned long status; + u32 config; + + int last_rx_rssi; /* From Rx packet statisitics */ + int last_rx_noise; /* From beacon statistics */ + + struct iwl_power_mgr power_data; + + struct iwl_notif_statistics statistics; + unsigned long last_statistics_time; + + /* context information */ + u8 essid[IW_ESSID_MAX_SIZE]; + u8 essid_len; + u16 rates_mask; + + u32 power_mode; + u32 antenna; + u8 bssid[ETH_ALEN]; + u16 rts_threshold; + u8 mac_addr[ETH_ALEN]; + + /*station table variables */ + spinlock_t sta_lock; + int num_stations; + struct iwl_station_entry stations[IWL_STATION_COUNT]; + + /* Indication if ieee80211_ops->open has been called */ + int is_open; + + u8 mac80211_registered; + int is_abg; + + u32 notif_missed_beacons; + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* Duplicate packet detection */ + u16 last_seq_num; + u16 last_frag_num; + unsigned long last_packet_time; + struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE]; + + /* eeprom */ + struct iwl_eeprom eeprom; + + int iw_mode; + + struct sk_buff *ibss_beacon; + + /* Last Rx'd beacon timestamp */ + u32 timestamp0; + u32 timestamp1; + u16 beacon_int; + struct iwl_driver_hw_info hw_setting; + int interface_id; + + /* Current association information needed to configure the + * hardware */ + u16 assoc_id; + u16 assoc_capability; + u8 ps_mode; + +#ifdef CONFIG_IWLWIFI_QOS + struct iwl_qos_info qos_data; +#endif /*CONFIG_IWLWIFI_QOS */ + + struct workqueue_struct *workqueue; + + struct work_struct up; + struct work_struct restart; + struct work_struct calibrated_work; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct rf_kill; + struct work_struct abort_scan; + struct work_struct update_link_led; + struct work_struct auth_work; + struct work_struct report_work; + struct work_struct request_scan; + struct work_struct beacon_update; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work activity_timer; + struct delayed_work thermal_periodic; + struct 
delayed_work gather_stats; + struct delayed_work scan_check; + struct delayed_work post_associate; + +#define IWL_DEFAULT_TX_POWER 0x0F + s8 user_txpower_limit; + s8 max_channel_txpower_limit; + u32 cck_power_index_compensation; + +#ifdef CONFIG_PM + u32 pm_state[16]; +#endif + +#ifdef CONFIG_IWLWIFI_DEBUG + /* debugging info */ + u32 framecnt_to_us; + atomic_t restrict_refcnt; +#endif + +#if IWL == 4965 + struct work_struct txpower_work; +#ifdef CONFIG_IWLWIFI_SENSITIVITY + struct work_struct sensitivity_work; +#endif + struct work_struct statistics_work; + struct timer_list statistics_periodic; + +#ifdef CONFIG_IWLWIFI_HT_AGG + struct work_struct agg_work; +#endif + +#endif /* 4965 */ +}; /*iwl_priv */ + +#endif /* __iwl_priv_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h new file mode 100644 index 000000000000..0df41148eadc --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -0,0 +1,229 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_prph_h__ +#define __iwl_prph_h__ + + +#define PRPH_BASE (0x00000) +#define PRPH_END (0xFFFFF) + +/* APMG (power management) constants */ +#define APMG_BASE (PRPH_BASE + 0x3000) +#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) +#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) +#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) +#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) +#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) +#define APMG_RFKILL_REG (APMG_BASE + 0x0014) +#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) +#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) + +#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) +#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) + +#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) + +#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) + +#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x01000000) + + +/** + * BSM (Bootstrap State Machine) + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down when the embedded control + * processor is sleeping (e.g. for periodic power-saving shutdowns of radio). + * + * When powering back up after sleeps (or during initial uCode load), the BSM + * internally loads the short bootstrap program from the special SRAM into the + * embedded processor's instruction SRAM, and starts the processor so it runs + * the bootstrap program. + * + * This bootstrap program loads (via PCI busmaster DMA) instructions and data + * images for a uCode program from host DRAM locations. The host driver + * indicates DRAM locations and sizes for instruction and data images via the + * four BSM_DRAM_* registers. Once the bootstrap program loads the new program, + * the new program starts automatically. + * + * The uCode used for open-source drivers includes two programs: + * + * 1) Initialization -- performs hardware calibration and sets up some + * internal data, then notifies host via "initialize alive" notification + * (struct iwl_init_alive_resp) that it has completed all of its work. + * After signal from host, it then loads and starts the runtime program. + * The initialization program must be used when initially setting up the + * NIC after loading the driver. + * + * 2) Runtime/Protocol -- performs all normal runtime operations. This + * notifies host via "alive" notification (struct iwl_alive_resp) that it + * is ready to be used. + * + * When initializing the NIC, the host driver does the following procedure: + * + * 1) Load bootstrap program (instructions only, no data image for bootstrap) + * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND + * + * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction + * images in host DRAM. 
+ * + * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked: + * BSM_WR_MEM_SRC_REG = 0 + * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND + * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image + * + * 4) Load bootstrap into instruction SRAM: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START + * + * 5) Wait for load completion: + * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0 + * + * 6) Enable future boot loads whenever NIC's power management triggers it: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN + * + * 7) Start the NIC by removing all reset bits: + * CSR_RESET = 0 + * + * The bootstrap uCode (already in instruction SRAM) loads initialization + * uCode. Initialization uCode performs data initialization, sends + * "initialize alive" notification to host, and waits for a signal from + * host to load runtime code. + * + * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction + * images in host DRAM. The last register loaded must be the instruction + * bytecount register ("1" in MSbit tells initialization uCode to load + * the runtime uCode): + * BSM_DRAM_INST_BYTECOUNT_REG = bytecount | BSM_DRAM_INST_LOAD + * + * 5) Wait for "alive" notification, then issue normal runtime commands. + * + * Data caching during power-downs: + * + * Just before the embedded controller powers down (e.g for automatic + * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA) + * a current snapshot of the embedded processor's data SRAM into host DRAM. + * This caches the data while the embedded processor's memory is powered down. + * Location and size are controlled by BSM_DRAM_DATA_* registers. + * + * NOTE: Instruction SRAM does not need to be saved, since that doesn't + * change during operation; the original image (from uCode distribution + * file) can be used for reload. + * + * When powering back up, the BSM loads the bootstrap program. Bootstrap looks + * at the BSM_DRAM_* registers, which now point to the runtime instruction + * image and the cached (modified) runtime data (*not* the initialization + * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the + * uCode from where it left off before the power-down. + * + * NOTE: Initialization uCode does *not* run as part of the save/restore + * procedure. + * + * This save/restore method is mostly for autonomous power management during + * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and + * RFKILL should use complete restarts (with total re-initialization) of uCode, + * allowing total shutdown (including BSM memory). + * + * Note that, during normal operation, the host DRAM that held the initial + * startup data for the runtime code is now being used as a backup data cache + * for modified data! If you need to completely re-initialize the NIC, make + * sure that you use the runtime data image from the uCode distribution file, + * not the modified/saved runtime data. You may want to store a separate + * "clean" runtime data image in DRAM to avoid disk reads of distribution file. 
+ */ + +/* BSM bit fields */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ + +/* BSM addresses */ +#define BSM_BASE (PRPH_BASE + 0x3400) +#define BSM_END (PRPH_BASE + 0x3800) + +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ + +/* + * Pointers and size regs for bootstrap load and data SRAM save/restore. + * NOTE: 3945 pointers use bits 31:0 of DRAM address. + * 4965 pointers use bits 35:4 of DRAM address. + */ +#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090) +#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094) +#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098) +#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C) + +/* + * BSM special memory, stays powered on during power-save sleeps. + * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) + */ +#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) +#define BSM_SRAM_SIZE (1024) /* bytes */ + + +#endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h new file mode 100644 index 000000000000..b576ff24eb4f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h @@ -0,0 +1,91 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. 
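/*
 * Illustrative userspace walk-through (not taken from the driver) of the
 * boot-load sequence described in the BSM comment above, using the register
 * names defined in this header but a fake demo_write32() so it can be
 * compiled and read on its own.  The DRAM pointer setup (BSM_DRAM_*) and the
 * completion poll are elided; RTC_INST_LOWER_BOUND is a hypothetical
 * placeholder value here.
 */
#include <stdio.h>
#include <stdint.h>

#define PRPH_BASE                     0x00000
#define BSM_BASE                      (PRPH_BASE + 0x3400)
#define BSM_WR_CTRL_REG               (BSM_BASE + 0x000)
#define BSM_WR_MEM_SRC_REG            (BSM_BASE + 0x004)
#define BSM_WR_MEM_DST_REG            (BSM_BASE + 0x008)
#define BSM_WR_DWCOUNT_REG            (BSM_BASE + 0x00C)
#define BSM_WR_CTRL_REG_BIT_START     0x80000000u
#define BSM_WR_CTRL_REG_BIT_START_EN  0x40000000u

#define RTC_INST_LOWER_BOUND          0x00000   /* placeholder, assumed */

static void demo_write32(uint32_t reg, uint32_t val)
{
	printf("write 0x%05x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void demo_bsm_load_bootstrap(uint32_t inst_dwords)
{
	/* tell the BSM where to copy from/to and how many dwords */
	demo_write32(BSM_WR_MEM_SRC_REG, 0);
	demo_write32(BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	demo_write32(BSM_WR_DWCOUNT_REG, inst_dwords);

	/* start the copy into instruction SRAM */
	demo_write32(BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* a real driver now polls BSM_WR_CTRL_REG until BIT_START clears */

	/* arm the BSM to re-load automatically after future power-downs */
	demo_write32(BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
}

int main(void)
{
	demo_bsm_load_bootstrap(128);
	return 0;
}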
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_spectrum_h__ +#define __iwl_spectrum_h__ +enum { /* ieee80211_basic_report.map */ + IEEE80211_BASIC_MAP_BSS = (1 << 0), + IEEE80211_BASIC_MAP_OFDM = (1 << 1), + IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), + IEEE80211_BASIC_MAP_RADAR = (1 << 3), + IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct ieee80211_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __attribute__ ((packed)); + +enum { /* ieee80211_measurement_request.mode */ + /* Bit 0 is reserved */ + IEEE80211_MEASUREMENT_ENABLE = (1 << 1), + IEEE80211_MEASUREMENT_REQUEST = (1 << 2), + IEEE80211_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved */ +}; + +enum { + IEEE80211_REPORT_BASIC = 0, /* required */ + IEEE80211_REPORT_CCA = 1, /* optional */ + IEEE80211_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct ieee80211_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __attribute__ ((packed)); + +struct ieee80211_info_element { + u8 id; + u8 len; + u8 data[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_request { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + struct ieee80211_measurement_params params[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_report { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct ieee80211_basic_report basic[0]; + } u; +} __attribute__ ((packed)); +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c new file mode 100644 index 000000000000..75e3b5c3f155 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -0,0 +1,8746 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +/* + * NOTE: This file (iwl-base.c) is used to build to multiple hardware targets + * by defining IWL to either 3945 or 4965. 
The Makefile used when building + * the base targets will create base-3945.o and base-4965.o + * + * The eventual goal is to move as many of the #if IWL / #endif blocks out of + * this file and into the hardware specific implementation files (iwl-XXXX.c) + * and leave only the common (non #ifdef sprinkled) code in this file + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/ieee80211_radiotap.h> +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define IWL 3945 + +#include "iwlwifi.h" +#include "iwl-3945.h" +#include "iwl-helpers.h" + +#ifdef CONFIG_IWLWIFI_DEBUG +u32 iwl_debug_level; +#endif + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* module parameters */ +int iwl_param_disable_hw_scan; +int iwl_param_debug; +int iwl_param_disable; /* def: enable radio */ +int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */ +int iwl_param_hwcrypto; /* def: using software encryption */ +int iwl_param_qos_enable = 1; +int iwl_param_queues_num = IWL_MAX_NUM_QUEUES; + +/* + * module name, copyright, version, etc. + * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk + */ + +#define DRV_DESCRIPTION \ +"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT +#define VS "s" +#else +#define VS +#endif + +#define IWLWIFI_VERSION "1.1.17k" VD VS +#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" +#define DRV_VERSION IWLWIFI_VERSION + +/* Change firmware file name, using "-" and incrementing number, + * *only* when uCode interface or architecture changes so that it + * is not compatible with earlier drivers. 
+ * This number will also appear in << 8 position of 1st dword of uCode file */ +#define IWL3945_UCODE_API "-1" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); + +__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + int hdr_len = ieee80211_get_hdrlen(fc); + + if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) + return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN); + return NULL; +} + +static const struct ieee80211_hw_mode *iwl_get_hw_mode( + struct iwl_priv *priv, int mode) +{ + int i; + + for (i = 0; i < 3; i++) + if (priv->modes[i].mode == mode) + return &priv->modes[i]; + + return NULL; +} + +static int iwl_is_empty_essid(const char *essid, int essid_len) +{ + /* Single white space is for Linksys APs */ + if (essid_len == 1 && essid[0] == ' ') + return 1; + + /* Otherwise, if the entire essid is 0, we assume it is hidden */ + while (essid_len) { + essid_len--; + if (essid[essid_len] != '\0') + return 0; + } + + return 1; +} + +static const char *iwl_escape_essid(const char *essid, u8 essid_len) +{ + static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + const char *s = essid; + char *d = escaped; + + if (iwl_is_empty_essid(essid, essid_len)) { + memcpy(escaped, "<hidden>", sizeof("<hidden>")); + return escaped; + } + + essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE); + while (essid_len--) { + if (*s == '\0') { + *d++ = '\\'; + *d++ = '0'; + s++; + } else + *d++ = *s++; + } + *d = '\0'; + return escaped; +} + +static void iwl_print_hex_dump(int level, void *p, u32 len) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_debug_level & level)) + return; + + print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, + p, len, 1); +#endif +} + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A queue is a circular buffers with 'Read' and 'Write' pointers. + * 2 empty entries always kept in the buffer to protect from overflow. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * The IWL operates with six queues, one receive queue in the device's + * sram, one transmit queue for sending commands to the device firmware, + * and four transmit queues for data. + ***************************************************/ + +static int iwl_queue_space(const struct iwl_queue *q) +{ + int s = q->last_used - q->first_empty; + + if (q->last_used > q->first_empty) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +static inline int x2_queue_used(const struct iwl_queue *q, int i) +{ + return q->first_empty > q->last_used ? 
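/*
 * Illustrative sketch (not taken from the driver) of the ring arithmetic the
 * "Theory of operation" comment and iwl_queue_space()/iwl_queue_inc_wrap()
 * above rely on: power-of-two queues advance their indices with a mask
 * instead of a modulo, and a couple of slots stay unused so "empty" and
 * "full" remain distinguishable.  This is a generic simplification, not the
 * exact arithmetic of iwl_queue_space(), which also handles n_window command
 * slots separately.
 */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256                    /* must be a power of two */

struct demo_ring {
	unsigned first_empty;            /* next slot to write      */
	unsigned last_used;              /* next slot to reclaim    */
};

static unsigned demo_inc_wrap(unsigned index)
{
	return (index + 1) & (RING_SIZE - 1);   /* like iwl_queue_inc_wrap */
}

static unsigned demo_space(const struct demo_ring *q)
{
	unsigned used = (q->first_empty - q->last_used) & (RING_SIZE - 1);
	unsigned free = RING_SIZE - 1 - used;

	return free > 2 ? free - 2 : 0;         /* keep a small reserve    */
}

int main(void)
{
	struct demo_ring q = { 0, 0 };

	assert((RING_SIZE & (RING_SIZE - 1)) == 0);
	q.first_empty = demo_inc_wrap(q.first_empty);   /* enqueue one entry */
	printf("space after one enqueue: %u\n", demo_space(&q));
	return 0;
}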
+ (i >= q->last_used && i < q->first_empty) : + !(i < q->last_used && i >= q->first_empty); +} + +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) +{ + if (is_huge) + return q->n_window; + + return index & (q->n_window - 1); +} + +static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_queue_inc_wrap + * and iwl_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->first_empty = q->last_used = 0; + + return 0; +} + +static int iwl_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct pci_dev *dev = priv->pci_dev; + + if (id != IWL_CMD_QUEUE_NUM) { + txq->txb = kmalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERROR("kmalloc for auxilary BD " + "structures failed\n"); + goto error; + } + } else + txq->txb = NULL; + + txq->bd = pci_alloc_consistent(dev, + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, + &txq->q.dma_addr); + + if (!txq->bd) { + IWL_ERROR("pci_alloc_consistent(%zd) failed\n", + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); + goto error; + } + txq->q.id = id; + + return 0; + + error: + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + return -ENOMEM; +} + +int iwl_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq, int slots_num, u32 txq_id) +{ + struct pci_dev *dev = priv->pci_dev; + int len; + int rc = 0; + + /* alocate command space + one big command for scan since scan + * command is very huge the system will not have two scan at the + * same time */ + len = sizeof(struct iwl_cmd) * slots_num; + if (txq_id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); + if (!txq->cmd) + return -ENOMEM; + + rc = iwl_tx_queue_alloc(priv, txq, txq_id); + if (rc) { + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + return -ENOMEM; + } + txq->need_update = 0; + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + iwl_hw_tx_queue_init(priv, txq); + + return 0; +} + +/** + * iwl_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. txq itself is not freed. 
+ * + */ +void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_queue *q = &txq->q; + struct pci_dev *dev = priv->pci_dev; + int len; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->first_empty != q->last_used; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) + iwl_hw_txq_free_tfd(priv, txq); + + len = sizeof(struct iwl_cmd) * q->n_window; + if (q->id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + /* free buffers belonging to queue itself */ + if (txq->q.n_bd) + pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * + txq->q.n_bd, txq->bd, txq->q.dma_addr); + + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + /* 0 fill whole structure */ + memset(txq, 0, sizeof(*txq)); +} + +const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +/*************** STATION TABLE MANAGEMENT **** + * + * NOTE: This needs to be overhauled to better synchronize between + * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c + * + * mac80211 should also be examined to determine if sta_info is duplicating + * the functionality provided here + */ + +/**************************************************************/ +#if 0 /* temparary disable till we add real remove station */ +static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) +{ + int index = IWL_INVALID_STATION; + int i; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) + if (priv->stations[i].used && + !compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (unlikely(index == IWL_INVALID_STATION)) + goto out; + + if (priv->stations[index].used) { + priv->stations[index].used = 0; + priv->num_stations--; + } + + BUG_ON(priv->num_stations < 0); + +out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; +} +#endif +static void iwl_clear_stations_table(struct iwl_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->num_stations = 0; + memset(priv->stations, 0, sizeof(priv->stations)); + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + + +u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags) +{ + int i; + int index = IWL_INVALID_STATION; + struct iwl_station_entry *station; + unsigned long flags_spin; + DECLARE_MAC_BUF(mac); + u8 rate; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (!priv->stations[i].used && + index == IWL_INVALID_STATION) + index = i; + } + + /* These twh conditions has the same outcome but keep them separate + since they have different meaning */ + if (unlikely(index == IWL_INVALID_STATION)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + if (priv->stations[index].used && + !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr)); 
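/*
 * Illustrative sketch (not taken from the driver) of the slot-search pattern
 * iwl_add_station() above uses: a small fixed table with reserved indices
 * for the AP and broadcast entries, and one linear scan that returns either
 * the existing entry for a MAC address or the first free slot.  demo_*
 * names, table size and the 6-byte address handling are simplified stand-ins.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_AP_ID        0
#define DEMO_BCAST_ID     1
#define DEMO_STA_ID       2            /* first general-purpose slot */
#define DEMO_MAX_STATIONS 8
#define DEMO_INVALID      0xffu

struct demo_station {
	int used;
	unsigned char addr[6];
};

static struct demo_station demo_stations[DEMO_MAX_STATIONS];

static unsigned demo_find_or_alloc(const unsigned char *addr)
{
	unsigned index = DEMO_INVALID;
	unsigned i;

	for (i = DEMO_STA_ID; i < DEMO_MAX_STATIONS; i++) {
		if (demo_stations[i].used &&
		    !memcmp(demo_stations[i].addr, addr, 6))
			return i;                     /* already known    */
		if (!demo_stations[i].used && index == DEMO_INVALID)
			index = i;                    /* first free slot  */
	}
	if (index != DEMO_INVALID) {
		demo_stations[index].used = 1;
		memcpy(demo_stations[index].addr, addr, 6);
	}
	return index;                                 /* or DEMO_INVALID  */
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("slot %u\n", demo_find_or_alloc(mac));
	printf("slot %u (again)\n", demo_find_or_alloc(mac));
	return 0;
}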
+ station = &priv->stations[index]; + station->used = 1; + priv->num_stations++; + + memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = index; + station->sta.station_flags = 0; + + rate = (priv->phymode == MODE_IEEE80211A) ? IWL_RATE_6M_PLCP : + IWL_RATE_1M_PLCP | priv->hw_setting.cck_flag; + + /* Turn on both antennas for the station... */ + station->sta.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK); + station->current_rate.rate_n_flags = + le16_to_cpu(station->sta.rate_n_flags); + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + iwl_send_add_station(priv, &station->sta, flags); + return index; + +} + +/*************** DRIVER STATUS FUNCTIONS *****/ + +static inline int iwl_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_is_rfkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status) || + test_bit(STATUS_RF_KILL_SW, &priv->status); +} + +static inline int iwl_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_is_rfkill(priv)) + return 0; + + return iwl_is_ready(priv); +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +#define IWL_CMD(x) case x : return #x + +static const char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_3945_RX); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(RADAR_NOTIFICATION); + IWL_CMD(REPLY_QUIET_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); + IWL_CMD(QUIET_NOTIFICATION); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(MEASURE_ABORT_NOTIFICATION); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(REPLY_CARD_STATE_CMD); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + default: + return "UNKNOWN"; + + } +} + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +/** + * iwl_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. 
+ */ +static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; + struct iwl_queue *q = &txq->q; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + struct iwl_cmd *out_cmd; + u32 idx; + u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + dma_addr_t phys_addr; + int pad; + u16 count; + int ret; + unsigned long flags; + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->meta.flags & CMD_SIZE_HUGE)); + + if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERROR("No space for Tx\n"); + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + + control_flags = (u32 *) tfd; + + idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE); + out_cmd = &txq->cmd[idx]; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | + INDEX_TO_SEQ(q->first_empty)); + if (out_cmd->meta.flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); + + phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + + offsetof(struct iwl_cmd, hdr); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); + + pad = U32_PAD(cmd->len); + count = TFD_CTL_COUNT_GET(*control_flags); + *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad); + + IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM); + + txq->need_update = 1; + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + ret = iwl_tx_queue_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return ret ? ret : idx; +} + +int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->meta.flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->meta.flags & CMD_WANT_SKB); + + /* An asynchronous command MUST have a callback. */ + BUG_ON(!cmd->meta.u.callback); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */ + + BUG_ON(cmd->meta.flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. 
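/*
 * Illustrative sketch (not taken from the driver) of the sequence-field idea
 * used when iwl_enqueue_hcmd() above builds hdr.sequence: the originating
 * queue number and the ring index are packed into one 16-bit value (via
 * QUEUE_TO_SEQ/INDEX_TO_SEQ, defined elsewhere) so a later response can be
 * matched back to its slot.  The 8/8 bit split below is an assumed layout
 * for illustration only; the real macros are not shown in this hunk.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_QUEUE_TO_SEQ(q)  (((uint16_t)(q) & 0xff) << 8)   /* assumed */
#define DEMO_INDEX_TO_SEQ(i)  ((uint16_t)(i) & 0xff)          /* assumed */
#define DEMO_SEQ_TO_QUEUE(s)  (((s) >> 8) & 0xff)
#define DEMO_SEQ_TO_INDEX(s)  ((s) & 0xff)

int main(void)
{
	uint16_t seq = DEMO_QUEUE_TO_SEQ(4) | DEMO_INDEX_TO_SEQ(37);

	printf("queue %u, index %u\n",
	       (unsigned)DEMO_SEQ_TO_QUEUE(seq),
	       (unsigned)DEMO_SEQ_TO_INDEX(seq));
	return 0;
}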
*/ + BUG_ON(cmd->meta.u.callback != NULL); + + if (atomic_xchg(&entry, 1)) { + IWL_ERROR("Error sending %s: Already sending a host command\n", + get_cmd_string(cmd->id)); + return -EBUSY; + } + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + + if (cmd->meta.flags & CMD_WANT_SKB) + cmd->meta.source = &cmd->meta; + + cmd_idx = iwl_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERROR("Error sending %s: time out after %dms.\n", + get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_DEBUG_INFO("Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { + IWL_ERROR("Error: Response NULL in '%s'\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto out; + } + + ret = 0; + goto out; + +cancel: + if (cmd->meta.flags & CMD_WANT_SKB) { + struct iwl_cmd *qcmd; + + /* Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). */ + qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; + qcmd->meta.flags &= ~CMD_WANT_SKB; + } +fail: + if (cmd->meta.u.skb) { + dev_kfree_skb_any(cmd->meta.u.skb); + cmd->meta.u.skb = NULL; + } +out: + atomic_set(&entry, 0); + return ret; +} + +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + /* A command can not be asynchronous AND expect an SKB to be set. */ + BUG_ON((cmd->meta.flags & CMD_ASYNC) && + (cmd->meta.flags & CMD_WANT_SKB)); + + if (cmd->meta.flags & CMD_ASYNC) + return iwl_send_cmd_async(priv, cmd); + + return iwl_send_cmd_sync(priv, cmd); +} + +int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = sizeof(val), + .data = &val, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +int iwl_send_statistics_request(struct iwl_priv *priv) +{ + return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0); +} + +/** + * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON + * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz + * @channel: Any channel valid for the requested phymode + + * In addition to setting the staging RXON, priv->phymode is also set. 
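/*
 * Illustrative sketch (not taken from the driver) of the re-entrance guard
 * in iwl_send_cmd_sync() above: an atomic exchange on a static flag means
 * whoever swaps 0 -> 1 owns the slot and everyone else backs off with
 * -EBUSY.  Shown here with C11 atomics; the kernel code uses atomic_xchg()
 * and additionally sleeps on wait_command_queue with a timeout, which is
 * omitted from this sketch.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_entry;             /* 0 = free, 1 = in use */

static int demo_send_sync(const char *name)
{
	if (atomic_exchange(&demo_entry, 1)) {
		fprintf(stderr, "already sending a command (%s)\n", name);
		return -1;                    /* driver: -EBUSY        */
	}

	printf("sending %s\n", name);         /* driver: enqueue, wait */

	atomic_store(&demo_entry, 0);         /* release the slot      */
	return 0;
}

int main(void)
{
	demo_send_sync("REPLY_RXON");
	return 0;
}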
+ * + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the phymode + */ +static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel) +{ + if (!iwl_get_channel_info(priv, phymode, channel)) { + IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", + channel, phymode); + return -EINVAL; + } + + if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && + (priv->phymode == phymode)) + return 0; + + priv->staging_rxon.channel = cpu_to_le16(channel); + if (phymode == MODE_IEEE80211A) + priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + + priv->phymode = phymode; + + IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); + + return 0; +} + +/** + * iwl_check_rxon_cmd - validate RXON structure is valid + * + * NOTE: This is really only useful during development and can eventually + * be #ifdef'd out once the driver is stable and folks aren't actively + * making changes + */ +static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon) +{ + int error = 0; + int counter = 1; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + error |= le32_to_cpu(rxon->flags & + (RXON_FLG_TGJ_NARROW_BAND_MSK | + RXON_FLG_RADAR_DETECT_MSK)); + if (error) + IWL_WARNING("check 24G fields %d | %d\n", + counter++, error); + } else { + error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? + 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); + if (error) + IWL_WARNING("check 52 fields %d | %d\n", + counter++, error); + error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); + if (error) + IWL_WARNING("check 52 CCK %d | %d\n", + counter++, error); + } + error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; + if (error) + IWL_WARNING("check mac addr %d | %d\n", counter++, error); + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && + ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); + if (error) + IWL_WARNING("check basic rate %d | %d\n", counter++, error); + + error |= (le16_to_cpu(rxon->assoc_id) > 2007); + if (error) + IWL_WARNING("check assoc id %d | %d\n", counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); + if (error) + IWL_WARNING("check CCK and short slot %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); + if (error) + IWL_WARNING("check CCK & auto detect %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); + if (error) + IWL_WARNING("check TGG and auto detect %d | %d\n", + counter++, error); + + if ((rxon->flags & RXON_FLG_DIS_DIV_MSK)) + error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK | + RXON_FLG_ANT_A_MSK)) == 0); + if (error) + IWL_WARNING("check antenna %d %d\n", counter++, error); + + if (error) + IWL_WARNING("Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n"); + return -1; + } + return 0; +} + +/** + * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit + * @priv: staging_rxon is comapred to active_rxon + * + * If the RXON structure is changing sufficient to require a new + * tune or to clear and reset the RXON_FILTER_ASSOC_MSK then return 1 + * to indicate a new tune is required. 
+ */
+static int iwl_full_rxon_required(struct iwl_priv *priv)
+{
+
+	/* These items are only settable from the full RXON command */
+	if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
+	    compare_ether_addr(priv->staging_rxon.bssid_addr,
+			       priv->active_rxon.bssid_addr) ||
+	    compare_ether_addr(priv->staging_rxon.node_addr,
+			       priv->active_rxon.node_addr) ||
+	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
+			       priv->active_rxon.wlap_bssid_addr) ||
+	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
+	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
+	    (priv->staging_rxon.air_propagation !=
+	     priv->active_rxon.air_propagation) ||
+	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
+		return 1;
+
+	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+	 * be updated with the RXON_ASSOC command -- however only some
+	 * flag transitions are allowed using RXON_ASSOC */
+
+	/* Check if we are not switching bands */
+	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
+	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
+		return 1;
+
+	/* Check if we are switching association toggle */
+	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
+	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
+		return 1;
+
+	return 0;
+}
+
+static int iwl_send_rxon_assoc(struct iwl_priv *priv)
+{
+	int rc = 0;
+	struct iwl_rx_packet *res = NULL;
+	struct iwl_rxon_assoc_cmd rxon_assoc;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_RXON_ASSOC,
+		.len = sizeof(rxon_assoc),
+		.meta.flags = CMD_WANT_SKB,
+		.data = &rxon_assoc,
+	};
+	const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
+	const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+
+	if ((rxon1->flags == rxon2->flags) &&
+	    (rxon1->filter_flags == rxon2->filter_flags) &&
+	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+		IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
+		return 0;
+	}
+
+	rxon_assoc.flags = priv->staging_rxon.flags;
+	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
+	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+	rxon_assoc.reserved = 0;
+
+	rc = iwl_send_cmd_sync(priv, &cmd);
+	if (rc)
+		return rc;
+
+	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
+	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+		IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
+		rc = -EIO;
+	}
+
+	priv->alloc_rxb_skb--;
+	dev_kfree_skb_any(cmd.meta.u.skb);
+
+	return rc;
+}
+
+/**
+ * iwl_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
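+ *
+ * In outline, the function below validates the staging RXON, takes the
+ * RXON_ASSOC shortcut when no retune is needed, otherwise clears
+ * RXON_FILTER_ASSOC_MSK on the current configuration, sends the full
+ * RXON, and then restores TX power, the broadcast station entry and
+ * the HW rate table.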
+ */ +static int iwl_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + int rc = 0; + DECLARE_MAC_BUF(mac); + + if (!iwl_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; + + /* select antenna */ + priv->staging_rxon.flags &= + ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); + priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv); + + rc = iwl_check_rxon_cmd(&priv->staging_rxon); + if (rc) { + IWL_ERROR("Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + rc = iwl_send_rxon_assoc(priv); + if (rc) { + IWL_ERROR("Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && + (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { + IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERROR("Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + } + + IWL_DEBUG_INFO("Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %s\n", + ((priv->staging_rxon.filter_flags & + RXON_FILTER_ASSOC_MSK) ? 
"" : "out"), + le16_to_cpu(priv->staging_rxon.channel), + print_mac(mac, priv->staging_rxon.bssid_addr)); + + /* Apply the new configuration */ + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (rc) { + IWL_ERROR("Error setting new configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + iwl_clear_stations_table(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = iwl_hw_reg_send_txpower(priv); + if (rc) { + IWL_ERROR("Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Add the broadcast address so we can send broadcast frames */ + if (iwl_add_station(priv, BROADCAST_ADDR, 0, 0) == + IWL_INVALID_STATION) { + IWL_ERROR("Error adding BROADCAST address for transmit.\n"); + return -EIO; + } + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (iwl_is_associated(priv) && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) + if (iwl_add_station(priv, priv->active_rxon.bssid_addr, 1, 0) + == IWL_INVALID_STATION) { + IWL_ERROR("Error adding AP address for transmit.\n"); + return -EIO; + } + + /* Init the hardware's rate fallback order based on the + * phymode */ + rc = iwl3945_init_hw_rate_table(priv); + if (rc) { + IWL_ERROR("Error setting HW rate table: %02X\n", rc); + return -EIO; + } + + return 0; +} + +static int iwl_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .flags = 3, + .lead_time = 0xAA, + .max_kill = 1, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd); +} + +static int iwl_send_scan_abort(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .meta.flags = CMD_WANT_SKB, + }; + + /* If there isn't a scan actively going on in the hardware + * then we are in between scan bands and not actually + * actively scanning, so don't send the abort command */ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return 0; + } + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return rc; + } + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_card_state_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct sk_buff *skb) +{ + return 1; +} + +/* + * CARD_STATE_CMD + * + * Use: Sets the internal card state to enable, disable, or halt + * + * When in the 'enable' state the card operates as normal. + * When in the 'disable' state, the card enters into a low power mode. + * When in the 'halt' state, the card is shut down and must be fully + * restarted to come back on. 
+ */ +static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_CARD_STATE_CMD, + .len = sizeof(u32), + .data = &flags, + .meta.flags = meta_flag, + }; + + if (meta_flag & CMD_ASYNC) + cmd.meta.u.callback = iwl_card_state_sync_callback; + + return iwl_send_cmd(priv, &cmd); +} + +static int iwl_add_sta_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + struct iwl_rx_packet *res = NULL; + + if (!skb) { + IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); + return 1; + } + + res = (struct iwl_rx_packet *)skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + return 1; + } + + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + break; + default: + break; + } + + /* We didn't cache the SKB; let the caller free it */ + return 1; +} + +int iwl_send_add_station(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *res = NULL; + int rc = 0; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .len = sizeof(struct iwl_addsta_cmd), + .meta.flags = flags, + .data = sta, + }; + + if (flags & CMD_ASYNC) + cmd.meta.u.callback = iwl_add_sta_sync_callback; + else + cmd.meta.flags |= CMD_WANT_SKB; + + rc = iwl_send_cmd(priv, &cmd); + + if (rc || (flags & CMD_ASYNC)) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + rc = -EIO; + } + + if (rc == 0) { + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); + break; + default: + rc = -EIO; + IWL_WARNING("REPLY_ADD_STA failed\n"); + break; + } + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_update_sta_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + + switch (keyconf->alg) { + case ALG_CCMP: + key_flags |= STA_KEY_FLG_CCMP; + key_flags |= cpu_to_le16( + keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + break; + case ALG_TKIP: + case ALG_WEP: + return -EINVAL; + default: + return -EINVAL; + } + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + 
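+	/* The modified (now keyless) station entry is pushed to the
+	 * ucode via iwl_send_add_station() once the lock is dropped */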
spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static void iwl_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARNING("%d frames still in use. Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERROR("Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + const u8 *dest, int left) +{ + + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && + (priv->iw_mode != IEEE80211_IF_TYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +static int iwl_rate_index_from_plcp(int plcp) +{ + int i = 0; + + for (i = 0; i < IWL_RATE_COUNT; i++) + if (iwl_rates[i].plcp == plcp) + return i; + return -1; +} + +static u8 iwl_rate_get_lowest_plcp(int rate_mask) +{ + u8 i; + + for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; + i = iwl_rates[i].next_ieee) { + if (rate_mask & (1 << i)) + return iwl_rates[i].plcp; + } + + return IWL_RATE_INVALID; +} + +static int iwl_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl_get_free_frame(priv); + + if (!frame) { + IWL_ERROR("Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & + 0xFF0); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_6M_PLCP; + } else { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_1M_PLCP; + } + + frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl_free_frame(priv, frame); + + return rc; +} + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac) +{ + memcpy(mac, priv->eeprom.mac_address, 6); +} + +/** + * iwl_eeprom_init - read EEPROM contents + * + * Load the EEPROM from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
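+ *
+ * The image is read as an array of 16-bit words: for each word the
+ * target address is shifted into CSR_EEPROM_REG, the routine then polls
+ * for CSR_EEPROM_REG_READ_VALID_MSK and picks the data out of the upper
+ * half of the register.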
+ */ +int iwl_eeprom_init(struct iwl_priv *priv) +{ + u16 *e = (u16 *)&priv->eeprom; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + u32 r; + int sz = sizeof(priv->eeprom); + int rc; + int i; + u16 addr; + + /* The EEPROM structure has several padding buffers within it + * and when adding new EEPROM maps is subject to programmer errors + * which may be very difficult to identify without explicitly + * checking the resulting size of the eeprom map. */ + BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); + + if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); + return -ENOENT; + } + + rc = iwl_eeprom_aqcuire_semaphore(priv); + if (rc < 0) { + IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n"); + return -ENOENT; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); + _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); + + for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; + i += IWL_EEPROM_ACCESS_DELAY) { + r = _iwl_read_restricted(priv, CSR_EEPROM_REG); + if (r & CSR_EEPROM_REG_READ_VALID_MSK) + break; + udelay(IWL_EEPROM_ACCESS_DELAY); + } + + if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { + IWL_ERROR("Time out reading EEPROM[%d]", addr); + return -ETIMEDOUT; + } + e[addr / 2] = le16_to_cpu(r >> 16); + } + + return 0; +} + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_DEBUG + +/** + * iwl_report_frame - dump frame to syslog during debug sessions + * + * hack this function to show different aspects of received frames, + * including selective frame dumps. + * group100 parameter selects whether to show 1 out of 100 good frames. + * + * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type + * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats) + * is 3945-specific and gives bad output for 4965. Need to split the + * functionality, keep common stuff here. 
+ */ +void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ + u32 to_us; + u32 print_summary = 0; + u32 print_dump = 0; /* set to 1 to dump all frames' contents */ + u32 hundred = 0; + u32 dataframe = 0; + u16 fc; + u16 seq_ctl; + u16 channel; + u16 phy_flags; + int rate_sym; + u16 length; + u16 status; + u16 bcn_tmr; + u32 tsf_low; + u64 tsf; + u8 rssi; + u8 agc; + u16 sig_avg; + u16 noise_diff; + struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + u8 *data = IWL_RX_DATA(pkt); + + /* MAC header */ + fc = le16_to_cpu(header->frame_control); + seq_ctl = le16_to_cpu(header->seq_ctrl); + + /* metadata */ + channel = le16_to_cpu(rx_hdr->channel); + phy_flags = le16_to_cpu(rx_hdr->phy_flags); + rate_sym = rx_hdr->rate; + length = le16_to_cpu(rx_hdr->len); + + /* end-of-frame status and timestamp */ + status = le32_to_cpu(rx_end->status); + bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); + tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; + tsf = le64_to_cpu(rx_end->timestamp); + + /* signal statistics */ + rssi = rx_stats->rssi; + agc = rx_stats->agc; + sig_avg = le16_to_cpu(rx_stats->sig_avg); + noise_diff = le16_to_cpu(rx_stats->noise_diff); + + to_us = !compare_ether_addr(header->addr1, priv->mac_addr); + + /* if data frame is to us and all is good, + * (optionally) print summary for only 1 out of every 100 */ + if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == + (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { + dataframe = 1; + if (!group100) + print_summary = 1; /* print each frame */ + else if (priv->framecnt_to_us < 100) { + priv->framecnt_to_us++; + print_summary = 0; + } else { + priv->framecnt_to_us = 0; + print_summary = 1; + hundred = 1; + } + } else { + /* print summary for all other frames */ + print_summary = 1; + } + + if (print_summary) { + char *title; + u32 rate; + + if (hundred) + title = "100Frames"; + else if (fc & IEEE80211_FCTL_RETRY) + title = "Retry"; + else if (ieee80211_is_assoc_response(fc)) + title = "AscRsp"; + else if (ieee80211_is_reassoc_response(fc)) + title = "RasRsp"; + else if (ieee80211_is_probe_response(fc)) { + title = "PrbRsp"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_beacon(fc)) { + title = "Beacon"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_atim(fc)) + title = "ATIM"; + else if (ieee80211_is_auth(fc)) + title = "Auth"; + else if (ieee80211_is_deauth(fc)) + title = "DeAuth"; + else if (ieee80211_is_disassoc(fc)) + title = "DisAssoc"; + else + title = "Frame"; + + rate = iwl_rate_index_from_plcp(rate_sym); + if (rate == -1) + rate = 0; + else + rate = iwl_rates[rate].ieee / 2; + + /* print frame summary. + * MAC addresses show just the last byte (for brevity), + * but you can hack it to show more, if you'd like to. 
*/ + if (dataframe) + IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " + "len=%u, rssi=%d, chnl=%d, rate=%u, \n", + title, fc, header->addr1[5], + length, rssi, channel, rate); + else { + /* src/dst addresses assume managed mode */ + IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " + "src=0x%02x, rssi=%u, tim=%lu usec, " + "phy=0x%02x, chnl=%d\n", + title, fc, header->addr1[5], + header->addr3[5], rssi, + tsf_low - priv->scan_start_tsf, + phy_flags, channel); + } + } + if (print_dump) + iwl_print_hex_dump(IWL_DL_RX, data, length); +} +#endif + +static void iwl_unset_hw_setting(struct iwl_priv *priv) +{ + if (priv->hw_setting.shared_virt) + pci_free_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + priv->hw_setting.shared_virt, + priv->hw_setting.shared_phys); +} + +/** + * iwl_supported_rate_to_ie - fill in the supported rate in IE field + * + * return : set the bit for each supported rate insert in ie + */ +static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate, + u16 basic_rate, int max_count) +{ + u16 ret_rates = 0, bit; + int i; + u8 *rates; + + rates = &(ie[1]); + + for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { + if (bit & supported_rate) { + ret_rates |= bit; + rates[*ie] = iwl_rates[i].ieee | + ((bit & basic_rate) ? 0x80 : 0x00); + *ie = *ie + 1; + if (*ie >= max_count) + break; + } + } + + return ret_rates; +} + +/** + * iwl_fill_probe_req - fill in all required fields and IE for probe request + */ +static u16 iwl_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + int left, int is_direct) +{ + int len = 0; + u8 *pos = NULL; + u16 ret_rates; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + len += 24; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN); + memcpy(frame->sa, priv->mac_addr, ETH_ALEN); + memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN); + frame->seq_ctrl = 0; + + /* fill in our indirect SSID IE */ + /* ...next IE... */ + + left -= 2; + if (left < 0) + return 0; + len += 2; + pos = &(frame->u.probe_req.variable[0]); + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + /* fill in our direct SSID IE... */ + if (is_direct) { + /* ...next IE... */ + left -= 2 + priv->essid_len; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SSID; + *pos++ = priv->essid_len; + memcpy(pos, priv->essid, priv->essid_len); + pos += priv->essid_len; + len += 2 + priv->essid_len; + } + + /* fill in supported rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SUPP_RATES; + *pos = 0; + ret_rates = priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_supported_rate_to_ie(pos, priv->active_rate, + priv->active_rate_basic, left); + len += 2 + *pos; + pos += (*pos) + 1; + ret_rates = ~ret_rates & priv->active_rate; + + if (ret_rates == 0) + goto fill_end; + + /* fill in supported extended rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... 
*/ + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos = 0; + iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left); + if (*pos > 0) + len += 2 + *pos; + + fill_end: + return (u16)len; +} + +/* + * QoS support +*/ +#ifdef CONFIG_IWLWIFI_QOS +static int iwl_send_qos_params_command(struct iwl_priv *priv, + struct iwl_qosparam_cmd *qos) +{ + + return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM, + sizeof(struct iwl_qosparam_cmd), qos); +} + +static void iwl_reset_qos(struct iwl_priv *priv) +{ + u16 cw_min = 15; + u16 cw_max = 1023; + u8 aifs = 2; + u8 is_legacy = 0; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.qos_active = 0; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + if (!(priv->active_rate & 0xfff0)) { + cw_min = 31; + is_legacy = 1; + } + } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { + cw_min = 31; + is_legacy = 1; + } + + if (priv->qos_data.qos_active) + aifs = 3; + + priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; + + if (priv->qos_data.qos_active) { + i = 1; + priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 7; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 2; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(6016); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3008); + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 3; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 4 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16((cw_max + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3264); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(1504); + } else { + for (i = 1; i < 4; i++) { + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + } + } + IWL_DEBUG_QOS("set QoS to default \n"); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_activate_qos(struct iwl_priv *priv, u8 force) +{ + unsigned long flags; + + if (priv == NULL) + return; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!priv->qos_data.qos_enable) + return; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.def_qos_parm.qos_flags = 0; + + if (priv->qos_data.qos_cap.q_AP.queue_request && + !priv->qos_data.qos_cap.q_AP.txop_request) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_TXOP_TYPE_MSK; + + if 
(priv->qos_data.qos_active) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + spin_unlock_irqrestore(&priv->lock, flags); + + if (force || iwl_is_associated(priv)) { + IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n", + priv->qos_data.qos_active); + + iwl_send_qos_params_command(priv, + &(priv->qos_data.def_qos_parm)); + } +} + +#endif /* CONFIG_IWLWIFI_QOS */ +/* + * Power management (not Tx power!) functions + */ +#define MSEC_TO_USEC 1024 + +#define NOSLP __constant_cpu_to_le32(0) +#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK +#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC) +#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \ + __constant_cpu_to_le32(X1), \ + __constant_cpu_to_le32(X2), \ + __constant_cpu_to_le32(X3), \ + __constant_cpu_to_le32(X4)} + + +/* default power management (not Tx power) table values */ +/* for tim 0-10 */ +static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} +}; + +/* for tim > 10 */ +static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), + SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), + SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), + SLP_VEC(2, 6, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), + SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} +}; + +int iwl_power_init_handle(struct iwl_priv *priv) +{ + int rc = 0, i; + struct iwl_power_mgr *pow_data; + int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; + u16 pci_pm; + + IWL_DEBUG_POWER("Initialize power \n"); + + pow_data = &(priv->power_data); + + memset(pow_data, 0, sizeof(*pow_data)); + + pow_data->active_index = IWL_POWER_RANGE_0; + pow_data->dtim_val = 0xffff; + + memcpy(&pow_data->pwr_range_0[0], &range_0[0], size); + memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); + + rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm); + if (rc != 0) + return 0; + else { + struct iwl_powertable_cmd *cmd; + + IWL_DEBUG_POWER("adjust power command flags\n"); + + for (i = 0; i < IWL_POWER_AC; i++) { + cmd = &pow_data->pwr_range_0[i].cmd; + + if (pci_pm & 0x1) + cmd->flags &= ~IWL_POWER_PCI_PM_MSK; + else + cmd->flags |= IWL_POWER_PCI_PM_MSK; + } + } + return rc; +} + +static int iwl_update_power_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, u32 mode) +{ + int rc = 0, i; + u8 skip; + u32 max_sleep = 0; + struct iwl_power_vec_entry *range; + u8 period = 0; + struct iwl_power_mgr *pow_data; + + if (mode > IWL_POWER_INDEX_5) { + IWL_DEBUG_POWER("Error invalid power mode \n"); + return -1; + } + pow_data = &(priv->power_data); + + if (pow_data->active_index == IWL_POWER_RANGE_0) + range = &pow_data->pwr_range_0[0]; + else + range = &pow_data->pwr_range_1[1]; + + memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd)); + +#ifdef IWL_MAC80211_DISABLE + if (priv->assoc_network != NULL) { + unsigned long flags; + + period 
= priv->assoc_network->tim.tim_period; + } +#endif /*IWL_MAC80211_DISABLE */ + skip = range[mode].no_dtim; + + if (period == 0) { + period = 1; + skip = 0; + } + + if (skip == 0) { + max_sleep = period; + cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; + } else { + __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; + max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; + cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; + } + + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { + if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) + cmd->sleep_interval[i] = cpu_to_le32(max_sleep); + } + + IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return rc; +} + +static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode) +{ + u32 final_mode = mode; + int rc; + struct iwl_powertable_cmd cmd; + + /* If on battery, set to 3, + * if plugged into AC power, set to CAM ("continuosly aware mode"), + * else user level */ + switch (mode) { + case IWL_POWER_BATTERY: + final_mode = IWL_POWER_INDEX_3; + break; + case IWL_POWER_AC: + final_mode = IWL_POWER_MODE_CAM; + break; + default: + final_mode = mode; + break; + } + + iwl_update_power_cmd(priv, &cmd, final_mode); + + rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); + + if (final_mode == IWL_POWER_MODE_CAM) + clear_bit(STATUS_POWER_PMI, &priv->status); + else + set_bit(STATUS_POWER_PMI, &priv->status); + + return rc; +} + +int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr2, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our IBSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr3, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + case IEEE80211_IF_TYPE_STA: /* Header: Dest. 
| AP{BSSID} | Source */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr3, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our BSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr2, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + } + + return 1; +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +const char *iwl_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + * + * NOTE: priv->mutex is not required before calling this function + */ +static int iwl_scan_cancel(struct iwl_priv *priv) +{ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCANNING, &priv->status); + return 0; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Queuing scan abort.\n"); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + queue_work(priv->workqueue, &priv->abort_scan); + + } else + IWL_DEBUG_SCAN("Scan abort already in progress.\n"); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return 0; +} + +/** + * iwl_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + * NOTE: priv->mutex must be held before calling this function + */ +static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long now = jiffies; + int ret; + + ret = iwl_scan_cancel(priv); + if (ret && ms) { + mutex_unlock(&priv->mutex); + while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && + test_bit(STATUS_SCANNING, &priv->status)) + msleep(1); + mutex_lock(&priv->mutex); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return ret; +} + +static void iwl_sequence_reset(struct iwl_priv *priv) +{ + /* Reset ieee stats */ + + /* We don't reset the net_device_stats (ieee->stats) on + * re-association */ + + priv->last_seq_num = -1; + priv->last_frag_num = -1; + priv->last_packet_time = 0; + + iwl_scan_cancel(priv); +} + +#define MAX_UCODE_BEACON_INTERVAL 1024 +#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) + +static __le16 iwl_adjust_beacon_interval(u16 beacon_val) +{ + u16 new_val = 0; + u16 beacon_factor = 0; + + beacon_factor = + (beacon_val + MAX_UCODE_BEACON_INTERVAL) + / MAX_UCODE_BEACON_INTERVAL; + new_val = beacon_val / beacon_factor; + + return cpu_to_le16(new_val); +} + +static void iwl_setup_rxon_timing(struct iwl_priv *priv) +{ + u64 interval_tm_unit; + u64 tsf, result; + unsigned long flags; + struct ieee80211_conf *conf = NULL; + u16 beacon_int = 0; + + conf = ieee80211_get_hw_conf(priv->hw); + + spin_lock_irqsave(&priv->lock, flags); + priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); + 
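+	/* timestamp1/timestamp0 hold the high/low 32 bits of the last TSF;
+	 * the ((tsf << 32) | timestamp0) reconstruction below relies on
+	 * that split */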
priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); + + priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; + + tsf = priv->timestamp1; + tsf = ((tsf << 32) | priv->timestamp0); + + beacon_int = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { + if (beacon_int == 0) { + priv->rxon_timing.beacon_interval = cpu_to_le16(100); + priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); + } else { + priv->rxon_timing.beacon_interval = + cpu_to_le16(beacon_int); + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval( + le16_to_cpu(priv->rxon_timing.beacon_interval)); + } + + priv->rxon_timing.atim_window = 0; + } else { + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval(conf->beacon_int); + /* TODO: we need to get atim_window from upper stack + * for now we set to 0 */ + priv->rxon_timing.atim_window = 0; + } + + interval_tm_unit = + (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024); + result = do_div(tsf, interval_tm_unit); + priv->rxon_timing.beacon_init_val = + cpu_to_le32((u32) ((u64) interval_tm_unit - result)); + + IWL_DEBUG_ASSOC + ("beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(priv->rxon_timing.beacon_interval), + le32_to_cpu(priv->rxon_timing.beacon_init_val), + le16_to_cpu(priv->rxon_timing.atim_window)); +} + +static int iwl_scan_initiate(struct iwl_priv *priv) +{ + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + IWL_ERROR("APs don't scan.\n"); + return 0; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN("Scan already in progress.\n"); + return -EAGAIN; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Scan request while abort pending. 
" + "Queuing.\n"); + return -EAGAIN; + } + + IWL_DEBUG_INFO("Starting scan...\n"); + priv->scan_bands = 2; + set_bit(STATUS_SCANNING, &priv->status); + priv->scan_start = jiffies; + priv->scan_pass_start = priv->scan_start; + + queue_work(priv->workqueue, &priv->request_scan); + + return 0; +} + +static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + + return 0; +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode) +{ + if (phymode == MODE_IEEE80211A) { + priv->staging_rxon.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_bg_post_associate() */ + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; + priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; + } +} + +/* + * initilize rxon structure with default values fromm eeprom + */ +static void iwl_connection_init_rx_config(struct iwl_priv *priv) +{ + const struct iwl_channel_info *ch_info; + + memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_AP: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; + break; + + case IEEE80211_IF_TYPE_STA: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; + priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_IBSS: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; + priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_MNTR: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; + priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_get_channel_info(priv, priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + /* + * in some case A channels are all non IBSS + * in this case force B/G channel + */ + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !(is_channel_ibss(ch_info))) + ch_info = &priv->channel_info[0]; + + priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); + if (is_channel_a_band(ch_info)) + priv->phymode = MODE_IEEE80211A; + else + priv->phymode = MODE_IEEE80211G; + + iwl_set_flags_for_phymode(priv, priv->phymode); + + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; +} + +static int iwl_set_mode(struct iwl_priv *priv, int mode) +{ + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + if (mode == IEEE80211_IF_TYPE_IBSS) { 
+ const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, + priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info || !is_channel_ibss(ch_info)) { + IWL_ERROR("channel %d not IBSS channel\n", + le16_to_cpu(priv->staging_rxon.channel)); + return -EINVAL; + } + } + + cancel_delayed_work(&priv->scan_check); + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + return -EAGAIN; + } + + priv->iw_mode = mode; + + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + + iwl_clear_stations_table(priv); + + iwl_commit_rxon(priv); + + return 0; +} + +static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_control *ctl, + struct iwl_cmd *cmd, + struct sk_buff *skb_frag, + int last_frag) +{ + struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; + + switch (keyinfo->alg) { + case ALG_CCMP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; + memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); + break; + + case ALG_TKIP: +#if 0 + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; + + if (last_frag) + memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, + 8); + else + memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); +#endif + break; + + case ALG_WEP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | + (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + if (keyinfo->keylen == 13) + cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; + + memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX("Configuring packet for WEP encryption " + "with key %d\n", ctl->key_idx); + break; + + default: + printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
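+ *
+ * This fills in the rate-independent part of the TX command: ACK and
+ * sequence-control flags, RTS/CTS protection, the station id, the TID
+ * taken from the QoS control field, and the PM frame timeout for
+ * management frames.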
+ */ +static void iwl_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, + int is_unicast, u8 std_id) +{ + __le16 *qc; + u16 fc = le16_to_cpu(hdr->frame_control); + __le32 tx_flags = cmd->cmd.tx.tx_flags; + + cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_response(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + cmd->cmd.tx.sta_id = std_id; + if (ieee80211_get_morefrag(hdr)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + + if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { + tx_flags |= TX_CMD_FLG_RTS_MSK; + tx_flags &= ~TX_CMD_FLG_CTS_MSK; + } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + + if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) + tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(3); + else + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(2); + } else + cmd->cmd.tx.timeout.pm_frame_timeout = 0; + + cmd->cmd.tx.driver_txop = 0; + cmd->cmd.tx.tx_flags = tx_flags; + cmd->cmd.tx.next_frame_len = 0; +} + +static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + int sta_id; + u16 fc = le16_to_cpu(hdr->frame_control); + + /* If this frame is broadcast or not data then use the broadcast + * station id */ + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || + is_multicast_ether_addr(hdr->addr1)) + return priv->hw_setting.bcast_sta_id; + + switch (priv->iw_mode) { + + /* If this frame is part of a BSS network (we're a station), then + * we use the AP's station id */ + case IEEE80211_IF_TYPE_STA: + return IWL_AP_ID; + + /* If we are an AP, then find the station, or use BCAST */ + case IEEE80211_IF_TYPE_AP: + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + return priv->hw_setting.bcast_sta_id; + + /* If this frame is part of a IBSS network, then we use the + * target specific station id */ + case IEEE80211_IF_TYPE_IBSS: { + DECLARE_MAC_BUF(mac); + + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC); + + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + IWL_DEBUG_DROP("Station %s not in station map. 
" + "Defaulting to broadcast...\n", + print_mac(mac, hdr->addr1)); + iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); + return priv->hw_setting.bcast_sta_id; + } + default: + IWL_WARNING("Unkown mode of operation: %d", priv->iw_mode); + return priv->hw_setting.bcast_sta_id; + } +} + +/* + * start REPLY_TX command process + */ +static int iwl_tx_skb(struct iwl_priv *priv, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + int txq_id = ctl->queue; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + struct iwl_cmd *out_cmd = NULL; + u16 len, idx, len_org; + u8 id, hdr_len, unicast; + u8 sta_id; + u16 seq_number = 0; + u16 fc; + __le16 *qc; + u8 wait_write_ptr = 0; + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP("Dropping - RF KILL\n"); + goto drop_unlock; + } + + if (!priv->interface_id) { + IWL_DEBUG_DROP("Dropping - !priv->interface_id\n"); + goto drop_unlock; + } + + if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { + IWL_ERROR("ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = le16_to_cpu(hdr->frame_control); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX("Sending AUTH frame\n"); + else if (ieee80211_is_assoc_request(fc)) + IWL_DEBUG_TX("Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_request(fc)) + IWL_DEBUG_TX("Sending REASSOC frame\n"); +#endif + + if (!iwl_is_associated(priv) && + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { + IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); + goto drop_unlock; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_get_hdrlen(fc); + sta_id = iwl_get_sta_id(priv, hdr); + if (sta_id == IWL_INVALID_STATION) { + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n", + print_mac(mac, hdr->addr1)); + goto drop; + } + + IWL_DEBUG_RATE("station Id %d\n", sta_id); + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + seq_number = priv->stations[sta_id].tid[tid].seq_number & + IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = cpu_to_le16(seq_number) | + (hdr->seq_ctrl & + __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); + seq_number += 0x10; + } + txq = &priv->txq[txq_id]; + q = &txq->q; + + spin_lock_irqsave(&priv->lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + control_flags = (u32 *) tfd; + idx = get_cmd_index(q, q->first_empty, 0); + + memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->first_empty].skb[0] = skb; + memcpy(&(txq->txb[q->first_empty].status.control), + ctl, sizeof(struct ieee80211_tx_control)); + out_cmd = &txq->cmd[idx]; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->first_empty))); + /* copy frags header */ + memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); + + /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */ + len = priv->hw_setting.tx_cmd_len + + sizeof(struct iwl_cmd_header) + hdr_len; + + len_org = len; + len = (len + 3) & ~3; + + if (len_org != len) + len_org = 1; + else + len_org = 0; + + txcmd_phys = txq->dma_addr_cmd + sizeof(struct 
iwl_cmd) * idx + + offsetof(struct iwl_cmd, hdr); + + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); + + if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) + iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); + + /* 802.11 null functions have no payload... */ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); + } + + /* If there is no payload, then only one TFD is used */ + if (!len) + *control_flags = TFD_CTL_COUNT_SET(1); + else + *control_flags = TFD_CTL_COUNT_SET(2) | + TFD_CTL_PAD_SET(U32_PAD(len)); + + len = (u16)skb->len; + out_cmd->cmd.tx.len = cpu_to_le16(len); + + /* TODO need this for burst mode later on */ + iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); + + out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_get_morefrag(hdr)) { + txq->need_update = 1; + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, + sizeof(out_cmd->cmd.tx)); + + iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, + ieee80211_get_hdrlen(fc)); + + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + rc = iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if (rc) + return rc; + + if ((iwl_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + ieee80211_stop_queue(priv->hw, ctl->queue); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +static void iwl_set_rate(struct iwl_priv *priv) +{ + const struct ieee80211_hw_mode *hw = NULL; + struct ieee80211_rate *rate; + int i; + + hw = iwl_get_hw_mode(priv, priv->phymode); + + priv->active_rate = 0; + priv->active_rate_basic = 0; + + IWL_DEBUG_RATE("Setting rates for 802.11%c\n", + hw->mode == MODE_IEEE80211A ? + 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); + + for (i = 0; i < hw->num_rates; i++) { + rate = &(hw->rates[i]); + if ((rate->val < IWL_RATE_COUNT) && + (rate->flags & IEEE80211_RATE_SUPPORTED)) { + IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", + rate->val, iwl_rates[rate->val].plcp, + (rate->flags & IEEE80211_RATE_BASIC) ? 
+ "*" : ""); + priv->active_rate |= (1 << rate->val); + if (rate->flags & IEEE80211_RATE_BASIC) + priv->active_rate_basic |= (1 << rate->val); + } else + IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n", + rate->val, iwl_rates[rate->val].plcp); + } + + IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", + priv->active_rate, priv->active_rate_basic); + + /* + * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) + * otherwise set it to the default of all CCK rates and 6, 12, 24 for + * OFDM + */ + if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) + priv->staging_rxon.cck_basic_rates = + ((priv->active_rate_basic & + IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; + else + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) + priv->staging_rxon.ofdm_basic_rates = + ((priv->active_rate_basic & + (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> + IWL_FIRST_OFDM_RATE) & 0xFF; + else + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; +} + +static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio) +{ + unsigned long flags; + + if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) + return; + + IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", + disable_radio ? "OFF" : "ON"); + + if (disable_radio) { + iwl_scan_cancel(priv); + /* FIXME: This is a workaround for AP */ + if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_SW_BIT_RFKILL); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); + set_bit(STATUS_RF_KILL_SW, &priv->status); + } + return; + } + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + clear_bit(STATUS_RF_KILL_SW, &priv->status); + spin_unlock_irqrestore(&priv->lock, flags); + + /* wake up ucode */ + msleep(10); + + spin_lock_irqsave(&priv->lock, flags); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + if (!iwl_grab_restricted_access(priv)) + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by HW switch\n"); + return; + } + + queue_work(priv->workqueue, &priv->restart); + return; +} + +void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, + u32 decrypt_res, struct ieee80211_rx_status *stats) +{ + u16 fc = + le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control); + + if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) + return; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return; + + IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) + stats->flag |= RX_FLAG_MMIC_ERROR; + case RX_RES_STATUS_SEC_TYPE_WEP: + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX("hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } +} + +void iwl_handle_data_packet_monitor(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + void *data, short len, + struct ieee80211_rx_status *stats, + u16 
phy_flags) +{ + struct iwl_rt_rx_hdr *iwl_rt; + + /* First cache any information we need before we overwrite + * the information provided in the skb from the hardware */ + s8 signal = stats->ssi; + s8 noise = 0; + int rate = stats->rate; + u64 tsf = stats->mactime; + __le16 phy_flags_hw = cpu_to_le16(phy_flags); + + /* We received data from the HW, so stop the watchdog */ + if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) { + IWL_DEBUG_DROP("Dropping too large packet in monitor\n"); + return; + } + + /* copy the frame data to write after where the radiotap header goes */ + iwl_rt = (void *)rxb->skb->data; + memmove(iwl_rt->payload, data, len); + + iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */ + + /* total header + data */ + iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt)); + + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, sizeof(*iwl_rt) + len); + + /* Big bitfield of all the fields we provide in radiotap */ + iwl_rt->rt_hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | + (1 << IEEE80211_RADIOTAP_ANTENNA)); + + /* Zero the flags, we'll add to them as we go */ + iwl_rt->rt_flags = 0; + + iwl_rt->rt_tsf = cpu_to_le64(tsf); + + /* Convert to dBm */ + iwl_rt->rt_dbmsignal = signal; + iwl_rt->rt_dbmnoise = noise; + + /* Convert the channel frequency and set the flags */ + iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq); + if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); + else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); + else /* 802.11g */ + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ)); + + rate = iwl_rate_index_from_plcp(rate); + if (rate == -1) + iwl_rt->rt_rate = 0; + else + iwl_rt->rt_rate = iwl_rates[rate].ieee; + + /* antenna number */ + iwl_rt->rt_antenna = + le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; + + /* set the preamble flag if we have it */ + if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); + + stats->flag |= RX_FLAG_RADIOTAP; + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + rxb->skb = NULL; +} + + +#define IWL_PACKET_RETRY_TIME HZ + +int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + u16 sc = le16_to_cpu(header->seq_ctrl); + u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + u16 frag = sc & IEEE80211_SCTL_FRAG; + u16 *last_seq, *last_frag; + unsigned long *last_time; + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS:{ + struct list_head *p; + struct iwl_ibss_seq *entry = NULL; + u8 *mac = header->addr2; + int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1); + + __list_for_each(p, &priv->ibss_mac_hash[index]) { + entry = + list_entry(p, struct iwl_ibss_seq, list); + if (!compare_ether_addr(entry->mac, mac)) + break; + } + if (p == &priv->ibss_mac_hash[index]) { + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + IWL_ERROR + ("Cannot malloc new mac entry\n"); + return 0; + } + memcpy(entry->mac, mac, ETH_ALEN); + entry->seq_num = seq; + entry->frag_num = frag; + entry->packet_time = 
jiffies; + list_add(&entry->list, + &priv->ibss_mac_hash[index]); + return 0; + } + last_seq = &entry->seq_num; + last_frag = &entry->frag_num; + last_time = &entry->packet_time; + break; + } + case IEEE80211_IF_TYPE_STA: + last_seq = &priv->last_seq_num; + last_frag = &priv->last_frag_num; + last_time = &priv->last_packet_time; + break; + default: + return 0; + } + if ((*last_seq == seq) && + time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) { + if (*last_frag == frag) + goto drop; + if (*last_frag + 1 != frag) + /* out-of-order fragment */ + goto drop; + } else + *last_seq = seq; + + *last_frag = frag; + *last_time = jiffies; + return 0; + + drop: + return 1; +} + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +#include "iwl-spectrum.h" + +#define BEACON_TIME_MASK_LOW 0x00FFFFFF +#define BEACON_TIME_MASK_HIGH 0xFF000000 +#define TIME_UNIT 1024 + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in 8:24 format + * the high 1 byte is the beacon counts + * the lower 3 bytes is the time in usec within one beacon interval + */ + +static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * 1024; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); + rem = (usec % interval) & BEACON_TIME_MASK_LOW; + + return (quot << 24) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ + +static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) +{ + u32 base_low = base & BEACON_TIME_MASK_LOW; + u32 addon_low = addon & BEACON_TIME_MASK_LOW; + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & BEACON_TIME_MASK_HIGH) + + (addon & BEACON_TIME_MASK_HIGH); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << 24); + } else + res += (1 << 24); + + return cpu_to_le32(res); +} + +static int iwl_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .meta.flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + + if (iwl_is_associated(priv)) + add_time = + iwl_usecs_to_beacons( + le64_to_cpu(params->start_time) - priv->last_tsf, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_is_associated(priv)) + spectrum.start_time = + iwl_add_beacon_time(priv->last_beacon_time, + add_time, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) 
+ return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (res->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO + ("Replaced existing measurement: %d\n", + res->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} +#endif + +static void iwl_txstatus_to_ieee(struct iwl_priv *priv, + struct iwl_tx_info *tx_sta) +{ + + tx_sta->status.ack_signal = 0; + tx_sta->status.excessive_retries = 0; + tx_sta->status.queue_length = 0; + tx_sta->status.queue_number = 0; + + if (in_interrupt()) + ieee80211_tx_status_irqsafe(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + else + ieee80211_tx_status(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + + tx_sta->skb[0] = NULL; +} + +/** + * iwl_tx_queue_reclaim - Reclaim Tx queue entries no more used by NIC. + * + * When FW advances 'R' index, all entries between old and + * new 'R' index need to be reclaimed. As result, some free space + * forms. If there is enough free space (> low mark), wake Tx queue. + */ +int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->first_empty, q->last_used); + return 0; + } + + for (index = iwl_queue_inc_wrap(index, q->n_bd); + q->last_used != index; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) { + if (txq_id != IWL_CMD_QUEUE_NUM) { + iwl_txstatus_to_ieee(priv, + &(txq->txb[txq->q.last_used])); + iwl_hw_txq_free_tfd(priv, txq); + } else if (nfreed > 1) { + IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, + q->first_empty, q->last_used); + queue_work(priv->workqueue, &priv->restart); + } + nfreed++; + } + + if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL_CMD_QUEUE_NUM) && + priv->mac80211_registered) + ieee80211_wake_queue(priv->hw, txq_id); + + + return nfreed; +} + +static int iwl_is_tx_success(u32 status) +{ + return (status & 0xFF) == 0x1; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_tx_status *tx_status; + struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); + + if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.first_empty, + txq->q.last_used); + return; + } + + tx_status = 
&(txq->txb[txq->q.last_used].status); + + tx_status->retry_count = tx_resp->failure_frame; + tx_status->queue_number = status; + tx_status->queue_length = tx_resp->bt_kill_count; + tx_status->queue_length |= tx_resp->failure_rts; + + tx_status->flags = + iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; + + tx_status->control.tx_rate = iwl_rate_index_from_plcp(tx_resp->rate); + + IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", + txq_id, iwl_get_tx_fail_reason(status), status, + tx_resp->rate, tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); + if (index != -1) + iwl_tx_queue_reclaim(priv, txq_id, index); + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO("Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO("Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + iwl_disable_events(priv); + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARNING("uCode did not respond OK.\n"); +} + +static void iwl_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); + return; +} + +static void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", + le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); + rxon->channel = csa->channel; + priv->staging_rxon.channel = csa->channel; +} + +static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= 
MEASUREMENT_READY; +#endif +} + +static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX("sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} + +static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " + "notification for %s:\n", + le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL); + + if (!beacon) { + IWL_ERROR("update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl_send_beacon_cmd(priv); +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status); + u8 rate = beacon->beacon_notify_hdr.rate; + + IWL_DEBUG_RX("beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN("Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? 
"bg" : "a", + notif->tsf_high, + notif->tsf_low, notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec (%dms since last)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, + jiffies_to_msecs(elapsed_jiffies + (priv->last_scan_jiffies, jiffies))); + + priv->last_scan_jiffies = jiffies; +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + /* The scan completion notification came in, so kill that timer... */ + cancel_delayed_work(&priv->scan_check); + + IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", + (priv->scan_bands == 2) ? "2.4" : "5.2", + jiffies_to_msecs(elapsed_jiffies + (priv->scan_pass_start, jiffies))); + + /* Remove this scanned band from the list + * of pending bands to scan */ + priv->scan_bands--; + + /* If a request to abort was given, or the scan did not succeed + * then we reset the scan state machine and terminate, + * re-queuing another scan if one has been requested */ + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_INFO("Aborted scan completed.\n"); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + } else { + /* If there are more bands on this scan pass reschedule */ + if (priv->scan_bands > 0) + goto reschedule; + } + + priv->last_scan_jiffies = jiffies; + IWL_DEBUG_INFO("Setting scan to off\n"); + + clear_bit(STATUS_SCANNING, &priv->status); + + IWL_DEBUG_INFO("Scan took %dms\n", + jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); + + queue_work(priv->workqueue, &priv->scan_completed); + + return; + +reschedule: + priv->scan_pass_start = jiffies; + queue_work(priv->workqueue, &priv->request_scan); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? 
"Kill" : "On"); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (flags & SW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_SW, &priv->status); + else + clear_bit(STATUS_RF_KILL_SW, &priv->status); + + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status)) || + (test_bit(STATUS_RF_KILL_SW, &status) != + test_bit(STATUS_RF_KILL_SW, &priv->status))) + queue_work(priv->workqueue, &priv->rf_kill); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* NOTE: iwl_rx_statistics is different based on whether + * the build is for the 3945 or the 4965. See the + * corresponding implementation in iwl-XXXX.c + * + * The same handler is used for both the REPLY to a + * discrete statistics request from the host as well as + * for the periodic statistics notification from the uCode + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics; + + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_rx_scan_complete_notif; + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx; + + /* Setup hardware specific Rx handlers */ + iwl_hw_rx_handler_setup(priv); +} + +/** + * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +static void iwl_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int huge = sequence & SEQ_HUGE_FRAME; + int cmd_index; + struct iwl_cmd *cmd; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. 
*/ + if (txq_id != IWL_CMD_QUEUE_NUM) + IWL_ERROR("Error wrong command queue %d command id 0x%X\n", + txq_id, pkt->hdr.cmd); + BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); + + cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); + cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; + + /* Input error checking is done when commands are added to queue. */ + if (cmd->meta.flags & CMD_WANT_SKB) { + cmd->meta.source->u.skb = rxb->skb; + rxb->skb = NULL; + } else if (cmd->meta.u.callback && + !cmd->meta.u.callback(priv, cmd, rxb->skb)) + rxb->skb = NULL; + + iwl_tx_queue_reclaim(priv, txq_id, index); + + if (!(cmd->meta.flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + } +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replensish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. 
+ * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_rx_queue_space - Return number of free slots available in queue. + */ +static int iwl_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/** + * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue + * + * NOTE: This function has 3945 and 4965 specific code sections + * but is declared in base due to the majority of the + * implementation being the same (only a numeric constant is + * different) + * + */ +int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) +{ + u32 reg = 0; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) + goto exit_unlock; + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR, + q->write & ~0x7); + iwl_release_restricted_access(priv); + } else + iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); + + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); + return rc; +} + +/** + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer. + * + * NOTE: This function has 3945 and 4965 specific code paths in it. + */ +static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)dma_addr); +} + +/** + * iwl_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
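 * [Illustrative editor's note, not part of the original patch: in the code
 * below, the index handed to the device is always aligned down with
 * "write & ~0x7", so the hardware is only told about complete groups of
 * eight ready buffers. This is a reading of the masking visible in the code,
 * not of any firmware documentation.]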
+ */
+int iwl_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+ int write, rc;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ write = rxq->write & ~0x7;
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+ rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it */
+ if ((write != (rxq->write & ~0x7))
+ || (abs(rxq->write - rxq->read) > 7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ rc = iwl_rx_queue_update_write_ptr(priv, rxq);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * iwl_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization)
+ */
+void iwl_rx_replenish(void *data)
+{
+ struct iwl_priv *priv = data;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+ spin_lock_irqsave(&rxq->lock, flags);
+ while (!list_empty(&rxq->rx_used)) {
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ rxb->skb =
+ alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
+ if (!rxb->skb) {
+ if (net_ratelimit())
+ printk(KERN_CRIT DRV_NAME
+ ": Can not allocate SKB buffers\n");
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ break;
+ }
+ priv->alloc_rxb_skb++;
+ list_del(element);
+ rxb->dma_addr =
+ pci_map_single(priv->pci_dev, rxb->skb->data,
+ IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have it's SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + } + } + + pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + rxq->bd = NULL; +} + +int iwl_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct pci_dev *dev = priv->pci_dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); + if (!rxq->bd) + return -ENOMEM; + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; +} + +void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + priv->alloc_rxb_skb--; + dev_kfree_skb(rxq->pool[i].skb); + rxq->pool[i].skb = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). 
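 * [Illustrative editor's note, not part of the original patch: two worked
 * examples against the ratio2dB table above --
 *   sig_ratio 50  -> ratio2dB[50] = 34 dB           (exact: 20*log10(50)  ~ 34.0)
 *   sig_ratio 300 -> 20 + ratio2dB[300/10] = 49 dB  (exact: 20*log10(300) ~ 49.5)
 * and anything above 1000:1 is simply capped at 60 dB, which matches
 * 20*log10(1000).]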
*/ +int iwl_calc_db_from_ratio(int sig_ratio) +{ + /* Anything above 1000:1 just report as 60 dB */ + if (sig_ratio > 1000) + return 60; + + /* Above 100:1, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio > 100) + return (20 + (int)ratio2dB[sig_ratio/10]); + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +#define PERFECT_RSSI (-20) /* dBm */ +#define WORST_RSSI (-95) /* dBm */ +#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) + +/* Calculate an indication of rx signal quality (a percentage, not dBm!). + * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info + * about formulas used below. */ +int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) +{ + int sig_qual; + int degradation = PERFECT_RSSI - rssi_dbm; + + /* If we get a noise measurement, use signal-to-noise ratio (SNR) + * as indicator; formula is (signal dbm - noise dbm). + * SNR at or above 40 is a great signal (100%). + * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. + * Weakest usable signal is usually 10 - 15 dB SNR. */ + if (noise_dbm) { + if (rssi_dbm - noise_dbm >= 40) + return 100; + else if (rssi_dbm < noise_dbm) + return 0; + sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; + + /* Else use just the signal level. + * This formula is a least squares fit of data points collected and + * compared with a reference system that had a percentage (%) display + * for signal quality. */ + } else + sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * + (15 * RSSI_RANGE + 62 * degradation)) / + (RSSI_RANGE * RSSI_RANGE); + + if (sig_qual > 100) + sig_qual = 100; + else if (sig_qual < 1) + sig_qual = 0; + + return sig_qual; +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from the uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + + r = iwl_hw_get_rx_read(priv); + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a queue slot associated with it + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + pkt = (struct iwl_rx_packet *)rxb->skb->data; + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. 
See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r = %d, i = %d, %s, 0x%02x\n", r, i, + get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + if (reclaim) { + /* Invoke any callbacks, transfer the skb to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb && rxb->skb) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARNING("Claim null rxb?\n"); + } + + /* For now we just don't re-use anything. We can tweak this + * later to try and re-use notification packets and SKBs that + * fail to Rx correctly */ + if (rxb->skb != NULL) { + priv->alloc_rxb_skb--; + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + } + + pci_unmap_single(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + spin_lock_irqsave(&rxq->lock, flags); + list_add_tail(&rxb->list, &priv->rxq.rx_used); + spin_unlock_irqrestore(&rxq->lock, flags); + i = (i + 1) & RX_QUEUE_MASK; + } + + /* Backtrack one entry */ + priv->rxq.read = i; + iwl_rx_queue_restock(priv); +} + +int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int rc = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return rc; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. */ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return rc; + } + + /* restore this queue's parameters in nic hardware. */ + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + iwl_write_restricted(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + iwl_release_restricted_access(priv); + + /* else not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
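 * [Illustrative editor's note, not part of the original patch: the sequence
 * above is -- when STATUS_POWER_PMI is set, read CSR_UCODE_DRV_GP1 first; if
 * the MAC_SLEEP bit is set, only request MAC access via CSR_GP_CNTRL (which
 * wakes the uCode and leads to another interrupt) and defer the update,
 * otherwise grab restricted access and write the new write pointer to
 * HBUS_TARG_WRPTR.]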
*/ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + + txq->need_update = 0; + + return rc; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon) +{ + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_RADIO("RX CONFIG:\n"); + iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); + IWL_DEBUG_RADIO("u8[6] node_addr: %s\n", + print_mac(mac, rxon->node_addr)); + IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n", + print_mac(mac, rxon->bssid_addr)); + IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); +} +#endif + +static void iwl_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR("Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); +} + +static inline void iwl_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR("Disabled interrupts\n"); +} + +static const char *desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 i; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + int rc; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Not valid error log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + count = iwl_read_restricted_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERROR("Start IWL Error Log Dump:\n"); + IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", + priv->status, priv->config, count); + } + + IWL_ERROR("Desc Time asrtPC blink2 " + "ilink1 nmiPC Line\n"); + for (i = ERROR_START_OFFSET; + i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; + i += ERROR_ELEM_SIZE) { + desc = iwl_read_restricted_mem(priv, base + i); + time = + iwl_read_restricted_mem(priv, base + i + 1 * sizeof(u32)); + blink1 = + iwl_read_restricted_mem(priv, base + i + 2 * sizeof(u32)); + blink2 = + iwl_read_restricted_mem(priv, base + i + 3 * sizeof(u32)); + ilink1 = + iwl_read_restricted_mem(priv, base + i + 4 * sizeof(u32)); + ilink2 = + iwl_read_restricted_mem(priv, base + i + 5 * sizeof(u32)); + data1 = + iwl_read_restricted_mem(priv, base + i + 6 * sizeof(u32)); + + IWL_ERROR + ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + desc_lookup(desc), desc, 
time, blink1, blink2, + ilink1, ilink2, data1); + } + + iwl_release_restricted_access(priv); + +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + * NOTE: Must be called with iwl_grab_restricted_access() already obtained! + */ +static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + + if (num_events == 0) + return; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + time = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + if (mode == 0) + IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ + else { + data = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); + } + } +} + +static void iwl_dump_nic_event_log(struct iwl_priv *priv) +{ + int rc; + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Invalid event log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + /* event log header */ + capacity = iwl_read_restricted_mem(priv, base); + mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32))); + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); + iwl_release_restricted_access(priv); + return; + } + + IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n", + size, num_wraps); + + /* if uCode has wrapped back to top of log, start at the oldest entry, + * i.e the next one that uCode would fill. */ + if (num_wraps) + iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode); + + /* (then/else) start at top of log */ + iwl_print_event_log(priv, 0, next_entry, mode); + + iwl_release_restricted_access(priv); +} + +/** + * iwl_irq_handle_error - called for HW or SW error interrupt from card + */ +static void iwl_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. 
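 * [Editor's note, presumably the intent here and not stated in the original
 * patch: after a firmware error the uCode will never complete the command
 * currently in flight, so STATUS_HCMD_ACTIVE is cleared and
 * wait_command_queue is woken below, letting a blocked synchronous command
 * caller return with an error instead of waiting out its timeout.]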
*/ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_FW_ERRORS) { + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv); + iwl_print_rx_config_cmd(&priv->staging_rxon); + } +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (iwl_is_associated(priv)) { + memcpy(&priv->recovery_rxon, &priv->active_rxon, + sizeof(priv->recovery_rxon)); + priv->error_recovering = 1; + } + queue_work(priv->workqueue, &priv->restart); + } +} + +static void iwl_error_recovery(struct iwl_priv *priv) +{ + unsigned long flags; + + memcpy(&priv->staging_rxon, &priv->recovery_rxon, + sizeof(priv->staging_rxon)); + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + iwl_add_station(priv, priv->bssid, 1, 0); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); + priv->error_recovering = 0; + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_ISR) { + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERROR("Microcode HW error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + spin_unlock_irqrestore(&priv->lock, flags); + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_MAC_CLK_ACTV) + IWL_DEBUG_ISR("Microcode started or stopped.\n"); + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) + IWL_DEBUG_ISR("Alive interrupt\n"); + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled (4965 only) */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, + "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio":"enable radio"); + + /* Queue restart only if RF_KILL switch was set to "kill" + * when we loaded driver, and is now set to "enable". + * After we're Alive, RF_KILL gets handled by + * iwl_rx_card_state_notif() */ + if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself (4965 only) */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERROR("Microcode CT kill error detected.\n"); + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", + inta); + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR("Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]); + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl_rx_handle(priv); + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR("Tx interrupt\n"); + + iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted(priv, + FH_TCSR_CREDIT + (ALM_FH_SRVC_CHNL), 0x0); + iwl_release_restricted_access(priv); + } + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) + IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + + if (inta & ~CSR_INI_SET_MASK) { + IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", + inta & ~CSR_INI_SET_MASK); + IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR("End inta 0x%08x, enabled 
0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif + spin_unlock_irqrestore(&priv->lock, flags); +} + +static irqreturn_t iwl_isr(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + u32 inta_fh; + if (!priv) + return IRQ_NONE; + + spin_lock(&priv->lock); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared */ + IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); + goto none; + } + + IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + tasklet_schedule(&priv->irq_tasklet); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; + + none: + /* re-enable interrupts here since we don't have anything to service. */ + iwl_enable_interrupts(priv); + spin_unlock(&priv->lock); + return IRQ_NONE; +} + +/************************** EEPROM BANDS **************************** + * + * The iwl_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. + * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. 
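 * [Illustrative editor's note, not part of the original patch: as a concrete
 * example of the mapping below, iwl_eeprom_band_3[4] is channel 42
 * (5210 MHz), as also noted above. The lookup helper iwl_get_channel_info()
 * further below does a linear scan of channel_info[] starting at index 14
 * for MODE_IEEE80211A, and indexes directly with (channel - 1) for 2.4 GHz
 * channels 1-14.]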
+ * + *********************************************************************/ + +/* 2.4 GHz */ +static const u8 iwl_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwl_eeprom_band_2[] = { + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static void iwl_init_band_reference(const struct iwl_priv *priv, int band, + int *eeprom_ch_count, + const struct iwl_eeprom_channel + **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + switch (band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); + *eeprom_ch_info = priv->eeprom.band_1_channels; + *eeprom_ch_index = iwl_eeprom_band_1; + break; + case 2: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); + *eeprom_ch_info = priv->eeprom.band_2_channels; + *eeprom_ch_index = iwl_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); + *eeprom_ch_info = priv->eeprom.band_3_channels; + *eeprom_ch_index = iwl_eeprom_band_3; + break; + case 4: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); + *eeprom_ch_info = priv->eeprom.band_4_channels; + *eeprom_ch_index = iwl_eeprom_band_4; + break; + case 5: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); + *eeprom_ch_info = priv->eeprom.band_5_channels; + *eeprom_ch_index = iwl_eeprom_band_5; + break; + default: + BUG(); + return; + } +} + +const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, + int phymode, u16 channel) +{ + int i; + + switch (phymode) { + case MODE_IEEE80211A: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + + case MODE_IEEE80211B: + case MODE_IEEE80211G: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + + } + + return NULL; +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? 
# x " " : "") + +static int iwl_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_INFO("Channel map already initialized.\n"); + return 0; + } + + if (priv->eeprom.version < 0x2f) { + IWL_WARNING("Unsupported EEPROM version: 0x%04X\n", + priv->eeprom.version); + return -EINVAL; + } + + IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwl_eeprom_band_1) + + ARRAY_SIZE(iwl_eeprom_band_2) + + ARRAY_SIZE(iwl_eeprom_band_3) + + ARRAY_SIZE(iwl_eeprom_band_4) + + ARRAY_SIZE(iwl_eeprom_band_5); + + IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERROR("Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->phymode = (band == 1) ? MODE_IEEE80211B : + MODE_IEEE80211A; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + + if (!(is_channel_valid(ch_info))) { + IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" + " %ddBm): Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(NARROW), + CHECK_AND_PRINT(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the user_txpower_limit to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->user_txpower_limit) + priv->user_txpower_limit = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + if (iwl3945_txpower_set_from_eeprom(priv)) + return -EIO; + + return 0; +} + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. 
*/ +#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (10) + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ +#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode) +{ + if (phymode == MODE_IEEE80211A) + return IWL_ACTIVE_DWELL_TIME_52; + else + return IWL_ACTIVE_DWELL_TIME_24; +} + +static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode) +{ + u16 active = iwl_get_active_dwell_time(priv, phymode); + u16 passive = (phymode != MODE_IEEE80211A) ? + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_is_associated(priv)) { + /* If we're associated, we clamp the maximum passive + * dwell time to be 98% of the beacon interval (minus + * 2 * channel tune time) */ + passive = priv->beacon_int; + if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) + passive = IWL_PASSIVE_DWELL_BASE; + passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + } + + if (passive <= active) + passive = active + 1; + + return passive; +} + +static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode, + u8 is_active, u8 direct_mask, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + hw_mode = iwl_get_hw_mode(priv, phymode); + if (!hw_mode) + return 0; + + channels = hw_mode->channels; + + active_dwell = iwl_get_active_dwell_time(priv, phymode); + passive_dwell = iwl_get_passive_dwell_time(priv, phymode); + + for (i = 0, added = 0; i < hw_mode->num_channels; i++) { + if (channels[i].chan == + le16_to_cpu(priv->active_rxon.channel)) { + if (iwl_is_associated(priv)) { + IWL_DEBUG_SCAN + ("Skipping current channel %d\n", + le16_to_cpu(priv->active_rxon.channel)); + continue; + } + } else if (priv->only_active_channel) + continue; + + scan_ch->channel = channels[i].chan; + + ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", + scan_ch->channel); + continue; + } + + if (!is_active || is_channel_passive(ch_info) || + !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) + scan_ch->type = 0; /* passive */ + else + scan_ch->type = 1; /* active */ + + if (scan_ch->type & 1) + scan_ch->type |= (direct_mask << 1); + + if (is_channel_narrow(ch_info)) + scan_ch->type |= (1 << 7); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set power levels to defaults */ + 
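/*
 * Worked example (editorial, not from the patch) for the dwell and type
 * values computed above: when associated with a beacon interval of 100,
 * iwl_get_passive_dwell_time() yields (100 * 98) / 100 - 2 * 5 = 88, while
 * the 2.4 GHz active dwell stays at 20.  For an active scan of one direct
 * SSID (direct_mask = 1) on an ordinary channel, scan_ch->type becomes
 * 1 | (1 << 1) = 0x03; a "narrow" channel would also set bit 7, giving 0x83.
 */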
scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (phymode == MODE_IEEE80211A) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level + scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN("total channels to scan %d \n", added); + return added; +} + +static void iwl_reset_channel_flag(struct iwl_priv *priv) +{ + int i, j; + for (i = 0; i < 3; i++) { + struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i]; + for (j = 0; j < hw_mode->num_channels; j++) + hw_mode->channels[j].flag = hw_mode->channels[j].val; + } +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT; i++) { + rates[i].rate = iwl_rates[i].ieee * 5; + rates[i].val = i; /* Rate scaling will work on indexes */ + rates[i].val2 = i; + rates[i].flags = IEEE80211_RATE_SUPPORTED; + /* Only OFDM have the bits-per-symbol set */ + if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE)) + rates[i].flags |= IEEE80211_RATE_OFDM; + else { + /* + * If CCK 1M then set rate flag to CCK else CCK_2 + * which is CCK | PREAMBLE2 + */ + rates[i].flags |= (iwl_rates[i].plcp == 10) ? + IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; + } + + /* Set up which ones are basic rates... */ + if (IWL_BASIC_RATES_MASK & (1 << i)) + rates[i].flags |= IEEE80211_RATE_BASIC; + } +} + +/** + * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +static int iwl_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_hw_mode *modes; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + enum { + A = 0, + B = 1, + G = 2, + }; + int mode_count = 3; + + if (priv->modes) { + IWL_DEBUG_INFO("Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count, + GFP_KERNEL); + if (!modes) + return -ENOMEM; + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) { + kfree(modes); + return -ENOMEM; + } + + rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), + GFP_KERNEL); + if (!rates) { + kfree(modes); + kfree(channels); + return -ENOMEM; + } + + /* 0 = 802.11a + * 1 = 802.11b + * 2 = 802.11g + */ + + /* 5.2GHz channels start after the 2.4GHz channels */ + modes[A].mode = MODE_IEEE80211A; + modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + modes[A].rates = rates; + modes[A].num_rates = 8; /* just OFDM */ + modes[A].num_channels = 0; + + modes[B].mode = MODE_IEEE80211B; + modes[B].channels = channels; + modes[B].rates = &rates[8]; + modes[B].num_rates = 4; /* just CCK */ + modes[B].num_channels = 0; + + modes[G].mode = MODE_IEEE80211G; + modes[G].channels = channels; + modes[G].rates = rates; + modes[G].num_rates = 12; /* OFDM & CCK */ + modes[G].num_channels = 0; + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + iwl_init_hw_rates(priv, rates); + + for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if 
(!is_channel_valid(ch)) { + IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " + "skipping.\n", + ch->channel, is_channel_a_band(ch) ? + "5.2" : "2.4"); + continue; + } + + if (is_channel_a_band(ch)) + geo_ch = &modes[A].channels[modes[A].num_channels++]; + else { + geo_ch = &modes[B].channels[modes[B].num_channels++]; + modes[G].num_channels++; + } + + geo_ch->freq = ieee80211chan2mhz(ch->channel); + geo_ch->chan = ch->channel; + geo_ch->power_level = ch->max_power_avg; + geo_ch->antenna_max = 0xff; + + if (is_channel_valid(ch)) { + geo_ch->flag = IEEE80211_CHAN_W_SCAN; + if (ch->flags & EEPROM_CHANNEL_IBSS) + geo_ch->flag |= IEEE80211_CHAN_W_IBSS; + + if (ch->flags & EEPROM_CHANNEL_ACTIVE) + geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; + + if (ch->max_power_avg > priv->max_channel_txpower_limit) + priv->max_channel_txpower_limit = + ch->max_power_avg; + } + + geo_ch->val = geo_ch->flag; + } + + if ((modes[A].num_channels == 0) && priv->is_abg) { + printk(KERN_INFO DRV_NAME + ": Incorrectly detected BG card as ABG. Please send " + "your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, priv->pci_dev->subsystem_device); + priv->is_abg = 0; + } + + printk(KERN_INFO DRV_NAME + ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", + modes[G].num_channels, modes[A].num_channels); + + /* + * NOTE: We register these in preference of order -- the + * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick + * a phymode based on rates or AP capabilities but seems to + * configure it purely on if the channel being configured + * is supported by a mode -- and the first match is taken + */ + + if (modes[G].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[G]); + if (modes[B].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[B]); + if (modes[A].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[A]); + + priv->modes = modes; + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) +{ + if (priv->ucode_code.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_code.len, + priv->ucode_code.v_addr, + priv->ucode_code.p_addr); + priv->ucode_code.v_addr = NULL; + } + if (priv->ucode_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data.len, + priv->ucode_data.v_addr, + priv->ucode_data.p_addr); + priv->ucode_data.v_addr = NULL; + } + if (priv->ucode_data_backup.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + priv->ucode_data_backup.v_addr, + priv->ucode_data_backup.p_addr); + priv->ucode_data_backup.v_addr = NULL; + } + if (priv->ucode_init.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init.len, + priv->ucode_init.v_addr, + priv->ucode_init.p_addr); + priv->ucode_init.v_addr = NULL; + } + if (priv->ucode_init_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init_data.len, + priv->ucode_init_data.v_addr, + priv->ucode_init_data.p_addr); + priv->ucode_init_data.v_addr = NULL; + } + if (priv->ucode_boot.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_boot.len, + priv->ucode_boot.v_addr, + priv->ucode_boot.p_addr); + priv->ucode_boot.v_addr = NULL; + } +} + +/** 
+ * iwl_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + iwl_release_restricted_access(priv); + + if (!errcnt) + IWL_DEBUG_INFO + ("ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, + i + RTC_INST_LOWER_BOUND); + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + iwl_release_restricted_access(priv); + + return rc; +} + + +/** + * iwl_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Show first several data entries in instruction SRAM. + * Selection of bootstrap image is arbitrary. 
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_full(priv, image, len); + + return rc; +} + + +/* check contents of special bootstrap uCode SRAM */ +static int iwl_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO("Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image ++) { + val = iwl_read_restricted_reg(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO("Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965. + * NOTE: iwl_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
*/ + pinst = priv->ucode_init.p_addr; + pdata = priv->ucode_init_data.p_addr; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_write_restricted_reg(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl_verify_bsm(priv); + if (rc) { + iwl_release_restricted_access(priv); + return rc; + } + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, + RTC_INST_LOWER_BOUND); + iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); + else { + IWL_ERROR("BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + iwl_release_restricted_access(priv); + + return 0; +} + +static void iwl_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +/** + * iwl_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl_read_ucode(struct iwl_priv *priv) +{ + struct iwl_ucode *ucode; + int rc = 0; + const struct firmware *ucode_raw; + /* firmware file name contains uCode/driver compatibility version */ + const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode"; + u8 *src; + size_t len; + u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); + if (rc < 0) { + IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc); + goto error; + } + + IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", + name, ucode_raw->size); + + /* Make sure that we got at least our header! 
*/ + if (ucode_raw->size < sizeof(*ucode)) { + IWL_ERROR("File size way too small!\n"); + rc = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (void *)ucode_raw->data; + + ver = le32_to_cpu(ucode->ver); + inst_size = le32_to_cpu(ucode->inst_size); + data_size = le32_to_cpu(ucode->data_size); + init_size = le32_to_cpu(ucode->init_size); + init_data_size = le32_to_cpu(ucode->init_data_size); + boot_size = le32_to_cpu(ucode->boot_size); + + IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); + IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", + boot_size); + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size < sizeof(*ucode) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO("uCode file size %d too small\n", + (int)ucode_raw->size); + rc = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n", + (int)inst_size); + rc = -EINVAL; + goto err_release; + } + + if (data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n", + (int)data_size); + rc = -EINVAL; + goto err_release; + } + if (init_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO + ("uCode init instr len %d too large to fit in card\n", + (int)init_size); + rc = -EINVAL; + goto err_release; + } + if (init_data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO + ("uCode init data len %d too large to fit in card\n", + (int)init_data_size); + rc = -EINVAL; + goto err_release; + } + if (boot_size > IWL_MAX_BSM_SIZE) { + IWL_DEBUG_INFO + ("uCode boot instr len %d too large to fit in bsm\n", + (int)boot_size); + rc = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
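/*
 * Editorial sketch of the firmware file header implied by the reads above
 * (this hunk only dereferences the fields; the real struct iwl_ucode lives
 * in the driver headers, so the exact layout here is an assumption):
 */
struct iwl_ucode_hdr_sketch {
	__le32 ver;		/* uCode/driver API version		*/
	__le32 inst_size;	/* runtime instructions, in bytes	*/
	__le32 data_size;	/* runtime data, in bytes		*/
	__le32 init_size;	/* "initialize" instructions, in bytes	*/
	__le32 init_data_size;	/* "initialize" data, in bytes		*/
	__le32 boot_size;	/* bootstrap instructions, in bytes	*/
	u8 data[0];		/* the five images, packed back to back
				 * in the order they are copied below	*/
};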
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + priv->ucode_code.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_code.len, + &(priv->ucode_code.p_addr)); + + priv->ucode_data.len = data_size; + priv->ucode_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data.len, + &(priv->ucode_data.p_addr)); + + priv->ucode_data_backup.len = data_size; + priv->ucode_data_backup.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + &(priv->ucode_data_backup.p_addr)); + + + /* Initialization instructions and data */ + priv->ucode_init.len = init_size; + priv->ucode_init.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init.len, + &(priv->ucode_init.p_addr)); + + priv->ucode_init_data.len = init_data_size; + priv->ucode_init_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init_data.len, + &(priv->ucode_init_data.p_addr)); + + /* Bootstrap (instructions only, no data) */ + priv->ucode_boot.len = boot_size; + priv->ucode_boot.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_boot.len, + &(priv->ucode_boot.p_addr)); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr || + !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + src = &ucode->data[0]; + len = priv->ucode_code.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n", + (int)len); + memcpy(priv->ucode_code.v_addr, src, len); + IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl_up() */ + src = &ucode->data[inst_size]; + len = priv->ucode_data.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n", + (int)len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + + /* Initialization instructions (3rd block) */ + if (init_size) { + src = &ucode->data[inst_size + data_size]; + len = priv->ucode_init.len; + IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n", + (int)len); + memcpy(priv->ucode_init.v_addr, src, len); + } + + /* Initialization data (4th block) */ + if (init_data_size) { + src = &ucode->data[inst_size + data_size + init_size]; + len = priv->ucode_init_data.len; + IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", + (int)len); + memcpy(priv->ucode_init_data.v_addr, src, len); + } + + /* Bootstrap instructions (5th block) */ + src = &ucode->data[inst_size + data_size + init_size + init_data_size]; + len = priv->ucode_boot.len; + IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", + (int)len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERROR("failed to allocate pci memory\n"); + rc = -ENOMEM; + iwl_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return rc; +} + + +/** + * iwl_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. 
+ * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int iwl_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int rc = 0; + unsigned long flags; + + /* bits 31:0 for 3945 */ + pinst = priv->ucode_code.p_addr; + pdata = priv->ucode_data_backup.p_addr; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* Tell bootstrap uCode where to find image to load */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst bytecount must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + iwl_release_restricted_access(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); + + return rc; +} + +/** + * iwl_init_alive_start - Called after REPLY_ALIVE notification receieved + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO("Initialization Alive received.\n"); + if (iwl_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl_alive_start(struct iwl_priv *priv) +{ + int rc = 0; + int thermal_spin = 0; + u32 rfkill; + + IWL_DEBUG_INFO("Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. 
+ * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read rfkill status from adapter\n"); + return; + } + + rfkill = iwl_read_restricted_reg(priv, APMG_RFKILL_REG); + IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill); + iwl_release_restricted_access(priv); + + if (rfkill & 0x1) { + clear_bit(STATUS_RF_KILL_HW, &priv->status); + /* if rfkill is not on, then wait for thermal + * sensor in adapter to kick in */ + while (iwl_hw_get_temperature(priv) == 0) { + thermal_spin++; + udelay(10); + } + + if (thermal_spin) + IWL_DEBUG_INFO("Thermal calibration took %dus\n", + thermal_spin * 10); + } else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + /* After the ALIVE response, we can process host commands */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Clear out the uCode error bit if it is set */ + clear_bit(STATUS_FW_ERROR, &priv->status); + + rc = iwl_init_channel_map(priv); + if (rc) { + IWL_ERROR("initializing regulatory failed: %d\n", rc); + return; + } + + iwl_init_geos(priv); + + if (iwl_is_rfkill(priv)) + return; + + if (!priv->mac80211_registered) { + /* Unlock so any user space entry points can call back into + * the driver without a deadlock... */ + mutex_unlock(&priv->mutex); + iwl_rate_control_register(priv->hw); + rc = ieee80211_register_hw(priv->hw); + priv->hw->conf.beacon_int = 100; + mutex_lock(&priv->mutex); + + if (rc) { + IWL_ERROR("Failed to register network " + "device (error %d)\n", rc); + return; + } + + priv->mac80211_registered = 1; + + iwl_reset_channel_flag(priv); + } else + ieee80211_start_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); + + if (iwl_is_associated(priv)) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)(&priv->active_rxon); + + memcpy(&priv->staging_rxon, &priv->active_rxon, + sizeof(priv->staging_rxon)); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + } + + /* Configure BT coexistence */ + iwl_send_bt_config(priv); + + /* Configure the adapter for unassociated operation */ + iwl_commit_rxon(priv); + + /* At this point, the NIC is initialized and operational */ + priv->notif_missed_beacons = 0; + set_bit(STATUS_READY, &priv->status); + + iwl3945_reg_txpower_periodic(priv); + + IWL_DEBUG_INFO("ALIVE processing complete.\n"); + + if (priv->error_recovering) + iwl_error_recovery(priv); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + struct ieee80211_conf *conf = NULL; + + IWL_DEBUG_INFO(DRV_NAME " is going down\n"); + + conf = ieee80211_get_hw_conf(priv->hw); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + 
wake_up_interruptible_all(&priv->wait_command_queue); + + iwl_cancel_deferred_work(priv); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill and SUSPEND bits and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill and + * SUSPEND bits and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR; + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_txq_ctx_stop(priv); + iwl_hw_rxq_stop(priv); + + spin_lock_irqsave(&priv->lock, flags); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_stop_master(priv); + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + iwl_hw_nic_reset(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl_clear_free_frames(priv); +} + +static void iwl_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl_down(priv); + mutex_unlock(&priv->mutex); +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + DECLARE_MAC_BUF(mac); + int rc, i; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARNING("Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { + IWL_WARNING("Radio disabled by SW RF kill (module " + "parameter)\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl_hw_nic_init(priv); + if (rc) { + IWL_ERROR("Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. 
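/*
 * Editorial note on the bit-preserving assignments in __iwl_down() above:
 * test_bit() returns 0 or 1, so shifting each result back to its bit
 * position and OR-ing simply rebuilds the masked value.  Both expressions
 * are therefore equivalent to masking priv->status with the bits that are
 * allowed to survive the shutdown, i.e.
 *
 *	priv->status &= (1 << STATUS_RF_KILL_HW) | (1 << STATUS_RF_KILL_SW) |
 *			(1 << STATUS_IN_SUSPEND) | (1 << STATUS_FW_ERROR);
 *
 * (with STATUS_FW_ERROR omitted from the mask in the not-yet-initialized
 * case).
 */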
+ * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. */ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = iwl_load_bsm(priv); + + if (rc) { + IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl_nic_start(priv); + + /* MAC Address location in EEPROM same for 3945/4965 */ + get_eeprom_mac(priv, priv->mac_addr); + IWL_DEBUG_INFO("MAC address: %s\n", + print_mac(mac, priv->mac_addr)); + + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERROR("Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_rf_kill(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); + + wake_up_interruptible(&priv->wait_command_queue); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + if (!iwl_is_rfkill(priv)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, + "HW and/or SW RF Kill no longer active, restarting " + "device\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } else { + + if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by SW switch\n"); + else + IWL_WARNING("Radio Frequency Kill Switch is On:\n" + "Kill switch must be turned off for " + "wireless networking to work.\n"); + } + mutex_unlock(&priv->mutex); +} + +#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) + +static void iwl_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, + "Scan completion watchdog resetting adapter (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_request_scan(struct work_struct 
*data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, request_scan); + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .meta.flags = CMD_SIZE_HUGE, + }; + int rc = 0; + struct iwl_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 direct_mask; + int phymode; + + conf = ieee80211_get_hw_conf(priv->hw); + + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + IWL_WARNING("request scan called when driver not ready.\n"); + goto done; + } + + /* Make sure the scan wasn't cancelled before this queued work + * was given the chance to run... */ + if (!test_bit(STATUS_SCANNING, &priv->status)) + goto done; + + /* This should never be called or scheduled if there is currently + * a scan active in the hardware. */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " + "Ignoring second request.\n"); + rc = -EIO; + goto done; + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); + goto done; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); + goto done; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); + goto done; + } + + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); + goto done; + } + + if (!priv->scan_bands) { + IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); + goto done; + } + + if (!priv->scan) { + priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { + rc = -ENOMEM; + goto done; + } + } + scan = priv->scan; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + unsigned long flags; + + IWL_DEBUG_INFO("Scanning while associated...\n"); + + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(600 * 1024); + if (!interval) + interval = suspend_time; + /* + * suspend time format: + * 0-19: beacon interval in usec (time before exec.) 
+ * 20-23: 0 + * 24-31: number of beacons (suspend between channels) + */ + + extra = (suspend_time / interval) << 24; + scan_suspend_time = 0xFF0FFFFF & + (extra | ((suspend_time % interval) * 1024)); + + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + /* We should add the ability for user to lock to PASSIVE ONLY */ + if (priv->one_direct_scan) { + IWL_DEBUG_SCAN + ("Kicking off one direct scan for '%s'\n", + iwl_escape_essid(priv->direct_ssid, + priv->direct_ssid_len)); + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->direct_ssid_len; + memcpy(scan->direct_scan[0].ssid, + priv->direct_ssid, priv->direct_ssid_len); + direct_mask = 1; + } else if (!iwl_is_associated(priv)) { + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->essid_len; + memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); + direct_mask = 1; + } else + direct_mask = 0; + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + IWL_MAX_SCAN_SIZE - sizeof(scan), 0)); + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + switch (priv->scan_bands) { + case 2: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate = IWL_RATE_1M_PLCP; + scan->good_CRC_th = 0; + phymode = MODE_IEEE80211G; + break; + + case 1: + scan->tx_cmd.rate = IWL_RATE_6M_PLCP; + scan->good_CRC_th = IWL_GOOD_CRC_TH; + phymode = MODE_IEEE80211A; + break; + + default: + IWL_WARNING("Invalid scan band count\n"); + goto done; + } + + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + if (direct_mask) + IWL_DEBUG_SCAN + ("Initiating direct scan for %s.\n", + iwl_escape_essid(priv->essid, priv->essid_len)); + else + IWL_DEBUG_SCAN("Initiating indirect scan.\n"); + + scan->channel_count = + iwl_get_channels_for_scan( + priv, phymode, 1, /* active */ + direct_mask, + (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + mutex_unlock(&priv->mutex); + return; + + done: + /* inform mac80211 sacn aborted */ + queue_work(priv->workqueue, &priv->scan_completed); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_up(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, up); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl_up(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_down(priv); + queue_work(priv->workqueue, &priv->up); +} + +static void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct 
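/*
 * Worked example (editorial, not from the patch) of the suspend_time
 * packing in iwl_bg_request_scan() above: with suspend_time = 100 and a
 * beacon interval of 100, extra = (100 / 100) << 24 = 0x01000000 and the
 * remainder term is (100 % 100) * 1024 = 0, so after masking with
 * 0xFF0FFFFF the device is told to suspend for one full beacon period and
 * zero additional microseconds between channels (0x01000000).
 */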
iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_post_associate(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, + post_associate.work); + + int rc = 0; + struct ieee80211_conf *conf = NULL; + DECLARE_MAC_BUF(mac); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); + return; + } + + + IWL_DEBUG_ASSOC("Associated as %d to: %s\n", + priv->assoc_id, + print_mac(mac, priv->active_rxon.bssid_addr)); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwl_commit_rxon(priv); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_STA: + iwl_rate_scale_init(priv->hw, IWL_AP_ID); + break; + + case IEEE80211_IF_TYPE_IBSS: + + /* clear out the station table */ + iwl_clear_stations_table(priv); + + iwl_add_station(priv, BROADCAST_ADDR, 0, 0); + iwl_add_station(priv, priv->bssid, 0, 0); + iwl3945_sync_sta(priv, IWL_STA_ID, + (priv->phymode == MODE_IEEE80211A)? 
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, + CMD_ASYNC); + iwl_rate_scale_init(priv->hw, IWL_STA_ID); + iwl_send_beacon_cmd(priv); + + break; + + default: + IWL_ERROR("%s Should not be called in %d mode\n", + __FUNCTION__, priv->iw_mode); + break; + } + + iwl_sequence_reset(priv); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_activate_qos(priv, 0); +#endif /* CONFIG_IWLWIFI_QOS */ + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + abort_scan); + + if (!iwl_is_ready(priv)) + return; + + mutex_lock(&priv->mutex); + + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + ieee80211_scan_completed(priv->hw); + + /* Since setting the TXPOWER may have been deferred while + * performing the scan, fire one off */ + mutex_lock(&priv->mutex); + iwl_hw_reg_send_txpower(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static int iwl_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + priv->is_open = 1; + + if (!iwl_is_rfkill(priv)) + ieee80211_start_queues(priv->hw); + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static void iwl_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + priv->is_open = 0; + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + IWL_DEBUG_MAC80211("leave\n"); +} + +static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + IWL_DEBUG_MAC80211("leave - monitor\n"); + return -1; + } + + IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ctl->tx_rate); + + if (iwl_tx_skb(priv, skb, ctl)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); + if (conf->mac_addr) + IWL_DEBUG_MAC80211("enter: MAC %s\n", + print_mac(mac, conf->mac_addr)); + + if (priv->interface_id) { + IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->interface_id = conf->if_id; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + iwl_set_mode(priv, conf->type); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +/** + * iwl_mac_config - mac80211 config callback + * + * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to + * be set inappropriately and the driver currently sets the hardware up to + * use it whenever needed. 
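/*
 * Editorial sketch (not part of this hunk): mac80211 entry points such as
 * the handlers above are exported to the stack through a struct
 * ieee80211_ops table; the driver's real table is not shown here, so the
 * initializer below only names callbacks already visible at this point.
 */
static const struct ieee80211_ops iwl_sketch_ops = {
	.tx		= iwl_mac_tx,
	.start		= iwl_mac_start,
	.stop		= iwl_mac_stop,
	.add_interface	= iwl_mac_add_interface,
	/* .config, .config_interface, etc. are wired up the same way */
};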
+ */ +static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only + * what is exposed through include/ declrations */ + if (unlikely(!iwl_param_disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + + ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", + conf->channel, conf->phymode); + IWL_DEBUG_MAC80211("leave - invalid channel\n"); + spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + iwl_set_rxon_channel(priv, conf->phymode, conf->channel); + + iwl_set_flags_for_phymode(priv, conf->phymode); + + /* The list of supported rates and rate mask can be different + * for each phymode; since the phymode may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_set_rate(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef IEEE80211_CONF_CHANNEL_SWITCH + if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { + iwl_hw_channel_switch(priv, conf->channel); + mutex_unlock(&priv->mutex); + return 0; + } +#endif + + iwl_radio_kill_sw(priv, !conf->radio_enabled); + + if (!conf->radio_enabled) { + IWL_DEBUG_MAC80211("leave - radio disabled\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_MAC80211("leave - RF kill\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + iwl_set_rate(priv); + + if (memcmp(&priv->active_rxon, + &priv->staging_rxon, sizeof(priv->staging_rxon))) + iwl_commit_rxon(priv); + else + IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); + + IWL_DEBUG_MAC80211("leave\n"); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_config_ap(struct iwl_priv *priv) +{ + int rc = 0; + + if (priv->status & STATUS_EXIT_PENDING) + return; + + /* The following should be done only at AP bring up */ + if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { + + /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + /* RXON Timing */ + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + /* FIXME: what should be the assoc_id for AP? 
*/ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + iwl_add_station(priv, BROADCAST_ADDR, 0, 0); + } + iwl_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... */ +} + +static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + DECLARE_MAC_BUF(mac); + unsigned long flags; + int rc; + + if (conf == NULL) + return -EIO; + + /* XXX: this MUST use conf->mac_addr */ + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!conf->beacon || !conf->ssid_len)) { + IWL_DEBUG_MAC80211 + ("Leaving in AP mode because HostAPD is not ready.\n"); + return 0; + } + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id); + if (conf->bssid) + IWL_DEBUG_MAC80211("bssid: %s\n", + print_mac(mac, conf->bssid)); + +/* + * very dubious code was here; the probe filtering flag is never set: + * + if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && + !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { + */ + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->interface_id != if_id) { + IWL_DEBUG_MAC80211("leave - interface_id != if_id\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (!conf->bssid) { + conf->bssid = priv->mac_addr; + memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); + IWL_DEBUG_MAC80211("bssid was set to: %s\n", + print_mac(mac, conf->bssid)); + } + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = conf->beacon; + } + + if (conf->bssid && !is_zero_ether_addr(conf->bssid) && + !is_multicast_ether_addr(conf->bssid)) { + /* If there is currently a HW scan going on in the background + * then we need to cancel it else the RXON below will fail. 
*/ + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress " + "after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return -EAGAIN; + } + memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN); + + /* TODO: Audit driver for usage of these members and see + * if mac80211 deprecates them (priv->bssid looks like it + * shouldn't be there, but I haven't scanned the IBSS code + * to verify) - jpk */ + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_config_ap(priv); + else { + priv->staging_rxon.filter_flags |= + RXON_FILTER_ASSOC_MSK; + rc = iwl_commit_rxon(priv); + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) + iwl_add_station(priv, + priv->active_rxon.bssid_addr, 1, 0); + } + + } else { + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + } + + spin_lock_irqsave(&priv->lock, flags); + if (!conf->ssid_len) + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + else + memcpy(priv->essid, conf->ssid, conf->ssid_len); + + priv->essid_len = conf->ssid_len; + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_addr_list *mc_list) +{ + /* + * XXX: dummy + * see also iwl_connection_init_rx_config + */ + *total_flags = 0; +} + +static void iwl_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + mutex_lock(&priv->mutex); + if (priv->interface_id == conf->if_id) { + priv->interface_id = 0; + memset(priv->bssid, 0, ETH_ALEN); + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + priv->essid_len = 0; + } + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) +static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) +{ + int rc = 0; + unsigned long flags; + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + + if (!iwl_is_ready_rf(priv)) { + rc = -EIO; + IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); + goto out_unlock; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ + rc = -EIO; + IWL_ERROR("ERROR: APs don't scan\n"); + goto out_unlock; + } + + /* if we just finished scan ask for delay */ + if (priv->last_scan_jiffies && + time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, + jiffies)) { + rc = -EAGAIN; + goto out_unlock; + } + if (len) { + IWL_DEBUG_SCAN("direct scan for " + "%s [%d]\n ", + iwl_escape_essid(ssid, len), (int)len); + + priv->one_direct_scan = 1; + priv->direct_ssid_len = (u8) + min((u8) len, (u8) IW_ESSID_MAX_SIZE); + memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); + } + + rc = iwl_scan_initiate(priv); + + IWL_DEBUG_MAC80211("leave\n"); + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + int rc = 0; + u8 sta_id; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_param_hwcrypto) { + IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + if (is_zero_ether_addr(addr)) + /* only 
support pairwise keys */ + return -EOPNOTSUPP; + + sta_id = iwl_hw_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_MAC80211("leave - %s not in station map.\n", + print_mac(mac, addr)); + return -EINVAL; + } + + mutex_lock(&priv->mutex); + + switch (cmd) { + case SET_KEY: + rc = iwl_update_sta_key_info(priv, key, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 1); + iwl_commit_rxon(priv); + key->hw_key_idx = sta_id; + IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + break; + case DISABLE_KEY: + rc = iwl_clear_sta_key_info(priv, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 0); + iwl_commit_rxon(priv); + IWL_DEBUG_MAC80211("disable hwcrypto key\n"); + } + break; + default: + rc = -EINVAL; + } + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return rc; +} + +static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; +#ifdef CONFIG_IWLWIFI_QOS + unsigned long flags; + int q; +#endif /* CONFIG_IWL_QOS */ + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); + return 0; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (!priv->qos_data.qos_enable) { + priv->qos_data.qos_active = 0; + IWL_DEBUG_MAC80211("leave - qos not enabled\n"); + return 0; + } + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); + priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); + priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + priv->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->burst_time * 100)); + + priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; + priv->qos_data.qos_active = 1; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_activate_qos(priv, 1); + else if (priv->assoc_id && iwl_is_associated(priv)) + iwl_activate_qos(priv, 0); + + mutex_unlock(&priv->mutex); + +#endif /*CONFIG_IWLWIFI_QOS */ + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct iwl_priv *priv = hw->priv; + int i, avail; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + unsigned long flags; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + for (i = 0; i < AC_NUM; i++) { + txq = &priv->txq[i]; + q = &txq->q; + avail = iwl_queue_space(q); + + stats->data[i].len = q->n_window - avail; + stats->data[i].limit = q->n_window - q->high_mark; + stats->data[i].count = q->n_window; + + } + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static int iwl_mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + unsigned 
long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + cancel_delayed_work(&priv->post_associate); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = 0; + priv->assoc_capability = 0; + priv->call_post_assoc_from_beacon = 0; + + /* new association get rid of ibss beacon skb */ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = NULL; + + priv->beacon_int = priv->hw->conf.beacon_int; + priv->timestamp1 = 0; + priv->timestamp0 = 0; + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) + priv->beacon_int = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Per mac80211.h: This is only used in IBSS mode... */ + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not in IBSS\n"); + mutex_unlock(&priv->mutex); + return; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + priv->only_active_channel = 0; + + iwl_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not IBSS\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = skb; + + priv->assoc_id = 0; + + IWL_DEBUG_MAC80211("leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + + queue_work(priv->workqueue, &priv->post_associate.work); + + mutex_unlock(&priv->mutex); + + return 0; +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + */ + +static ssize_t show_debug_level(struct device_driver *d, char *buf) +{ + return sprintf(buf, "0x%08X\n", iwl_debug_level); +} +static ssize_t store_debug_level(struct device_driver *d, + const char *buf, size_t count) +{ + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 0); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in hex or decimal form.\n", buf); + else + iwl_debug_level = val; + + return strnlen(buf, count); +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static ssize_t show_rf_kill(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* + * 0 - RF kill not enabled + * 1 - SW based RF kill active (sysfs) + * 2 - HW based RF kill active + * 3 - Both HW and SW based RF kill active + */ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) | + (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 
0x2 : 0x0); + + return sprintf(buf, "%i\n", val); +} + +static ssize_t store_rf_kill(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + mutex_lock(&priv->mutex); + iwl_radio_kill_sw(priv, buf[0] == '1'); + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_rs_window(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct iwl_priv *priv = d->driver_data; + return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID); +} +static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + return sprintf(buf, "%d\n", priv->user_txpower_limit); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in decimal form.\n", buf); + else + iwl_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", + flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... 
*/ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + +static ssize_t show_tune(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + (priv->phymode << 8) | + le16_to_cpu(priv->active_rxon.channel)); +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode); + +static ssize_t store_tune(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u16 tune = simple_strtoul(p, &p, 0); + u8 phymode = (tune >> 8) & 0xff; + u16 channel = tune & 0xff; + + IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel); + + mutex_lock(&priv->mutex); + if ((le16_to_cpu(priv->staging_rxon.channel) != channel) || + (priv->phymode != phymode)) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + if (!ch_info) { + IWL_WARNING("Requested invalid phymode/channel " + "combination: %d %d\n", phymode, channel); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing phymode and " + "rxon.channel = %d %d\n", + phymode, channel); + + iwl_set_rxon_channel(priv, phymode, channel); + iwl_set_flags_for_phymode(priv, phymode); + + iwl_set_rate(priv); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune); + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +static ssize_t show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *) & measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(priv->active_rxon.channel), + .start_time = cpu_to_le64(priv->last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p 
&& *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO("Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + show_measurement, store_measurement); +#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */ + +static ssize_t show_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) + i = priv->stations[IWL_AP_ID].current_rate.s.rate; + else + i = priv->stations[IWL_STA_ID].current_rate.s.rate; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + i = iwl_rate_index_from_plcp(i); + if (i == -1) + return sprintf(buf, "0\n"); + + return sprintf(buf, "%d%s\n", + (iwl_rates[i].ieee >> 1), + (iwl_rates[i].ieee & 0x1) ? ".5" : ""); +} + +static DEVICE_ATTR(rate, S_IRUSR, show_rate, NULL); + +static ssize_t store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, + store_retry_rate); + +static ssize_t store_power_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int rc; + int mode; + + mode = simple_strtoul(buf, NULL, 0); + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + rc = -EAGAIN; + goto out; + } + + if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) + mode = IWL_POWER_AC; + else + mode |= IWL_POWER_ENABLED; + + if (mode != priv->power_mode) { + rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode)); + if (rc) { + IWL_DEBUG_MAC80211("failed setting power mode.\n"); + goto out; + } + priv->power_mode = mode; + } + + rc = count; + + out: + mutex_unlock(&priv->mutex); + return rc; +} + +#define MAX_WX_STRING 80 + +/* Values are in microsecond */ +static const s32 timeout_duration[] = { + 350000, + 250000, + 75000, + 37000, + 25000, +}; +static const s32 period_duration[] = { + 400000, + 700000, + 1000000, + 1000000, + 1000000 +}; + +static ssize_t show_power_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int level = IWL_POWER_LEVEL(priv->power_mode); + char *p = buf; + + p += sprintf(p, "%d ", level); + switch (level) { + case IWL_POWER_MODE_CAM: + case IWL_POWER_AC: + p += sprintf(p, "(AC)"); + break; + case IWL_POWER_BATTERY: + p += sprintf(p, "(BATTERY)"); + break; + default: + p += sprintf(p, + "(Timeout %dms, Period %dms)", + timeout_duration[level - 1] / 1000, + period_duration[level - 1] / 1000); + } + + if (!(priv->power_mode & IWL_POWER_ENABLED)) + p += sprintf(p, " OFF\n"); + else + p += sprintf(p, " \n"); + + return (p - buf + 1); + +} + +static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, + store_power_level); + +static ssize_t show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = 
dev_get_drvdata(d); + int len = 0, i; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode = NULL; + int count = 0; + + if (!iwl_is_ready(priv)) + return -EAGAIN; + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G); + if (!hw_mode) + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } + + len += + sprintf(&buf[len], + "Displaying %d channels in 2.4GHz band " + "(802.11bg):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. + flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } else { + channels = NULL; + count = 0; + } + + len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band " + "(802.11a):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. + flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + return len; +} + +static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *) & priv->statistics; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + "Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->antenna); +} + +static ssize_t store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ant; + struct iwl_priv *priv = dev_get_drvdata(d); + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO("not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); + priv->antenna = (enum iwl_antenna)ant; + } else + IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); + +static ssize_t show_status(struct device *d, + struct device_attribute *attr, 
char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + if (!iwl_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); + +static ssize_t dump_event_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->up, iwl_bg_up); + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); + + iwl_hw_setup_deferred_work(priv); + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl_hw_cancel_deferred_work(priv); + + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_delayed_work(&priv->post_associate); + cancel_work_sync(&priv->beacon_update); +} + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_dump_events.attr, + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + &dev_attr_measurement.attr, +#endif + &dev_attr_power_level.attr, + &dev_attr_rate.attr, + &dev_attr_retry_rate.attr, + &dev_attr_rf_kill.attr, + &dev_attr_rs_window.attr, + &dev_attr_statistics.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tune.attr, + &dev_attr_tx_power.attr, + + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +static struct ieee80211_ops iwl_hw_ops = { + .tx = iwl_mac_tx, + .start = iwl_mac_start, + .stop = iwl_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .config_interface = iwl_mac_config_interface, + .configure_filter = iwl_configure_filter, + .set_key = iwl_mac_set_key, + .get_stats = iwl_mac_get_stats, + .get_tx_stats = iwl_mac_get_tx_stats, + .conf_tx = iwl_mac_conf_tx, + .get_tsf = 
iwl_mac_get_tsf, + .reset_tsf = iwl_mac_reset_tsf, + .beacon_update = iwl_mac_beacon_update, + .hw_scan = iwl_mac_hw_scan +}; + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + u32 pci_id; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + int i; + + if (iwl_param_disable_hw_scan) { + IWL_DEBUG_INFO("Disabling hw_scan\n"); + iwl_hw_ops.hw_scan = NULL; + } + + if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) || + (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) { + IWL_ERROR("invalid queues_num, should be between %d and %d\n", + IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); + err = -EINVAL; + goto out; + } + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops); + if (hw == NULL) { + IWL_ERROR("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); + priv = hw->priv; + priv->hw = hw; + + priv->pci_dev = pdev; + priv->antenna = (enum iwl_antenna)iwl_param_antenna; +#ifdef CONFIG_IWLWIFI_DEBUG + iwl_debug_level = iwl_param_debug; + atomic_set(&priv->restrict_refcnt, 0); +#endif + priv->retry_rate = 1; + + priv->ibss_beacon = NULL; + + /* Tell mac80211 and its clients (e.g. Wireless Extensions) + * the range of signal quality values that we'll provide. + * Negative values for level/noise indicate that we'll provide dBm. + * For WE, at least, non-0 values here *enable* display of values + * in app (iwconfig). */ + hw->max_rssi = -20; /* signal level, negative indicates dBm */ + hw->max_noise = -20; /* noise level, negative indicates dBm */ + hw->max_signal = 100; /* link quality indication (%) */ + + /* Tell mac80211 our Tx characteristics */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; + + hw->queues = 4; + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->power_data.lock); + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) + INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + iwl_clear_stations_table(priv); + + priv->data_retry_limit = -1; + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->phymode = -1; + + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); + + /* Initialize module parameter values here */ + + if (iwl_param_disable) { + set_bit(STATUS_RF_KILL_SW, &priv->status); + IWL_DEBUG_INFO("Radio disabled.\n"); + } + + priv->iw_mode = IEEE80211_IF_TYPE_STA; + + pci_id = + (priv->pci_dev->device << 16) | 
priv->pci_dev->subsystem_device; + + switch (pci_id) { + case 0x42221005: /* 0x4222 0x8086 0x1005 is BG SKU */ + case 0x42221034: /* 0x4222 0x8086 0x1034 is BG SKU */ + case 0x42271014: /* 0x4227 0x8086 0x1014 is BG SKU */ + case 0x42221044: /* 0x4222 0x8086 0x1044 is BG SKU */ + priv->is_abg = 0; + break; + + /* + * Rest are assumed ABG SKU -- if this is not the + * case then the card will get the wrong 'Detected' + * line in the kernel log however the code that + * initializes the GEO table will detect no A-band + * channels and remove the is_abg mask. + */ + default: + priv->is_abg = 1; + break; + } + + printk(KERN_INFO DRV_NAME + ": Detected Intel PRO/Wireless 3945%sBG Network Connection\n", + priv->is_abg ? "A" : ""); + + /* Device-specific setup */ + if (iwl_hw_set_hw_setting(priv)) { + IWL_ERROR("failed to set hw settings\n"); + mutex_unlock(&priv->mutex); + goto out_iounmap; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (iwl_param_qos_enable) + priv->qos_data.qos_enable = 1; + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; +#endif /* CONFIG_IWLWIFI_QOS */ + + iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6); + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + priv->rates_mask = IWL_RATES_MASK; + /* If power management is turned on, default to AC mode */ + priv->power_mode = IWL_POWER_AC; + priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; + + pci_enable_msi(pdev); + + err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERROR("Error allocating IRQ %d\n", pdev->irq); + goto out_disable_msi; + } + + mutex_lock(&priv->mutex); + + err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); + if (err) { + IWL_ERROR("failed to create sysfs device attributes\n"); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. 
*/ + err = iwl_read_ucode(priv); + if (err) { + IWL_ERROR("Could not read microcode: %d\n", err); + mutex_unlock(&priv->mutex); + goto out_pci_alloc; + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_INFO("Queing UP work.\n"); + + queue_work(priv->workqueue, &priv->up); + + return 0; + + out_pci_alloc: + iwl_dealloc_ucode_pci(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + out_release_irq: + free_irq(pdev->irq, priv); + + out_disable_msi: + pci_disable_msi(pdev); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_unset_hw_setting(priv); + + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + out_ieee80211_free_hw: + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + struct list_head *p, *q; + int i; + + if (!priv) + return; + + IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); + + mutex_lock(&priv->mutex); + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + mutex_unlock(&priv->mutex); + + /* Free MAC hash list for ADHOC */ + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { + list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { + list_del(p); + kfree(list_entry(p, struct iwl_ibss_seq, list)); + } + } + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + iwl_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl_rx_queue_free(priv, &priv->rxq); + iwl_hw_txq_ctx_free(priv); + + iwl_unset_hw_setting(priv); + iwl_clear_stations_table(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + iwl_rate_control_unregister(priv->hw); + } + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + kfree(priv->channel_info); + + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + +#ifdef CONFIG_PM + +static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + + mutex_lock(&priv->mutex); + + set_bit(STATUS_IN_SUSPEND, &priv->status); + + /* Take down the device; powers it off, etc. */ + __iwl_down(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_resume(struct iwl_priv *priv) +{ + unsigned long flags; + + /* The following it a temporary work around due to the + * suspend / resume not fully initializing the NIC correctly. + * Without all of the following, resume will not attempt to take + * down the NIC (it shouldn't really need to) and will just try + * and bring the NIC back up. However that fails during the + * ucode verification process. This then causes iwl_down to be + * called *after* iwl_hw_nic_init() has succeeded -- which + * then lets the next init sequence succeed. So, we've + * replicated all of that NIC init code here... 
*/ + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + iwl_hw_nic_init(priv); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_reset(priv); + + /* Bring the device back up */ + clear_bit(STATUS_IN_SUSPEND, &priv->status); + queue_work(priv->workqueue, &priv->up); +} + +static int iwl_pci_resume(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + int err; + + printk(KERN_INFO "Coming out of suspend...\n"); + + mutex_lock(&priv->mutex); + + pci_set_power_state(pdev, PCI_D0); + err = pci_enable_device(pdev); + pci_restore_state(pdev); + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. + */ + pci_write_config_byte(pdev, 0x41, 0x00); + + iwl_resume(priv); + mutex_unlock(&priv->mutex); + + return 0; +} + +#endif /* CONFIG_PM */ + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = __devexit_p(iwl_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + ret = pci_register_driver(&iwl_driver); + if (ret) { + IWL_ERROR("Unable to initialize PCI module\n"); + return ret; + } +#ifdef CONFIG_IWLWIFI_DEBUG + ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level); + if (ret) { + IWL_ERROR("Unable to create driver sysfs file\n"); + pci_unregister_driver(&iwl_driver); + return ret; + } +#endif + + return ret; +} + +static void __exit iwl_exit(void) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level); +#endif + pci_unregister_driver(&iwl_driver); +} + +module_param_named(antenna, iwl_param_antenna, int, 0444); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(disable, iwl_param_disable, int, 0444); +MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); +module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444); +MODULE_PARM_DESC(hwcrypto, + "using hardware crypto engine (default 0 [software])\n"); +module_param_named(debug, iwl_param_debug, int, 0444); +MODULE_PARM_DESC(debug, "debug output mask"); +module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444); 
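The module parameters declared here follow the usual pairing of module_param_named() with a MODULE_PARM_DESC() string (the description for disable_hw_scan continues just below). As a rough illustration of that pattern outside the driver, a minimal module with one read-only integer parameter, range-checked at init time in the same spirit as the queues_num check in iwl_pci_probe(), might look like the sketch below; all "example_" names and the 1..16 bounds are placeholders, not part of this patch:

    #include <linux/module.h>
    #include <linux/init.h>

    static int example_queues_num = 16;
    module_param_named(queues_num, example_queues_num, int, 0444);
    MODULE_PARM_DESC(queues_num, "number of hw queues (illustrative)");

    static int __init example_init(void)
    {
            /* reject out-of-range values at load time, as iwl_pci_probe() does */
            if (example_queues_num < 1 || example_queues_num > 16)
                    return -EINVAL;
            return 0;
    }

    static void __exit example_exit(void)
    {
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
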
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); + +module_param_named(queues_num, iwl_param_queues_num, int, 0444); +MODULE_PARM_DESC(queues_num, "number of hw queues."); + +/* QoS */ +module_param_named(qos_enable, iwl_param_qos_enable, int, 0444); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); + +module_exit(iwl_exit); +module_init(iwl_init); diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c new file mode 100644 index 000000000000..b1a6e39f7821 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c @@ -0,0 +1,9340 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +/* + * NOTE: This file (iwl-base.c) is used to build to multiple hardware targets + * by defining IWL to either 3945 or 4965. 
The Makefile used when building + * the base targets will create base-3945.o and base-4965.o + * + * The eventual goal is to move as many of the #if IWL / #endif blocks out of + * this file and into the hardware specific implementation files (iwl-XXXX.c) + * and leave only the common (non #ifdef sprinkled) code in this file + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/ieee80211_radiotap.h> +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define IWL 4965 + +#include "iwlwifi.h" +#include "iwl-4965.h" +#include "iwl-helpers.h" + +#ifdef CONFIG_IWLWIFI_DEBUG +u32 iwl_debug_level; +#endif + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* module parameters */ +int iwl_param_disable_hw_scan; +int iwl_param_debug; +int iwl_param_disable; /* def: enable radio */ +int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */ +int iwl_param_hwcrypto; /* def: using software encryption */ +int iwl_param_qos_enable = 1; +int iwl_param_queues_num = IWL_MAX_NUM_QUEUES; + +/* + * module name, copyright, version, etc. + * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk + */ + +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT +#define VS "s" +#else +#define VS +#endif + +#define IWLWIFI_VERSION "1.1.17k" VD VS +#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" +#define DRV_VERSION IWLWIFI_VERSION + +/* Change firmware file name, using "-" and incrementing number, + * *only* when uCode interface or architecture changes so that it + * is not compatible with earlier drivers. 
+ * This number will also appear in << 8 position of 1st dword of uCode file */ +#define IWL4965_UCODE_API "-1" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); + +__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + int hdr_len = ieee80211_get_hdrlen(fc); + + if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) + return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN); + return NULL; +} + +static const struct ieee80211_hw_mode *iwl_get_hw_mode( + struct iwl_priv *priv, int mode) +{ + int i; + + for (i = 0; i < 3; i++) + if (priv->modes[i].mode == mode) + return &priv->modes[i]; + + return NULL; +} + +static int iwl_is_empty_essid(const char *essid, int essid_len) +{ + /* Single white space is for Linksys APs */ + if (essid_len == 1 && essid[0] == ' ') + return 1; + + /* Otherwise, if the entire essid is 0, we assume it is hidden */ + while (essid_len) { + essid_len--; + if (essid[essid_len] != '\0') + return 0; + } + + return 1; +} + +static const char *iwl_escape_essid(const char *essid, u8 essid_len) +{ + static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + const char *s = essid; + char *d = escaped; + + if (iwl_is_empty_essid(essid, essid_len)) { + memcpy(escaped, "<hidden>", sizeof("<hidden>")); + return escaped; + } + + essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE); + while (essid_len--) { + if (*s == '\0') { + *d++ = '\\'; + *d++ = '0'; + s++; + } else + *d++ = *s++; + } + *d = '\0'; + return escaped; +} + +static void iwl_print_hex_dump(int level, void *p, u32 len) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_debug_level & level)) + return; + + print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, + p, len, 1); +#endif +} + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A queue is a circular buffers with 'Read' and 'Write' pointers. + * 2 empty entries always kept in the buffer to protect from overflow. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * The IWL operates with six queues, one receive queue in the device's + * sram, one transmit queue for sending commands to the device firmware, + * and four transmit queues for data. + ***************************************************/ + +static int iwl_queue_space(const struct iwl_queue *q) +{ + int s = q->last_used - q->first_empty; + + if (q->last_used > q->first_empty) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +static inline int x2_queue_used(const struct iwl_queue *q, int i) +{ + return q->first_empty > q->last_used ? 
+ (i >= q->last_used && i < q->first_empty) : + !(i < q->last_used && i >= q->first_empty); +} + +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) +{ + if (is_huge) + return q->n_window; + + return index & (q->n_window - 1); +} + +static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_queue_inc_wrap + * and iwl_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->first_empty = q->last_used = 0; + + return 0; +} + +static int iwl_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct pci_dev *dev = priv->pci_dev; + + if (id != IWL_CMD_QUEUE_NUM) { + txq->txb = kmalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERROR("kmalloc for auxilary BD " + "structures failed\n"); + goto error; + } + } else + txq->txb = NULL; + + txq->bd = pci_alloc_consistent(dev, + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, + &txq->q.dma_addr); + + if (!txq->bd) { + IWL_ERROR("pci_alloc_consistent(%zd) failed\n", + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); + goto error; + } + txq->q.id = id; + + return 0; + + error: + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + return -ENOMEM; +} + +int iwl_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq, int slots_num, u32 txq_id) +{ + struct pci_dev *dev = priv->pci_dev; + int len; + int rc = 0; + + /* alocate command space + one big command for scan since scan + * command is very huge the system will not have two scan at the + * same time */ + len = sizeof(struct iwl_cmd) * slots_num; + if (txq_id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); + if (!txq->cmd) + return -ENOMEM; + + rc = iwl_tx_queue_alloc(priv, txq, txq_id); + if (rc) { + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + return -ENOMEM; + } + txq->need_update = 0; + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + iwl_hw_tx_queue_init(priv, txq); + + return 0; +} + +/** + * iwl_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. txq itself is not freed. 
+ * + */ +void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_queue *q = &txq->q; + struct pci_dev *dev = priv->pci_dev; + int len; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->first_empty != q->last_used; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) + iwl_hw_txq_free_tfd(priv, txq); + + len = sizeof(struct iwl_cmd) * q->n_window; + if (q->id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + /* free buffers belonging to queue itself */ + if (txq->q.n_bd) + pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * + txq->q.n_bd, txq->bd, txq->q.dma_addr); + + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + /* 0 fill whole structure */ + memset(txq, 0, sizeof(*txq)); +} + +const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +/*************** STATION TABLE MANAGEMENT **** + * + * NOTE: This needs to be overhauled to better synchronize between + * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c + * + * mac80211 should also be examined to determine if sta_info is duplicating + * the functionality provided here + */ + +/**************************************************************/ + +#if 0 /* temparary disable till we add real remove station */ +static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) +{ + int index = IWL_INVALID_STATION; + int i; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) + if (priv->stations[i].used && + !compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (unlikely(index == IWL_INVALID_STATION)) + goto out; + + if (priv->stations[index].used) { + priv->stations[index].used = 0; + priv->num_stations--; + } + + BUG_ON(priv->num_stations < 0); + +out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; +} +#endif + +static void iwl_clear_stations_table(struct iwl_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->num_stations = 0; + memset(priv->stations, 0, sizeof(priv->stations)); + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags) +{ + int i; + int index = IWL_INVALID_STATION; + struct iwl_station_entry *station; + unsigned long flags_spin; + DECLARE_MAC_BUF(mac); + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (!priv->stations[i].used && + index == IWL_INVALID_STATION) + index = i; + } + + + /* These twh conditions has the same outcome but keep them separate + since they have different meaning */ + if (unlikely(index == IWL_INVALID_STATION)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + if (priv->stations[index].used && + !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + + IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr)); + 
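The lookup above is essentially the driver's whole station-table policy: under sta_lock, reuse a slot whose address already matches, otherwise take the first unused slot, and give up with IWL_INVALID_STATION when the table is full (the chosen entry is then filled in just below). A stand-alone sketch of that slot-selection step, using a deliberately simplified entry type rather than the real iwl_station_entry, might read:

    #include <string.h>

    #define ETH_ALEN         6
    #define INVALID_STATION  (-1)

    struct sta_slot {
            int           used;
            unsigned char addr[ETH_ALEN];
    };

    /* Return the index of an existing entry for addr, otherwise the first
     * free index, otherwise INVALID_STATION when the table is full. */
    static int find_station_slot(const struct sta_slot *tbl, int max,
                                 const unsigned char *addr)
    {
            int i, free_idx = INVALID_STATION;

            for (i = 0; i < max; i++) {
                    if (tbl[i].used && !memcmp(tbl[i].addr, addr, ETH_ALEN))
                            return i;            /* already known: reuse it */
                    if (!tbl[i].used && free_idx == INVALID_STATION)
                            free_idx = i;        /* remember first free slot */
            }
            return free_idx;
    }
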
station = &priv->stations[index]; + station->used = 1; + priv->num_stations++; + + memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = index; + station->sta.station_flags = 0; + +#ifdef CONFIG_IWLWIFI_HT + /* BCAST station and IBSS stations do not work in HT mode */ + if (index != priv->hw_setting.bcast_sta_id && + priv->iw_mode != IEEE80211_IF_TYPE_IBSS) + iwl4965_set_ht_add_station(priv, index); +#endif /*CONFIG_IWLWIFI_HT*/ + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + iwl_send_add_station(priv, &station->sta, flags); + return index; + +} + +/*************** DRIVER STATUS FUNCTIONS *****/ + +static inline int iwl_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_is_rfkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status) || + test_bit(STATUS_RF_KILL_SW, &priv->status); +} + +static inline int iwl_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_is_rfkill(priv)) + return 0; + + return iwl_is_ready(priv); +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +#define IWL_CMD(x) case x : return #x + +static const char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(RADAR_NOTIFICATION); + IWL_CMD(REPLY_QUIET_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); + IWL_CMD(QUIET_NOTIFICATION); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(MEASURE_ABORT_NOTIFICATION); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(REPLY_CARD_STATE_CMD); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_4965_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + default: + return "UNKNOWN"; + + } +} + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +/** + * iwl_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. 
On success, it turns the index (> 0) of command in the + * command queue. + */ +static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; + struct iwl_queue *q = &txq->q; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + struct iwl_cmd *out_cmd; + u32 idx; + u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + dma_addr_t phys_addr; + int ret; + unsigned long flags; + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->meta.flags & CMD_SIZE_HUGE)); + + if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERROR("No space for Tx\n"); + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + + control_flags = (u32 *) tfd; + + idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE); + out_cmd = &txq->cmd[idx]; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | + INDEX_TO_SEQ(q->first_empty)); + if (out_cmd->meta.flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); + + phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + + offsetof(struct iwl_cmd, hdr); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); + + IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM); + + txq->need_update = 1; + ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0); + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + iwl_tx_queue_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return ret ? ret : idx; +} + +int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->meta.flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->meta.flags & CMD_WANT_SKB); + + /* An asynchronous command MUST have a callback. */ + BUG_ON(!cmd->meta.u.callback); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */ + + BUG_ON(cmd->meta.flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. 
*/ + BUG_ON(cmd->meta.u.callback != NULL); + + if (atomic_xchg(&entry, 1)) { + IWL_ERROR("Error sending %s: Already sending a host command\n", + get_cmd_string(cmd->id)); + return -EBUSY; + } + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + + if (cmd->meta.flags & CMD_WANT_SKB) + cmd->meta.source = &cmd->meta; + + cmd_idx = iwl_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERROR("Error sending %s: time out after %dms.\n", + get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_DEBUG_INFO("Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { + IWL_ERROR("Error: Response NULL in '%s'\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto out; + } + + ret = 0; + goto out; + +cancel: + if (cmd->meta.flags & CMD_WANT_SKB) { + struct iwl_cmd *qcmd; + + /* Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). */ + qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; + qcmd->meta.flags &= ~CMD_WANT_SKB; + } +fail: + if (cmd->meta.u.skb) { + dev_kfree_skb_any(cmd->meta.u.skb); + cmd->meta.u.skb = NULL; + } +out: + atomic_set(&entry, 0); + return ret; +} + +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + /* A command can not be asynchronous AND expect an SKB to be set. */ + BUG_ON((cmd->meta.flags & CMD_ASYNC) && + (cmd->meta.flags & CMD_WANT_SKB)); + + if (cmd->meta.flags & CMD_ASYNC) + return iwl_send_cmd_async(priv, cmd); + + return iwl_send_cmd_sync(priv, cmd); +} + +int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = sizeof(val), + .data = &val, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +int iwl_send_statistics_request(struct iwl_priv *priv) +{ + return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0); +} + +/** + * iwl_rxon_add_station - add station into station table. + * + * there is only one AP station with id= IWL_AP_ID + * NOTE: mutex must be held before calling the this fnction +*/ +static int iwl_rxon_add_station(struct iwl_priv *priv, + const u8 *addr, int is_ap) +{ + u8 sta_id; + + sta_id = iwl_add_station(priv, addr, is_ap, 0); + iwl4965_add_station(priv, addr, is_ap); + + return sta_id; +} + +/** + * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON + * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz + * @channel: Any channel valid for the requested phymode + + * In addition to setting the staging RXON, priv->phymode is also set. 
+ * + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the phymode + */ +static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel) +{ + if (!iwl_get_channel_info(priv, phymode, channel)) { + IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", + channel, phymode); + return -EINVAL; + } + + if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && + (priv->phymode == phymode)) + return 0; + + priv->staging_rxon.channel = cpu_to_le16(channel); + if (phymode == MODE_IEEE80211A) + priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + + priv->phymode = phymode; + + IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); + + return 0; +} + +/** + * iwl_check_rxon_cmd - validate RXON structure is valid + * + * NOTE: This is really only useful during development and can eventually + * be #ifdef'd out once the driver is stable and folks aren't actively + * making changes + */ +static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon) +{ + int error = 0; + int counter = 1; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + error |= le32_to_cpu(rxon->flags & + (RXON_FLG_TGJ_NARROW_BAND_MSK | + RXON_FLG_RADAR_DETECT_MSK)); + if (error) + IWL_WARNING("check 24G fields %d | %d\n", + counter++, error); + } else { + error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? + 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); + if (error) + IWL_WARNING("check 52 fields %d | %d\n", + counter++, error); + error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); + if (error) + IWL_WARNING("check 52 CCK %d | %d\n", + counter++, error); + } + error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; + if (error) + IWL_WARNING("check mac addr %d | %d\n", counter++, error); + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && + ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); + if (error) + IWL_WARNING("check basic rate %d | %d\n", counter++, error); + + error |= (le16_to_cpu(rxon->assoc_id) > 2007); + if (error) + IWL_WARNING("check assoc id %d | %d\n", counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); + if (error) + IWL_WARNING("check CCK and short slot %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); + if (error) + IWL_WARNING("check CCK & auto detect %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); + if (error) + IWL_WARNING("check TGG and auto detect %d | %d\n", + counter++, error); + + if (error) + IWL_WARNING("Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n"); + return -1; + } + return 0; +} + +/** + * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit + * @priv: staging_rxon is comapred to active_rxon + * + * If the RXON structure is changing sufficient to require a new + * tune or to clear and reset the RXON_FILTER_ASSOC_MSK then return 1 + * to indicate a new tune is required. 
+ */ +static int iwl_full_rxon_required(struct iwl_priv *priv) +{ + + /* These items are only settable from the full RXON command */ + if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || + compare_ether_addr(priv->staging_rxon.bssid_addr, + priv->active_rxon.bssid_addr) || + compare_ether_addr(priv->staging_rxon.node_addr, + priv->active_rxon.node_addr) || + compare_ether_addr(priv->staging_rxon.wlap_bssid_addr, + priv->active_rxon.wlap_bssid_addr) || + (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) || + (priv->staging_rxon.channel != priv->active_rxon.channel) || + (priv->staging_rxon.air_propagation != + priv->active_rxon.air_propagation) || + (priv->staging_rxon.ofdm_ht_single_stream_basic_rates != + priv->active_rxon.ofdm_ht_single_stream_basic_rates) || + (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates != + priv->active_rxon.ofdm_ht_dual_stream_basic_rates) || + (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) || + (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) + return 1; + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) != + (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)) + return 1; + + /* Check if we are switching association toggle */ + if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) != + (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) + return 1; + + return 0; +} + +static int iwl_send_rxon_assoc(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res = NULL; + struct iwl_rxon_assoc_cmd rxon_assoc; + struct iwl_host_cmd cmd = { + .id = REPLY_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .meta.flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; + const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO("Using current RXON_ASSOC. 
Not resending.\n"); + return 0; + } + + rxon_assoc.flags = priv->staging_rxon.flags; + rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; + rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; + rxon_assoc.reserved = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + priv->staging_rxon.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n"); + rc = -EIO; + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +/** + * iwl_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is commited to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. + */ +static int iwl_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + DECLARE_MAC_BUF(mac); + int rc = 0; + + if (!iwl_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; + + rc = iwl_check_rxon_cmd(&priv->staging_rxon); + if (rc) { + IWL_ERROR("Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + rc = iwl_send_rxon_assoc(priv); + if (rc) { + IWL_ERROR("Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + return 0; + } + + /* station table will be cleared */ + priv->assoc_station_added = 0; + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; + if (!priv->error_recovering) + priv->start_calib = 0; + + iwl4965_init_sensitivity(priv, CMD_ASYNC, 1); +#endif /* CONFIG_IWLWIFI_SENSITIVITY */ + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && + (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { + IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERROR("Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + } + + IWL_DEBUG_INFO("Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %s\n", + ((priv->staging_rxon.filter_flags & + RXON_FILTER_ASSOC_MSK) ? 
"" : "out"), + le16_to_cpu(priv->staging_rxon.channel), + print_mac(mac, priv->staging_rxon.bssid_addr)); + + /* Apply the new configuration */ + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (rc) { + IWL_ERROR("Error setting new configuration (%d).\n", rc); + return rc; + } + + iwl_clear_stations_table(priv); + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + if (!priv->error_recovering) + priv->start_calib = 0; + + priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; + iwl4965_init_sensitivity(priv, CMD_ASYNC, 1); +#endif /* CONFIG_IWLWIFI_SENSITIVITY */ + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = iwl_hw_reg_send_txpower(priv); + if (rc) { + IWL_ERROR("Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Add the broadcast address so we can send broadcast frames */ + if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) == + IWL_INVALID_STATION) { + IWL_ERROR("Error adding BROADCAST address for transmit.\n"); + return -EIO; + } + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (iwl_is_associated(priv) && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { + if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) + == IWL_INVALID_STATION) { + IWL_ERROR("Error adding AP address for transmit.\n"); + return -EIO; + } + priv->assoc_station_added = 1; + } + + return 0; +} + +static int iwl_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .flags = 3, + .lead_time = 0xAA, + .max_kill = 1, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd); +} + +static int iwl_send_scan_abort(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .meta.flags = CMD_WANT_SKB, + }; + + /* If there isn't a scan actively going on in the hardware + * then we are in between scan bands and not actually + * actively scanning, so don't send the abort command */ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return 0; + } + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return rc; + } + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_card_state_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct sk_buff *skb) +{ + return 1; +} + +/* + * CARD_STATE_CMD + * + * Use: Sets the internal card state to enable, disable, or halt + * + * When in the 'enable' state the card operates as normal. + * When in the 'disable' state, the card enters into a low power mode. + * When in the 'halt' state, the card is shut down and must be fully + * restarted to come back on. 
+ */ +static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_CARD_STATE_CMD, + .len = sizeof(u32), + .data = &flags, + .meta.flags = meta_flag, + }; + + if (meta_flag & CMD_ASYNC) + cmd.meta.u.callback = iwl_card_state_sync_callback; + + return iwl_send_cmd(priv, &cmd); +} + +static int iwl_add_sta_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + struct iwl_rx_packet *res = NULL; + + if (!skb) { + IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); + return 1; + } + + res = (struct iwl_rx_packet *)skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + return 1; + } + + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + break; + default: + break; + } + + /* We didn't cache the SKB; let the caller free it */ + return 1; +} + +int iwl_send_add_station(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *res = NULL; + int rc = 0; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .len = sizeof(struct iwl_addsta_cmd), + .meta.flags = flags, + .data = sta, + }; + + if (flags & CMD_ASYNC) + cmd.meta.u.callback = iwl_add_sta_sync_callback; + else + cmd.meta.flags |= CMD_WANT_SKB; + + rc = iwl_send_cmd(priv, &cmd); + + if (rc || (flags & CMD_ASYNC)) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + rc = -EIO; + } + + if (rc == 0) { + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); + break; + default: + rc = -EIO; + IWL_WARNING("REPLY_ADD_STA failed\n"); + break; + } + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_update_sta_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + + switch (keyconf->alg) { + case ALG_CCMP: + key_flags |= STA_KEY_FLG_CCMP; + key_flags |= cpu_to_le16( + keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + break; + case ALG_TKIP: + case ALG_WEP: + return -EINVAL; + default: + return -EINVAL; + } + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + 
spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static void iwl_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARNING("%d frames still in use. Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERROR("Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + const u8 *dest, int left) +{ + + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && + (priv->iw_mode != IEEE80211_IF_TYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +int iwl_rate_index_from_plcp(int plcp) +{ + int i = 0; + + if (plcp & RATE_MCS_HT_MSK) { + i = (plcp & 0xff); + + if (i >= IWL_RATE_MIMO_6M_PLCP) + i = i - IWL_RATE_MIMO_6M_PLCP; + + i += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (i >= IWL_RATE_9M_INDEX) + i += 1; + if ((i >= IWL_FIRST_OFDM_RATE) && + (i <= IWL_LAST_OFDM_RATE)) + return i; + } else { + for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) + if (iwl_rates[i].plcp == (plcp &0xFF)) + return i; + } + return -1; +} + +static u8 iwl_rate_get_lowest_plcp(int rate_mask) +{ + u8 i; + + for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; + i = iwl_rates[i].next_ieee) { + if (rate_mask & (1 << i)) + return iwl_rates[i].plcp; + } + + return IWL_RATE_INVALID; +} + +static int iwl_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl_get_free_frame(priv); + + if (!frame) { + IWL_ERROR("Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & + 0xFF0); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_6M_PLCP; + } else { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_1M_PLCP; + } + + frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl_free_frame(priv, frame); + + return rc; +} + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +static void get_eeprom_mac(struct 
iwl_priv *priv, u8 *mac) +{ + memcpy(mac, priv->eeprom.mac_address, 6); +} + +/** + * iwl_eeprom_init - read EEPROM contents + * + * Load the EEPROM from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. + */ +int iwl_eeprom_init(struct iwl_priv *priv) +{ + u16 *e = (u16 *)&priv->eeprom; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + u32 r; + int sz = sizeof(priv->eeprom); + int rc; + int i; + u16 addr; + + /* The EEPROM structure has several padding buffers within it + * and when adding new EEPROM maps is subject to programmer errors + * which may be very difficult to identify without explicitly + * checking the resulting size of the eeprom map. */ + BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); + + if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); + return -ENOENT; + } + + rc = iwl_eeprom_aqcuire_semaphore(priv); + if (rc < 0) { + IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n"); + return -ENOENT; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); + _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); + + for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; + i += IWL_EEPROM_ACCESS_DELAY) { + r = _iwl_read_restricted(priv, CSR_EEPROM_REG); + if (r & CSR_EEPROM_REG_READ_VALID_MSK) + break; + udelay(IWL_EEPROM_ACCESS_DELAY); + } + + if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { + IWL_ERROR("Time out reading EEPROM[%d]", addr); + rc = -ETIMEDOUT; + goto done; + } + e[addr / 2] = le16_to_cpu(r >> 16); + } + rc = 0; + +done: + iwl_eeprom_release_semaphore(priv); + return rc; +} + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_DEBUG + +/** + * iwl_report_frame - dump frame to syslog during debug sessions + * + * hack this function to show different aspects of received frames, + * including selective frame dumps. + * group100 parameter selects whether to show 1 out of 100 good frames. + * + * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type + * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats) + * is 3945-specific and gives bad output for 4965. Need to split the + * functionality, keep common stuff here. 
+ */ +void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ + u32 to_us; + u32 print_summary = 0; + u32 print_dump = 0; /* set to 1 to dump all frames' contents */ + u32 hundred = 0; + u32 dataframe = 0; + u16 fc; + u16 seq_ctl; + u16 channel; + u16 phy_flags; + int rate_sym; + u16 length; + u16 status; + u16 bcn_tmr; + u32 tsf_low; + u64 tsf; + u8 rssi; + u8 agc; + u16 sig_avg; + u16 noise_diff; + struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + u8 *data = IWL_RX_DATA(pkt); + + /* MAC header */ + fc = le16_to_cpu(header->frame_control); + seq_ctl = le16_to_cpu(header->seq_ctrl); + + /* metadata */ + channel = le16_to_cpu(rx_hdr->channel); + phy_flags = le16_to_cpu(rx_hdr->phy_flags); + rate_sym = rx_hdr->rate; + length = le16_to_cpu(rx_hdr->len); + + /* end-of-frame status and timestamp */ + status = le32_to_cpu(rx_end->status); + bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); + tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; + tsf = le64_to_cpu(rx_end->timestamp); + + /* signal statistics */ + rssi = rx_stats->rssi; + agc = rx_stats->agc; + sig_avg = le16_to_cpu(rx_stats->sig_avg); + noise_diff = le16_to_cpu(rx_stats->noise_diff); + + to_us = !compare_ether_addr(header->addr1, priv->mac_addr); + + /* if data frame is to us and all is good, + * (optionally) print summary for only 1 out of every 100 */ + if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == + (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { + dataframe = 1; + if (!group100) + print_summary = 1; /* print each frame */ + else if (priv->framecnt_to_us < 100) { + priv->framecnt_to_us++; + print_summary = 0; + } else { + priv->framecnt_to_us = 0; + print_summary = 1; + hundred = 1; + } + } else { + /* print summary for all other frames */ + print_summary = 1; + } + + if (print_summary) { + char *title; + u32 rate; + + if (hundred) + title = "100Frames"; + else if (fc & IEEE80211_FCTL_RETRY) + title = "Retry"; + else if (ieee80211_is_assoc_response(fc)) + title = "AscRsp"; + else if (ieee80211_is_reassoc_response(fc)) + title = "RasRsp"; + else if (ieee80211_is_probe_response(fc)) { + title = "PrbRsp"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_beacon(fc)) { + title = "Beacon"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_atim(fc)) + title = "ATIM"; + else if (ieee80211_is_auth(fc)) + title = "Auth"; + else if (ieee80211_is_deauth(fc)) + title = "DeAuth"; + else if (ieee80211_is_disassoc(fc)) + title = "DisAssoc"; + else + title = "Frame"; + + rate = iwl_rate_index_from_plcp(rate_sym); + if (rate == -1) + rate = 0; + else + rate = iwl_rates[rate].ieee / 2; + + /* print frame summary. + * MAC addresses show just the last byte (for brevity), + * but you can hack it to show more, if you'd like to. 
*/ + if (dataframe) + IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " + "len=%u, rssi=%d, chnl=%d, rate=%u, \n", + title, fc, header->addr1[5], + length, rssi, channel, rate); + else { + /* src/dst addresses assume managed mode */ + IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " + "src=0x%02x, rssi=%u, tim=%lu usec, " + "phy=0x%02x, chnl=%d\n", + title, fc, header->addr1[5], + header->addr3[5], rssi, + tsf_low - priv->scan_start_tsf, + phy_flags, channel); + } + } + if (print_dump) + iwl_print_hex_dump(IWL_DL_RX, data, length); +} +#endif + +static void iwl_unset_hw_setting(struct iwl_priv *priv) +{ + if (priv->hw_setting.shared_virt) + pci_free_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + priv->hw_setting.shared_virt, + priv->hw_setting.shared_phys); +} + +/** + * iwl_supported_rate_to_ie - fill in the supported rate in IE field + * + * return : set the bit for each supported rate insert in ie + */ +static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate, + u16 basic_rate, int max_count) +{ + u16 ret_rates = 0, bit; + int i; + u8 *rates; + + rates = &(ie[1]); + + for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { + if (bit & supported_rate) { + ret_rates |= bit; + rates[*ie] = iwl_rates[i].ieee | + ((bit & basic_rate) ? 0x80 : 0x00); + *ie = *ie + 1; + if (*ie >= max_count) + break; + } + } + + return ret_rates; +} + +#ifdef CONFIG_IWLWIFI_HT +void static iwl_set_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap, + u8 use_wide_chan); +#endif + +/** + * iwl_fill_probe_req - fill in all required fields and IE for probe request + */ +static u16 iwl_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + int left, int is_direct) +{ + int len = 0; + u8 *pos = NULL; + u16 ret_rates; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + len += 24; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN); + memcpy(frame->sa, priv->mac_addr, ETH_ALEN); + memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN); + frame->seq_ctrl = 0; + + /* fill in our indirect SSID IE */ + /* ...next IE... */ + + left -= 2; + if (left < 0) + return 0; + len += 2; + pos = &(frame->u.probe_req.variable[0]); + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + /* fill in our direct SSID IE... */ + if (is_direct) { + /* ...next IE... */ + left -= 2 + priv->essid_len; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SSID; + *pos++ = priv->essid_len; + memcpy(pos, priv->essid, priv->essid_len); + pos += priv->essid_len; + len += 2 + priv->essid_len; + } + + /* fill in supported rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SUPP_RATES; + *pos = 0; + ret_rates = priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_supported_rate_to_ie(pos, priv->active_rate, + priv->active_rate_basic, left); + len += 2 + *pos; + pos += (*pos) + 1; + ret_rates = ~ret_rates & priv->active_rate; + + if (ret_rates == 0) + goto fill_end; + + /* fill in supported extended rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... 
*/ + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos = 0; + iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left); + if (*pos > 0) + len += 2 + *pos; + +#ifdef CONFIG_IWLWIFI_HT + if (is_direct && priv->is_ht_enabled) { + u8 use_wide_chan = 1; + + if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) + use_wide_chan = 0; + pos += (*pos) + 1; + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_capability); + iwl_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos, + use_wide_chan); + len += 2 + sizeof(struct ieee80211_ht_capability); + } +#endif /*CONFIG_IWLWIFI_HT */ + + fill_end: + return (u16)len; +} + +/* + * QoS support +*/ +#ifdef CONFIG_IWLWIFI_QOS +static int iwl_send_qos_params_command(struct iwl_priv *priv, + struct iwl_qosparam_cmd *qos) +{ + + return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM, + sizeof(struct iwl_qosparam_cmd), qos); +} + +static void iwl_reset_qos(struct iwl_priv *priv) +{ + u16 cw_min = 15; + u16 cw_max = 1023; + u8 aifs = 2; + u8 is_legacy = 0; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.qos_active = 0; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + if (!(priv->active_rate & 0xfff0)) { + cw_min = 31; + is_legacy = 1; + } + } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { + cw_min = 31; + is_legacy = 1; + } + + if (priv->qos_data.qos_active) + aifs = 3; + + priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; + + if (priv->qos_data.qos_active) { + i = 1; + priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 7; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 2; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(6016); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3008); + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 3; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 4 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16((cw_max + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3264); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(1504); + } else { + for (i = 1; i < 4; i++) { + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + } + } + IWL_DEBUG_QOS("set QoS to default \n"); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_activate_qos(struct iwl_priv *priv, u8 
force) +{ + unsigned long flags; + + if (priv == NULL) + return; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!priv->qos_data.qos_enable) + return; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.def_qos_parm.qos_flags = 0; + + if (priv->qos_data.qos_cap.q_AP.queue_request && + !priv->qos_data.qos_cap.q_AP.txop_request) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_TXOP_TYPE_MSK; + + if (priv->qos_data.qos_active) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + spin_unlock_irqrestore(&priv->lock, flags); + + if (force || iwl_is_associated(priv)) { + IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n", + priv->qos_data.qos_active); + + iwl_send_qos_params_command(priv, + &(priv->qos_data.def_qos_parm)); + } +} + +#endif /* CONFIG_IWLWIFI_QOS */ +/* + * Power management (not Tx power!) functions + */ +#define MSEC_TO_USEC 1024 + +#define NOSLP __constant_cpu_to_le16(0), 0, 0 +#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 +#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC) +#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \ + __constant_cpu_to_le32(X1), \ + __constant_cpu_to_le32(X2), \ + __constant_cpu_to_le32(X3), \ + __constant_cpu_to_le32(X4)} + + +/* default power management (not Tx power) table values */ +/* for tim 0-10 */ +static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} +}; + +/* for tim > 10 */ +static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), + SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), + SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), + SLP_VEC(2, 6, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), + SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} +}; + +int iwl_power_init_handle(struct iwl_priv *priv) +{ + int rc = 0, i; + struct iwl_power_mgr *pow_data; + int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; + u16 pci_pm; + + IWL_DEBUG_POWER("Initialize power \n"); + + pow_data = &(priv->power_data); + + memset(pow_data, 0, sizeof(*pow_data)); + + pow_data->active_index = IWL_POWER_RANGE_0; + pow_data->dtim_val = 0xffff; + + memcpy(&pow_data->pwr_range_0[0], &range_0[0], size); + memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); + + rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm); + if (rc != 0) + return 0; + else { + struct iwl_powertable_cmd *cmd; + + IWL_DEBUG_POWER("adjust power command flags\n"); + + for (i = 0; i < IWL_POWER_AC; i++) { + cmd = &pow_data->pwr_range_0[i].cmd; + + if (pci_pm & 0x1) + cmd->flags &= ~IWL_POWER_PCI_PM_MSK; + else + cmd->flags |= IWL_POWER_PCI_PM_MSK; + } + } + return rc; +} + +static int iwl_update_power_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, u32 mode) +{ + int rc = 0, i; + u8 skip; + u32 max_sleep = 0; + struct iwl_power_vec_entry *range; + u8 period = 0; + struct iwl_power_mgr 
*pow_data; + + if (mode > IWL_POWER_INDEX_5) { + IWL_DEBUG_POWER("Error invalid power mode \n"); + return -1; + } + pow_data = &(priv->power_data); + + if (pow_data->active_index == IWL_POWER_RANGE_0) + range = &pow_data->pwr_range_0[0]; + else + range = &pow_data->pwr_range_1[1]; + + memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd)); + +#ifdef IWL_MAC80211_DISABLE + if (priv->assoc_network != NULL) { + unsigned long flags; + + period = priv->assoc_network->tim.tim_period; + } +#endif /*IWL_MAC80211_DISABLE */ + skip = range[mode].no_dtim; + + if (period == 0) { + period = 1; + skip = 0; + } + + if (skip == 0) { + max_sleep = period; + cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; + } else { + __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; + max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; + cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; + } + + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { + if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) + cmd->sleep_interval[i] = cpu_to_le32(max_sleep); + } + + IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return rc; +} + +static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode) +{ + u32 final_mode = mode; + int rc; + struct iwl_powertable_cmd cmd; + + /* If on battery, set to 3, + * if plugged into AC power, set to CAM ("continuosly aware mode"), + * else user level */ + switch (mode) { + case IWL_POWER_BATTERY: + final_mode = IWL_POWER_INDEX_3; + break; + case IWL_POWER_AC: + final_mode = IWL_POWER_MODE_CAM; + break; + default: + final_mode = mode; + break; + } + + cmd.keep_alive_beacons = 0; + + iwl_update_power_cmd(priv, &cmd, final_mode); + + rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); + + if (final_mode == IWL_POWER_MODE_CAM) + clear_bit(STATUS_POWER_PMI, &priv->status); + else + set_bit(STATUS_POWER_PMI, &priv->status); + + return rc; +} + +int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr2, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our IBSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr3, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + case IEEE80211_IF_TYPE_STA: /* Header: Dest. 
| AP{BSSID} | Source */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr3, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our BSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr2, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + } + + return 1; +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +const char *iwl_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + * + * NOTE: priv->mutex is not required before calling this function + */ +static int iwl_scan_cancel(struct iwl_priv *priv) +{ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCANNING, &priv->status); + return 0; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Queuing scan abort.\n"); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + queue_work(priv->workqueue, &priv->abort_scan); + + } else + IWL_DEBUG_SCAN("Scan abort already in progress.\n"); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return 0; +} + +/** + * iwl_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + * NOTE: priv->mutex must be held before calling this function + */ +static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long now = jiffies; + int ret; + + ret = iwl_scan_cancel(priv); + if (ret && ms) { + mutex_unlock(&priv->mutex); + while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && + test_bit(STATUS_SCANNING, &priv->status)) + msleep(1); + mutex_lock(&priv->mutex); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return ret; +} + +static void iwl_sequence_reset(struct iwl_priv *priv) +{ + /* Reset ieee stats */ + + /* We don't reset the net_device_stats (ieee->stats) on + * re-association */ + + priv->last_seq_num = -1; + priv->last_frag_num = -1; + priv->last_packet_time = 0; + + iwl_scan_cancel(priv); +} + +#define MAX_UCODE_BEACON_INTERVAL 4096 +#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) + +static __le16 iwl_adjust_beacon_interval(u16 beacon_val) +{ + u16 new_val = 0; + u16 beacon_factor = 0; + + beacon_factor = + (beacon_val + MAX_UCODE_BEACON_INTERVAL) + / MAX_UCODE_BEACON_INTERVAL; + new_val = beacon_val / beacon_factor; + + return cpu_to_le16(new_val); +} + +static void iwl_setup_rxon_timing(struct iwl_priv *priv) +{ + u64 interval_tm_unit; + u64 tsf, result; + unsigned long flags; + struct ieee80211_conf *conf = NULL; + u16 beacon_int = 0; + + conf = ieee80211_get_hw_conf(priv->hw); + + spin_lock_irqsave(&priv->lock, flags); + priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); + 
priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); + + priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; + + tsf = priv->timestamp1; + tsf = ((tsf << 32) | priv->timestamp0); + + beacon_int = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { + if (beacon_int == 0) { + priv->rxon_timing.beacon_interval = cpu_to_le16(100); + priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); + } else { + priv->rxon_timing.beacon_interval = + cpu_to_le16(beacon_int); + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval( + le16_to_cpu(priv->rxon_timing.beacon_interval)); + } + + priv->rxon_timing.atim_window = 0; + } else { + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval(conf->beacon_int); + /* TODO: we need to get atim_window from upper stack + * for now we set to 0 */ + priv->rxon_timing.atim_window = 0; + } + + interval_tm_unit = + (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024); + result = do_div(tsf, interval_tm_unit); + priv->rxon_timing.beacon_init_val = + cpu_to_le32((u32) ((u64) interval_tm_unit - result)); + + IWL_DEBUG_ASSOC + ("beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(priv->rxon_timing.beacon_interval), + le32_to_cpu(priv->rxon_timing.beacon_init_val), + le16_to_cpu(priv->rxon_timing.atim_window)); +} + +static int iwl_scan_initiate(struct iwl_priv *priv) +{ + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + IWL_ERROR("APs don't scan.\n"); + return 0; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN("Scan already in progress.\n"); + return -EAGAIN; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Scan request while abort pending. 
" + "Queuing.\n"); + return -EAGAIN; + } + + IWL_DEBUG_INFO("Starting scan...\n"); + priv->scan_bands = 2; + set_bit(STATUS_SCANNING, &priv->status); + priv->scan_start = jiffies; + priv->scan_pass_start = priv->scan_start; + + queue_work(priv->workqueue, &priv->request_scan); + + return 0; +} + +static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + + return 0; +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode) +{ + if (phymode == MODE_IEEE80211A) { + priv->staging_rxon.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_bg_post_associate() */ + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; + priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; + } +} + +/* + * initilize rxon structure with default values fromm eeprom + */ +static void iwl_connection_init_rx_config(struct iwl_priv *priv) +{ + const struct iwl_channel_info *ch_info; + + memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_AP: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; + break; + + case IEEE80211_IF_TYPE_STA: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; + priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_IBSS: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; + priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_MNTR: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; + priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_get_channel_info(priv, priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + /* + * in some case A channels are all non IBSS + * in this case force B/G channel + */ + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !(is_channel_ibss(ch_info))) + ch_info = &priv->channel_info[0]; + + priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); + if (is_channel_a_band(ch_info)) + priv->phymode = MODE_IEEE80211A; + else + priv->phymode = MODE_IEEE80211G; + + iwl_set_flags_for_phymode(priv, priv->phymode); + + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | + RXON_FLG_CHANNEL_MODE_PURE_40_MSK); + memcpy(priv->staging_rxon.node_addr, 
priv->mac_addr, ETH_ALEN); + memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); + priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; + iwl4965_set_rxon_chain(priv); +} + +static int iwl_set_mode(struct iwl_priv *priv, int mode) +{ + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + if (mode == IEEE80211_IF_TYPE_IBSS) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, + priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info || !is_channel_ibss(ch_info)) { + IWL_ERROR("channel %d not IBSS channel\n", + le16_to_cpu(priv->staging_rxon.channel)); + return -EINVAL; + } + } + + cancel_delayed_work(&priv->scan_check); + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + return -EAGAIN; + } + + priv->iw_mode = mode; + + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + + iwl_clear_stations_table(priv); + + iwl_commit_rxon(priv); + + return 0; +} + +static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_control *ctl, + struct iwl_cmd *cmd, + struct sk_buff *skb_frag, + int last_frag) +{ + struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; + + switch (keyinfo->alg) { + case ALG_CCMP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; + memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); + break; + + case ALG_TKIP: +#if 0 + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; + + if (last_frag) + memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, + 8); + else + memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); +#endif + break; + + case ALG_WEP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | + (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + if (keyinfo->keylen == 13) + cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; + + memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX("Configuring packet for WEP encryption " + "with key %d\n", ctl->key_idx); + break; + + default: + printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
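+ *
+ * The helper below fills only the generic, rate-independent fields of the
+ * Tx command: ACK and sequence-control flags derived from the frame type
+ * and mac80211 control flags, the more-fragments bit, RTS/CTS protection,
+ * the destination station id and the power-save frame timeout.  Crypto
+ * (iwl_build_tx_cmd_hwcrypto) and rate selection (iwl_hw_build_tx_cmd_rate)
+ * are applied separately by the caller, iwl_tx_skb().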
+ */ +static void iwl_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, + int is_unicast, u8 std_id) +{ + __le16 *qc; + u16 fc = le16_to_cpu(hdr->frame_control); + __le32 tx_flags = cmd->cmd.tx.tx_flags; + + cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_response(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + cmd->cmd.tx.sta_id = std_id; + if (ieee80211_get_morefrag(hdr)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + + if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { + tx_flags |= TX_CMD_FLG_RTS_MSK; + tx_flags &= ~TX_CMD_FLG_CTS_MSK; + } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + + if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) + tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(3); + else + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(2); + } else + cmd->cmd.tx.timeout.pm_frame_timeout = 0; + + cmd->cmd.tx.driver_txop = 0; + cmd->cmd.tx.tx_flags = tx_flags; + cmd->cmd.tx.next_frame_len = 0; +} + +static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + int sta_id; + u16 fc = le16_to_cpu(hdr->frame_control); + DECLARE_MAC_BUF(mac); + + /* If this frame is broadcast or not data then use the broadcast + * station id */ + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || + is_multicast_ether_addr(hdr->addr1)) + return priv->hw_setting.bcast_sta_id; + + switch (priv->iw_mode) { + + /* If this frame is part of a BSS network (we're a station), then + * we use the AP's station id */ + case IEEE80211_IF_TYPE_STA: + return IWL_AP_ID; + + /* If we are an AP, then find the station, or use BCAST */ + case IEEE80211_IF_TYPE_AP: + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + return priv->hw_setting.bcast_sta_id; + + /* If this frame is part of a IBSS network, then we use the + * target specific station id */ + case IEEE80211_IF_TYPE_IBSS: + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC); + + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + IWL_DEBUG_DROP("Station %s not in station map. 
" + "Defaulting to broadcast...\n", + print_mac(mac, hdr->addr1)); + iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); + return priv->hw_setting.bcast_sta_id; + + default: + IWL_WARNING("Unkown mode of operation: %d", priv->iw_mode); + return priv->hw_setting.bcast_sta_id; + } +} + +/* + * start REPLY_TX command process + */ +static int iwl_tx_skb(struct iwl_priv *priv, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + int txq_id = ctl->queue; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + struct iwl_cmd *out_cmd = NULL; + u16 len, idx, len_org; + u8 id, hdr_len, unicast; + u8 sta_id; + u16 seq_number = 0; + u16 fc; + __le16 *qc; + u8 wait_write_ptr = 0; + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP("Dropping - RF KILL\n"); + goto drop_unlock; + } + + if (!priv->interface_id) { + IWL_DEBUG_DROP("Dropping - !priv->interface_id\n"); + goto drop_unlock; + } + + if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { + IWL_ERROR("ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = le16_to_cpu(hdr->frame_control); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX("Sending AUTH frame\n"); + else if (ieee80211_is_assoc_request(fc)) + IWL_DEBUG_TX("Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_request(fc)) + IWL_DEBUG_TX("Sending REASSOC frame\n"); +#endif + + if (!iwl_is_associated(priv) && + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { + IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); + goto drop_unlock; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_get_hdrlen(fc); + sta_id = iwl_get_sta_id(priv, hdr); + if (sta_id == IWL_INVALID_STATION) { + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n", + print_mac(mac, hdr->addr1)); + goto drop; + } + + IWL_DEBUG_RATE("station Id %d\n", sta_id); + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + seq_number = priv->stations[sta_id].tid[tid].seq_number & + IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = cpu_to_le16(seq_number) | + (hdr->seq_ctrl & + __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); + seq_number += 0x10; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + /* aggregation is on for this <sta,tid> */ + if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG) + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + } + txq = &priv->txq[txq_id]; + q = &txq->q; + + spin_lock_irqsave(&priv->lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + control_flags = (u32 *) tfd; + idx = get_cmd_index(q, q->first_empty, 0); + + memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->first_empty].skb[0] = skb; + memcpy(&(txq->txb[q->first_empty].status.control), + ctl, sizeof(struct ieee80211_tx_control)); + out_cmd = &txq->cmd[idx]; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->first_empty))); + /* copy frags header */ + memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); + + /* hdr = (struct 
ieee80211_hdr *)out_cmd->cmd.tx.hdr; */ + len = priv->hw_setting.tx_cmd_len + + sizeof(struct iwl_cmd_header) + hdr_len; + + len_org = len; + len = (len + 3) & ~3; + + if (len_org != len) + len_org = 1; + else + len_org = 0; + + txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx + + offsetof(struct iwl_cmd, hdr); + + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); + + if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) + iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); + + /* 802.11 null functions have no payload... */ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); + } + + if (len_org) + out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + len = (u16)skb->len; + out_cmd->cmd.tx.len = cpu_to_le16(len); + + /* TODO need this for burst mode later on */ + iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); + + iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys, + hdr, hdr_len, ctl, NULL); + + if (!ieee80211_get_morefrag(hdr)) { + txq->need_update = 1; + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, + sizeof(out_cmd->cmd.tx)); + + iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, + ieee80211_get_hdrlen(fc)); + + iwl4965_tx_queue_update_wr_ptr(priv, txq, len); + + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + rc = iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if (rc) + return rc; + + if ((iwl_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + ieee80211_stop_queue(priv->hw, ctl->queue); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +static void iwl_set_rate(struct iwl_priv *priv) +{ + const struct ieee80211_hw_mode *hw = NULL; + struct ieee80211_rate *rate; + int i; + + hw = iwl_get_hw_mode(priv, priv->phymode); + + priv->active_rate = 0; + priv->active_rate_basic = 0; + + IWL_DEBUG_RATE("Setting rates for 802.11%c\n", + hw->mode == MODE_IEEE80211A ? + 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); + + for (i = 0; i < hw->num_rates; i++) { + rate = &(hw->rates[i]); + if ((rate->val < IWL_RATE_COUNT) && + (rate->flags & IEEE80211_RATE_SUPPORTED)) { + IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", + rate->val, iwl_rates[rate->val].plcp, + (rate->flags & IEEE80211_RATE_BASIC) ? 
+ "*" : ""); + priv->active_rate |= (1 << rate->val); + if (rate->flags & IEEE80211_RATE_BASIC) + priv->active_rate_basic |= (1 << rate->val); + } else + IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n", + rate->val, iwl_rates[rate->val].plcp); + } + + IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", + priv->active_rate, priv->active_rate_basic); + + /* + * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) + * otherwise set it to the default of all CCK rates and 6, 12, 24 for + * OFDM + */ + if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) + priv->staging_rxon.cck_basic_rates = + ((priv->active_rate_basic & + IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; + else + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) + priv->staging_rxon.ofdm_basic_rates = + ((priv->active_rate_basic & + (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> + IWL_FIRST_OFDM_RATE) & 0xFF; + else + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; +} + +static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio) +{ + unsigned long flags; + + if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) + return; + + IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", + disable_radio ? "OFF" : "ON"); + + if (disable_radio) { + iwl_scan_cancel(priv); + /* FIXME: This is a workaround for AP */ + if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_SW_BIT_RFKILL); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); + set_bit(STATUS_RF_KILL_SW, &priv->status); + } + return; + } + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + clear_bit(STATUS_RF_KILL_SW, &priv->status); + spin_unlock_irqrestore(&priv->lock, flags); + + /* wake up ucode */ + msleep(10); + + spin_lock_irqsave(&priv->lock, flags); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + if (!iwl_grab_restricted_access(priv)) + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by HW switch\n"); + return; + } + + queue_work(priv->workqueue, &priv->restart); + return; +} + +void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, + u32 decrypt_res, struct ieee80211_rx_status *stats) +{ + u16 fc = + le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control); + + if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) + return; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return; + + IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) + stats->flag |= RX_FLAG_MMIC_ERROR; + case RX_RES_STATUS_SEC_TYPE_WEP: + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX("hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } +} + +void iwl_handle_data_packet_monitor(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + void *data, short len, + struct ieee80211_rx_status *stats, + u16 
phy_flags) +{ + struct iwl_rt_rx_hdr *iwl_rt; + + /* First cache any information we need before we overwrite + * the information provided in the skb from the hardware */ + s8 signal = stats->ssi; + s8 noise = 0; + int rate = stats->rate; + u64 tsf = stats->mactime; + __le16 phy_flags_hw = cpu_to_le16(phy_flags); + + /* We received data from the HW, so stop the watchdog */ + if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) { + IWL_DEBUG_DROP("Dropping too large packet in monitor\n"); + return; + } + + /* copy the frame data to write after where the radiotap header goes */ + iwl_rt = (void *)rxb->skb->data; + memmove(iwl_rt->payload, data, len); + + iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */ + + /* total header + data */ + iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt)); + + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, sizeof(*iwl_rt) + len); + + /* Big bitfield of all the fields we provide in radiotap */ + iwl_rt->rt_hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | + (1 << IEEE80211_RADIOTAP_ANTENNA)); + + /* Zero the flags, we'll add to them as we go */ + iwl_rt->rt_flags = 0; + + iwl_rt->rt_tsf = cpu_to_le64(tsf); + + /* Convert to dBm */ + iwl_rt->rt_dbmsignal = signal; + iwl_rt->rt_dbmnoise = noise; + + /* Convert the channel frequency and set the flags */ + iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq); + if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); + else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); + else /* 802.11g */ + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ)); + + rate = iwl_rate_index_from_plcp(rate); + if (rate == -1) + iwl_rt->rt_rate = 0; + else + iwl_rt->rt_rate = iwl_rates[rate].ieee; + + /* antenna number */ + iwl_rt->rt_antenna = + le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; + + /* set the preamble flag if we have it */ + if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); + + stats->flag |= RX_FLAG_RADIOTAP; + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + rxb->skb = NULL; +} + + +#define IWL_PACKET_RETRY_TIME HZ + +int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + u16 sc = le16_to_cpu(header->seq_ctrl); + u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + u16 frag = sc & IEEE80211_SCTL_FRAG; + u16 *last_seq, *last_frag; + unsigned long *last_time; + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS:{ + struct list_head *p; + struct iwl_ibss_seq *entry = NULL; + u8 *mac = header->addr2; + int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1); + + __list_for_each(p, &priv->ibss_mac_hash[index]) { + entry = + list_entry(p, struct iwl_ibss_seq, list); + if (!compare_ether_addr(entry->mac, mac)) + break; + } + if (p == &priv->ibss_mac_hash[index]) { + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + IWL_ERROR + ("Cannot malloc new mac entry\n"); + return 0; + } + memcpy(entry->mac, mac, ETH_ALEN); + entry->seq_num = seq; + entry->frag_num = frag; + entry->packet_time = 
jiffies; + list_add(&entry->list, + &priv->ibss_mac_hash[index]); + return 0; + } + last_seq = &entry->seq_num; + last_frag = &entry->frag_num; + last_time = &entry->packet_time; + break; + } + case IEEE80211_IF_TYPE_STA: + last_seq = &priv->last_seq_num; + last_frag = &priv->last_frag_num; + last_time = &priv->last_packet_time; + break; + default: + return 0; + } + if ((*last_seq == seq) && + time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) { + if (*last_frag == frag) + goto drop; + if (*last_frag + 1 != frag) + /* out-of-order fragment */ + goto drop; + } else + *last_seq = seq; + + *last_frag = frag; + *last_time = jiffies; + return 0; + + drop: + return 1; +} + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +#include "iwl-spectrum.h" + +#define BEACON_TIME_MASK_LOW 0x00FFFFFF +#define BEACON_TIME_MASK_HIGH 0xFF000000 +#define TIME_UNIT 1024 + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in 8:24 format + * the high 1 byte is the beacon counts + * the lower 3 bytes is the time in usec within one beacon interval + */ + +static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * 1024; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); + rem = (usec % interval) & BEACON_TIME_MASK_LOW; + + return (quot << 24) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ + +static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) +{ + u32 base_low = base & BEACON_TIME_MASK_LOW; + u32 addon_low = addon & BEACON_TIME_MASK_LOW; + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & BEACON_TIME_MASK_HIGH) + + (addon & BEACON_TIME_MASK_HIGH); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << 24); + } else + res += (1 << 24); + + return cpu_to_le32(res); +} + +static int iwl_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .meta.flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + + if (iwl_is_associated(priv)) + add_time = + iwl_usecs_to_beacons( + le64_to_cpu(params->start_time) - priv->last_tsf, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_is_associated(priv)) + spectrum.start_time = + iwl_add_beacon_time(priv->last_beacon_time, + add_time, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) 
+ return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (res->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO + ("Replaced existing measurement: %d\n", + res->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} +#endif + +static void iwl_txstatus_to_ieee(struct iwl_priv *priv, + struct iwl_tx_info *tx_sta) +{ + + tx_sta->status.ack_signal = 0; + tx_sta->status.excessive_retries = 0; + tx_sta->status.queue_length = 0; + tx_sta->status.queue_number = 0; + + if (in_interrupt()) + ieee80211_tx_status_irqsafe(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + else + ieee80211_tx_status(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + + tx_sta->skb[0] = NULL; +} + +/** + * iwl_tx_queue_reclaim - Reclaim Tx queue entries no more used by NIC. + * + * When FW advances 'R' index, all entries between old and + * new 'R' index need to be reclaimed. As result, some free space + * forms. If there is enough free space (> low mark), wake Tx queue. + */ +int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->first_empty, q->last_used); + return 0; + } + + for (index = iwl_queue_inc_wrap(index, q->n_bd); + q->last_used != index; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) { + if (txq_id != IWL_CMD_QUEUE_NUM) { + iwl_txstatus_to_ieee(priv, + &(txq->txb[txq->q.last_used])); + iwl_hw_txq_free_tfd(priv, txq); + } else if (nfreed > 1) { + IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, + q->first_empty, q->last_used); + queue_work(priv->workqueue, &priv->restart); + } + nfreed++; + } + + if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL_CMD_QUEUE_NUM) && + priv->mac80211_registered) + ieee80211_wake_queue(priv->hw, txq_id); + + + return nfreed; +} + +static int iwl_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) + || (status == TX_STATUS_DIRECT_DONE); +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + +static inline int iwl_get_ra_sta_id(struct iwl_priv *priv, + struct ieee80211_hdr *hdr) +{ + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) + return IWL_AP_ID; + else { + u8 *da = ieee80211_get_DA(hdr); + return iwl_hw_find_station(priv, da); + } +} + +static struct ieee80211_hdr *iwl_tx_queue_get_hdr( + struct iwl_priv *priv, int txq_id, int idx) +{ + if (priv->txq[txq_id].txb[idx].skb[0]) + return (struct ieee80211_hdr *)priv->txq[txq_id]. 
+ txb[idx].skb[0]->data; + return NULL; +} + +static inline u32 iwl_get_scd_ssn(struct iwl_tx_resp *tx_resp) +{ + __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status + + tx_resp->frame_count); + return le32_to_cpu(*scd_ssn) & MAX_SN; + +} +static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_tx_resp *tx_resp, + u16 start_idx) +{ + u32 status; + __le32 *frame_status = &tx_resp->status; + struct ieee80211_tx_status *tx_status = NULL; + struct ieee80211_hdr *hdr = NULL; + int i, sh; + int txq_id, idx; + u16 seq; + + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY("got tx repsons w/o back\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + agg->bitmap0 = agg->bitmap1 = 0; + + if (agg->frame_count == 1) { + struct iwl_tx_queue *txq ; + status = le32_to_cpu(frame_status[0]); + + txq_id = agg->txq_id; + txq = &priv->txq[txq_id]; + /* FIXME: code repetition */ + IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n", + agg->frame_count, agg->start_idx); + + tx_status = &(priv->txq[txq_id].txb[txq->q.last_used].status); + tx_status->retry_count = tx_resp->failure_frame; + tx_status->queue_number = status & 0xff; + tx_status->queue_length = tx_resp->bt_kill_count; + tx_status->queue_length |= tx_resp->failure_rts; + + tx_status->flags = iwl_is_tx_success(status)? + IEEE80211_TX_STATUS_ACK : 0; + tx_status->control.tx_rate = + iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags); + /* FIXME: code repetition end */ + + IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", + status & 0xff, tx_resp->failure_frame); + IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", + iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags)); + + agg->wait_for_ba = 0; + } else { + u64 bitmap = 0; + int start = agg->start_idx; + + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le32_to_cpu(frame_status[i]); + seq = status >> 16; + idx = SEQ_TO_INDEX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IWL_ERROR("BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", + idx, SEQ_TO_SN(sc), + hdr->seq_ctrl); + return -1; + } + + IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", + i, idx, SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= (1 << sh); + IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", + start, (u32)(bitmap & 0xFFFFFFFF)); + } + + agg->bitmap0 = bitmap & 0xFFFFFFFF; + agg->bitmap1 = bitmap >> 32; + agg->start_idx = start; + agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n", + agg->frame_count, agg->start_idx, + agg->bitmap0); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} +#endif +#endif + +static void iwl_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue 
*txq = &priv->txq[txq_id]; + struct ieee80211_tx_status *tx_status; + struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + int tid, sta_id; +#endif +#endif + + if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.first_empty, + txq->q.last_used); + return; + } + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + if (txq->sched_retry) { + const u32 scd_ssn = iwl_get_scd_ssn(tx_resp); + struct ieee80211_hdr *hdr = + iwl_tx_queue_get_hdr(priv, txq_id, index); + struct iwl_ht_agg *agg = NULL; + __le16 *qc = ieee80211_get_qos_ctrl(hdr); + + if (qc == NULL) { + IWL_ERROR("BUG_ON qc is null!!!!\n"); + return; + } + + tid = le16_to_cpu(*qc) & 0xf; + + sta_id = iwl_get_ra_sta_id(priv, hdr); + if (unlikely(sta_id == IWL_INVALID_STATION)) { + IWL_ERROR("Station not known for\n"); + return; + } + + agg = &priv->stations[sta_id].tid[tid].agg; + + iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index); + + if ((tx_resp->frame_count == 1) && + !iwl_is_tx_success(status)) { + /* TODO: send BAR */ + } + + if ((txq->q.last_used != (scd_ssn & 0xff))) { + index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); + IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " + "%d index %d\n", scd_ssn , index); + iwl_tx_queue_reclaim(priv, txq_id, index); + } + } else { +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + tx_status = &(txq->txb[txq->q.last_used].status); + + tx_status->retry_count = tx_resp->failure_frame; + tx_status->queue_number = status; + tx_status->queue_length = tx_resp->bt_kill_count; + tx_status->queue_length |= tx_resp->failure_rts; + + tx_status->flags = + iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; + + tx_status->control.tx_rate = + iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags); + + IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x " + "retries %d\n", txq_id, iwl_get_tx_fail_reason(status), + status, le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); + if (index != -1) + iwl_tx_queue_reclaim(priv, txq_id, index); +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + } +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO("Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO("Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... 
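+ * The work item selected above (init_alive_start for the INITIALIZE image,
+ * alive_start for the runtime image) is only queued when the uCode reports
+ * UCODE_VALID_OK; otherwise only a warning is printed and no work is queued.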
*/ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARNING("uCode did not respond OK.\n"); +} + +static void iwl_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); + return; +} + +static void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", + le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); + rxon->channel = csa->channel; + priv->staging_rxon.channel = csa->channel; +} + +static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +#endif +} + +static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX("sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} + +static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " + "notification for %s:\n", + le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. 
will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL); + + if (!beacon) { + IWL_ERROR("update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl_send_beacon_cmd(priv); +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status); + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX("beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN("Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? "bg" : "a", + notif->tsf_high, + notif->tsf_low, notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec (%dms since last)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, + jiffies_to_msecs(elapsed_jiffies + (priv->last_scan_jiffies, jiffies))); + + priv->last_scan_jiffies = jiffies; +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + /* The scan completion notification came in, so kill that timer... 
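+ * (scan_check acts as a watchdog for a scan that never reports completion;
+ * once a pass does complete it is cancelled, and the code below either
+ * reschedules the remaining band or clears STATUS_SCANNING and queues the
+ * scan_completed work.)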
*/ + cancel_delayed_work(&priv->scan_check); + + IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", + (priv->scan_bands == 2) ? "2.4" : "5.2", + jiffies_to_msecs(elapsed_jiffies + (priv->scan_pass_start, jiffies))); + + /* Remove this scanned band from the list + * of pending bands to scan */ + priv->scan_bands--; + + /* If a request to abort was given, or the scan did not succeed + * then we reset the scan state machine and terminate, + * re-queuing another scan if one has been requested */ + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_INFO("Aborted scan completed.\n"); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + } else { + /* If there are more bands on this scan pass reschedule */ + if (priv->scan_bands > 0) + goto reschedule; + } + + priv->last_scan_jiffies = jiffies; + IWL_DEBUG_INFO("Setting scan to off\n"); + + clear_bit(STATUS_SCANNING, &priv->status); + + IWL_DEBUG_INFO("Scan took %dms\n", + jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); + + queue_work(priv->workqueue, &priv->scan_completed); + + return; + +reschedule: + priv->scan_pass_start = jiffies; + queue_work(priv->workqueue, &priv->request_scan); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + RF_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted( + priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + iwl_release_restricted_access(priv); + } + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted( + priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + iwl_release_restricted_access(priv); + } + } + + if (flags & RF_CARD_DISABLED) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + if (!iwl_grab_restricted_access(priv)) + iwl_release_restricted_access(priv); + } + } + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (flags & SW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_SW, &priv->status); + else + clear_bit(STATUS_RF_KILL_SW, &priv->status); + + if (!(flags & RXON_CARD_DISABLED)) + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status)) || + (test_bit(STATUS_RF_KILL_SW, &status) != + test_bit(STATUS_RF_KILL_SW, &priv->status))) + queue_work(priv->workqueue, &priv->rf_kill); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. 
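+ *
+ * The table is indexed by the command/notification id from the packet
+ * header: at interrupt time iwl_rx_handle() simply invokes
+ * priv->rx_handlers[pkt->hdr.cmd](priv, rxb) when the slot is non-NULL.
+ * Supporting a new notification therefore only needs one extra assignment
+ * here, e.g. (hypothetical id and handler):
+ *
+ *   priv->rx_handlers[MY_NOTIFICATION] = iwl_rx_my_notif;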
+ */ +static void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* NOTE: iwl_rx_statistics is different based on whether + * the build is for the 3945 or the 4965. See the + * corresponding implementation in iwl-XXXX.c + * + * The same handler is used for both the REPLY to a + * discrete statistics request from the host as well as + * for the periodic statistics notification from the uCode + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics; + + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_rx_scan_complete_notif; + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx; + + /* Setup hardware specific Rx handlers */ + iwl_hw_rx_handler_setup(priv); +} + +/** + * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +static void iwl_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int huge = sequence & SEQ_HUGE_FRAME; + int cmd_index; + struct iwl_cmd *cmd; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (txq_id != IWL_CMD_QUEUE_NUM) + IWL_ERROR("Error wrong command queue %d command id 0x%X\n", + txq_id, pkt->hdr.cmd); + BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); + + cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); + cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; + + /* Input error checking is done when commands are added to queue. */ + if (cmd->meta.flags & CMD_WANT_SKB) { + cmd->meta.source->u.skb = rxb->skb; + rxb->skb = NULL; + } else if (cmd->meta.u.callback && + !cmd->meta.u.callback(priv, cmd, rxb->skb)) + rxb->skb = NULL; + + iwl_tx_queue_reclaim(priv, txq_id, index); + + if (!(cmd->meta.flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + } +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. 
+ * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replensish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_rx_queue_space - Return number of free slots available in queue. 
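+ *
+ * The arithmetic below mirrors the ring convention described above: free
+ * space is read - write (wrapped by RX_QUEUE_SIZE when non-positive) minus
+ * a two-slot guard band so a completely full ring is never mistaken for an
+ * empty one.  For example, assuming RX_QUEUE_SIZE is 256, read = 10 and
+ * write = 200 gives 10 - 200 + 256 - 2 = 64 free slots.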
+ */ +static int iwl_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/** + * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue + * + * NOTE: This function has 3945 and 4965 specific code sections + * but is declared in base due to the majority of the + * implementation being the same (only a numeric constant is + * different) + * + */ +int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) +{ + u32 reg = 0; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) + goto exit_unlock; + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR, + q->write & ~0x7); + iwl_release_restricted_access(priv); + } else + iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); + + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); + return rc; +} + +/** + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer. + * + * NOTE: This function has 3945 and 4965 specific code paths in it. + */ +static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + + +/** + * iwl_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +int iwl_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + int write, rc; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it */ + if ((write != (rxq->write & ~0x7)) + || (abs(rxq->write - rxq->read) > 7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + rc = iwl_rx_queue_update_write_ptr(priv, rxq); + if (rc) + return rc; + } + + return 0; +} + +/** + * iwl_rx_replensih - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. 
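+ * Each buffer moved onto rx_free gets a fresh skb (GFP_ATOMIC, __GFP_NOWARN)
+ * and is DMA-mapped before being handed back to the pool; if allocation
+ * fails the loop simply stops, and the next restock pass will reschedule
+ * this work.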
+ * This is called as a scheduled work item (except for during intialization) + */ +void iwl_rx_replenish(void *data) +{ + struct iwl_priv *priv = data; + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + spin_lock_irqsave(&rxq->lock, flags); + while (!list_empty(&rxq->rx_used)) { + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + rxb->skb = + alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC); + if (!rxb->skb) { + if (net_ratelimit()) + printk(KERN_CRIT DRV_NAME + ": Can not allocate SKB buffers\n"); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + priv->alloc_rxb_skb++; + list_del(element); + rxb->dma_addr = + pci_map_single(priv->pci_dev, rxb->skb->data, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + spin_lock_irqsave(&priv->lock, flags); + iwl_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have it's SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + } + } + + pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + rxq->bd = NULL; +} + +int iwl_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct pci_dev *dev = priv->pci_dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); + if (!rxq->bd) + return -ENOMEM; + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; +} + +void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + priv->alloc_rxb_skb--; + dev_kfree_skb(rxq->pool[i].skb); + rxq->pool[i].skb = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + 
spin_unlock_irqrestore(&rxq->lock, flags); +} + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ +int iwl_calc_db_from_ratio(int sig_ratio) +{ + /* Anything above 1000:1 just report as 60 dB */ + if (sig_ratio > 1000) + return 60; + + /* Above 100:1, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio > 100) + return (20 + (int)ratio2dB[sig_ratio/10]); + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +#define PERFECT_RSSI (-20) /* dBm */ +#define WORST_RSSI (-95) /* dBm */ +#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) + +/* Calculate an indication of rx signal quality (a percentage, not dBm!). + * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info + * about formulas used below. */ +int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) +{ + int sig_qual; + int degradation = PERFECT_RSSI - rssi_dbm; + + /* If we get a noise measurement, use signal-to-noise ratio (SNR) + * as indicator; formula is (signal dbm - noise dbm). + * SNR at or above 40 is a great signal (100%). + * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. + * Weakest usable signal is usually 10 - 15 dB SNR. */ + if (noise_dbm) { + if (rssi_dbm - noise_dbm >= 40) + return 100; + else if (rssi_dbm < noise_dbm) + return 0; + sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; + + /* Else use just the signal level. + * This formula is a least squares fit of data points collected and + * compared with a reference system that had a percentage (%) display + * for signal quality. */ + } else + sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * + (15 * RSSI_RANGE + 62 * degradation)) / + (RSSI_RANGE * RSSI_RANGE); + + if (sig_qual > 100) + sig_qual = 100; + else if (sig_qual < 1) + sig_qual = 0; + + return sig_qual; +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from the uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. 
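+ *
+ * Packets that answer a driver-originated command (SEQ_RX_FRAME clear and
+ * not one of the known uCode-originated notifications) are additionally
+ * passed to iwl_tx_cmd_complete() after their handler runs, so that the
+ * corresponding command-queue entry can be reclaimed.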
+ */ +static void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + + r = iwl_hw_get_rx_read(priv); + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a queue slot associated with it + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + pkt = (struct iwl_rx_packet *)rxb->skb->data; + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_4965_RX) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r = %d, i = %d, %s, 0x%02x\n", r, i, + get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + if (reclaim) { + /* Invoke any callbacks, transfer the skb to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb && rxb->skb) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARNING("Claim null rxb?\n"); + } + + /* For now we just don't re-use anything. We can tweak this + * later to try and re-use notification packets and SKBs that + * fail to Rx correctly */ + if (rxb->skb != NULL) { + priv->alloc_rxb_skb--; + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + } + + pci_unmap_single(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + spin_lock_irqsave(&rxq->lock, flags); + list_add_tail(&rxb->list, &priv->rxq.rx_used); + spin_unlock_irqrestore(&rxq->lock, flags); + i = (i + 1) & RX_QUEUE_MASK; + } + + /* Backtrack one entry */ + priv->rxq.read = i; + iwl_rx_queue_restock(priv); +} + +int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int rc = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return rc; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. 
*/ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return rc; + } + + /* restore this queue's parameters in nic hardware. */ + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + iwl_write_restricted(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + iwl_release_restricted_access(priv); + + /* else not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). */ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + + txq->need_update = 0; + + return rc; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon) +{ + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_RADIO("RX CONFIG:\n"); + iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); + IWL_DEBUG_RADIO("u8[6] node_addr: %s\n", + print_mac(mac, rxon->node_addr)); + IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n", + print_mac(mac, rxon->bssid_addr)); + IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); +} +#endif + +static void iwl_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR("Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); +} + +static inline void iwl_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR("Disabled interrupts\n"); +} + +static const char *desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + int rc; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Not valid error log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + count = iwl_read_restricted_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERROR("Start IWL Error Log Dump:\n"); + IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", + priv->status, priv->config, count); + } + + desc = iwl_read_restricted_mem(priv, base + 1 * sizeof(u32)); + blink1 = iwl_read_restricted_mem(priv, base + 3 * 
sizeof(u32)); + blink2 = iwl_read_restricted_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_read_restricted_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_read_restricted_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_read_restricted_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_read_restricted_mem(priv, base + 8 * sizeof(u32)); + line = iwl_read_restricted_mem(priv, base + 9 * sizeof(u32)); + time = iwl_read_restricted_mem(priv, base + 11 * sizeof(u32)); + + IWL_ERROR("Desc Time " + "data1 data2 line\n"); + IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n", + desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERROR("blink1 blink2 ilink1 ilink2\n"); + IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, + ilink1, ilink2); + + iwl_release_restricted_access(priv); +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + * NOTE: Must be called with iwl_grab_restricted_access() already obtained! + */ +static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + + if (num_events == 0) + return; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + time = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + if (mode == 0) + IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ + else { + data = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); + } + } +} + +static void iwl_dump_nic_event_log(struct iwl_priv *priv) +{ + int rc; + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Invalid event log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + /* event log header */ + capacity = iwl_read_restricted_mem(priv, base); + mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32))); + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); + iwl_release_restricted_access(priv); + return; + } + + IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n", + size, num_wraps); + + /* if uCode has wrapped back to top of log, start at the oldest entry, + * i.e. the next one that uCode would fill.
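+ * (Worked example: with a log capacity of 128 entries, a non-zero num_wraps
+ * and next_entry == 20, the calls below dump entries 20..127 first -- the
+ * oldest data -- and then entries 0..19, the newest.)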
*/ + if (num_wraps) + iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode); + + /* (then/else) start at top of log */ + iwl_print_event_log(priv, 0, next_entry, mode); + + iwl_release_restricted_access(priv); +} + +/** + * iwl_irq_handle_error - called for HW or SW error interrupt from card + */ +static void iwl_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. */ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_FW_ERRORS) { + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv); + iwl_print_rx_config_cmd(&priv->staging_rxon); + } +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (iwl_is_associated(priv)) { + memcpy(&priv->recovery_rxon, &priv->active_rxon, + sizeof(priv->recovery_rxon)); + priv->error_recovering = 1; + } + queue_work(priv->workqueue, &priv->restart); + } +} + +static void iwl_error_recovery(struct iwl_priv *priv) +{ + unsigned long flags; + + memcpy(&priv->staging_rxon, &priv->recovery_rxon, + sizeof(priv->staging_rxon)); + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + iwl_rxon_add_station(priv, priv->bssid, 1); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); + priv->error_recovering = 0; + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_ISR) { + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERROR("Microcode HW error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + spin_unlock_irqrestore(&priv->lock, flags); + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_MAC_CLK_ACTV) + IWL_DEBUG_ISR("Microcode started or stopped.\n"); + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) + IWL_DEBUG_ISR("Alive interrupt\n"); + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled (4965 only) */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, + "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio":"enable radio"); + + /* Queue restart only if RF_KILL switch was set to "kill" + * when we loaded driver, and is now set to "enable". + * After we're Alive, RF_KILL gets handled by + * iwl_rx_card_state_notif() */ + if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself (4965 only) */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERROR("Microcode CT kill error detected.\n"); + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", + inta); + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR("Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]); + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl_rx_handle(priv); + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR("Tx interrupt\n"); + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) + IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + + if (inta & ~CSR_INI_SET_MASK) { + IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", + inta & ~CSR_INI_SET_MASK); + IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif + spin_unlock_irqrestore(&priv->lock, flags); +} + +static irqreturn_t iwl_isr(int irq, void *data) +{ + struct iwl_priv *priv 
= data; + u32 inta, inta_mask; + u32 inta_fh; + if (!priv) + return IRQ_NONE; + + spin_lock(&priv->lock); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared */ + IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta); + goto none; + } + + IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + tasklet_schedule(&priv->irq_tasklet); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; + + none: + /* re-enable interrupts here since we don't have anything to service. */ + iwl_enable_interrupts(priv); + spin_unlock(&priv->lock); + return IRQ_NONE; +} + +/************************** EEPROM BANDS **************************** + * + * The iwl_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. + * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry.
+ * + *********************************************************************/ + +/* 2.4 GHz */ +static const u8 iwl_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwl_eeprom_band_2[] = { + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static u8 iwl_eeprom_band_6[] = { /* 2.4 FAT channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static u8 iwl_eeprom_band_7[] = { /* 5.2 FAT channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +static void iwl_init_band_reference(const struct iwl_priv *priv, int band, + int *eeprom_ch_count, + const struct iwl_eeprom_channel + **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + switch (band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); + *eeprom_ch_info = priv->eeprom.band_1_channels; + *eeprom_ch_index = iwl_eeprom_band_1; + break; + case 2: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); + *eeprom_ch_info = priv->eeprom.band_2_channels; + *eeprom_ch_index = iwl_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); + *eeprom_ch_info = priv->eeprom.band_3_channels; + *eeprom_ch_index = iwl_eeprom_band_3; + break; + case 4: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); + *eeprom_ch_info = priv->eeprom.band_4_channels; + *eeprom_ch_index = iwl_eeprom_band_4; + break; + case 5: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); + *eeprom_ch_info = priv->eeprom.band_5_channels; + *eeprom_ch_index = iwl_eeprom_band_5; + break; + case 6: + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); + *eeprom_ch_info = priv->eeprom.band_24_channels; + *eeprom_ch_index = iwl_eeprom_band_6; + break; + case 7: + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); + *eeprom_ch_info = priv->eeprom.band_52_channels; + *eeprom_ch_index = iwl_eeprom_band_7; + break; + default: + BUG(); + return; + } +} + +const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, + int phymode, u16 channel) +{ + int i; + + switch (phymode) { + case MODE_IEEE80211A: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + + case MODE_IEEE80211B: + case MODE_IEEE80211G: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + + } + + return NULL; +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? 
# x " " : "") + +static int iwl_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_INFO("Channel map already initialized.\n"); + return 0; + } + + if (priv->eeprom.version < 0x2f) { + IWL_WARNING("Unsupported EEPROM version: 0x%04X\n", + priv->eeprom.version); + return -EINVAL; + } + + IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwl_eeprom_band_1) + + ARRAY_SIZE(iwl_eeprom_band_2) + + ARRAY_SIZE(iwl_eeprom_band_3) + + ARRAY_SIZE(iwl_eeprom_band_4) + + ARRAY_SIZE(iwl_eeprom_band_5); + + IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERROR("Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information beyond + * what is in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->phymode = (band == 1) ? MODE_IEEE80211B : + MODE_IEEE80211A; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + + if (!(is_channel_valid(ch_info))) { + IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" + " %ddBm): Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(NARROW), + CHECK_AND_PRINT(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the user_txpower_limit to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->user_txpower_limit) + priv->user_txpower_limit = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + for (band = 6; band <= 7; band++) { + int phymode; + u8 fat_extension_chan; + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + phymode = (band == 6) ?
MODE_IEEE80211B : MODE_IEEE80211A; + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + + if ((band == 6) && + ((eeprom_ch_index[ch] == 5) || + (eeprom_ch_index[ch] == 6) || + (eeprom_ch_index[ch] == 7))) + fat_extension_chan = HT_IE_EXT_CHANNEL_MAX; + else + fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE; + + iwl4965_set_fat_chan_info(priv, phymode, + eeprom_ch_index[ch], + &(eeprom_ch_info[ch]), + fat_extension_chan); + + iwl4965_set_fat_chan_info(priv, phymode, + (eeprom_ch_index[ch] + 4), + &(eeprom_ch_info[ch]), + HT_IE_EXT_CHANNEL_BELOW); + } + } + + return 0; +} + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. */ +#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (10) + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ +#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode) +{ + if (phymode == MODE_IEEE80211A) + return IWL_ACTIVE_DWELL_TIME_52; + else + return IWL_ACTIVE_DWELL_TIME_24; +} + +static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode) +{ + u16 active = iwl_get_active_dwell_time(priv, phymode); + u16 passive = (phymode != MODE_IEEE80211A) ? 
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_is_associated(priv)) { + /* If we're associated, we clamp the maximum passive + * dwell time to be 98% of the beacon interval (minus + * 2 * channel tune time) */ + passive = priv->beacon_int; + if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) + passive = IWL_PASSIVE_DWELL_BASE; + passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + } + + if (passive <= active) + passive = active + 1; + + return passive; +} + +static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode, + u8 is_active, u8 direct_mask, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + hw_mode = iwl_get_hw_mode(priv, phymode); + if (!hw_mode) + return 0; + + channels = hw_mode->channels; + + active_dwell = iwl_get_active_dwell_time(priv, phymode); + passive_dwell = iwl_get_passive_dwell_time(priv, phymode); + + for (i = 0, added = 0; i < hw_mode->num_channels; i++) { + if (channels[i].chan == + le16_to_cpu(priv->active_rxon.channel)) { + if (iwl_is_associated(priv)) { + IWL_DEBUG_SCAN + ("Skipping current channel %d\n", + le16_to_cpu(priv->active_rxon.channel)); + continue; + } + } else if (priv->only_active_channel) + continue; + + scan_ch->channel = channels[i].chan; + + ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", + scan_ch->channel); + continue; + } + + if (!is_active || is_channel_passive(ch_info) || + !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) + scan_ch->type = 0; /* passive */ + else + scan_ch->type = 1; /* active */ + + if (scan_ch->type & 1) + scan_ch->type |= (direct_mask << 1); + + if (is_channel_narrow(ch_info)) + scan_ch->type |= (1 << 7); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set power levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (phymode == MODE_IEEE80211A) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level + scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? 
+ active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN("total channels to scan %d \n", added); + return added; +} + +static void iwl_reset_channel_flag(struct iwl_priv *priv) +{ + int i, j; + for (i = 0; i < 3; i++) { + struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i]; + for (j = 0; j < hw_mode->num_channels; j++) + hw_mode->channels[j].flag = hw_mode->channels[j].val; + } +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT; i++) { + rates[i].rate = iwl_rates[i].ieee * 5; + rates[i].val = i; /* Rate scaling will work on indexes */ + rates[i].val2 = i; + rates[i].flags = IEEE80211_RATE_SUPPORTED; + /* Only OFDM have the bits-per-symbol set */ + if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE)) + rates[i].flags |= IEEE80211_RATE_OFDM; + else { + /* + * If CCK 1M then set rate flag to CCK else CCK_2 + * which is CCK | PREAMBLE2 + */ + rates[i].flags |= (iwl_rates[i].plcp == 10) ? + IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; + } + + /* Set up which ones are basic rates... */ + if (IWL_BASIC_RATES_MASK & (1 << i)) + rates[i].flags |= IEEE80211_RATE_BASIC; + } + + iwl4965_init_hw_rates(priv, rates); +} + +/** + * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +static int iwl_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_hw_mode *modes; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + enum { + A = 0, + B = 1, + G = 2, + A_11N = 3, + G_11N = 4, + }; + int mode_count = 5; + + if (priv->modes) { + IWL_DEBUG_INFO("Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count, + GFP_KERNEL); + if (!modes) + return -ENOMEM; + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) { + kfree(modes); + return -ENOMEM; + } + + rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), + GFP_KERNEL); + if (!rates) { + kfree(modes); + kfree(channels); + return -ENOMEM; + } + + /* 0 = 802.11a + * 1 = 802.11b + * 2 = 802.11g + */ + + /* 5.2GHz channels start after the 2.4GHz channels */ + modes[A].mode = MODE_IEEE80211A; + modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + modes[A].rates = rates; + modes[A].num_rates = 8; /* just OFDM */ + modes[A].rates = &rates[4]; + modes[A].num_channels = 0; + + modes[B].mode = MODE_IEEE80211B; + modes[B].channels = channels; + modes[B].rates = rates; + modes[B].num_rates = 4; /* just CCK */ + modes[B].num_channels = 0; + + modes[G].mode = MODE_IEEE80211G; + modes[G].channels = channels; + modes[G].rates = rates; + modes[G].num_rates = 12; /* OFDM & CCK */ + modes[G].num_channels = 0; + + modes[G_11N].mode = MODE_IEEE80211G; + modes[G_11N].channels = channels; + modes[G_11N].num_rates = 13; /* OFDM & CCK */ + modes[G_11N].rates = rates; + modes[G_11N].num_channels = 0; + + modes[A_11N].mode = MODE_IEEE80211A; + modes[A_11N].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + modes[A_11N].rates = &rates[4]; + modes[A_11N].num_rates = 9; /* just OFDM */ + modes[A_11N].num_channels = 0; + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + iwl_init_hw_rates(priv, rates); + + for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if (!is_channel_valid(ch)) { 
+ IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " + "skipping.\n", + ch->channel, is_channel_a_band(ch) ? + "5.2" : "2.4"); + continue; + } + + if (is_channel_a_band(ch)) { + geo_ch = &modes[A].channels[modes[A].num_channels++]; + modes[A_11N].num_channels++; + } else { + geo_ch = &modes[B].channels[modes[B].num_channels++]; + modes[G].num_channels++; + modes[G_11N].num_channels++; + } + + geo_ch->freq = ieee80211chan2mhz(ch->channel); + geo_ch->chan = ch->channel; + geo_ch->power_level = ch->max_power_avg; + geo_ch->antenna_max = 0xff; + + if (is_channel_valid(ch)) { + geo_ch->flag = IEEE80211_CHAN_W_SCAN; + if (ch->flags & EEPROM_CHANNEL_IBSS) + geo_ch->flag |= IEEE80211_CHAN_W_IBSS; + + if (ch->flags & EEPROM_CHANNEL_ACTIVE) + geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; + + if (ch->max_power_avg > priv->max_channel_txpower_limit) + priv->max_channel_txpower_limit = + ch->max_power_avg; + } + + geo_ch->val = geo_ch->flag; + } + + if ((modes[A].num_channels == 0) && priv->is_abg) { + printk(KERN_INFO DRV_NAME + ": Incorrectly detected BG card as ABG. Please send " + "your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, priv->pci_dev->subsystem_device); + priv->is_abg = 0; + } + + printk(KERN_INFO DRV_NAME + ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", + modes[G].num_channels, modes[A].num_channels); + + /* + * NOTE: We register these in order of preference -- the + * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick + * a phymode based on rates or AP capabilities but seems to + * configure it purely on whether the channel being configured + * is supported by a mode -- and the first match is taken + */ + + if (modes[G].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[G]); + if (modes[B].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[B]); + if (modes[A].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[A]); + + priv->modes = modes; + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) +{ + if (priv->ucode_code.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_code.len, + priv->ucode_code.v_addr, + priv->ucode_code.p_addr); + priv->ucode_code.v_addr = NULL; + } + if (priv->ucode_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data.len, + priv->ucode_data.v_addr, + priv->ucode_data.p_addr); + priv->ucode_data.v_addr = NULL; + } + if (priv->ucode_data_backup.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + priv->ucode_data_backup.v_addr, + priv->ucode_data_backup.p_addr); + priv->ucode_data_backup.v_addr = NULL; + } + if (priv->ucode_init.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init.len, + priv->ucode_init.v_addr, + priv->ucode_init.p_addr); + priv->ucode_init.v_addr = NULL; + } + if (priv->ucode_init_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init_data.len, + priv->ucode_init_data.v_addr, + priv->ucode_init_data.p_addr); + priv->ucode_init_data.v_addr = NULL; + } + if (priv->ucode_boot.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_boot.len, + priv->ucode_boot.v_addr, + priv->ucode_boot.p_addr); +
priv->ucode_boot.v_addr = NULL; + } +} + +/** + * iwl_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + iwl_release_restricted_access(priv); + + if (!errcnt) + IWL_DEBUG_INFO + ("ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, + i + RTC_INST_LOWER_BOUND); + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + iwl_release_restricted_access(priv); + + return rc; +} + + +/** + * iwl_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Show first several data entries in instruction SRAM. + * Selection of bootstrap image is arbitrary. 
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_full(priv, image, len); + + return rc; +} + + +/* check contents of special bootstrap uCode SRAM */ +static int iwl_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO("Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image ++) { + val = iwl_read_restricted_reg(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO("Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965. + * NOTE: iwl_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
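+ * (Example of the DRAM pointer format used below: the BSM_DRAM_* pointer
+ * registers take the physical address shifted right by 4 -- bits 35:4 on
+ * 4965 -- so a buffer at physical address 0x1234560 is programmed as
+ * 0x123456.)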
*/ + pinst = priv->ucode_init.p_addr >> 4; + pdata = priv->ucode_init_data.p_addr >> 4; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_write_restricted_reg(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl_verify_bsm(priv); + if (rc) { + iwl_release_restricted_access(priv); + return rc; + } + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, + RTC_INST_LOWER_BOUND); + iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); + else { + IWL_ERROR("BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + iwl_release_restricted_access(priv); + + return 0; +} + +static void iwl_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +/** + * iwl_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl_read_ucode(struct iwl_priv *priv) +{ + struct iwl_ucode *ucode; + int rc = 0; + const struct firmware *ucode_raw; + const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode"; + u8 *src; + size_t len; + u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); + if (rc < 0) { + IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc); + goto error; + } + + IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", + name, ucode_raw->size); + + /* Make sure that we got at least our header! 
*/ + if (ucode_raw->size < sizeof(*ucode)) { + IWL_ERROR("File size way too small!\n"); + rc = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (void *)ucode_raw->data; + + ver = le32_to_cpu(ucode->ver); + inst_size = le32_to_cpu(ucode->inst_size); + data_size = le32_to_cpu(ucode->data_size); + init_size = le32_to_cpu(ucode->init_size); + init_data_size = le32_to_cpu(ucode->init_data_size); + boot_size = le32_to_cpu(ucode->boot_size); + + IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); + IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", + boot_size); + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size < sizeof(*ucode) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO("uCode file size %d too small\n", + (int)ucode_raw->size); + rc = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n", + (int)inst_size); + rc = -EINVAL; + goto err_release; + } + + if (data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n", + (int)data_size); + rc = -EINVAL; + goto err_release; + } + if (init_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO + ("uCode init instr len %d too large to fit in card\n", + (int)init_size); + rc = -EINVAL; + goto err_release; + } + if (init_data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO + ("uCode init data len %d too large to fit in card\n", + (int)init_data_size); + rc = -EINVAL; + goto err_release; + } + if (boot_size > IWL_MAX_BSM_SIZE) { + IWL_DEBUG_INFO + ("uCode boot instr len %d too large to fit in bsm\n", + (int)boot_size); + rc = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + priv->ucode_code.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_code.len, + &(priv->ucode_code.p_addr)); + + priv->ucode_data.len = data_size; + priv->ucode_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data.len, + &(priv->ucode_data.p_addr)); + + priv->ucode_data_backup.len = data_size; + priv->ucode_data_backup.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + &(priv->ucode_data_backup.p_addr)); + + + /* Initialization instructions and data */ + priv->ucode_init.len = init_size; + priv->ucode_init.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init.len, + &(priv->ucode_init.p_addr)); + + priv->ucode_init_data.len = init_data_size; + priv->ucode_init_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init_data.len, + &(priv->ucode_init_data.p_addr)); + + /* Bootstrap (instructions only, no data) */ + priv->ucode_boot.len = boot_size; + priv->ucode_boot.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_boot.len, + &(priv->ucode_boot.p_addr)); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr || + !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + src = &ucode->data[0]; + len = priv->ucode_code.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n", + (int)len); + memcpy(priv->ucode_code.v_addr, src, len); + IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl_up() */ + src = &ucode->data[inst_size]; + len = priv->ucode_data.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n", + (int)len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + + /* Initialization instructions (3rd block) */ + if (init_size) { + src = &ucode->data[inst_size + data_size]; + len = priv->ucode_init.len; + IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n", + (int)len); + memcpy(priv->ucode_init.v_addr, src, len); + } + + /* Initialization data (4th block) */ + if (init_data_size) { + src = &ucode->data[inst_size + data_size + init_size]; + len = priv->ucode_init_data.len; + IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", + (int)len); + memcpy(priv->ucode_init_data.v_addr, src, len); + } + + /* Bootstrap instructions (5th block) */ + src = &ucode->data[inst_size + data_size + init_size + init_data_size]; + len = priv->ucode_boot.len; + IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", + (int)len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERROR("failed to allocate pci memory\n"); + rc = -ENOMEM; + iwl_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return rc; +} + + +/** + * iwl_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. 
+ * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int iwl_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int rc = 0; + unsigned long flags; + + /* bits 35:4 for 4965 */ + pinst = priv->ucode_code.p_addr >> 4; + pdata = priv->ucode_data_backup.p_addr >> 4; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* Tell bootstrap uCode where to find image to load */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst bytecount must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + iwl_release_restricted_access(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); + + return rc; +} + +/** + * iwl_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Calculate temperature */ + priv->temperature = iwl4965_get_temperature(priv); + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO("Initialization Alive received.\n"); + if (iwl_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl_alive_start(struct iwl_priv *priv) +{ + int rc = 0; + + IWL_DEBUG_INFO("Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + + rc = iwl4965_alive_notify(priv); + if (rc) { + IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", + rc); + goto restart; + } + + /* After the ALIVE response, we can process host commands */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Clear out the uCode error bit if it is set */ + clear_bit(STATUS_FW_ERROR, &priv->status); + + rc = iwl_init_channel_map(priv); + if (rc) { + IWL_ERROR("initializing regulatory failed: %d\n", rc); + return; + } + + iwl_init_geos(priv); + + if (iwl_is_rfkill(priv)) + return; + + if (!priv->mac80211_registered) { + /* Unlock so any user space entry points can call back into + * the driver without a deadlock... */ + mutex_unlock(&priv->mutex); + iwl_rate_control_register(priv->hw); + rc = ieee80211_register_hw(priv->hw); + priv->hw->conf.beacon_int = 100; + mutex_lock(&priv->mutex); + + if (rc) { + IWL_ERROR("Failed to register network " + "device (error %d)\n", rc); + return; + } + + priv->mac80211_registered = 1; + + iwl_reset_channel_flag(priv); + } else + ieee80211_start_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); + + if (iwl_is_associated(priv)) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)(&priv->active_rxon); + + memcpy(&priv->staging_rxon, &priv->active_rxon, + sizeof(priv->staging_rxon)); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + } + + /* Configure BT coexistence */ + iwl_send_bt_config(priv); + + /* Configure the adapter for unassociated operation */ + iwl_commit_rxon(priv); + + /* At this point, the NIC is initialized and operational */ + priv->notif_missed_beacons = 0; + set_bit(STATUS_READY, &priv->status); + + iwl4965_rf_kill_ct_config(priv); + IWL_DEBUG_INFO("ALIVE processing complete.\n"); + + if (priv->error_recovering) + iwl_error_recovery(priv); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + struct ieee80211_conf *conf = NULL; + + IWL_DEBUG_INFO(DRV_NAME " is going down\n"); + + conf = ieee80211_get_hw_conf(priv->hw); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + iwl_cancel_deferred_work(priv); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called 
iwl_init() then + * clear all bits but the RF Kill and SUSPEND bits and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill and + * SUSPEND bits and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR; + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_txq_ctx_stop(priv); + iwl_hw_rxq_stop(priv); + + spin_lock_irqsave(&priv->lock, flags); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_stop_master(priv); + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + iwl_hw_nic_reset(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl_clear_free_frames(priv); +} + +static void iwl_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl_down(priv); + mutex_unlock(&priv->mutex); +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + DECLARE_MAC_BUF(mac); + int rc, i; + u32 hw_rf_kill = 0; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARNING("Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { + IWL_WARNING("Radio disabled by SW RF kill (module " + "parameter)\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl_hw_nic_init(priv); + if (rc) { + IWL_ERROR("Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
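+ * (ucode_data_backup is the "backup cache" buffer described where the uCode + * buffers are allocated; the device restores its data SRAM from it across + * power-downs, so it must start out identical to the pristine data image.)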
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + /* If platform's RF_KILL switch is set to KILL, + * wait for BIT_INT_RF_KILL interrupt before loading uCode + * and getting things started */ + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) { + IWL_WARNING("Radio disabled by HW RF Kill switch\n"); + return 0; + } + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = iwl_load_bsm(priv); + + if (rc) { + IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl_nic_start(priv); + + /* MAC Address location in EEPROM same for 3945/4965 */ + get_eeprom_mac(priv, priv->mac_addr); + IWL_DEBUG_INFO("MAC address: %s\n", + print_mac(mac, priv->mac_addr)); + + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERROR("Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_rf_kill(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); + + wake_up_interruptible(&priv->wait_command_queue); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + if (!iwl_is_rfkill(priv)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, + "HW and/or SW RF Kill no longer active, restarting " + "device\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } else { + + if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by SW switch\n"); + else + IWL_WARNING("Radio Frequency Kill Switch is On:\n" + "Kill switch must be turned off for " + "wireless networking to work.\n"); + } + mutex_unlock(&priv->mutex); +} + +#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) + +static void iwl_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, + "Scan completion watchdog resetting 
adapter (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_request_scan(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, request_scan); + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .meta.flags = CMD_SIZE_HUGE, + }; + int rc = 0; + struct iwl_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 direct_mask; + int phymode; + + conf = ieee80211_get_hw_conf(priv->hw); + + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + IWL_WARNING("request scan called when driver not ready.\n"); + goto done; + } + + /* Make sure the scan wasn't cancelled before this queued work + * was given the chance to run... */ + if (!test_bit(STATUS_SCANNING, &priv->status)) + goto done; + + /* This should never be called or scheduled if there is currently + * a scan active in the hardware. */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " + "Ignoring second request.\n"); + rc = -EIO; + goto done; + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); + goto done; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); + goto done; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); + goto done; + } + + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); + goto done; + } + + if (!priv->scan_bands) { + IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); + goto done; + } + + if (!priv->scan) { + priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { + rc = -ENOMEM; + goto done; + } + } + scan = priv->scan; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + unsigned long flags; + + IWL_DEBUG_INFO("Scanning while associated...\n"); + + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(600 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + /* We should add the ability for user to lock to PASSIVE ONLY */ + if (priv->one_direct_scan) { + IWL_DEBUG_SCAN + ("Kicking off one direct scan for '%s'\n", + iwl_escape_essid(priv->direct_ssid, + priv->direct_ssid_len)); + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->direct_ssid_len; + memcpy(scan->direct_scan[0].ssid, + priv->direct_ssid, priv->direct_ssid_len); + direct_mask = 1; + } else if (!iwl_is_associated(priv)) { + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->essid_len; + memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); + direct_mask = 1; + } else + 
direct_mask = 0; + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + IWL_MAX_SCAN_SIZE - sizeof(scan), 0)); + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + scan->tx_cmd.tx_flags |= cpu_to_le32(0x200); + + switch (priv->scan_bands) { + case 2: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate_n_flags = + iwl_hw_set_rate_n_flags(IWL_RATE_1M_PLCP, + RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); + + scan->good_CRC_th = 0; + phymode = MODE_IEEE80211G; + break; + + case 1: + scan->tx_cmd.rate_n_flags = + iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP, + RATE_MCS_ANT_B_MSK); + scan->good_CRC_th = IWL_GOOD_CRC_TH; + phymode = MODE_IEEE80211A; + break; + + default: + IWL_WARNING("Invalid scan band count\n"); + goto done; + } + + /* select Rx chains */ + + /* Force use of chains B and C (0x6) for scan Rx. + * Avoid A (0x1) because of its off-channel reception on A-band. + * MIMO is not used here, but value is required to make uCode happy. */ + scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | + cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) | + (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | + (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + if (direct_mask) + IWL_DEBUG_SCAN + ("Initiating direct scan for %s.\n", + iwl_escape_essid(priv->essid, priv->essid_len)); + else + IWL_DEBUG_SCAN("Initiating indirect scan.\n"); + + scan->channel_count = + iwl_get_channels_for_scan( + priv, phymode, 1, /* active */ + direct_mask, + (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + mutex_unlock(&priv->mutex); + return; + + done: + /* inform mac80211 sacn aborted */ + queue_work(priv->workqueue, &priv->scan_completed); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_up(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, up); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl_up(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_down(priv); + queue_work(priv->workqueue, &priv->up); +} + +static void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_post_associate(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, + post_associate.work); + + int rc = 0; + struct ieee80211_conf *conf = NULL; + DECLARE_MAC_BUF(mac); + + if (priv->iw_mode == 
IEEE80211_IF_TYPE_AP) { + IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); + return; + } + + IWL_DEBUG_ASSOC("Associated as %d to: %s\n", + priv->assoc_id, + print_mac(mac, priv->active_rxon.bssid_addr)); + + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + +#ifdef CONFIG_IWLWIFI_HT + if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht) + iwl4965_set_rxon_ht(priv, &priv->current_assoc_ht); + else { + priv->active_rate_ht[0] = 0; + priv->active_rate_ht[1] = 0; + priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ; + } +#endif /* CONFIG_IWLWIFI_HT*/ + iwl4965_set_rxon_chain(priv); + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwl_commit_rxon(priv); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_STA: + iwl_rate_scale_init(priv->hw, IWL_AP_ID); + break; + + case IEEE80211_IF_TYPE_IBSS: + + /* clear out the station table */ + iwl_clear_stations_table(priv); + + iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); + iwl_rxon_add_station(priv, priv->bssid, 0); + iwl_rate_scale_init(priv->hw, IWL_STA_ID); + iwl_send_beacon_cmd(priv); + + break; + + default: + IWL_ERROR("%s Should not be called in %d mode\n", + __FUNCTION__, priv->iw_mode); + break; + } + + iwl_sequence_reset(priv); + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + /* Enable Rx differential gain and sensitivity calibrations */ + iwl4965_chain_noise_reset(priv); + priv->start_calib = 1; +#endif /* CONFIG_IWLWIFI_SENSITIVITY */ + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->assoc_station_added = 1; + +#ifdef CONFIG_IWLWIFI_QOS + iwl_activate_qos(priv, 0); +#endif /* CONFIG_IWLWIFI_QOS */ + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + abort_scan); + + if (!iwl_is_ready(priv)) + return; + + mutex_lock(&priv->mutex); + + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + ieee80211_scan_completed(priv->hw); + + /* Since setting the TXPOWER may have been deferred while + * performing the scan, fire one off */ + 
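+ /* priv->mutex serializes this with the other deferred-work handlers and + * with the mac80211 callbacks that also update RXON/TX-power state. */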
mutex_lock(&priv->mutex); + iwl_hw_reg_send_txpower(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static int iwl_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + priv->is_open = 1; + + if (!iwl_is_rfkill(priv)) + ieee80211_start_queues(priv->hw); + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static void iwl_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + priv->is_open = 0; + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + IWL_DEBUG_MAC80211("leave\n"); +} + +static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + IWL_DEBUG_MAC80211("leave - monitor\n"); + return -1; + } + + IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ctl->tx_rate); + + if (iwl_tx_skb(priv, skb, ctl)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); + if (conf->mac_addr) + IWL_DEBUG_MAC80211("enter: MAC %s\n", + print_mac(mac, conf->mac_addr)); + + if (priv->interface_id) { + IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->interface_id = conf->if_id; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + iwl_set_mode(priv, conf->type); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +/** + * iwl_mac_config - mac80211 config callback + * + * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to + * be set inappropriately and the driver currently sets the hardware up to + * use it whenever needed. 
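+ * + * Channel, phymode and radio-enable changes are applied here; if the staging + * RXON differs from the active RXON by the end, it is committed with + * iwl_commit_rxon().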
+ */ +static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only + * what is exposed through include/ declrations */ + if (unlikely(!iwl_param_disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + + ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", + conf->channel, conf->phymode); + IWL_DEBUG_MAC80211("leave - invalid channel\n"); + spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + +#ifdef CONFIG_IWLWIFI_HT + /* if we are switching fron ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel) +#ifdef IEEE80211_CONF_CHANNEL_SWITCH + && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) +#endif + ) + priv->staging_rxon.flags = 0; +#endif /* CONFIG_IWLWIFI_HT */ + + iwl_set_rxon_channel(priv, conf->phymode, conf->channel); + + iwl_set_flags_for_phymode(priv, conf->phymode); + + /* The list of supported rates and rate mask can be different + * for each phymode; since the phymode may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_set_rate(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef IEEE80211_CONF_CHANNEL_SWITCH + if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { + iwl_hw_channel_switch(priv, conf->channel); + mutex_unlock(&priv->mutex); + return 0; + } +#endif + + iwl_radio_kill_sw(priv, !conf->radio_enabled); + + if (!conf->radio_enabled) { + IWL_DEBUG_MAC80211("leave - radio disabled\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_MAC80211("leave - RF kill\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + iwl_set_rate(priv); + + if (memcmp(&priv->active_rxon, + &priv->staging_rxon, sizeof(priv->staging_rxon))) + iwl_commit_rxon(priv); + else + IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); + + IWL_DEBUG_MAC80211("leave\n"); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_config_ap(struct iwl_priv *priv) +{ + int rc = 0; + + if (priv->status & STATUS_EXIT_PENDING) + return; + + /* The following should be done only at AP bring up */ + if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { + + /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + /* RXON Timing */ + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + iwl4965_set_rxon_chain(priv); + + /* FIXME: what should be the assoc_id for AP? 
*/ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); +#ifdef CONFIG_IWLWIFI_QOS + iwl_activate_qos(priv, 1); +#endif + iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); + } + iwl_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... */ +} + +static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + DECLARE_MAC_BUF(mac); + unsigned long flags; + int rc; + + if (conf == NULL) + return -EIO; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!conf->beacon || !conf->ssid_len)) { + IWL_DEBUG_MAC80211 + ("Leaving in AP mode because HostAPD is not ready.\n"); + return 0; + } + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id); + if (conf->bssid) + IWL_DEBUG_MAC80211("bssid: %s\n", + print_mac(mac, conf->bssid)); + +/* + * very dubious code was here; the probe filtering flag is never set: + * + if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && + !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { + */ + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->interface_id != if_id) { + IWL_DEBUG_MAC80211("leave - interface_id != if_id\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (!conf->bssid) { + conf->bssid = priv->mac_addr; + memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); + IWL_DEBUG_MAC80211("bssid was set to: %s\n", + print_mac(mac, conf->bssid)); + } + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = conf->beacon; + } + + if (conf->bssid && !is_zero_ether_addr(conf->bssid) && + !is_multicast_ether_addr(conf->bssid)) { + /* If there is currently a HW scan going on in the background + * then we need to cancel it else the RXON below will fail. 
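+ * iwl_scan_cancel_timeout() below gives the abort up to 100ms to complete; + * if it does not, we return -EAGAIN instead of committing a RXON while the + * scan still owns the hardware.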
*/ + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress " + "after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return -EAGAIN; + } + memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN); + + /* TODO: Audit driver for usage of these members and see + * if mac80211 deprecates them (priv->bssid looks like it + * shouldn't be there, but I haven't scanned the IBSS code + * to verify) - jpk */ + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_config_ap(priv); + else { + priv->staging_rxon.filter_flags |= + RXON_FILTER_ASSOC_MSK; + rc = iwl_commit_rxon(priv); + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) + iwl_rxon_add_station( + priv, priv->active_rxon.bssid_addr, 1); + } + + } else { + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + } + + spin_lock_irqsave(&priv->lock, flags); + if (!conf->ssid_len) + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + else + memcpy(priv->essid, conf->ssid, conf->ssid_len); + + priv->essid_len = conf->ssid_len; + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_addr_list *mc_list) +{ + /* + * XXX: dummy + * see also iwl_connection_init_rx_config + */ + *total_flags = 0; +} + +static void iwl_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + mutex_lock(&priv->mutex); + if (priv->interface_id == conf->if_id) { + priv->interface_id = 0; + memset(priv->bssid, 0, ETH_ALEN); + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + priv->essid_len = 0; + } + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) +static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) +{ + int rc = 0; + unsigned long flags; + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + + if (!iwl_is_ready_rf(priv)) { + rc = -EIO; + IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); + goto out_unlock; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ + rc = -EIO; + IWL_ERROR("ERROR: APs don't scan\n"); + goto out_unlock; + } + + /* if we just finished scan ask for delay */ + if (priv->last_scan_jiffies && + time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, + jiffies)) { + rc = -EAGAIN; + goto out_unlock; + } + if (len) { + IWL_DEBUG_SCAN("direct scan for " + "%s [%d]\n ", + iwl_escape_essid(ssid, len), (int)len); + + priv->one_direct_scan = 1; + priv->direct_ssid_len = (u8) + min((u8) len, (u8) IW_ESSID_MAX_SIZE); + memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); + } + + rc = iwl_scan_initiate(priv); + + IWL_DEBUG_MAC80211("leave\n"); + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + DECLARE_MAC_BUF(mac); + int rc = 0; + u8 sta_id; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_param_hwcrypto) { + IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + if 
(is_zero_ether_addr(addr)) + /* only support pairwise keys */ + return -EOPNOTSUPP; + + sta_id = iwl_hw_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_MAC80211("leave - %s not in station map.\n", + print_mac(mac, addr)); + return -EINVAL; + } + + mutex_lock(&priv->mutex); + + switch (cmd) { + case SET_KEY: + rc = iwl_update_sta_key_info(priv, key, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 1); + iwl_commit_rxon(priv); + key->hw_key_idx = sta_id; + IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + break; + case DISABLE_KEY: + rc = iwl_clear_sta_key_info(priv, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 0); + iwl_commit_rxon(priv); + IWL_DEBUG_MAC80211("disable hwcrypto key\n"); + } + break; + default: + rc = -EINVAL; + } + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return rc; +} + +static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; +#ifdef CONFIG_IWLWIFI_QOS + unsigned long flags; + int q; +#endif /* CONFIG_IWL_QOS */ + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); + return 0; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (!priv->qos_data.qos_enable) { + priv->qos_data.qos_active = 0; + IWL_DEBUG_MAC80211("leave - qos not enabled\n"); + return 0; + } + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); + priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); + priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + priv->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->burst_time * 100)); + + priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; + priv->qos_data.qos_active = 1; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_activate_qos(priv, 1); + else if (priv->assoc_id && iwl_is_associated(priv)) + iwl_activate_qos(priv, 0); + + mutex_unlock(&priv->mutex); + +#endif /*CONFIG_IWLWIFI_QOS */ + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct iwl_priv *priv = hw->priv; + int i, avail; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + unsigned long flags; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + for (i = 0; i < AC_NUM; i++) { + txq = &priv->txq[i]; + q = &txq->q; + avail = iwl_queue_space(q); + + stats->data[i].len = q->n_window - avail; + stats->data[i].limit = q->n_window - q->high_mark; + stats->data[i].count = q->n_window; + + } + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static int iwl_mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + 
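+ /* Everything below drops cached association state: HT info, QoS params, + * pending post_associate work and any old IBSS beacon, so the next + * association (IBSS only, see the check further down) starts clean. */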
unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + + priv->lq_mngr.lq_ready = 0; +#ifdef CONFIG_IWLWIFI_HT + spin_lock_irqsave(&priv->lock, flags); + memset(&priv->current_assoc_ht, 0, sizeof(struct sta_ht_info)); + spin_unlock_irqrestore(&priv->lock, flags); +#ifdef CONFIG_IWLWIFI_HT_AGG +/* if (priv->lq_mngr.agg_ctrl.granted_ba) + iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/ + + memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl_agg_control)); + priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10; + priv->lq_mngr.agg_ctrl.ba_timeout = 5000; + priv->lq_mngr.agg_ctrl.auto_agg = 1; + + if (priv->lq_mngr.agg_ctrl.auto_agg) + priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED; +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + + cancel_delayed_work(&priv->post_associate); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = 0; + priv->assoc_capability = 0; + priv->call_post_assoc_from_beacon = 0; + priv->assoc_station_added = 0; + + /* new association get rid of ibss beacon skb */ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = NULL; + + priv->beacon_int = priv->hw->conf.beacon_int; + priv->timestamp1 = 0; + priv->timestamp0 = 0; + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) + priv->beacon_int = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Per mac80211.h: This is only used in IBSS mode... */ + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not in IBSS\n"); + mutex_unlock(&priv->mutex); + return; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + priv->only_active_channel = 0; + + iwl_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not IBSS\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = skb; + + priv->assoc_id = 0; + + IWL_DEBUG_MAC80211("leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + + queue_work(priv->workqueue, &priv->post_associate.work); + + mutex_unlock(&priv->mutex); + + return 0; +} + +#ifdef CONFIG_IWLWIFI_HT +union ht_cap_info { + struct { + u16 advanced_coding_cap :1; + u16 supported_chan_width_set :1; + u16 mimo_power_save_mode :2; + u16 green_field :1; + u16 short_GI20 :1; + u16 short_GI40 :1; + u16 tx_stbc :1; + u16 rx_stbc :1; + u16 beam_forming :1; + u16 delayed_ba :1; + u16 maximal_amsdu_size :1; + u16 cck_mode_at_40MHz :1; + u16 psmp_support :1; + u16 stbc_ctrl_frame_support :1; + u16 sig_txop_protection_support :1; + }; + u16 val; +} __attribute__ ((packed)); + +union ht_param_info{ + struct { + u8 max_rx_ampdu_factor :2; + u8 mpdu_density :3; + u8 reserved :3; + }; + u8 val; +} __attribute__ ((packed)); + +union ht_exra_param_info { + struct { + u8 ext_chan_offset :2; + u8 tx_chan_width 
:1; + u8 rifs_mode :1; + u8 controlled_access_only :1; + u8 service_interval_granularity :3; + }; + u8 val; +} __attribute__ ((packed)); + +union ht_operation_mode{ + struct { + u16 op_mode :2; + u16 non_GF :1; + u16 reserved :13; + }; + u16 val; +} __attribute__ ((packed)); + + +static int sta_ht_info_init(struct ieee80211_ht_capability *ht_cap, + struct ieee80211_ht_additional_info *ht_extra, + struct sta_ht_info *ht_info_ap, + struct sta_ht_info *ht_info) +{ + union ht_cap_info cap; + union ht_operation_mode op_mode; + union ht_param_info param_info; + union ht_exra_param_info extra_param_info; + + IWL_DEBUG_MAC80211("enter: \n"); + + if (!ht_info) { + IWL_DEBUG_MAC80211("leave: ht_info is NULL\n"); + return -1; + } + + if (ht_cap) { + cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info); + param_info.val = ht_cap->mac_ht_params_info; + ht_info->is_ht = 1; + if (cap.short_GI20) + ht_info->sgf |= 0x1; + if (cap.short_GI40) + ht_info->sgf |= 0x2; + ht_info->is_green_field = cap.green_field; + ht_info->max_amsdu_size = cap.maximal_amsdu_size; + ht_info->supported_chan_width = cap.supported_chan_width_set; + ht_info->tx_mimo_ps_mode = cap.mimo_power_save_mode; + memcpy(ht_info->supp_rates, ht_cap->supported_mcs_set, 16); + + ht_info->ampdu_factor = param_info.max_rx_ampdu_factor; + ht_info->mpdu_density = param_info.mpdu_density; + + IWL_DEBUG_MAC80211("SISO mask 0x%X MIMO mask 0x%X \n", + ht_cap->supported_mcs_set[0], + ht_cap->supported_mcs_set[1]); + + if (ht_info_ap) { + ht_info->control_channel = ht_info_ap->control_channel; + ht_info->extension_chan_offset = + ht_info_ap->extension_chan_offset; + ht_info->tx_chan_width = ht_info_ap->tx_chan_width; + ht_info->operating_mode = ht_info_ap->operating_mode; + } + + if (ht_extra) { + extra_param_info.val = ht_extra->ht_param; + ht_info->control_channel = ht_extra->control_chan; + ht_info->extension_chan_offset = + extra_param_info.ext_chan_offset; + ht_info->tx_chan_width = extra_param_info.tx_chan_width; + op_mode.val = (u16) + le16_to_cpu(ht_extra->operation_mode); + ht_info->operating_mode = op_mode.op_mode; + IWL_DEBUG_MAC80211("control channel %d\n", + ht_extra->control_chan); + } + } else + ht_info->is_ht = 0; + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_conf_ht(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap, + struct ieee80211_ht_additional_info *ht_extra) +{ + struct iwl_priv *priv = hw->priv; + int rs; + + IWL_DEBUG_MAC80211("enter: \n"); + + rs = sta_ht_info_init(ht_cap, ht_extra, NULL, &priv->current_assoc_ht); + iwl4965_set_rxon_chain(priv); + + if (priv && priv->assoc_id && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->beacon_int) + queue_work(priv->workqueue, &priv->post_associate.work); + else + priv->call_post_assoc_from_beacon = 1; + spin_unlock_irqrestore(&priv->lock, flags); + } + + IWL_DEBUG_MAC80211("leave: control channel %d\n", + ht_extra->control_chan); + return rs; + +} + +static void iwl_set_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap, + u8 use_wide_chan) +{ + union ht_cap_info cap; + union ht_param_info param_info; + + memset(&cap, 0, sizeof(union ht_cap_info)); + memset(¶m_info, 0, sizeof(union ht_param_info)); + + cap.maximal_amsdu_size = HT_IE_MAX_AMSDU_SIZE_4K; + cap.green_field = 1; + cap.short_GI20 = 1; + cap.short_GI40 = 1; + cap.supported_chan_width_set = use_wide_chan; + cap.mimo_power_save_mode = 0x3; + + param_info.max_rx_ampdu_factor = 
CFG_HT_RX_AMPDU_FACTOR_DEF; + param_info.mpdu_density = CFG_HT_MPDU_DENSITY_DEF; + ht_cap->capabilities_info = (__le16) cpu_to_le16(cap.val); + ht_cap->mac_ht_params_info = (u8) param_info.val; + + ht_cap->supported_mcs_set[0] = 0xff; + ht_cap->supported_mcs_set[1] = 0xff; + ht_cap->supported_mcs_set[4] = + (cap.supported_chan_width_set) ? 0x1: 0x0; +} + +static void iwl_mac_get_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap) +{ + u8 use_wide_channel = 1; + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter: \n"); + if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) + use_wide_channel = 0; + + /* no fat tx allowed on 2.4GHZ */ + if (priv->phymode != MODE_IEEE80211A) + use_wide_channel = 0; + + iwl_set_ht_capab(hw, ht_cap, use_wide_channel); + IWL_DEBUG_MAC80211("leave: \n"); +} +#endif /*CONFIG_IWLWIFI_HT*/ + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + */ + +static ssize_t show_debug_level(struct device_driver *d, char *buf) +{ + return sprintf(buf, "0x%08X\n", iwl_debug_level); +} +static ssize_t store_debug_level(struct device_driver *d, + const char *buf, size_t count) +{ + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 0); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in hex or decimal form.\n", buf); + else + iwl_debug_level = val; + + return strnlen(buf, count); +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static ssize_t show_rf_kill(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* + * 0 - RF kill not enabled + * 1 - SW based RF kill active (sysfs) + * 2 - HW based RF kill active + * 3 - Both HW and SW based RF kill active + */ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) | + (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 
0x2 : 0x0); + + return sprintf(buf, "%i\n", val); +} + +static ssize_t store_rf_kill(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + mutex_lock(&priv->mutex); + iwl_radio_kill_sw(priv, buf[0] == '1'); + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_rs_window(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct iwl_priv *priv = d->driver_data; + return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID); +} +static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + return sprintf(buf, "%d\n", priv->user_txpower_limit); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in decimal form.\n", buf); + else + iwl_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", + flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... 
*/ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + +static ssize_t show_tune(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + (priv->phymode << 8) | + le16_to_cpu(priv->active_rxon.channel)); +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode); + +static ssize_t store_tune(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u16 tune = simple_strtoul(p, &p, 0); + u8 phymode = (tune >> 8) & 0xff; + u16 channel = tune & 0xff; + + IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel); + + mutex_lock(&priv->mutex); + if ((le16_to_cpu(priv->staging_rxon.channel) != channel) || + (priv->phymode != phymode)) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + if (!ch_info) { + IWL_WARNING("Requested invalid phymode/channel " + "combination: %d %d\n", phymode, channel); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing phymode and " + "rxon.channel = %d %d\n", + phymode, channel); + + iwl_set_rxon_channel(priv, phymode, channel); + iwl_set_flags_for_phymode(priv, phymode); + + iwl_set_rate(priv); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune); + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +static ssize_t show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *) & measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(priv->active_rxon.channel), + .start_time = cpu_to_le64(priv->last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p 
&& *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO("Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + show_measurement, store_measurement); +#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */ + +static ssize_t store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, + store_retry_rate); + +static ssize_t store_power_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int rc; + int mode; + + mode = simple_strtoul(buf, NULL, 0); + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + rc = -EAGAIN; + goto out; + } + + if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) + mode = IWL_POWER_AC; + else + mode |= IWL_POWER_ENABLED; + + if (mode != priv->power_mode) { + rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode)); + if (rc) { + IWL_DEBUG_MAC80211("failed setting power mode.\n"); + goto out; + } + priv->power_mode = mode; + } + + rc = count; + + out: + mutex_unlock(&priv->mutex); + return rc; +} + +#define MAX_WX_STRING 80 + +/* Values are in microsecond */ +static const s32 timeout_duration[] = { + 350000, + 250000, + 75000, + 37000, + 25000, +}; +static const s32 period_duration[] = { + 400000, + 700000, + 1000000, + 1000000, + 1000000 +}; + +static ssize_t show_power_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int level = IWL_POWER_LEVEL(priv->power_mode); + char *p = buf; + + p += sprintf(p, "%d ", level); + switch (level) { + case IWL_POWER_MODE_CAM: + case IWL_POWER_AC: + p += sprintf(p, "(AC)"); + break; + case IWL_POWER_BATTERY: + p += sprintf(p, "(BATTERY)"); + break; + default: + p += sprintf(p, + "(Timeout %dms, Period %dms)", + timeout_duration[level - 1] / 1000, + period_duration[level - 1] / 1000); + } + + if (!(priv->power_mode & IWL_POWER_ENABLED)) + p += sprintf(p, " OFF\n"); + else + p += sprintf(p, " \n"); + + return (p - buf + 1); + +} + +static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, + store_power_level); + +static ssize_t show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int len = 0, i; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode = NULL; + int count = 0; + + if (!iwl_is_ready(priv)) + return -EAGAIN; + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G); + if (!hw_mode) + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } + + len += + sprintf(&buf[len], + "Displaying %d channels in 2.4GHz band " + "(802.11bg):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. 
+ flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } else { + channels = NULL; + count = 0; + } + + len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band " + "(802.11a):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. + flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + return len; +} + +static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *) & priv->statistics; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + "Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->antenna); +} + +static ssize_t store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ant; + struct iwl_priv *priv = dev_get_drvdata(d); + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO("not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); + priv->antenna = (enum iwl_antenna)ant; + } else + IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); + +static ssize_t show_status(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + if (!iwl_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); + +static ssize_t dump_event_log(struct device *d, + struct device_attribute *attr, + const 
char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->up, iwl_bg_up); + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); + + iwl_hw_setup_deferred_work(priv); + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl_hw_cancel_deferred_work(priv); + + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_delayed_work(&priv->post_associate); + cancel_work_sync(&priv->beacon_update); +} + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_dump_events.attr, + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + &dev_attr_measurement.attr, +#endif + &dev_attr_power_level.attr, + &dev_attr_retry_rate.attr, + &dev_attr_rf_kill.attr, + &dev_attr_rs_window.attr, + &dev_attr_statistics.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tune.attr, + &dev_attr_tx_power.attr, + + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +static struct ieee80211_ops iwl_hw_ops = { + .tx = iwl_mac_tx, + .start = iwl_mac_start, + .stop = iwl_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .config_interface = iwl_mac_config_interface, + .configure_filter = iwl_configure_filter, + .set_key = iwl_mac_set_key, + .get_stats = iwl_mac_get_stats, + .get_tx_stats = iwl_mac_get_tx_stats, + .conf_tx = iwl_mac_conf_tx, + .get_tsf = iwl_mac_get_tsf, + .reset_tsf = iwl_mac_reset_tsf, + .beacon_update = iwl_mac_beacon_update, +#ifdef CONFIG_IWLWIFI_HT + .conf_ht = iwl_mac_conf_ht, + .get_ht_capab = iwl_mac_get_ht_capab, +#ifdef CONFIG_IWLWIFI_HT_AGG + .ht_tx_agg_start = iwl_mac_ht_tx_agg_start, + .ht_tx_agg_stop = iwl_mac_ht_tx_agg_stop, + .ht_rx_agg_start = iwl_mac_ht_rx_agg_start, + .ht_rx_agg_stop = iwl_mac_ht_rx_agg_stop, +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + .hw_scan = iwl_mac_hw_scan +}; + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + int i; + + if 
(iwl_param_disable_hw_scan) { + IWL_DEBUG_INFO("Disabling hw_scan\n"); + iwl_hw_ops.hw_scan = NULL; + } + + if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) || + (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) { + IWL_ERROR("invalid queues_num, should be between %d and %d\n", + IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); + err = -EINVAL; + goto out; + } + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops); + if (hw == NULL) { + IWL_ERROR("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); + priv = hw->priv; + priv->hw = hw; + + priv->pci_dev = pdev; + priv->antenna = (enum iwl_antenna)iwl_param_antenna; +#ifdef CONFIG_IWLWIFI_DEBUG + iwl_debug_level = iwl_param_debug; + atomic_set(&priv->restrict_refcnt, 0); +#endif + priv->retry_rate = 1; + + priv->ibss_beacon = NULL; + + /* Tell mac80211 and its clients (e.g. Wireless Extensions) + * the range of signal quality values that we'll provide. + * Negative values for level/noise indicate that we'll provide dBm. + * For WE, at least, non-0 values here *enable* display of values + * in app (iwconfig). */ + hw->max_rssi = -20; /* signal level, negative indicates dBm */ + hw->max_noise = -20; /* noise level, negative indicates dBm */ + hw->max_signal = 100; /* link quality indication (%) */ + + /* Tell mac80211 our Tx characteristics */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; + + hw->queues = 4; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + hw->queues = 16; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->power_data.lock); + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + spin_lock_init(&priv->lq_mngr.lock); + + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) + INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + iwl_clear_stations_table(priv); + + priv->data_retry_limit = -1; + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->phymode = -1; + + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); + + /* Initialize module parameter values here */ + + if (iwl_param_disable) { + set_bit(STATUS_RF_KILL_SW, &priv->status); + IWL_DEBUG_INFO("Radio disabled.\n"); + } + + priv->iw_mode = IEEE80211_IF_TYPE_STA; + + priv->ps_mode = 0; + priv->use_ant_b_for_management_frame = 1; /* start with ant B */ + priv->is_ht_enabled = 1; + priv->channel_width = IWL_CHANNEL_WIDTH_40MHZ; + priv->valid_antenna = 0x7; /* assume 
all 3 connected */ + priv->ps_mode = IWL_MIMO_PS_NONE; + priv->cck_power_index_compensation = iwl_read32( + priv, CSR_HW_REV_WA_REG); + + iwl4965_set_rxon_chain(priv); + + printk(KERN_INFO DRV_NAME + ": Detected Intel Wireless WiFi Link 4965AGN\n"); + + /* Device-specific setup */ + if (iwl_hw_set_hw_setting(priv)) { + IWL_ERROR("failed to set hw settings\n"); + mutex_unlock(&priv->mutex); + goto out_iounmap; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (iwl_param_qos_enable) + priv->qos_data.qos_enable = 1; + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; +#endif /* CONFIG_IWLWIFI_QOS */ + + iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6); + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + priv->rates_mask = IWL_RATES_MASK; + /* If power management is turned on, default to AC mode */ + priv->power_mode = IWL_POWER_AC; + priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; + + pci_enable_msi(pdev); + + err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERROR("Error allocating IRQ %d\n", pdev->irq); + goto out_disable_msi; + } + + mutex_lock(&priv->mutex); + + err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); + if (err) { + IWL_ERROR("failed to create sysfs device attributes\n"); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. */ + err = iwl_read_ucode(priv); + if (err) { + IWL_ERROR("Could not read microcode: %d\n", err); + mutex_unlock(&priv->mutex); + goto out_pci_alloc; + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_INFO("Queing UP work.\n"); + + queue_work(priv->workqueue, &priv->up); + + return 0; + + out_pci_alloc: + iwl_dealloc_ucode_pci(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + out_release_irq: + free_irq(pdev->irq, priv); + + out_disable_msi: + pci_disable_msi(pdev); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_unset_hw_setting(priv); + + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + out_ieee80211_free_hw: + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + struct list_head *p, *q; + int i; + + if (!priv) + return; + + IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); + + mutex_lock(&priv->mutex); + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + mutex_unlock(&priv->mutex); + + /* Free MAC hash list for ADHOC */ + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { + list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { + list_del(p); + kfree(list_entry(p, struct iwl_ibss_seq, list)); + } + } + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + iwl_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl_rx_queue_free(priv, &priv->rxq); + iwl_hw_txq_ctx_free(priv); + + iwl_unset_hw_setting(priv); + iwl_clear_stations_table(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + iwl_rate_control_unregister(priv->hw); + } + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + kfree(priv->channel_info); + + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + +#ifdef CONFIG_PM + +static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + + mutex_lock(&priv->mutex); + + set_bit(STATUS_IN_SUSPEND, &priv->status); + + /* Take down the device; powers it off, etc. */ + __iwl_down(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_resume(struct iwl_priv *priv) +{ + unsigned long flags; + + /* The following it a temporary work around due to the + * suspend / resume not fully initializing the NIC correctly. + * Without all of the following, resume will not attempt to take + * down the NIC (it shouldn't really need to) and will just try + * and bring the NIC back up. However that fails during the + * ucode verification process. This then causes iwl_down to be + * called *after* iwl_hw_nic_init() has succeeded -- which + * then lets the next init sequence succeed. So, we've + * replicated all of that NIC init code here... */ + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + iwl_hw_nic_init(priv); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_reset(priv); + + /* Bring the device back up */ + clear_bit(STATUS_IN_SUSPEND, &priv->status); + queue_work(priv->workqueue, &priv->up); +} + +static int iwl_pci_resume(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + int err; + + printk(KERN_INFO "Coming out of suspend...\n"); + + mutex_lock(&priv->mutex); + + pci_set_power_state(pdev, PCI_D0); + err = pci_enable_device(pdev); + pci_restore_state(pdev); + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. 
+ */ + pci_write_config_byte(pdev, 0x41, 0x00); + + iwl_resume(priv); + mutex_unlock(&priv->mutex); + + return 0; +} + +#endif /* CONFIG_PM */ + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = __devexit_p(iwl_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + ret = pci_register_driver(&iwl_driver); + if (ret) { + IWL_ERROR("Unable to initialize PCI module\n"); + return ret; + } +#ifdef CONFIG_IWLWIFI_DEBUG + ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level); + if (ret) { + IWL_ERROR("Unable to create driver sysfs file\n"); + pci_unregister_driver(&iwl_driver); + return ret; + } +#endif + + return ret; +} + +static void __exit iwl_exit(void) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level); +#endif + pci_unregister_driver(&iwl_driver); +} + +module_param_named(antenna, iwl_param_antenna, int, 0444); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(disable, iwl_param_disable, int, 0444); +MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); +module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444); +MODULE_PARM_DESC(hwcrypto, + "using hardware crypto engine (default 0 [software])\n"); +module_param_named(debug, iwl_param_debug, int, 0444); +MODULE_PARM_DESC(debug, "debug output mask"); +module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); + +module_param_named(queues_num, iwl_param_queues_num, int, 0444); +MODULE_PARM_DESC(queues_num, "number of hw queues."); + +/* QoS */ +module_param_named(qos_enable, iwl_param_qos_enable, int, 0444); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); + +module_exit(iwl_exit); +module_init(iwl_init); diff --git a/drivers/net/wireless/iwlwifi/iwlwifi.h b/drivers/net/wireless/iwlwifi/iwlwifi.h new file mode 100644 index 000000000000..e0b97c341215 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwlwifi.h @@ -0,0 +1,713 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos <ipw2100-admin@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwlwifi_h__ +#define __iwlwifi_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <net/ieee80211_radiotap.h> + +struct iwl_priv; + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +extern struct pci_device_id iwl_hw_card_ids[]; + +#if IWL == 3945 + +#define DRV_NAME "iwl3945" +#include "iwl-hw.h" +#include "iwl-3945-hw.h" + +#elif IWL == 4965 + +#define DRV_NAME "iwl4965" +#include "iwl-hw.h" +#include "iwl-4965-hw.h" + +#endif + +#include "iwl-prph.h" + +/* + * Driver implementation data structures, constants, inline + * functions + * + * NOTE: DO NOT PUT HARDWARE/UCODE SPECIFIC DECLRATIONS HERE + * + * Hardware specific declrations go into iwl-*hw.h + * + */ + +#include "iwl-debug.h" + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* Module parameters accessible from iwl-*.c */ +extern int iwl_param_disable_hw_scan; +extern int iwl_param_debug; +extern int iwl_param_mode; +extern int iwl_param_disable; +extern int iwl_param_antenna; +extern int iwl_param_hwcrypto; +extern int iwl_param_qos_enable; +extern int iwl_param_queues_num; + +enum iwl_antenna { + IWL_ANTENNA_DIVERSITY, + IWL_ANTENNA_MAIN, + IWL_ANTENNA_AUX +}; + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. 
+ */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct iwl_rx_mem_buffer { + dma_addr_t dma_addr; + struct sk_buff *skb; + struct list_head list; +}; + +struct iwl_rt_rx_hdr { + struct ieee80211_radiotap_header rt_hdr; + __le64 rt_tsf; /* TSF */ + u8 rt_flags; /* radiotap packet flags */ + u8 rt_rate; /* rate in 500kb/s */ + __le16 rt_channelMHz; /* channel in MHz */ + __le16 rt_chbitmask; /* channel bitfield */ + s8 rt_dbmsignal; /* signal in dBm, kluged to signed */ + s8 rt_dbmnoise; + u8 rt_antenna; /* antenna number */ + u8 payload[0]; /* payload... */ +} __attribute__ ((packed)); + +struct iwl_rt_tx_hdr { + struct ieee80211_radiotap_header rt_hdr; + u8 rt_rate; /* rate in 500kb/s */ + __le16 rt_channel; /* channel in mHz */ + __le16 rt_chbitmask; /* channel bitfield */ + s8 rt_dbmsignal; /* signal in dBm, kluged to signed */ + u8 rt_antenna; /* antenna number */ + u8 payload[0]; /* payload... */ +} __attribute__ ((packed)); + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int first_empty; /* 1-st empty entry (index) host_w*/ + int last_used; /* last used entry (index) host_r*/ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +} __attribute__ ((packed)); + +#define MAX_NUM_OF_TBS (20) + +struct iwl_tx_info { + struct ieee80211_tx_status status; + struct sk_buff *skb[MAX_NUM_OF_TBS]; +}; + +/** + * struct iwl_tx_queue - Tx Queue for DMA + * @need_update: need to update read/write index + * @shed_retry: queue is HT AGG enabled + * + * Queue consists of circular buffer of BD's and required locking structures. + */ +struct iwl_tx_queue { + struct iwl_queue q; + struct iwl_tfd_frame *bd; + struct iwl_cmd *cmd; + dma_addr_t dma_addr_cmd; + struct iwl_tx_info *txb; + int need_update; + int sched_retry; + int active; +}; + +#include "iwl-channel.h" + +#if IWL == 3945 +#include "iwl-3945-rs.h" +#else +#include "iwl-4965-rs.h" +#endif + +#define IWL_TX_FIFO_AC0 0 +#define IWL_TX_FIFO_AC1 1 +#define IWL_TX_FIFO_AC2 2 +#define IWL_TX_FIFO_AC3 3 +#define IWL_TX_FIFO_HCCA_1 5 +#define IWL_TX_FIFO_HCCA_2 6 +#define IWL_TX_FIFO_NONE 7 + +/* Minimum number of queues. 
MAX_NUM is defined in hw specific files */ +#define IWL_MIN_NUM_QUEUES 4 + +/* Power management (not Tx power) structures */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; +}; +#define IWL_POWER_RANGE_0 (0) +#define IWL_POWER_RANGE_1 (1) + +#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */ +#define IWL_POWER_INDEX_3 0x03 +#define IWL_POWER_INDEX_5 0x05 +#define IWL_POWER_AC 0x06 +#define IWL_POWER_BATTERY 0x07 +#define IWL_POWER_LIMIT 0x07 +#define IWL_POWER_MASK 0x0F +#define IWL_POWER_ENABLED 0x10 +#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK) + +struct iwl_power_mgr { + spinlock_t lock; + struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC]; + struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC]; + u8 active_index; + u32 dtim_val; +}; + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl_frame { + union { + struct ieee80211_hdr frame; + struct iwl_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf) +#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8) +#define SEQ_TO_INDEX(x) (x & 0xff) +#define INDEX_TO_SEQ(x) (x & 0xff) +#define SEQ_HUGE_FRAME (0x4000) +#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000) +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + /* CMD_SIZE_NORMAL = 0, */ + CMD_SIZE_HUGE = (1 << 0), + /* CMD_SYNC = 0, */ + CMD_ASYNC = (1 << 1), + /* CMD_NO_SKB = 0, */ + CMD_WANT_SKB = (1 << 2), +}; + +struct iwl_cmd; +struct iwl_priv; + +struct iwl_cmd_meta { + struct iwl_cmd_meta *source; + union { + struct sk_buff *skb; + int (*callback)(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb); + } __attribute__ ((packed)) u; + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. 
*/ + u32 flags; + +} __attribute__ ((packed)); + +struct iwl_cmd { + struct iwl_cmd_meta meta; + struct iwl_cmd_header hdr; + union { + struct iwl_addsta_cmd addsta; + struct iwl_led_cmd led; + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct iwl_bt_cmd bt; + struct iwl_rxon_time_cmd rxon_time; + struct iwl_powertable_cmd powertable; + struct iwl_qosparam_cmd qosparam; + struct iwl_tx_cmd tx; + struct iwl_tx_beacon_cmd tx_beacon; + struct iwl_rxon_assoc_cmd rxon_assoc; + u8 *indirect; + u8 payload[360]; + } __attribute__ ((packed)) cmd; +} __attribute__ ((packed)); + +struct iwl_host_cmd { + u8 id; + u16 len; + struct iwl_cmd_meta meta; + const void *data; +}; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \ + sizeof(struct iwl_cmd_meta)) + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct iwl_rx_queue - Rx queue + * @processed: Internal index to last handled Rx packet + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write index + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rx_queue { + __le32 *bd; + dma_addr_t dma_addr; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 processed; + u32 read; + u32 write; + u32 free_count; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + spinlock_t lock; +}; + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define SCAN_INTERVAL 100 + +#define MAX_A_CHANNELS 252 +#define MIN_A_CHANNELS 7 + +#define MAX_B_CHANNELS 14 +#define MIN_B_CHANNELS 1 + +#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ +#define STATUS_INT_ENABLED 1 +#define STATUS_RF_KILL_HW 2 +#define STATUS_RF_KILL_SW 3 +#define STATUS_INIT 4 +#define STATUS_ALIVE 5 +#define STATUS_READY 6 +#define STATUS_TEMPERATURE 7 +#define STATUS_GEO_CONFIGURED 8 +#define STATUS_EXIT_PENDING 9 +#define STATUS_IN_SUSPEND 10 +#define STATUS_STATISTICS 11 +#define STATUS_SCANNING 12 +#define STATUS_SCAN_ABORTING 13 +#define STATUS_SCAN_HW 14 +#define STATUS_POWER_PMI 15 +#define STATUS_FW_ERROR 16 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +#if IWL == 4965 +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG +struct iwl_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u32 bitmap0; + u32 bitmap1; + u32 rate_n_flags; +}; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ +#endif + +struct iwl_tid_data { + u16 seq_number; +#if IWL == 4965 +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + struct iwl_ht_agg agg; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ +#endif +}; + +struct iwl_hw_key { + enum ieee80211_key_alg alg; + int keylen; + u8 key[32]; +}; + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#ifdef CONFIG_IWLWIFI_HT +#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) +#define HT_IE_MAX_AMSDU_SIZE_4K (0) +#define CFG_HT_MPDU_DENSITY_2USEC (0x5) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC + +struct sta_ht_info { + 
u8 is_ht; + u16 rx_mimo_ps_mode; + u16 tx_mimo_ps_mode; + u16 control_channel; + u8 max_amsdu_size; + u8 ampdu_factor; + u8 mpdu_density; + u8 operating_mode; + u8 supported_chan_width; + u8 extension_chan_offset; + u8 is_green_field; + u8 sgf; + u8 supp_rates[16]; + u8 tx_chan_width; + u8 chan_width_cap; +}; +#endif /*CONFIG_IWLWIFI_HT */ + +#ifdef CONFIG_IWLWIFI_QOS + +union iwl_qos_capabity { + struct { + u8 edca_count:4; /* bit 0-3 */ + u8 q_ack:1; /* bit 4 */ + u8 queue_request:1; /* bit 5 */ + u8 txop_request:1; /* bit 6 */ + u8 reserved:1; /* bit 7 */ + } q_AP; + struct { + u8 acvo_APSD:1; /* bit 0 */ + u8 acvi_APSD:1; /* bit 1 */ + u8 ac_bk_APSD:1; /* bit 2 */ + u8 ac_be_APSD:1; /* bit 3 */ + u8 q_ack:1; /* bit 4 */ + u8 max_len:2; /* bit 5-6 */ + u8 more_data_ack:1; /* bit 7 */ + } q_STA; + u8 val; +}; + +/* QoS sturctures */ +struct iwl_qos_info { + int qos_enable; + int qos_active; + union iwl_qos_capabity qos_cap; + struct iwl_qosparam_cmd def_qos_parm; +}; +#endif /*CONFIG_IWLWIFI_QOS */ + +#define STA_PS_STATUS_WAKE 0 +#define STA_PS_STATUS_SLEEP 1 + +struct iwl_station_entry { + struct iwl_addsta_cmd sta; + struct iwl_tid_data tid[MAX_TID_COUNT]; +#if IWL == 3945 + union { + struct { + u8 rate; + u8 flags; + } s; + u16 rate_n_flags; + } current_rate; +#endif + u8 used; + u8 ps_status; + struct iwl_hw_key keyinfo; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_image_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct iwl_ucode { + __le32 ver; /* major/minor/subminor */ + __le32 inst_size; /* bytes of runtime instructions */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of initialization instructions */ + __le32 init_data_size; /* bytes of initialization data */ + __le32 boot_size; /* bytes of bootstrap instructions */ + u8 data[0]; /* data in same order as "size" elements */ +}; + +#define IWL_IBSS_MAC_HASH_SIZE 32 + +struct iwl_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct iwl_driver_hw_info { + u16 max_txq_num; + u16 ac_queue_count; + u32 rx_buffer_size; + u16 tx_cmd_len; + u16 max_rxq_size; + u16 max_rxq_log; + u32 cck_flag; + u8 max_stations; + u8 bcast_sta_id; + void *shared_virt; + dma_addr_t shared_phys; +}; + + +#define STA_FLG_RTS_MIMO_PROT_MSK __constant_cpu_to_le32(1 << 17) +#define STA_FLG_AGG_MPDU_8US_MSK __constant_cpu_to_le32(1 << 18) +#define STA_FLG_MAX_AGG_SIZE_POS (19) +#define STA_FLG_MAX_AGG_SIZE_MSK __constant_cpu_to_le32(3 << 19) +#define STA_FLG_FAT_EN_MSK __constant_cpu_to_le32(1 << 21) +#define STA_FLG_MIMO_DIS_MSK __constant_cpu_to_le32(1 << 22) +#define STA_FLG_AGG_MPDU_DENSITY_POS (23) +#define STA_FLG_AGG_MPDU_DENSITY_MSK __constant_cpu_to_le32(7 << 23) +#define HT_SHORT_GI_20MHZ_ONLY (1 << 0) +#define HT_SHORT_GI_40MHZ_ONLY (1 << 1) + + +#include "iwl-priv.h" + +/* Requires full declaration of iwl_priv before including */ +#include "iwl-io.h" + +#define IWL_RX_HDR(x) ((struct iwl_rx_frame_hdr *)(\ + x->u.rx_frame.stats.payload + \ + x->u.rx_frame.stats.phy_count)) +#define IWL_RX_END(x) ((struct iwl_rx_frame_end *)(\ + IWL_RX_HDR(x)->payload + \ + le16_to_cpu(IWL_RX_HDR(x)->len))) +#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) +#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) + + +/****************************************************************************** + * + * Functions implemented 
in iwl-base.c which are forward declared here + * for use by iwl-*.c + * + *****************************************************************************/ +struct iwl_addsta_cmd; +extern int iwl_send_add_station(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags); +extern const char *iwl_get_tx_fail_reason(u32 status); +extern u8 iwl_add_station(struct iwl_priv *priv, const u8 *bssid, + int is_ap, u8 flags); +extern int iwl_is_network_packet(struct iwl_priv *priv, + struct ieee80211_hdr *header); +extern int iwl_power_init_handle(struct iwl_priv *priv); +extern int iwl_eeprom_init(struct iwl_priv *priv); +#ifdef CONFIG_IWLWIFI_DEBUG +extern void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100); +#else +static inline void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, + int group100) {} +#endif +extern int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern void iwl_handle_data_packet_monitor(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + void *data, short len, + struct ieee80211_rx_status *stats, + u16 phy_flags); +extern int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr + *header); +extern void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +extern int iwl_rx_queue_alloc(struct iwl_priv *priv); +extern void iwl_rx_queue_reset(struct iwl_priv *priv, + struct iwl_rx_queue *rxq); +extern int iwl_calc_db_from_ratio(int sig_ratio); +extern int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm); +extern int iwl_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq, int count, u32 id); +extern int iwl_rx_queue_restock(struct iwl_priv *priv); +extern void iwl_rx_replenish(void *data); +extern void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq); +extern int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, + const void *data); +extern int __must_check iwl_send_cmd_async(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +extern int __must_check iwl_send_cmd_sync(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +extern int __must_check iwl_send_cmd(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +extern unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + const u8 *dest, int left); +extern int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); +extern int iwl_send_statistics_request(struct iwl_priv *priv); +extern void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, + u32 decrypt_res, + struct ieee80211_rx_status *stats); +extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr); + +extern const u8 BROADCAST_ADDR[ETH_ALEN]; + +/* + * Currently used by iwl-3945-rs... look at restructuring so that it doesn't + * call this... todo... fix that. +*/ +extern u8 iwl_sync_station(struct iwl_priv *priv, int sta_id, + u16 tx_rate, u8 flags); + +static inline int iwl_is_associated(struct iwl_priv *priv) +{ + return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +/****************************************************************************** + * + * Functions implemented in iwl-[34]*.c which are forward declared here + * for use by iwl-base.c + * + * NOTE: The implementation of these functions are hardware specific + * which is why they are in the hardware specific files (vs. 
iwl-base.c) + * + * Naming convention -- + * iwl_ <-- Its part of iwlwifi (should be changed to iwl_) + * iwl_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl_bg_ <-- Called from work queue context + * iwl_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl_hw_rx_handler_setup(struct iwl_priv *priv); +extern void iwl_hw_setup_deferred_work(struct iwl_priv *priv); +extern void iwl_hw_cancel_deferred_work(struct iwl_priv *priv); +extern int iwl_hw_rxq_stop(struct iwl_priv *priv); +extern int iwl_hw_set_hw_setting(struct iwl_priv *priv); +extern int iwl_hw_nic_init(struct iwl_priv *priv); +extern void iwl_hw_card_show_info(struct iwl_priv *priv); +extern int iwl_hw_nic_stop_master(struct iwl_priv *priv); +extern void iwl_hw_txq_ctx_free(struct iwl_priv *priv); +extern void iwl_hw_txq_ctx_stop(struct iwl_priv *priv); +extern int iwl_hw_nic_reset(struct iwl_priv *priv); +extern int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd, + dma_addr_t addr, u16 len); +extern int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +extern int iwl_hw_get_temperature(struct iwl_priv *priv); +extern int iwl_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame, u8 rate); +extern int iwl_hw_get_rx_read(struct iwl_priv *priv); +extern void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id); +extern int iwl_hw_reg_send_txpower(struct iwl_priv *priv); +extern int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); +extern void iwl_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +extern void iwl_disable_events(struct iwl_priv *priv); +extern int iwl4965_get_temperature(const struct iwl_priv *priv); + +/** + * iwl_hw_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + * + * NOTE: This should not be hardware specific but the code has + * not yet been merged into a single common layer for managing the + * station tables. 
+ */ +extern u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *bssid); + +extern int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel); +extern int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); +#endif diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c index 4cf0ff7b833d..9cf0211de67f 100644 --- a/drivers/net/wireless/libertas/11d.c +++ b/drivers/net/wireless/libertas/11d.c @@ -124,17 +124,17 @@ static u8 wlan_channel_known_11d(u8 chan, u8 nr_chan = parsed_region_chan->nr_chan; u8 i = 0; - lbs_dbg_hex("11D:parsed_region_chan:", (char *)chanpwr, + lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr, sizeof(struct chan_power_11d) * nr_chan); for (i = 0; i < nr_chan; i++) { if (chan == chanpwr[i].chan) { - lbs_deb_11d("11D: Found Chan:%d\n", chan); + lbs_deb_11d("found chan %d\n", chan); return 1; } } - lbs_deb_11d("11D: Not Find Chan:%d\n", chan); + lbs_deb_11d("chan %d not found\n", chan); return 0; } @@ -174,8 +174,8 @@ static int generate_domain_info_11d(struct parsed_region_chan_11d memcpy(domaininfo->countrycode, parsed_region_chan->countrycode, COUNTRY_CODE_LEN); - lbs_deb_11d("11D:nrchan=%d\n", nr_chan); - lbs_dbg_hex("11D:parsed_region_chan:", (char *)parsed_region_chan, + lbs_deb_11d("nrchan %d\n", nr_chan); + lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan, sizeof(struct parsed_region_chan_11d)); for (i = 0; i < nr_chan; i++) { @@ -213,7 +213,7 @@ static int generate_domain_info_11d(struct parsed_region_chan_11d domaininfo->nr_subband = nr_subband; lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband); - lbs_dbg_hex("11D:domaininfo:", (char *)domaininfo, + lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo, COUNTRY_CODE_LEN + 1 + sizeof(struct ieeetypes_subbandset) * nr_subband); return 0; @@ -233,13 +233,13 @@ static void wlan_generate_parsed_region_chan_11d(struct region_channel * region_ struct chan_freq_power *cfp; if (region_chan == NULL) { - lbs_deb_11d("11D: region_chan is NULL\n"); + lbs_deb_11d("region_chan is NULL\n"); return; } cfp = region_chan->CFP; if (cfp == NULL) { - lbs_deb_11d("11D: cfp equal NULL \n"); + lbs_deb_11d("cfp is NULL \n"); return; } @@ -248,19 +248,19 @@ static void wlan_generate_parsed_region_chan_11d(struct region_channel * region_ memcpy(parsed_region_chan->countrycode, wlan_code_2_region(region_chan->region), COUNTRY_CODE_LEN); - lbs_deb_11d("11D: region[0x%x] band[%d]\n", parsed_region_chan->region, + lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region, parsed_region_chan->band); for (i = 0; i < region_chan->nrcfp; i++, cfp++) { parsed_region_chan->chanpwr[i].chan = cfp->channel; parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower; - lbs_deb_11d("11D: Chan[%d] Pwr[%d]\n", + lbs_deb_11d("chan %d, pwr %d\n", parsed_region_chan->chanpwr[i].chan, parsed_region_chan->chanpwr[i].pwr); } parsed_region_chan->nr_chan = region_chan->nrcfp; - lbs_deb_11d("11D: nrchan[%d]\n", parsed_region_chan->nr_chan); + lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan); return; } @@ -336,7 +336,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* 6. 
Others */ - lbs_dbg_hex("CountryInfo:", (u8 *) countryinfo, 30); + lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30); if ((*(countryinfo->countrycode)) == 0 || (countryinfo->len <= COUNTRY_CODE_LEN)) { @@ -349,7 +349,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* wlan_region_2_code(countryinfo->countrycode); lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region); - lbs_dbg_hex("CountryCode:", (char *)countryinfo->countrycode, + lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode, COUNTRY_CODE_LEN); parsed_region_chan->band = band; @@ -364,7 +364,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* if (countryinfo->subband[j].firstchan <= lastchan) { /*Step2&3. Check First Chan Num increment and no overlap */ - lbs_deb_11d("11D: Chan[%d>%d] Overlap\n", + lbs_deb_11d("chan %d>%d, overlap\n", countryinfo->subband[j].firstchan, lastchan); continue; } @@ -393,7 +393,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* } else { /*not supported and ignore the chan */ lbs_deb_11d( - "11D:i[%d] chan[%d] unsupported in region[%x] band[%d]\n", + "i %d, chan %d unsupported in region %x, band %d\n", i, curchan, region, band); } } @@ -405,7 +405,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* parsed_region_chan->nr_chan = idx; lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan); - lbs_dbg_hex("11D:parsed_region_chan:", (u8 *) parsed_region_chan, + lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan, 2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx); done: @@ -422,15 +422,15 @@ done: u8 libertas_get_scan_type_11d(u8 chan, struct parsed_region_chan_11d * parsed_region_chan) { - u8 scan_type = cmd_scan_type_passive; + u8 scan_type = CMD_SCAN_TYPE_PASSIVE; lbs_deb_enter(LBS_DEB_11D); if (wlan_channel_known_11d(chan, parsed_region_chan)) { - lbs_deb_11d("11D: Found and do Active Scan\n"); - scan_type = cmd_scan_type_active; + lbs_deb_11d("found, do active scan\n"); + scan_type = CMD_SCAN_TYPE_ACTIVE; } else { - lbs_deb_11d("11D: Not Find and do Passive Scan\n"); + lbs_deb_11d("not found, do passive scan\n"); } lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type); @@ -446,25 +446,6 @@ void libertas_init_11d(wlan_private * priv) return; } -static int wlan_enable_11d(wlan_private * priv, u8 flag) -{ - int ret; - - priv->adapter->enable11d = flag; - - /* send cmd to FW to enable/disable 11D function in FW */ - ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - cmd_act_set, - cmd_option_waitforrsp, - OID_802_11D_ENABLE, - &priv->adapter->enable11d); - if (ret) - lbs_deb_11d("11D: Fail to enable 11D \n"); - - return 0; -} - /** * @brief This function sets DOMAIN INFO to FW * @param priv pointer to wlan_private @@ -475,15 +456,15 @@ static int set_domain_info_11d(wlan_private * priv) int ret; if (!priv->adapter->enable11d) { - lbs_deb_11d("11D: dnld domain Info with 11d disabled\n"); + lbs_deb_11d("dnld domain Info with 11d disabled\n"); return 0; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11d_domain_info, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + ret = libertas_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (ret) - lbs_deb_11d("11D: Fail to dnld domain Info\n"); + lbs_deb_11d("fail to dnld domain info\n"); return ret; } @@ -505,7 +486,7 @@ int libertas_set_universaltable(wlan_private * priv, u8 band) 
adapter->universal_channel[i].nrcfp = sizeof(channel_freq_power_UN_BG) / size; - lbs_deb_11d("11D: BG-band nrcfp=%d\n", + lbs_deb_11d("BG-band nrcfp %d\n", adapter->universal_channel[i].nrcfp); adapter->universal_channel[i].CFP = channel_freq_power_UN_BG; @@ -541,10 +522,10 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, cmd->command = cpu_to_le16(cmdno); pdomaininfo->action = cpu_to_le16(cmdoption); - if (cmdoption == cmd_act_get) { + if (cmdoption == CMD_ACT_GET) { cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN); - lbs_dbg_hex("11D: 802_11D_DOMAIN_INFO:", (u8 *) cmd, + lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, (int)(cmd->size)); goto done; } @@ -562,7 +543,7 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, nr_subband * sizeof(struct ieeetypes_subbandset)); cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) + - domain->header.len + + le16_to_cpu(domain->header.len) + sizeof(struct mrvlietypesheader) + S_DS_GEN); } else { @@ -570,7 +551,7 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN); } - lbs_dbg_hex("11D:802_11D_DOMAIN_INFO:", (u8 *) cmd, le16_to_cpu(cmd->size)); + lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size)); done: lbs_deb_enter(LBS_DEB_11D); @@ -578,31 +559,6 @@ done: } /** - * @brief This function implements private cmd: enable/disable 11D - * @param priv pointer to wlan_private - * @param wrq pointer to user data - * @return 0 or -1 - */ -int libertas_cmd_enable_11d(wlan_private * priv, struct iwreq *wrq) -{ - int data = 0; - int *val; - - lbs_deb_enter(LBS_DEB_11D); - data = SUBCMD_DATA(wrq); - - lbs_deb_11d("enable 11D: %s\n", - (data == 1) ? "enable" : "Disable"); - - wlan_enable_11d(priv, data); - val = (int *)wrq->u.name; - *val = priv->adapter->enable11d; - - lbs_deb_enter(LBS_DEB_11D); - return 0; -} - -/** * @brief This function parses countryinfo from AP and download country info to FW * @param priv pointer to wlan_private * @param resp pointer to command response buffer @@ -619,13 +575,13 @@ int libertas_ret_802_11d_domain_info(wlan_private * priv, lbs_deb_enter(LBS_DEB_11D); - lbs_dbg_hex("11D DOMAIN Info Rsp Data:", (u8 *) resp, + lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp, (int)le16_to_cpu(resp->size)); nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) / sizeof(struct ieeetypes_subbandset); - lbs_deb_11d("11D Domain Info Resp: nr_subband=%d\n", nr_subband); + lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband); if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) { lbs_deb_11d("Invalid Numrer of Subband returned!!\n"); @@ -633,10 +589,10 @@ int libertas_ret_802_11d_domain_info(wlan_private * priv, } switch (action) { - case cmd_act_set: /*Proc Set action */ + case CMD_ACT_SET: /*Proc Set action */ break; - case cmd_act_get: + case CMD_ACT_GET: break; default: lbs_deb_11d("Invalid action:%d\n", domaininfo->action); @@ -667,7 +623,7 @@ int libertas_parse_dnld_countryinfo_11d(wlan_private * priv, &adapter->parsed_region_chan); if (ret == -1) { - lbs_deb_11d("11D: Err Parse domain_info from AP..\n"); + lbs_deb_11d("error parsing domain_info from AP\n"); goto done; } @@ -679,7 +635,7 @@ int libertas_parse_dnld_countryinfo_11d(wlan_private * priv, ret = set_domain_info_11d(priv); if (ret) { - lbs_deb_11d("11D: Err set domainInfo to FW\n"); + lbs_deb_11d("error setting domain info\n"); goto done; } } @@ -703,7 +659,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) u8 
j; lbs_deb_enter(LBS_DEB_11D); - lbs_deb_11d("11D:curbssparams.band[%d]\n", adapter->curbssparams.band); + lbs_deb_11d("curbssparams.band %d\n", adapter->curbssparams.band); if (priv->adapter->enable11d) { /* update parsed_region_chan_11; dnld domaininf to FW */ @@ -712,7 +668,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) sizeof(adapter->region_channel[0]); j++) { region_chan = &adapter->region_channel[j]; - lbs_deb_11d("11D:[%d] region_chan->band[%d]\n", j, + lbs_deb_11d("%d region_chan->band %d\n", j, region_chan->band); if (!region_chan || !region_chan->valid @@ -725,7 +681,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) if (j >= sizeof(adapter->region_channel) / sizeof(adapter->region_channel[0])) { - lbs_deb_11d("11D:region_chan not found. band[%d]\n", + lbs_deb_11d("region_chan not found, band %d\n", adapter->curbssparams.band); ret = -1; goto done; @@ -745,7 +701,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) ret = set_domain_info_11d(priv); if (ret) { - lbs_deb_11d("11D: Err set domainInfo to FW\n"); + lbs_deb_11d("error setting domain info\n"); goto done; } diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h index 73e42e712911..3a6d1f8db78f 100644 --- a/drivers/net/wireless/libertas/11d.h +++ b/drivers/net/wireless/libertas/11d.h @@ -83,8 +83,6 @@ u8 libertas_get_scan_type_11d(u8 chan, u32 libertas_chan_2_freq(u8 chan, u8 band); -enum state_11d libertas_get_state_11d(wlan_private * priv); - void libertas_init_11d(wlan_private * priv); int libertas_set_universaltable(wlan_private * priv, u8 band); @@ -93,8 +91,6 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, struct cmd_ds_command *cmd, u16 cmdno, u16 cmdOption); -int libertas_cmd_enable_11d(wlan_private * priv, struct iwreq *wrq); - int libertas_ret_802_11d_domain_info(wlan_private * priv, struct cmd_ds_command *resp); diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile index 32ed4136b0d4..c469d569f090 100644 --- a/drivers/net/wireless/libertas/Makefile +++ b/drivers/net/wireless/libertas/Makefile @@ -1,12 +1,13 @@ -libertas-objs := main.o fw.o wext.o \ +libertas-objs := main.o wext.o \ rx.o tx.o cmd.o \ cmdresp.o scan.o \ join.o 11d.o \ debugfs.o \ ethtool.o assoc.o -usb8xxx-objs += if_bootcmd.o usb8xxx-objs += if_usb.o +libertas_cs-objs += if_cs.o obj-$(CONFIG_LIBERTAS) += libertas.o obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o +obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c index afd5617dd92b..b61b176e9d07 100644 --- a/drivers/net/wireless/libertas/assoc.c +++ b/drivers/net/wireless/libertas/assoc.c @@ -16,6 +16,7 @@ static const u8 bssid_off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void print_assoc_req(const char * extra, struct assoc_request * assoc_req) { + DECLARE_MAC_BUF(mac); lbs_deb_assoc( "#### Association Request: %s\n" " flags: 0x%08lX\n" @@ -23,13 +24,13 @@ static void print_assoc_req(const char * extra, struct assoc_request * assoc_req " channel: %d\n" " band: %d\n" " mode: %d\n" - " BSSID: " MAC_FMT "\n" + " BSSID: %s\n" " Encryption:%s%s%s\n" " auth: %d\n", extra, assoc_req->flags, escape_essid(assoc_req->ssid, assoc_req->ssid_len), assoc_req->channel, assoc_req->band, assoc_req->mode, - MAC_ARG(assoc_req->bssid), + print_mac(mac, assoc_req->bssid), assoc_req->secinfo.WPAenabled ? " WPA" : "", assoc_req->secinfo.WPA2enabled ? 
" WPA2" : "", assoc_req->secinfo.wep_enabled ? " WEP" : "", @@ -57,10 +58,8 @@ static int assoc_helper_essid(wlan_private *priv, lbs_deb_assoc("New SSID requested: '%s'\n", escape_essid(assoc_req->ssid, assoc_req->ssid_len)); if (assoc_req->mode == IW_MODE_INFRA) { - if (adapter->prescan) { - libertas_send_specific_ssid_scan(priv, assoc_req->ssid, - assoc_req->ssid_len, 0); - } + libertas_send_specific_ssid_scan(priv, assoc_req->ssid, + assoc_req->ssid_len, 0); bss = libertas_find_ssid_in_list(adapter, assoc_req->ssid, assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel); @@ -106,16 +105,17 @@ static int assoc_helper_bssid(wlan_private *priv, wlan_adapter *adapter = priv->adapter; int ret = 0; struct bss_descriptor * bss; + DECLARE_MAC_BUF(mac); - lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID " MAC_FMT, - MAC_ARG(assoc_req->bssid)); + lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %s", + print_mac(mac, assoc_req->bssid)); /* Search for index position in list for requested MAC */ bss = libertas_find_bssid_in_list(adapter, assoc_req->bssid, assoc_req->mode); if (bss == NULL) { - lbs_deb_assoc("ASSOC: WAP: BSSID " MAC_FMT " not found, " - "cannot associate.\n", MAC_ARG(assoc_req->bssid)); + lbs_deb_assoc("ASSOC: WAP: BSSID %s not found, " + "cannot associate.\n", print_mac(mac, assoc_req->bssid)); goto out; } @@ -175,14 +175,14 @@ static int assoc_helper_mode(wlan_private *priv, if (assoc_req->mode == IW_MODE_INFRA) { if (adapter->psstate != PS_STATE_FULL_POWER) - libertas_ps_wakeup(priv, cmd_option_waitforrsp); - adapter->psmode = wlan802_11powermodecam; + libertas_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); + adapter->psmode = WLAN802_11POWERMODECAM; } adapter->mode = assoc_req->mode; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - 0, cmd_option_waitforrsp, + CMD_802_11_SNMP_MIB, + 0, CMD_OPTION_WAITFORRSP, OID_802_11_INFRASTRUCTURE_MODE, /* Shoot me now */ (void *) (size_t) assoc_req->mode); @@ -195,9 +195,9 @@ done: static int update_channel(wlan_private * priv) { /* the channel in f/w could be out of sync, get the current channel */ - return libertas_prepare_and_send_command(priv, cmd_802_11_rf_channel, - cmd_opt_802_11_rf_channel_get, - cmd_option_waitforrsp, 0, NULL); + return libertas_prepare_and_send_command(priv, CMD_802_11_RF_CHANNEL, + CMD_OPT_802_11_RF_CHANNEL_GET, + CMD_OPTION_WAITFORRSP, 0, NULL); } void libertas_sync_channel(struct work_struct *work) @@ -227,9 +227,9 @@ static int assoc_helper_channel(wlan_private *priv, lbs_deb_assoc("ASSOC: channel: %d -> %d\n", adapter->curbssparams.channel, assoc_req->channel); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_rf_channel, - cmd_opt_802_11_rf_channel_set, - cmd_option_waitforrsp, 0, &assoc_req->channel); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_RF_CHANNEL, + CMD_OPT_802_11_RF_CHANNEL_SET, + CMD_OPTION_WAITFORRSP, 0, &assoc_req->channel); if (ret < 0) { lbs_deb_assoc("ASSOC: channel: error setting channel."); } @@ -278,15 +278,15 @@ static int assoc_helper_wep_keys(wlan_private *priv, || assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len) { ret = libertas_prepare_and_send_command(priv, - cmd_802_11_set_wep, - cmd_act_add, - cmd_option_waitforrsp, + CMD_802_11_SET_WEP, + CMD_ACT_ADD, + CMD_OPTION_WAITFORRSP, 0, assoc_req); } else { ret = libertas_prepare_and_send_command(priv, - cmd_802_11_set_wep, - cmd_act_remove, - cmd_option_waitforrsp, + CMD_802_11_SET_WEP, + CMD_ACT_REMOVE, + CMD_OPTION_WAITFORRSP, 0, NULL); } @@ -295,9 +295,9 @@ static int assoc_helper_wep_keys(wlan_private *priv, 
/* enable/disable the MAC's WEP packet filter */ if (assoc_req->secinfo.wep_enabled) - adapter->currentpacketfilter |= cmd_act_mac_wep_enable; + adapter->currentpacketfilter |= CMD_ACT_MAC_WEP_ENABLE; else - adapter->currentpacketfilter &= ~cmd_act_mac_wep_enable; + adapter->currentpacketfilter &= ~CMD_ACT_MAC_WEP_ENABLE; ret = libertas_set_mac_packet_filter(priv); if (ret) goto out; @@ -307,7 +307,7 @@ static int assoc_helper_wep_keys(wlan_private *priv, /* Copy WEP keys into adapter wep key fields */ for (i = 0; i < 4; i++) { memcpy(&adapter->wep_keys[i], &assoc_req->wep_keys[i], - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } adapter->wep_tx_keyidx = assoc_req->wep_tx_keyidx; @@ -342,9 +342,9 @@ static int assoc_helper_secinfo(wlan_private *priv, /* Get RSN enabled/disabled */ ret = libertas_prepare_and_send_command(priv, - cmd_802_11_enable_rsn, - cmd_act_set, - cmd_option_waitforrsp, + CMD_802_11_ENABLE_RSN, + CMD_ACT_GET, + CMD_OPTION_WAITFORRSP, 0, &rsn); if (ret) { lbs_deb_assoc("Failed to get RSN status: %d", ret); @@ -359,9 +359,9 @@ static int assoc_helper_secinfo(wlan_private *priv, /* Set RSN enabled/disabled */ rsn = do_wpa; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_enable_rsn, - cmd_act_set, - cmd_option_waitforrsp, + CMD_802_11_ENABLE_RSN, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, &rsn); out: @@ -374,15 +374,40 @@ static int assoc_helper_wpa_keys(wlan_private *priv, struct assoc_request * assoc_req) { int ret = 0; + unsigned int flags = assoc_req->flags; lbs_deb_enter(LBS_DEB_ASSOC); - ret = libertas_prepare_and_send_command(priv, - cmd_802_11_key_material, - cmd_act_set, - cmd_option_waitforrsp, - 0, assoc_req); + /* Work around older firmware bug where WPA unicast and multicast + * keys must be set independently. Seen in SDIO parts with firmware + * version 5.0.11p0. 
+ */ + + if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { + clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); + ret = libertas_prepare_and_send_command(priv, + CMD_802_11_KEY_MATERIAL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, + 0, assoc_req); + assoc_req->flags = flags; + } + + if (ret) + goto out; + if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { + clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); + + ret = libertas_prepare_and_send_command(priv, + CMD_802_11_KEY_MATERIAL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, + 0, assoc_req); + assoc_req->flags = flags; + } + +out: lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); return ret; } @@ -412,7 +437,7 @@ static int assoc_helper_wpa_ie(wlan_private *priv, static int should_deauth_infrastructure(wlan_adapter *adapter, struct assoc_request * assoc_req) { - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) return 0; if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { @@ -453,7 +478,7 @@ static int should_deauth_infrastructure(wlan_adapter *adapter, static int should_stop_adhoc(wlan_adapter *adapter, struct assoc_request * assoc_req) { - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) return 0; if (libertas_ssid_cmp(adapter->curbssparams.ssid, @@ -483,6 +508,7 @@ void libertas_association_worker(struct work_struct *work) struct assoc_request * assoc_req = NULL; int ret = 0; int find_any_ssid = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_ASSOC); @@ -556,7 +582,8 @@ void libertas_association_worker(struct work_struct *work) if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { ret = assoc_helper_mode(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) mode: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) mode: ret = %d\n", + __LINE__, ret); goto out; } } @@ -574,7 +601,8 @@ lbs_deb_assoc("ASSOC(:%d) mode: ret = %d\n", __LINE__, ret); || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) { ret = assoc_helper_wep_keys(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) wep_keys: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) wep_keys: ret = %d\n", + __LINE__, ret); goto out; } } @@ -582,7 +610,8 @@ lbs_deb_assoc("ASSOC(:%d) wep_keys: ret = %d\n", __LINE__, ret); if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { ret = assoc_helper_secinfo(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) secinfo: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) secinfo: ret = %d\n", + __LINE__, ret); goto out; } } @@ -590,7 +619,8 @@ lbs_deb_assoc("ASSOC(:%d) secinfo: ret = %d\n", __LINE__, ret); if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) { ret = assoc_helper_wpa_ie(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) wpa_ie: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) wpa_ie: ret = %d\n", + __LINE__, ret); goto out; } } @@ -599,7 +629,8 @@ lbs_deb_assoc("ASSOC(:%d) wpa_ie: ret = %d\n", __LINE__, ret); || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { ret = assoc_helper_wpa_keys(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) wpa_keys: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) wpa_keys: ret = %d\n", + __LINE__, ret); goto out; } } @@ -618,25 +649,25 @@ lbs_deb_assoc("ASSOC(:%d) wpa_keys: ret = %d\n", __LINE__, ret); success = 0; } - if (adapter->connect_status != libertas_connected) { - lbs_deb_assoc("ASSOC: assoication attempt unsuccessful, " + if (adapter->connect_status != LIBERTAS_CONNECTED) { + 
lbs_deb_assoc("ASSOC: association attempt unsuccessful, " "not connected.\n"); success = 0; } if (success) { lbs_deb_assoc("ASSOC: association attempt successful. " - "Associated to '%s' (" MAC_FMT ")\n", + "Associated to '%s' (%s)\n", escape_essid(adapter->curbssparams.ssid, adapter->curbssparams.ssid_len), - MAC_ARG(adapter->curbssparams.bssid)); + print_mac(mac, adapter->curbssparams.bssid)); libertas_prepare_and_send_command(priv, - cmd_802_11_rssi, - 0, cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RSSI, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); libertas_prepare_and_send_command(priv, - cmd_802_11_get_log, - 0, cmd_option_waitforrsp, 0, NULL); + CMD_802_11_GET_LOG, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); } else { ret = -1; } @@ -703,7 +734,7 @@ struct assoc_request * wlan_get_association_request(wlan_adapter *adapter) int i; for (i = 0; i < 4; i++) { memcpy(&assoc_req->wep_keys[i], &adapter->wep_keys[i], - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } } @@ -712,12 +743,12 @@ struct assoc_request * wlan_get_association_request(wlan_adapter *adapter) if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { memcpy(&assoc_req->wpa_mcast_key, &adapter->wpa_mcast_key, - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { memcpy(&assoc_req->wpa_unicast_key, &adapter->wpa_unicast_key, - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h index 5e9c31f0932b..e09b7490abbd 100644 --- a/drivers/net/wireless/libertas/assoc.h +++ b/drivers/net/wireless/libertas/assoc.h @@ -17,7 +17,7 @@ static inline void wlan_postpone_association_work(wlan_private *priv) if (priv->adapter->surpriseremoved) return; cancel_delayed_work(&priv->assoc_work); - queue_delayed_work(priv->assoc_thread, &priv->assoc_work, ASSOC_DELAY); + queue_delayed_work(priv->work_thread, &priv->assoc_work, ASSOC_DELAY); } static inline void wlan_cancel_association_work(wlan_private *priv) diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 4a8f5dc70239..1cbbd96fdbde 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -15,7 +15,7 @@ static void cleanup_cmdnode(struct cmd_ctrl_node *ptempnode); static u16 commands_allowed_in_ps[] = { - cmd_802_11_rssi, + CMD_802_11_RSSI, }; /** @@ -43,7 +43,7 @@ static int wlan_cmd_hw_spec(wlan_private * priv, struct cmd_ds_command *cmd) lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_get_hw_spec); + cmd->command = cpu_to_le16(CMD_GET_HW_SPEC); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_get_hw_spec) + S_DS_GEN); memcpy(hwspec->permanentaddr, priv->adapter->current_addr, ETH_ALEN); @@ -56,34 +56,29 @@ static int wlan_cmd_802_11_ps_mode(wlan_private * priv, u16 cmd_action) { struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode; - wlan_adapter *adapter = priv->adapter; lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_ps_mode); + cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + S_DS_GEN); psm->action = cpu_to_le16(cmd_action); psm->multipledtim = 0; switch (cmd_action) { - case cmd_subcmd_enter_ps: + case CMD_SUBCMD_ENTER_PS: lbs_deb_cmd("PS command:" "SubCode- Enter PS\n"); - lbs_deb_cmd("locallisteninterval = %d\n", - adapter->locallisteninterval); - psm->locallisteninterval = - 
cpu_to_le16(adapter->locallisteninterval); - psm->nullpktinterval = - cpu_to_le16(adapter->nullpktinterval); + psm->locallisteninterval = 0; + psm->nullpktinterval = 0; psm->multipledtim = - cpu_to_le16(priv->adapter->multipledtim); + cpu_to_le16(MRVDRV_DEFAULT_MULTIPLE_DTIM); break; - case cmd_subcmd_exit_ps: + case CMD_SUBCMD_EXIT_PS: lbs_deb_cmd("PS command:" "SubCode- Exit PS\n"); break; - case cmd_subcmd_sleep_confirmed: + case CMD_SUBCMD_SLEEP_CONFIRMED: lbs_deb_cmd("PS command: SubCode- sleep confirm\n"); break; @@ -101,7 +96,9 @@ static int wlan_cmd_802_11_inactivity_timeout(wlan_private * priv, { u16 *timeout = pdata_buf; - cmd->command = cpu_to_le16(cmd_802_11_inactivity_timeout); + lbs_deb_enter(LBS_DEB_CMD); + + cmd->command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_inactivity_timeout) + S_DS_GEN); @@ -113,6 +110,7 @@ static int wlan_cmd_802_11_inactivity_timeout(wlan_private * priv, else cmd->params.inactivity_timeout.timeout = 0; + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -127,13 +125,13 @@ static int wlan_cmd_802_11_sleep_params(wlan_private * priv, cmd->size = cpu_to_le16((sizeof(struct cmd_ds_802_11_sleep_params)) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_sleep_params); + cmd->command = cpu_to_le16(CMD_802_11_SLEEP_PARAMS); - if (cmd_action == cmd_act_get) { + if (cmd_action == CMD_ACT_GET) { memset(&adapter->sp, 0, sizeof(struct sleep_params)); memset(sp, 0, sizeof(struct cmd_ds_802_11_sleep_params)); sp->action = cpu_to_le16(cmd_action); - } else if (cmd_action == cmd_act_set) { + } else if (cmd_action == CMD_ACT_SET) { sp->action = cpu_to_le16(cmd_action); sp->error = cpu_to_le16(adapter->sp.sp_error); sp->offset = cpu_to_le16(adapter->sp.sp_offset); @@ -159,10 +157,10 @@ static int wlan_cmd_802_11_set_wep(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_set_wep); + cmd->command = cpu_to_le16(CMD_802_11_SET_WEP); cmd->size = cpu_to_le16(sizeof(*wep) + S_DS_GEN); - if (cmd_act == cmd_act_add) { + if (cmd_act == CMD_ACT_ADD) { int i; if (!assoc_req) { @@ -171,48 +169,47 @@ static int wlan_cmd_802_11_set_wep(wlan_private * priv, goto done; } - wep->action = cpu_to_le16(cmd_act_add); + wep->action = cpu_to_le16(CMD_ACT_ADD); /* default tx key index */ wep->keyindex = cpu_to_le16((u16)(assoc_req->wep_tx_keyidx & - (u32)cmd_WEP_KEY_INDEX_MASK)); - - lbs_deb_cmd("Tx key Index: %u\n", le16_to_cpu(wep->keyindex)); + (u32)CMD_WEP_KEY_INDEX_MASK)); /* Copy key types and material to host command structure */ for (i = 0; i < 4; i++) { - struct WLAN_802_11_KEY * pkey = &assoc_req->wep_keys[i]; + struct enc_key * pkey = &assoc_req->wep_keys[i]; switch (pkey->len) { case KEY_LEN_WEP_40: - wep->keytype[i] = - cpu_to_le16(cmd_type_wep_40_bit); + wep->keytype[i] = CMD_TYPE_WEP_40_BIT; memmove(&wep->keymaterial[i], pkey->key, pkey->len); + lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i); break; case KEY_LEN_WEP_104: - wep->keytype[i] = - cpu_to_le16(cmd_type_wep_104_bit); + wep->keytype[i] = CMD_TYPE_WEP_104_BIT; memmove(&wep->keymaterial[i], pkey->key, pkey->len); + lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i); break; case 0: break; default: - lbs_deb_cmd("Invalid WEP key %d length of %d\n", + lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n", i, pkey->len); ret = -1; goto done; break; } } - } else if (cmd_act == cmd_act_remove) { + } else if (cmd_act == CMD_ACT_REMOVE) { /* ACT_REMOVE clears _all_ WEP keys */ - wep->action = cpu_to_le16(cmd_act_remove); + wep->action = 
cpu_to_le16(CMD_ACT_REMOVE); /* default tx key index */ wep->keyindex = cpu_to_le16((u16)(adapter->wep_tx_keyidx & - (u32)cmd_WEP_KEY_INDEX_MASK)); + (u32)CMD_WEP_KEY_INDEX_MASK)); + lbs_deb_cmd("SET_WEP: remove key %d\n", adapter->wep_tx_keyidx); } ret = 0; @@ -232,15 +229,16 @@ static int wlan_cmd_802_11_enable_rsn(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_enable_rsn); + cmd->command = cpu_to_le16(CMD_802_11_ENABLE_RSN); cmd->size = cpu_to_le16(sizeof(*penableRSN) + S_DS_GEN); penableRSN->action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_set) { + if (cmd_action == CMD_ACT_SET) { if (*enable) - penableRSN->enable = cpu_to_le16(cmd_enable_rsn); + penableRSN->enable = cpu_to_le16(CMD_ENABLE_RSN); else - penableRSN->enable = cpu_to_le16(cmd_disable_rsn); + penableRSN->enable = cpu_to_le16(CMD_DISABLE_RSN); + lbs_deb_cmd("ENABLE_RSN: %d\n", *enable); } lbs_deb_leave(LBS_DEB_CMD); @@ -249,9 +247,9 @@ static int wlan_cmd_802_11_enable_rsn(wlan_private * priv, static void set_one_wpa_key(struct MrvlIEtype_keyParamSet * pkeyparamset, - struct WLAN_802_11_KEY * pkey) + struct enc_key * pkey) { - pkeyparamset->keytypeid = cpu_to_le16(pkey->type); + lbs_deb_enter(LBS_DEB_CMD); if (pkey->flags & KEY_INFO_WPA_ENABLED) { pkeyparamset->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED); @@ -264,12 +262,14 @@ static void set_one_wpa_key(struct MrvlIEtype_keyParamSet * pkeyparamset, } pkeyparamset->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); + pkeyparamset->keytypeid = cpu_to_le16(pkey->type); pkeyparamset->keylen = cpu_to_le16(pkey->len); memcpy(pkeyparamset->key, pkey->key, pkey->len); pkeyparamset->length = cpu_to_le16( sizeof(pkeyparamset->keytypeid) + sizeof(pkeyparamset->keyinfo) + sizeof(pkeyparamset->keylen) + sizeof(pkeyparamset->key)); + lbs_deb_leave(LBS_DEB_CMD); } static int wlan_cmd_802_11_key_material(wlan_private * priv, @@ -285,10 +285,10 @@ static int wlan_cmd_802_11_key_material(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_key_material); + cmd->command = cpu_to_le16(CMD_802_11_KEY_MATERIAL); pkeymaterial->action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_get) { + if (cmd_action == CMD_ACT_GET) { cmd->size = cpu_to_le16(S_DS_GEN + sizeof (pkeymaterial->action)); ret = 0; goto done; @@ -324,30 +324,37 @@ static int wlan_cmd_802_11_reset(wlan_private * priv, { struct cmd_ds_802_11_reset *reset = &cmd->params.reset; - cmd->command = cpu_to_le16(cmd_802_11_reset); + lbs_deb_enter(LBS_DEB_CMD); + + cmd->command = cpu_to_le16(CMD_802_11_RESET); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN); reset->action = cpu_to_le16(cmd_action); + lbs_deb_leave(LBS_DEB_CMD); return 0; } static int wlan_cmd_802_11_get_log(wlan_private * priv, struct cmd_ds_command *cmd) { - cmd->command = cpu_to_le16(cmd_802_11_get_log); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_GET_LOG); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_get_log) + S_DS_GEN); + lbs_deb_leave(LBS_DEB_CMD); return 0; } static int wlan_cmd_802_11_get_stat(wlan_private * priv, struct cmd_ds_command *cmd) { - cmd->command = cpu_to_le16(cmd_802_11_get_stat); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_GET_STAT); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_get_stat) + S_DS_GEN); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -364,15 +371,15 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, lbs_deb_cmd("SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); - 
cmd->command = cpu_to_le16(cmd_802_11_snmp_mib); + cmd->command = cpu_to_le16(CMD_802_11_SNMP_MIB); cmd->size = cpu_to_le16(sizeof(*pSNMPMIB) + S_DS_GEN); switch (cmd_oid) { case OID_802_11_INFRASTRUCTURE_MODE: { u8 mode = (u8) (size_t) pdata_buf; - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); - pSNMPMIB->oid = cpu_to_le16((u16) desired_bsstype_i); + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); + pSNMPMIB->oid = cpu_to_le16((u16) DESIRED_BSSTYPE_I); pSNMPMIB->bufsize = sizeof(u8); if (mode == IW_MODE_ADHOC) { ucTemp = SNMP_MIB_VALUE_ADHOC; @@ -390,10 +397,10 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, { u32 ulTemp; - pSNMPMIB->oid = cpu_to_le16((u16) dot11d_i); + pSNMPMIB->oid = cpu_to_le16((u16) DOT11D_I); - if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cmd_act_set; + if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = CMD_ACT_SET; pSNMPMIB->bufsize = sizeof(u16); ulTemp = *(u32 *)pdata_buf; *((__le16 *)(pSNMPMIB->value)) = @@ -406,12 +413,12 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, { u32 ulTemp; - pSNMPMIB->oid = cpu_to_le16((u16) fragthresh_i); + pSNMPMIB->oid = cpu_to_le16((u16) FRAGTHRESH_I); - if (cmd_action == cmd_act_get) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_get); - } else if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); + if (cmd_action == CMD_ACT_GET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); + } else if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16)); ulTemp = *((u32 *) pdata_buf); *((__le16 *)(pSNMPMIB->value)) = @@ -426,12 +433,12 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, { u32 ulTemp; - pSNMPMIB->oid = le16_to_cpu((u16) rtsthresh_i); + pSNMPMIB->oid = le16_to_cpu((u16) RTSTHRESH_I); - if (cmd_action == cmd_act_get) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_get); - } else if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); + if (cmd_action == CMD_ACT_GET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); + } else if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16)); ulTemp = *((u32 *)pdata_buf); *(__le16 *)(pSNMPMIB->value) = @@ -441,12 +448,12 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, break; } case OID_802_11_TX_RETRYCOUNT: - pSNMPMIB->oid = cpu_to_le16((u16) short_retrylim_i); + pSNMPMIB->oid = cpu_to_le16((u16) SHORT_RETRYLIM_I); - if (cmd_action == cmd_act_get) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_get); - } else if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); + if (cmd_action == CMD_ACT_GET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); + } else if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16)); *((__le16 *)(pSNMPMIB->value)) = cpu_to_le16((u16) adapter->txretrycount); @@ -463,7 +470,7 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, le16_to_cpu(cmd->seqnum), le16_to_cpu(cmd->result)); lbs_deb_cmd( - "SNMP_CMD: action=0x%x, oid=0x%x, oidsize=0x%x, value=0x%x\n", + "SNMP_CMD: action 0x%x, oid 0x%x, oidsize 0x%x, value 0x%x\n", le16_to_cpu(pSNMPMIB->querytype), le16_to_cpu(pSNMPMIB->oid), le16_to_cpu(pSNMPMIB->bufsize), le16_to_cpu(*(__le16 *) pSNMPMIB->value)); @@ -484,20 +491,20 @@ static int wlan_cmd_802_11_radio_control(wlan_private * priv, cmd->size = cpu_to_le16((sizeof(struct 
cmd_ds_802_11_radio_control)) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_radio_control); + cmd->command = cpu_to_le16(CMD_802_11_RADIO_CONTROL); pradiocontrol->action = cpu_to_le16(cmd_action); switch (adapter->preamble) { - case cmd_type_short_preamble: + case CMD_TYPE_SHORT_PREAMBLE: pradiocontrol->control = cpu_to_le16(SET_SHORT_PREAMBLE); break; - case cmd_type_long_preamble: + case CMD_TYPE_LONG_PREAMBLE: pradiocontrol->control = cpu_to_le16(SET_LONG_PREAMBLE); break; - case cmd_type_auto_preamble: + case CMD_TYPE_AUTO_PREAMBLE: default: pradiocontrol->control = cpu_to_le16(SET_AUTO_PREAMBLE); break; @@ -523,7 +530,7 @@ static int wlan_cmd_802_11_rf_tx_power(wlan_private * priv, cmd->size = cpu_to_le16((sizeof(struct cmd_ds_802_11_rf_tx_power)) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_rf_tx_power); + cmd->command = cpu_to_le16(CMD_802_11_RF_TX_POWER); prtp->action = cpu_to_le16(cmd_action); lbs_deb_cmd("RF_TX_POWER_CMD: size:%d cmd:0x%x Act:%d\n", @@ -531,23 +538,23 @@ static int wlan_cmd_802_11_rf_tx_power(wlan_private * priv, le16_to_cpu(prtp->action)); switch (cmd_action) { - case cmd_act_tx_power_opt_get: - prtp->action = cpu_to_le16(cmd_act_get); + case CMD_ACT_TX_POWER_OPT_GET: + prtp->action = cpu_to_le16(CMD_ACT_GET); prtp->currentlevel = 0; break; - case cmd_act_tx_power_opt_set_high: - prtp->action = cpu_to_le16(cmd_act_set); - prtp->currentlevel = cpu_to_le16(cmd_act_tx_power_index_high); + case CMD_ACT_TX_POWER_OPT_SET_HIGH: + prtp->action = cpu_to_le16(CMD_ACT_SET); + prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_HIGH); break; - case cmd_act_tx_power_opt_set_mid: - prtp->action = cpu_to_le16(cmd_act_set); - prtp->currentlevel = cpu_to_le16(cmd_act_tx_power_index_mid); + case CMD_ACT_TX_POWER_OPT_SET_MID: + prtp->action = cpu_to_le16(CMD_ACT_SET); + prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_MID); break; - case cmd_act_tx_power_opt_set_low: - prtp->action = cpu_to_le16(cmd_act_set); + case CMD_ACT_TX_POWER_OPT_SET_LOW: + prtp->action = cpu_to_le16(CMD_ACT_SET); prtp->currentlevel = cpu_to_le16(*((u16 *) pdata_buf)); break; } @@ -556,19 +563,21 @@ static int wlan_cmd_802_11_rf_tx_power(wlan_private * priv, return 0; } -static int wlan_cmd_802_11_rf_antenna(wlan_private * priv, +static int wlan_cmd_802_11_monitor_mode(wlan_private * priv, struct cmd_ds_command *cmd, u16 cmd_action, void *pdata_buf) { - struct cmd_ds_802_11_rf_antenna *rant = &cmd->params.rant; + struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor; - cmd->command = cpu_to_le16(cmd_802_11_rf_antenna); - cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rf_antenna) + - S_DS_GEN); + cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); + cmd->size = + cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + + S_DS_GEN); - rant->action = cpu_to_le16(cmd_action); - if ((cmd_action == cmd_act_set_rx) || (cmd_action == cmd_act_set_tx)) { - rant->antennamode = cpu_to_le16((u16) (*(u32 *) pdata_buf)); + monitor->action = cpu_to_le16(cmd_action); + if (cmd_action == CMD_ACT_SET) { + monitor->mode = + cpu_to_le16((u16) (*(u32 *) pdata_buf)); } return 0; @@ -582,12 +591,11 @@ static int wlan_cmd_802_11_rate_adapt_rateset(wlan_private * priv, *rateadapt = &cmd->params.rateset; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_CMD); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rate_adapt_rateset) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_rate_adapt_rateset); - - lbs_deb_enter(LBS_DEB_CMD); + cmd->command = 
cpu_to_le16(CMD_802_11_RATE_ADAPT_RATESET); rateadapt->action = cpu_to_le16(cmd_action); rateadapt->enablehwauto = cpu_to_le16(adapter->enablehwauto); @@ -608,19 +616,16 @@ static int wlan_cmd_802_11_data_rate(wlan_private * priv, cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_data_rate) + S_DS_GEN); - - cmd->command = cpu_to_le16(cmd_802_11_data_rate); - + cmd->command = cpu_to_le16(CMD_802_11_DATA_RATE); memset(pdatarate, 0, sizeof(struct cmd_ds_802_11_data_rate)); - pdatarate->action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_set_tx_fix_rate) { - pdatarate->datarate[0] = libertas_data_rate_to_index(adapter->datarate); - lbs_deb_cmd("Setting FW for fixed rate 0x%02X\n", - adapter->datarate); - } else if (cmd_action == cmd_act_set_tx_auto) { - lbs_deb_cmd("Setting FW for AUTO rate\n"); + if (cmd_action == CMD_ACT_SET_TX_FIX_RATE) { + pdatarate->rates[0] = libertas_data_rate_to_fw_index(adapter->cur_rate); + lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", + adapter->cur_rate); + } else if (cmd_action == CMD_ACT_SET_TX_AUTO) { + lbs_deb_cmd("DATA_RATE: setting auto\n"); } lbs_deb_leave(LBS_DEB_CMD); @@ -634,16 +639,19 @@ static int wlan_cmd_mac_multicast_adr(wlan_private * priv, struct cmd_ds_mac_multicast_adr *pMCastAdr = &cmd->params.madr; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_CMD); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_multicast_adr) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_mac_multicast_adr); + cmd->command = cpu_to_le16(CMD_MAC_MULTICAST_ADR); + lbs_deb_cmd("MULTICAST_ADR: setting %d addresses\n", pMCastAdr->nr_of_adrs); pMCastAdr->action = cpu_to_le16(cmd_action); pMCastAdr->nr_of_adrs = cpu_to_le16((u16) adapter->nr_of_multicastmacaddr); memcpy(pMCastAdr->maclist, adapter->multicastlist, adapter->nr_of_multicastmacaddr * ETH_ALEN); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -653,16 +661,18 @@ static int wlan_cmd_802_11_rf_channel(wlan_private * priv, { struct cmd_ds_802_11_rf_channel *rfchan = &cmd->params.rfchannel; - cmd->command = cpu_to_le16(cmd_802_11_rf_channel); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_RF_CHANNEL); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rf_channel) + S_DS_GEN); - if (option == cmd_opt_802_11_rf_channel_set) { + if (option == CMD_OPT_802_11_RF_CHANNEL_SET) { rfchan->currentchannel = cpu_to_le16(*((u16 *) pdata_buf)); } rfchan->action = cpu_to_le16(option); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -671,9 +681,10 @@ static int wlan_cmd_802_11_rssi(wlan_private * priv, { wlan_adapter *adapter = priv->adapter; - cmd->command = cpu_to_le16(cmd_802_11_rssi); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_RSSI); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN); - cmd->params.rssi.N = cpu_to_le16(priv->adapter->bcn_avg_factor); + cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR); /* reset Beacon SNR/NF/RSSI values */ adapter->SNR[TYPE_BEACON][TYPE_NOAVG] = 0; @@ -683,6 +694,7 @@ static int wlan_cmd_802_11_rssi(wlan_private * priv, adapter->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0; adapter->RSSI[TYPE_BEACON][TYPE_AVG] = 0; + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -697,7 +709,7 @@ static int wlan_cmd_reg_access(wlan_private * priv, offval = (struct wlan_offset_value *)pdata_buf; switch (cmdptr->command) { - case cmd_mac_reg_access: + case CMD_MAC_REG_ACCESS: { struct cmd_ds_mac_reg_access *macreg; @@ -715,7 +727,7 @@ static int wlan_cmd_reg_access(wlan_private * priv, break; } - case cmd_bbp_reg_access: + case 
CMD_BBP_REG_ACCESS: { struct cmd_ds_bbp_reg_access *bbpreg; @@ -734,7 +746,7 @@ static int wlan_cmd_reg_access(wlan_private * priv, break; } - case cmd_rf_reg_access: + case CMD_RF_REG_ACCESS: { struct cmd_ds_rf_reg_access *rfreg; @@ -767,19 +779,21 @@ static int wlan_cmd_802_11_mac_address(wlan_private * priv, { wlan_adapter *adapter = priv->adapter; - cmd->command = cpu_to_le16(cmd_802_11_mac_address); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_MAC_ADDRESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_mac_address) + S_DS_GEN); cmd->result = 0; cmd->params.macadd.action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_set) { + if (cmd_action == CMD_ACT_SET) { memcpy(cmd->params.macadd.macadd, adapter->current_addr, ETH_ALEN); - lbs_dbg_hex("SET_CMD: MAC ADDRESS-", adapter->current_addr, 6); + lbs_deb_hex(LBS_DEB_CMD, "SET_CMD: MAC addr", adapter->current_addr, 6); } + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -791,7 +805,7 @@ static int wlan_cmd_802_11_eeprom_access(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_eeprom_access); + cmd->command = cpu_to_le16(CMD_802_11_EEPROM_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) + S_DS_GEN); cmd->result = 0; @@ -801,6 +815,7 @@ static int wlan_cmd_802_11_eeprom_access(wlan_private * priv, cmd->params.rdeeprom.bytecount = cpu_to_le16(ea->NOB); cmd->params.rdeeprom.value = 0; + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -809,35 +824,36 @@ static int wlan_cmd_bt_access(wlan_private * priv, u16 cmd_action, void *pdata_buf) { struct cmd_ds_bt_access *bt_access = &cmd->params.bt; - lbs_deb_cmd("BT CMD(%d)\n", cmd_action); + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); - cmd->command = cpu_to_le16(cmd_bt_access); + cmd->command = cpu_to_le16(CMD_BT_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN); cmd->result = 0; bt_access->action = cpu_to_le16(cmd_action); switch (cmd_action) { - case cmd_act_bt_access_add: + case CMD_ACT_BT_ACCESS_ADD: memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN); - lbs_dbg_hex("BT_ADD: blinded mac address-", bt_access->addr1, 6); + lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr", bt_access->addr1, 6); break; - case cmd_act_bt_access_del: + case CMD_ACT_BT_ACCESS_DEL: memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN); - lbs_dbg_hex("BT_DEL: blinded mac address-", bt_access->addr1, 6); + lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr", bt_access->addr1, 6); break; - case cmd_act_bt_access_list: + case CMD_ACT_BT_ACCESS_LIST: bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); break; - case cmd_act_bt_access_reset: + case CMD_ACT_BT_ACCESS_RESET: break; - case cmd_act_bt_access_set_invert: + case CMD_ACT_BT_ACCESS_SET_INVERT: bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); break; - case cmd_act_bt_access_get_invert: + case CMD_ACT_BT_ACCESS_GET_INVERT: break; default: break; } + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -846,9 +862,9 @@ static int wlan_cmd_fwt_access(wlan_private * priv, u16 cmd_action, void *pdata_buf) { struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt; - lbs_deb_cmd("FWT CMD(%d)\n", cmd_action); + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); - cmd->command = cpu_to_le16(cmd_fwt_access); + cmd->command = cpu_to_le16(CMD_FWT_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN); cmd->result = 0; @@ -859,6 +875,7 @@ static int wlan_cmd_fwt_access(wlan_private * priv, fwt_access->action = cpu_to_le16(cmd_action); 
+ lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -867,9 +884,9 @@ static int wlan_cmd_mesh_access(wlan_private * priv, u16 cmd_action, void *pdata_buf) { struct cmd_ds_mesh_access *mesh_access = &cmd->params.mesh; - lbs_deb_cmd("FWT CMD(%d)\n", cmd_action); + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); - cmd->command = cpu_to_le16(cmd_mesh_access); + cmd->command = cpu_to_le16(CMD_MESH_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mesh_access) + S_DS_GEN); cmd->result = 0; @@ -880,6 +897,18 @@ static int wlan_cmd_mesh_access(wlan_private * priv, mesh_access->action = cpu_to_le16(cmd_action); + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +static int wlan_cmd_set_boot2_ver(wlan_private * priv, + struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf) +{ + struct cmd_ds_set_boot2_ver *boot2_ver = &cmd->params.boot2_ver; + cmd->command = cpu_to_le16(CMD_SET_BOOT2_VER); + cmd->size = cpu_to_le16(sizeof(struct cmd_ds_set_boot2_ver) + S_DS_GEN); + boot2_ver->version = priv->boot2_version; return 0; } @@ -888,23 +917,23 @@ void libertas_queue_cmd(wlan_adapter * adapter, struct cmd_ctrl_node *cmdnode, u unsigned long flags; struct cmd_ds_command *cmdptr; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!cmdnode) { - lbs_deb_cmd("QUEUE_CMD: cmdnode is NULL\n"); + lbs_deb_host("QUEUE_CMD: cmdnode is NULL\n"); goto done; } cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; if (!cmdptr) { - lbs_deb_cmd("QUEUE_CMD: cmdptr is NULL\n"); + lbs_deb_host("QUEUE_CMD: cmdptr is NULL\n"); goto done; } /* Exit_PS command needs to be queued in the header always. */ - if (cmdptr->command == cmd_802_11_ps_mode) { + if (cmdptr->command == CMD_802_11_PS_MODE) { struct cmd_ds_802_11_ps_mode *psm = &cmdptr->params.psmode; - if (psm->action == cpu_to_le16(cmd_subcmd_exit_ps)) { + if (psm->action == cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { if (adapter->psstate != PS_STATE_FULL_POWER) addtail = 0; } @@ -920,17 +949,16 @@ void libertas_queue_cmd(wlan_adapter * adapter, struct cmd_ctrl_node *cmdnode, u spin_unlock_irqrestore(&adapter->driver_lock, flags); - lbs_deb_cmd("QUEUE_CMD: Inserted node=%p, cmd=0x%x in cmdpendingq\n", - cmdnode, + lbs_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n", le16_to_cpu(((struct cmd_ds_gen*)cmdnode->bufvirtualaddr)->command)); done: - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /* * TODO: Fix the issue when DownloadcommandToStation is being called the - * second time when the command timesout. All the cmdptr->xxx are in little + * second time when the command times out. All the cmdptr->xxx are in little * endian and therefore all the comparissions will fail. 
* For now - we are not performing the endian conversion the second time - but * for PS and DEEP_SLEEP we need to worry @@ -941,68 +969,59 @@ static int DownloadcommandToStation(wlan_private * priv, unsigned long flags; struct cmd_ds_command *cmdptr; wlan_adapter *adapter = priv->adapter; - int ret = 0; + int ret = -1; u16 cmdsize; u16 command; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!adapter || !cmdnode) { - lbs_deb_cmd("DNLD_CMD: adapter = %p, cmdnode = %p\n", - adapter, cmdnode); - if (cmdnode) { - spin_lock_irqsave(&adapter->driver_lock, flags); - __libertas_cleanup_and_insert_cmd(priv, cmdnode); - spin_unlock_irqrestore(&adapter->driver_lock, flags); - } - ret = -1; + lbs_deb_host("DNLD_CMD: adapter or cmdmode is NULL\n"); goto done; } cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; - spin_lock_irqsave(&adapter->driver_lock, flags); if (!cmdptr || !cmdptr->size) { - lbs_deb_cmd("DNLD_CMD: cmdptr is Null or cmd size is Zero, " - "Not sending\n"); + lbs_deb_host("DNLD_CMD: cmdptr is NULL or zero\n"); __libertas_cleanup_and_insert_cmd(priv, cmdnode); spin_unlock_irqrestore(&adapter->driver_lock, flags); - ret = -1; goto done; } adapter->cur_cmd = cmdnode; adapter->cur_cmd_retcode = 0; spin_unlock_irqrestore(&adapter->driver_lock, flags); - lbs_deb_cmd("DNLD_CMD:: Before download, size of cmd = %d\n", - le16_to_cpu(cmdptr->size)); cmdsize = cmdptr->size; - command = cpu_to_le16(cmdptr->command); + lbs_deb_host("DNLD_CMD: command 0x%04x, size %d, jiffies %lu\n", + command, le16_to_cpu(cmdptr->size), jiffies); + lbs_deb_hex(LBS_DEB_HOST, "DNLD_CMD", cmdnode->bufvirtualaddr, cmdsize); + cmdnode->cmdwaitqwoken = 0; cmdsize = cpu_to_le16(cmdsize); ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmdptr, cmdsize); if (ret != 0) { - lbs_deb_cmd("DNLD_CMD: Host to Card failed\n"); + lbs_deb_host("DNLD_CMD: hw_host_to_card failed\n"); spin_lock_irqsave(&adapter->driver_lock, flags); + adapter->cur_cmd_retcode = ret; __libertas_cleanup_and_insert_cmd(priv, adapter->cur_cmd); + adapter->nr_cmd_pending--; adapter->cur_cmd = NULL; spin_unlock_irqrestore(&adapter->driver_lock, flags); - ret = -1; goto done; } - lbs_deb_cmd("DNLD_CMD: Sent command 0x%x @ %lu\n", command, jiffies); - lbs_dbg_hex("DNLD_CMD: command", cmdnode->bufvirtualaddr, cmdsize); + lbs_deb_cmd("DNLD_CMD: sent command 0x%04x, jiffies %lu\n", command, jiffies); /* Setup the timer after transmit command */ - if (command == cmd_802_11_scan || command == cmd_802_11_authenticate - || command == cmd_802_11_associate) + if (command == CMD_802_11_SCAN || command == CMD_802_11_AUTHENTICATE + || command == CMD_802_11_ASSOCIATE) mod_timer(&adapter->command_timer, jiffies + (10*HZ)); else mod_timer(&adapter->command_timer, jiffies + (5*HZ)); @@ -1010,7 +1029,7 @@ static int DownloadcommandToStation(wlan_private * priv, ret = 0; done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } @@ -1021,11 +1040,11 @@ static int wlan_cmd_mac_control(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_mac_control); + cmd->command = cpu_to_le16(CMD_MAC_CONTROL); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_control) + S_DS_GEN); mac->action = cpu_to_le16(priv->adapter->currentpacketfilter); - lbs_deb_cmd("wlan_cmd_mac_control(): action=0x%X size=%d\n", + lbs_deb_cmd("MAC_CONTROL: action 0x%x, size %d\n", le16_to_cpu(mac->action), le16_to_cpu(cmd->size)); lbs_deb_leave(LBS_DEB_CMD); @@ -1041,15 +1060,13 @@ void 
__libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node wlan_adapter *adapter = priv->adapter; if (!ptempcmd) - goto done; + return; cleanup_cmdnode(ptempcmd); list_add_tail((struct list_head *)ptempcmd, &adapter->cmdfreeq); -done: - return; } -void libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node *ptempcmd) +static void libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node *ptempcmd) { unsigned long flags; @@ -1065,11 +1082,11 @@ int libertas_set_radio_control(wlan_private * priv) lbs_deb_enter(LBS_DEB_CMD); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_radio_control, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RADIO_CONTROL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); - lbs_deb_cmd("RADIO_SET: on or off: 0x%X, preamble = 0x%X\n", + lbs_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->adapter->radioon, priv->adapter->preamble); lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); @@ -1082,12 +1099,9 @@ int libertas_set_mac_packet_filter(wlan_private * priv) lbs_deb_enter(LBS_DEB_CMD); - lbs_deb_cmd("libertas_set_mac_packet_filter value = %x\n", - priv->adapter->currentpacketfilter); - /* Send MAC control command to station */ ret = libertas_prepare_and_send_command(priv, - cmd_mac_control, 0, 0, 0, NULL); + CMD_MAC_CONTROL, 0, 0, 0, NULL); lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; @@ -1115,16 +1129,16 @@ int libertas_prepare_and_send_command(wlan_private * priv, struct cmd_ds_command *cmdptr; unsigned long flags; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!adapter) { - lbs_deb_cmd("PREP_CMD: adapter is Null\n"); + lbs_deb_host("PREP_CMD: adapter is NULL\n"); ret = -1; goto done; } if (adapter->surpriseremoved) { - lbs_deb_cmd("PREP_CMD: Card is Removed\n"); + lbs_deb_host("PREP_CMD: card removed\n"); ret = -1; goto done; } @@ -1132,10 +1146,10 @@ int libertas_prepare_and_send_command(wlan_private * priv, cmdnode = libertas_get_free_cmd_ctrl_node(priv); if (cmdnode == NULL) { - lbs_deb_cmd("PREP_CMD: No free cmdnode\n"); + lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); /* Wake up main thread to execute next command */ - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); ret = -1; goto done; } @@ -1144,11 +1158,10 @@ int libertas_prepare_and_send_command(wlan_private * priv, cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; - lbs_deb_cmd("PREP_CMD: Val of cmd ptr=%p, command=0x%X\n", - cmdptr, cmd_no); + lbs_deb_host("PREP_CMD: command 0x%04x\n", cmd_no); if (!cmdptr) { - lbs_deb_cmd("PREP_CMD: bufvirtualaddr of cmdnode is NULL\n"); + lbs_deb_host("PREP_CMD: cmdptr is NULL\n"); libertas_cleanup_and_insert_cmd(priv, cmdnode); ret = -1; goto done; @@ -1162,136 +1175,136 @@ int libertas_prepare_and_send_command(wlan_private * priv, cmdptr->result = 0; switch (cmd_no) { - case cmd_get_hw_spec: + case CMD_GET_HW_SPEC: ret = wlan_cmd_hw_spec(priv, cmdptr); break; - case cmd_802_11_ps_mode: + case CMD_802_11_PS_MODE: ret = wlan_cmd_802_11_ps_mode(priv, cmdptr, cmd_action); break; - case cmd_802_11_scan: + case CMD_802_11_SCAN: ret = libertas_cmd_80211_scan(priv, cmdptr, pdata_buf); break; - case cmd_mac_control: + case CMD_MAC_CONTROL: ret = wlan_cmd_mac_control(priv, cmdptr); break; - case cmd_802_11_associate: - case cmd_802_11_reassociate: + case CMD_802_11_ASSOCIATE: + case CMD_802_11_REASSOCIATE: ret = libertas_cmd_80211_associate(priv, cmdptr, pdata_buf); break; - case cmd_802_11_deauthenticate: + case 
CMD_802_11_DEAUTHENTICATE: ret = libertas_cmd_80211_deauthenticate(priv, cmdptr); break; - case cmd_802_11_set_wep: + case CMD_802_11_SET_WEP: ret = wlan_cmd_802_11_set_wep(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_ad_hoc_start: + case CMD_802_11_AD_HOC_START: ret = libertas_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf); break; - case cmd_code_dnld: + case CMD_CODE_DNLD: break; - case cmd_802_11_reset: + case CMD_802_11_RESET: ret = wlan_cmd_802_11_reset(priv, cmdptr, cmd_action); break; - case cmd_802_11_get_log: + case CMD_802_11_GET_LOG: ret = wlan_cmd_802_11_get_log(priv, cmdptr); break; - case cmd_802_11_authenticate: + case CMD_802_11_AUTHENTICATE: ret = libertas_cmd_80211_authenticate(priv, cmdptr, pdata_buf); break; - case cmd_802_11_get_stat: + case CMD_802_11_GET_STAT: ret = wlan_cmd_802_11_get_stat(priv, cmdptr); break; - case cmd_802_11_snmp_mib: + case CMD_802_11_SNMP_MIB: ret = wlan_cmd_802_11_snmp_mib(priv, cmdptr, cmd_action, cmd_oid, pdata_buf); break; - case cmd_mac_reg_access: - case cmd_bbp_reg_access: - case cmd_rf_reg_access: + case CMD_MAC_REG_ACCESS: + case CMD_BBP_REG_ACCESS: + case CMD_RF_REG_ACCESS: ret = wlan_cmd_reg_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_rf_channel: + case CMD_802_11_RF_CHANNEL: ret = wlan_cmd_802_11_rf_channel(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_rf_tx_power: + case CMD_802_11_RF_TX_POWER: ret = wlan_cmd_802_11_rf_tx_power(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_radio_control: + case CMD_802_11_RADIO_CONTROL: ret = wlan_cmd_802_11_radio_control(priv, cmdptr, cmd_action); break; - case cmd_802_11_rf_antenna: - ret = wlan_cmd_802_11_rf_antenna(priv, cmdptr, - cmd_action, pdata_buf); - break; - - case cmd_802_11_data_rate: + case CMD_802_11_DATA_RATE: ret = wlan_cmd_802_11_data_rate(priv, cmdptr, cmd_action); break; - case cmd_802_11_rate_adapt_rateset: + case CMD_802_11_RATE_ADAPT_RATESET: ret = wlan_cmd_802_11_rate_adapt_rateset(priv, cmdptr, cmd_action); break; - case cmd_mac_multicast_adr: + case CMD_MAC_MULTICAST_ADR: ret = wlan_cmd_mac_multicast_adr(priv, cmdptr, cmd_action); break; - case cmd_802_11_ad_hoc_join: + case CMD_802_11_MONITOR_MODE: + ret = wlan_cmd_802_11_monitor_mode(priv, cmdptr, + cmd_action, pdata_buf); + break; + + case CMD_802_11_AD_HOC_JOIN: ret = libertas_cmd_80211_ad_hoc_join(priv, cmdptr, pdata_buf); break; - case cmd_802_11_rssi: + case CMD_802_11_RSSI: ret = wlan_cmd_802_11_rssi(priv, cmdptr); break; - case cmd_802_11_ad_hoc_stop: + case CMD_802_11_AD_HOC_STOP: ret = libertas_cmd_80211_ad_hoc_stop(priv, cmdptr); break; - case cmd_802_11_enable_rsn: + case CMD_802_11_ENABLE_RSN: ret = wlan_cmd_802_11_enable_rsn(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_key_material: + case CMD_802_11_KEY_MATERIAL: ret = wlan_cmd_802_11_key_material(priv, cmdptr, cmd_action, cmd_oid, pdata_buf); break; - case cmd_802_11_pairwise_tsc: + case CMD_802_11_PAIRWISE_TSC: break; - case cmd_802_11_group_tsc: + case CMD_802_11_GROUP_TSC: break; - case cmd_802_11_mac_address: + case CMD_802_11_MAC_ADDRESS: ret = wlan_cmd_802_11_mac_address(priv, cmdptr, cmd_action); break; - case cmd_802_11_eeprom_access: + case CMD_802_11_EEPROM_ACCESS: ret = wlan_cmd_802_11_eeprom_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_set_afc: - case cmd_802_11_get_afc: + case CMD_802_11_SET_AFC: + case CMD_802_11_GET_AFC: cmdptr->command = cpu_to_le16(cmd_no); cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + 
@@ -1303,22 +1316,22 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; goto done; - case cmd_802_11d_domain_info: + case CMD_802_11D_DOMAIN_INFO: ret = libertas_cmd_802_11d_domain_info(priv, cmdptr, cmd_no, cmd_action); break; - case cmd_802_11_sleep_params: + case CMD_802_11_SLEEP_PARAMS: ret = wlan_cmd_802_11_sleep_params(priv, cmdptr, cmd_action); break; - case cmd_802_11_inactivity_timeout: + case CMD_802_11_INACTIVITY_TIMEOUT: ret = wlan_cmd_802_11_inactivity_timeout(priv, cmdptr, cmd_action, pdata_buf); libertas_set_cmd_ctrl_node(priv, cmdnode, 0, 0, pdata_buf); break; - case cmd_802_11_tpc_cfg: - cmdptr->command = cpu_to_le16(cmd_802_11_tpc_cfg); + case CMD_802_11_TPC_CFG: + cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + S_DS_GEN); @@ -1328,7 +1341,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; break; - case cmd_802_11_led_gpio_ctrl: + case CMD_802_11_LED_GPIO_CTRL: { struct mrvlietypes_ledgpio *gpio = (struct mrvlietypes_ledgpio*) @@ -1339,7 +1352,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, sizeof(struct cmd_ds_802_11_led_ctrl)); cmdptr->command = - cpu_to_le16(cmd_802_11_led_gpio_ctrl); + cpu_to_le16(CMD_802_11_LED_GPIO_CTRL); #define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8 cmdptr->size = @@ -1350,8 +1363,8 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; break; } - case cmd_802_11_pwr_cfg: - cmdptr->command = cpu_to_le16(cmd_802_11_pwr_cfg); + case CMD_802_11_PWR_CFG: + cmdptr->command = cpu_to_le16(CMD_802_11_PWR_CFG); cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_pwr_cfg) + S_DS_GEN); @@ -1360,40 +1373,37 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; break; - case cmd_bt_access: + case CMD_BT_ACCESS: ret = wlan_cmd_bt_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_fwt_access: + case CMD_FWT_ACCESS: ret = wlan_cmd_fwt_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_mesh_access: + case CMD_MESH_ACCESS: ret = wlan_cmd_mesh_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_get_tsf: - cmdptr->command = cpu_to_le16(cmd_get_tsf); - cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) + - S_DS_GEN); - ret = 0; + case CMD_SET_BOOT2_VER: + ret = wlan_cmd_set_boot2_ver(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_tx_rate_query: - cmdptr->command = cpu_to_le16(cmd_802_11_tx_rate_query); - cmdptr->size = cpu_to_le16(sizeof(struct cmd_tx_rate_query) + + + case CMD_GET_TSF: + cmdptr->command = cpu_to_le16(CMD_GET_TSF); + cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) + S_DS_GEN); - adapter->txrate = 0; ret = 0; break; default: - lbs_deb_cmd("PREP_CMD: unknown command- %#x\n", cmd_no); + lbs_deb_host("PREP_CMD: unknown command 0x%04x\n", cmd_no); ret = -1; break; } /* return error, since the command preparation failed */ if (ret != 0) { - lbs_deb_cmd("PREP_CMD: command preparation failed\n"); + lbs_deb_host("PREP_CMD: command preparation failed\n"); libertas_cleanup_and_insert_cmd(priv, cmdnode); ret = -1; goto done; @@ -1403,10 +1413,10 @@ int libertas_prepare_and_send_command(wlan_private * priv, libertas_queue_cmd(adapter, cmdnode, 1); adapter->nr_cmd_pending++; - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); - if (wait_option & cmd_option_waitforrsp) { - lbs_deb_cmd("PREP_CMD: Wait for CMD response\n"); + if (wait_option & CMD_OPTION_WAITFORRSP) { + lbs_deb_host("PREP_CMD: wait for 
response\n"); might_sleep(); wait_event_interruptible(cmdnode->cmdwait_q, cmdnode->cmdwaitqwoken); @@ -1414,7 +1424,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd_retcode) { - lbs_deb_cmd("PREP_CMD: command failed with return code=%d\n", + lbs_deb_host("PREP_CMD: command failed with return code %d\n", adapter->cur_cmd_retcode); adapter->cur_cmd_retcode = 0; ret = -1; @@ -1422,7 +1432,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, spin_unlock_irqrestore(&adapter->driver_lock, flags); done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } EXPORT_SYMBOL_GPL(libertas_prepare_and_send_command); @@ -1443,14 +1453,13 @@ int libertas_allocate_cmd_buffer(wlan_private * priv) u8 *ptempvirtualaddr; wlan_adapter *adapter = priv->adapter; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); /* Allocate and initialize cmdCtrlNode */ ulbufsize = sizeof(struct cmd_ctrl_node) * MRVDRV_NUM_OF_CMD_BUFFER; if (!(tempcmd_array = kzalloc(ulbufsize, GFP_KERNEL))) { - lbs_deb_cmd( - "ALLOC_CMD_BUF: failed to allocate tempcmd_array\n"); + lbs_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n"); ret = -1; goto done; } @@ -1460,8 +1469,7 @@ int libertas_allocate_cmd_buffer(wlan_private * priv) ulbufsize = MRVDRV_SIZE_OF_CMD_BUFFER; for (i = 0; i < MRVDRV_NUM_OF_CMD_BUFFER; i++) { if (!(ptempvirtualaddr = kzalloc(ulbufsize, GFP_KERNEL))) { - lbs_deb_cmd( - "ALLOC_CMD_BUF: ptempvirtualaddr: out of memory\n"); + lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n"); ret = -1; goto done; } @@ -1478,7 +1486,7 @@ int libertas_allocate_cmd_buffer(wlan_private * priv) ret = 0; done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } @@ -1495,11 +1503,11 @@ int libertas_free_cmd_buffer(wlan_private * priv) struct cmd_ctrl_node *tempcmd_array; wlan_adapter *adapter = priv->adapter; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); /* need to check if cmd array is allocated or not */ if (adapter->cmd_array == NULL) { - lbs_deb_cmd("FREE_CMD_BUF: cmd_array is Null\n"); + lbs_deb_host("FREE_CMD_BUF: cmd_array is NULL\n"); goto done; } @@ -1509,7 +1517,6 @@ int libertas_free_cmd_buffer(wlan_private * priv) ulbufsize = MRVDRV_SIZE_OF_CMD_BUFFER; for (i = 0; i < MRVDRV_NUM_OF_CMD_BUFFER; i++) { if (tempcmd_array[i].bufvirtualaddr) { - lbs_deb_cmd("Free all the array\n"); kfree(tempcmd_array[i].bufvirtualaddr); tempcmd_array[i].bufvirtualaddr = NULL; } @@ -1517,13 +1524,12 @@ int libertas_free_cmd_buffer(wlan_private * priv) /* Release cmd_ctrl_node */ if (adapter->cmd_array) { - lbs_deb_cmd("Free cmd_array\n"); kfree(adapter->cmd_array); adapter->cmd_array = NULL; } done: - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); return 0; } @@ -1540,6 +1546,8 @@ struct cmd_ctrl_node *libertas_get_free_cmd_ctrl_node(wlan_private * priv) wlan_adapter *adapter = priv->adapter; unsigned long flags; + lbs_deb_enter(LBS_DEB_HOST); + if (!adapter) return NULL; @@ -1549,21 +1557,16 @@ struct cmd_ctrl_node *libertas_get_free_cmd_ctrl_node(wlan_private * priv) tempnode = (struct cmd_ctrl_node *)adapter->cmdfreeq.next; list_del((struct list_head *)tempnode); } else { - lbs_deb_cmd("GET_CMD_NODE: cmd_ctrl_node is not available\n"); + lbs_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n"); tempnode = NULL; } spin_unlock_irqrestore(&adapter->driver_lock, flags); - if (tempnode) { - 
/* - lbs_pr_debug(3, "GET_CMD_NODE: cmdCtrlNode available\n"); - lbs_pr_debug(3, "GET_CMD_NODE: cmdCtrlNode Address = %p\n", - tempnode); - */ + if (tempnode) cleanup_cmdnode(tempnode); - } + lbs_deb_leave(LBS_DEB_HOST); return tempnode; } @@ -1575,6 +1578,8 @@ struct cmd_ctrl_node *libertas_get_free_cmd_ctrl_node(wlan_private * priv) */ static void cleanup_cmdnode(struct cmd_ctrl_node *ptempnode) { + lbs_deb_enter(LBS_DEB_HOST); + if (!ptempnode) return; ptempnode->cmdwaitqwoken = 1; @@ -1586,7 +1591,8 @@ static void cleanup_cmdnode(struct cmd_ctrl_node *ptempnode) if (ptempnode->bufvirtualaddr != NULL) memset(ptempnode->bufvirtualaddr, 0, MRVDRV_SIZE_OF_CMD_BUFFER); - return; + + lbs_deb_leave(LBS_DEB_HOST); } /** @@ -1603,7 +1609,7 @@ void libertas_set_cmd_ctrl_node(wlan_private * priv, struct cmd_ctrl_node *ptempnode, u32 cmd_oid, u16 wait_option, void *pdata_buf) { - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!ptempnode) return; @@ -1612,7 +1618,7 @@ void libertas_set_cmd_ctrl_node(wlan_private * priv, ptempnode->wait_option = wait_option; ptempnode->pdata_buf = pdata_buf; - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /** @@ -1631,12 +1637,15 @@ int libertas_execute_next_command(wlan_private * priv) unsigned long flags; int ret = 0; - lbs_deb_enter(LBS_DEB_CMD); + // Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the + // only caller to us is libertas_thread() and we get even when a + // data packet is received + lbs_deb_enter(LBS_DEB_THREAD); spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd) { - lbs_pr_alert( "EXEC_NEXT_CMD: there is command in processing!\n"); + lbs_pr_alert( "EXEC_NEXT_CMD: already processing command!\n"); spin_unlock_irqrestore(&adapter->driver_lock, flags); ret = -1; goto done; @@ -1650,22 +1659,20 @@ int libertas_execute_next_command(wlan_private * priv) spin_unlock_irqrestore(&adapter->driver_lock, flags); if (cmdnode) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Got next command from cmdpendingq\n"); cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; if (is_command_allowed_in_ps(cmdptr->command)) { if ((adapter->psstate == PS_STATE_SLEEP) || (adapter->psstate == PS_STATE_PRE_SLEEP)) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Cannot send cmd 0x%x in psstate %d\n", + lbs_deb_host( + "EXEC_NEXT_CMD: cannot send cmd 0x%04x in psstate %d\n", le16_to_cpu(cmdptr->command), adapter->psstate); ret = -1; goto done; } - lbs_deb_cmd("EXEC_NEXT_CMD: OK to send command " - "0x%x in psstate %d\n", + lbs_deb_host("EXEC_NEXT_CMD: OK to send command " + "0x%04x in psstate %d\n", le16_to_cpu(cmdptr->command), adapter->psstate); } else if (adapter->psstate != PS_STATE_FULL_POWER) { @@ -1681,7 +1688,7 @@ int libertas_execute_next_command(wlan_private * priv) * immediately. 
*/ if (cmdptr->command != - cpu_to_le16(cmd_802_11_ps_mode)) { + cpu_to_le16(CMD_802_11_PS_MODE)) { /* Prepare to send Exit PS, * this non PS command will be sent later */ if ((adapter->psstate == PS_STATE_SLEEP) @@ -1703,13 +1710,13 @@ int libertas_execute_next_command(wlan_private * priv) struct cmd_ds_802_11_ps_mode *psm = &cmdptr->params.psmode; - lbs_deb_cmd( - "EXEC_NEXT_CMD: PS cmd- action=0x%x\n", + lbs_deb_host( + "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", psm->action); if (psm->action != - cpu_to_le16(cmd_subcmd_exit_ps)) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Ignore Enter PS cmd\n"); + cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { + lbs_deb_host( + "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); list_del((struct list_head *)cmdnode); libertas_cleanup_and_insert_cmd(priv, cmdnode); @@ -1719,8 +1726,8 @@ int libertas_execute_next_command(wlan_private * priv) if ((adapter->psstate == PS_STATE_SLEEP) || (adapter->psstate == PS_STATE_PRE_SLEEP)) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Ignore ExitPS cmd in sleep\n"); + lbs_deb_host( + "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); list_del((struct list_head *)cmdnode); libertas_cleanup_and_insert_cmd(priv, cmdnode); adapter->needtowakeup = 1; @@ -1729,12 +1736,12 @@ int libertas_execute_next_command(wlan_private * priv) goto done; } - lbs_deb_cmd( - "EXEC_NEXT_CMD: Sending Exit_PS down...\n"); + lbs_deb_host( + "EXEC_NEXT_CMD: sending EXIT_PS\n"); } } list_del((struct list_head *)cmdnode); - lbs_deb_cmd("EXEC_NEXT_CMD: Sending 0x%04X command\n", + lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", le16_to_cpu(cmdptr->command)); DownloadcommandToStation(priv, cmdnode); } else { @@ -1742,23 +1749,23 @@ int libertas_execute_next_command(wlan_private * priv) * check if in power save mode, if yes, put the device back * to PS mode */ - if ((adapter->psmode != wlan802_11powermodecam) && + if ((adapter->psmode != WLAN802_11POWERMODECAM) && (adapter->psstate == PS_STATE_FULL_POWER) && - (adapter->connect_status == libertas_connected)) { + (adapter->connect_status == LIBERTAS_CONNECTED)) { if (adapter->secinfo.WPAenabled || adapter->secinfo.WPA2enabled) { /* check for valid WPA group keys */ if (adapter->wpa_mcast_key.len || adapter->wpa_unicast_key.len) { - lbs_deb_cmd( + lbs_deb_host( "EXEC_NEXT_CMD: WPA enabled and GTK_SET" " go back to PS_SLEEP"); libertas_ps_sleep(priv, 0); } } else { - lbs_deb_cmd( - "EXEC_NEXT_CMD: command PendQ is empty," - " go back to PS_SLEEP"); + lbs_deb_host( + "EXEC_NEXT_CMD: cmdpendingq empty, " + "go back to PS_SLEEP"); libertas_ps_sleep(priv, 0); } } @@ -1766,7 +1773,7 @@ int libertas_execute_next_command(wlan_private * priv) ret = 0; done: - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_THREAD); return ret; } @@ -1775,7 +1782,7 @@ void libertas_send_iwevcustom_event(wlan_private * priv, s8 * str) union iwreq_data iwrq; u8 buf[50]; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_WEXT); memset(&iwrq, 0, sizeof(union iwreq_data)); memset(buf, 0, sizeof(buf)); @@ -1785,13 +1792,13 @@ void libertas_send_iwevcustom_event(wlan_private * priv, s8 * str) iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN; /* Send Event to upper layer */ - lbs_deb_cmd("Event Indication string = %s\n", (char *)buf); - lbs_deb_cmd("Event Indication String length = %d\n", iwrq.data.length); + lbs_deb_wext("event indication string %s\n", (char *)buf); + lbs_deb_wext("event indication length %d\n", iwrq.data.length); + lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str); - lbs_deb_cmd("Sending wireless event IWEVCUSTOM for %s\n", str); 
wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_WEXT); } static int sendconfirmsleep(wlan_private * priv, u8 * cmdptr, u16 size) @@ -1800,19 +1807,19 @@ static int sendconfirmsleep(wlan_private * priv, u8 * cmdptr, u16 size) wlan_adapter *adapter = priv->adapter; int ret = 0; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); - lbs_deb_cmd("SEND_SLEEPC_CMD: Before download, size of cmd = %d\n", + lbs_deb_host("SEND_SLEEPC_CMD: before download, cmd size %d\n", size); - lbs_dbg_hex("SEND_SLEEPC_CMD: Sleep confirm command", cmdptr, size); + lbs_deb_hex(LBS_DEB_HOST, "sleep confirm command", cmdptr, size); ret = priv->hw_host_to_card(priv, MVMS_CMD, cmdptr, size); priv->dnld_sent = DNLD_RES_RECEIVED; spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->intcounter || adapter->currenttxskb) - lbs_deb_cmd("SEND_SLEEPC_CMD: intcounter=%d currenttxskb=%p\n", + lbs_deb_host("SEND_SLEEPC_CMD: intcounter %d, currenttxskb %p\n", adapter->intcounter, adapter->currenttxskb); spin_unlock_irqrestore(&adapter->driver_lock, flags); @@ -1824,36 +1831,35 @@ static int sendconfirmsleep(wlan_private * priv, u8 * cmdptr, u16 size) if (!adapter->intcounter) { adapter->psstate = PS_STATE_SLEEP; } else { - lbs_deb_cmd("SEND_SLEEPC_CMD: After sent,IntC=%d\n", + lbs_deb_host("SEND_SLEEPC_CMD: after sent, intcounter %d\n", adapter->intcounter); } spin_unlock_irqrestore(&adapter->driver_lock, flags); - lbs_deb_cmd("SEND_SLEEPC_CMD: Sent Confirm Sleep command\n"); - lbs_deb_cmd("+"); + lbs_deb_host("SEND_SLEEPC_CMD: sent confirm sleep\n"); } - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } void libertas_ps_sleep(wlan_private * priv, int wait_option) { - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); /* * PS is currently supported only in Infrastructure mode * Remove this check if it is to be supported in IBSS mode also */ - libertas_prepare_and_send_command(priv, cmd_802_11_ps_mode, - cmd_subcmd_enter_ps, wait_option, 0, NULL); + libertas_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_SUBCMD_ENTER_PS, wait_option, 0, NULL); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /** - * @brief This function sends Eixt_PS command to firmware. + * @brief This function sends Exit_PS command to firmware. 
* * @param priv A pointer to wlan_private structure * @param wait_option wait response or not @@ -1863,17 +1869,15 @@ void libertas_ps_wakeup(wlan_private * priv, int wait_option) { __le32 Localpsmode; - lbs_deb_enter(LBS_DEB_CMD); - - Localpsmode = cpu_to_le32(wlan802_11powermodecam); + lbs_deb_enter(LBS_DEB_HOST); - lbs_deb_cmd("Exit_PS: Localpsmode = %d\n", wlan802_11powermodecam); + Localpsmode = cpu_to_le32(WLAN802_11POWERMODECAM); - libertas_prepare_and_send_command(priv, cmd_802_11_ps_mode, - cmd_subcmd_exit_ps, + libertas_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_SUBCMD_EXIT_PS, wait_option, 0, &Localpsmode); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /** @@ -1890,31 +1894,31 @@ void libertas_ps_confirm_sleep(wlan_private * priv, u16 psmode) wlan_adapter *adapter = priv->adapter; u8 allowed = 1; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (priv->dnld_sent) { allowed = 0; - lbs_deb_cmd("D"); + lbs_deb_host("dnld_sent was set"); } spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd) { allowed = 0; - lbs_deb_cmd("C"); + lbs_deb_host("cur_cmd was set"); } if (adapter->intcounter > 0) { allowed = 0; - lbs_deb_cmd("I%d", adapter->intcounter); + lbs_deb_host("intcounter %d", adapter->intcounter); } spin_unlock_irqrestore(&adapter->driver_lock, flags); if (allowed) { - lbs_deb_cmd("Sending libertas_ps_confirm_sleep\n"); + lbs_deb_host("sending libertas_ps_confirm_sleep\n"); sendconfirmsleep(priv, (u8 *) & adapter->libertas_ps_confirm_sleep, sizeof(struct PS_CMD_ConfirmSleep)); } else { - lbs_deb_cmd("Sleep Confirm has been delayed\n"); + lbs_deb_host("sleep confirm has been delayed\n"); } - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index 6ac0d4752fa4..8f90892ea22e 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -28,10 +28,10 @@ void libertas_mac_event_disconnected(wlan_private * priv) wlan_adapter *adapter = priv->adapter; union iwreq_data wrqu; - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) return; - lbs_deb_cmd("Handles disconnect event.\n"); + lbs_deb_enter(LBS_DEB_CMD); memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN); wrqu.ap_addr.sa_family = ARPHRD_ETHER; @@ -60,22 +60,12 @@ void libertas_mac_event_disconnected(wlan_private * priv) memset(adapter->rawNF, 0x00, sizeof(adapter->rawNF)); adapter->nextSNRNF = 0; adapter->numSNRNF = 0; - adapter->rxpd_rate = 0; - lbs_deb_cmd("Current SSID='%s', ssid length=%u\n", + lbs_deb_cmd("current SSID '%s', length %u\n", escape_essid(adapter->curbssparams.ssid, adapter->curbssparams.ssid_len), adapter->curbssparams.ssid_len); - lbs_deb_cmd("Previous SSID='%s', ssid length=%u\n", - escape_essid(adapter->prev_ssid, adapter->prev_ssid_len), - adapter->prev_ssid_len); - - adapter->connect_status = libertas_disconnected; - /* Save previous SSID and BSSID for possible reassociation */ - memcpy(&adapter->prev_ssid, &adapter->curbssparams.ssid, - IW_ESSID_MAX_SIZE); - adapter->prev_ssid_len = adapter->curbssparams.ssid_len; - memcpy(adapter->prev_bssid, adapter->curbssparams.bssid, ETH_ALEN); + adapter->connect_status = LIBERTAS_DISCONNECTED; /* Clear out associated SSID and BSSID since connection is * no longer valid. 
@@ -86,9 +76,10 @@ void libertas_mac_event_disconnected(wlan_private * priv) if (adapter->psstate != PS_STATE_FULL_POWER) { /* make firmware to exit PS mode */ - lbs_deb_cmd("Disconnected, so exit PS mode.\n"); + lbs_deb_cmd("disconnected, so exit PS mode\n"); libertas_ps_wakeup(priv, 0); } + lbs_deb_leave(LBS_DEB_CMD); } /** @@ -102,6 +93,7 @@ static void handle_mic_failureevent(wlan_private * priv, u32 event) { char buf[50]; + lbs_deb_enter(LBS_DEB_CMD); memset(buf, 0, sizeof(buf)); sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication "); @@ -113,6 +105,7 @@ static void handle_mic_failureevent(wlan_private * priv, u32 event) } libertas_send_iwevcustom_event(priv, buf); + lbs_deb_leave(LBS_DEB_CMD); } static int wlan_ret_reg_access(wlan_private * priv, @@ -124,7 +117,7 @@ static int wlan_ret_reg_access(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); switch (type) { - case cmd_ret_mac_reg_access: + case CMD_RET(CMD_MAC_REG_ACCESS): { struct cmd_ds_mac_reg_access *reg = &resp->params.macreg; @@ -133,7 +126,7 @@ static int wlan_ret_reg_access(wlan_private * priv, break; } - case cmd_ret_bbp_reg_access: + case CMD_RET(CMD_BBP_REG_ACCESS): { struct cmd_ds_bbp_reg_access *reg = &resp->params.bbpreg; @@ -142,7 +135,7 @@ static int wlan_ret_reg_access(wlan_private * priv, break; } - case cmd_ret_rf_reg_access: + case CMD_RET(CMD_RF_REG_ACCESS): { struct cmd_ds_rf_reg_access *reg = &resp->params.rfreg; @@ -155,7 +148,7 @@ static int wlan_ret_reg_access(wlan_private * priv, ret = -1; } - lbs_deb_enter_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; } @@ -166,6 +159,7 @@ static int wlan_ret_get_hw_spec(wlan_private * priv, struct cmd_ds_get_hw_spec *hwspec = &resp->params.hwspec; wlan_adapter *adapter = priv->adapter; int ret = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_CMD); @@ -173,22 +167,23 @@ static int wlan_ret_get_hw_spec(wlan_private * priv, memcpy(adapter->fwreleasenumber, hwspec->fwreleasenumber, 4); - lbs_deb_cmd("GET_HW_SPEC: FWReleaseVersion- %u.%u.%u.p%u\n", + lbs_deb_cmd("GET_HW_SPEC: firmware release %u.%u.%up%u\n", adapter->fwreleasenumber[2], adapter->fwreleasenumber[1], adapter->fwreleasenumber[0], adapter->fwreleasenumber[3]); - lbs_deb_cmd("GET_HW_SPEC: Permanent addr- %2x:%2x:%2x:%2x:%2x:%2x\n", - hwspec->permanentaddr[0], hwspec->permanentaddr[1], - hwspec->permanentaddr[2], hwspec->permanentaddr[3], - hwspec->permanentaddr[4], hwspec->permanentaddr[5]); - lbs_deb_cmd("GET_HW_SPEC: hwifversion=0x%X version=0x%X\n", + lbs_deb_cmd("GET_HW_SPEC: MAC addr %s\n", + print_mac(mac, hwspec->permanentaddr)); + lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n", hwspec->hwifversion, hwspec->version); - adapter->regioncode = le16_to_cpu(hwspec->regioncode); + /* Clamp region code to 8-bit since FW spec indicates that it should + * only ever be 8-bit, even though the field size is 16-bit. Some firmware + * returns non-zero high 8 bits here. 
+ */ + adapter->regioncode = le16_to_cpu(hwspec->regioncode) & 0xFF; for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { /* use the region code to search for the index */ if (adapter->regioncode == libertas_region_code_to_index[i]) { - adapter->regiontableindex = (u16) i; break; } } @@ -196,7 +191,6 @@ static int wlan_ret_get_hw_spec(wlan_private * priv, /* if it's unidentified region code, use the default (USA) */ if (i >= MRVDRV_MAX_REGION_CODE) { adapter->regioncode = 0x10; - adapter->regiontableindex = 0; lbs_pr_info("unidentified region code; using the default (USA)\n"); } @@ -230,8 +224,8 @@ static int wlan_ret_802_11_sleep_params(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - lbs_deb_cmd("error=%x offset=%x stabletime=%x calcontrol=%x\n" - " extsleepclk=%x\n", le16_to_cpu(sp->error), + lbs_deb_cmd("error 0x%x, offset 0x%x, stabletime 0x%x, calcontrol 0x%x " + "extsleepclk 0x%x\n", le16_to_cpu(sp->error), le16_to_cpu(sp->offset), le16_to_cpu(sp->stabletime), sp->calcontrol, sp->externalsleepclk); @@ -249,6 +243,7 @@ static int wlan_ret_802_11_sleep_params(wlan_private * priv, static int wlan_ret_802_11_stat(wlan_private * priv, struct cmd_ds_command *resp) { + lbs_deb_enter(LBS_DEB_CMD); /* currently adapter->wlan802_11Stat is unused struct cmd_ds_802_11_get_stat *p11Stat = &resp->params.gstat; @@ -258,6 +253,7 @@ static int wlan_ret_802_11_stat(wlan_private * priv, memcpy(&adapter->wlan802_11Stat, p11Stat, sizeof(struct cmd_ds_802_11_get_stat)); */ + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -270,28 +266,28 @@ static int wlan_ret_802_11_snmp_mib(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - lbs_deb_cmd("SNMP_RESP: value of the oid = %x, querytype=%x\n", oid, + lbs_deb_cmd("SNMP_RESP: oid 0x%x, querytype 0x%x\n", oid, querytype); - lbs_deb_cmd("SNMP_RESP: Buf size = %x\n", le16_to_cpu(smib->bufsize)); + lbs_deb_cmd("SNMP_RESP: Buf size %d\n", le16_to_cpu(smib->bufsize)); - if (querytype == cmd_act_get) { + if (querytype == CMD_ACT_GET) { switch (oid) { - case fragthresh_i: + case FRAGTHRESH_I: priv->adapter->fragthsd = le16_to_cpu(*((__le16 *)(smib->value))); - lbs_deb_cmd("SNMP_RESP: fragthsd =%u\n", + lbs_deb_cmd("SNMP_RESP: frag threshold %u\n", priv->adapter->fragthsd); break; - case rtsthresh_i: + case RTSTHRESH_I: priv->adapter->rtsthsd = le16_to_cpu(*((__le16 *)(smib->value))); - lbs_deb_cmd("SNMP_RESP: rtsthsd =%u\n", + lbs_deb_cmd("SNMP_RESP: rts threshold %u\n", priv->adapter->rtsthsd); break; - case short_retrylim_i: + case SHORT_RETRYLIM_I: priv->adapter->txretrycount = le16_to_cpu(*((__le16 *)(smib->value))); - lbs_deb_cmd("SNMP_RESP: txretrycount =%u\n", + lbs_deb_cmd("SNMP_RESP: tx retry count %u\n", priv->adapter->rtsthsd); break; default: @@ -314,18 +310,19 @@ static int wlan_ret_802_11_key_material(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); /* Copy the returned key to driver private data */ - if (action == cmd_act_get) { + if (action == CMD_ACT_GET) { u8 * buf_ptr = (u8 *) &pkeymaterial->keyParamSet; u8 * resp_end = (u8 *) (resp + le16_to_cpu(resp->size)); while (buf_ptr < resp_end) { struct MrvlIEtype_keyParamSet * pkeyparamset = (struct MrvlIEtype_keyParamSet *) buf_ptr; - struct WLAN_802_11_KEY * pkey; - u16 key_info = le16_to_cpu(pkeyparamset->keyinfo); + struct enc_key * pkey; u16 param_set_len = le16_to_cpu(pkeyparamset->length); - u8 * end; u16 key_len = le16_to_cpu(pkeyparamset->keylen); + u16 key_flags = le16_to_cpu(pkeyparamset->keyinfo); + u16 key_type = le16_to_cpu(pkeyparamset->keytypeid); + u8 * end; end = (u8 *) pkeyparamset + sizeof 
(pkeyparamset->type) + sizeof (pkeyparamset->length) @@ -334,20 +331,20 @@ static int wlan_ret_802_11_key_material(wlan_private * priv, if (end > resp_end) break; - if (key_info & KEY_INFO_WPA_UNICAST) + if (key_flags & KEY_INFO_WPA_UNICAST) pkey = &adapter->wpa_unicast_key; - else if (key_info & KEY_INFO_WPA_MCAST) + else if (key_flags & KEY_INFO_WPA_MCAST) pkey = &adapter->wpa_mcast_key; else break; /* Copy returned key into driver */ - memset(pkey, 0, sizeof(struct WLAN_802_11_KEY)); + memset(pkey, 0, sizeof(struct enc_key)); if (key_len > sizeof(pkey->key)) break; - pkey->type = le16_to_cpu(pkeyparamset->keytypeid); - pkey->flags = le16_to_cpu(pkeyparamset->keyinfo); - pkey->len = le16_to_cpu(pkeyparamset->keylen); + pkey->type = key_type; + pkey->flags = key_flags; + pkey->len = key_len; memcpy(pkey->key, pkeyparamset->key, pkey->len); buf_ptr = end + 1; @@ -382,28 +379,9 @@ static int wlan_ret_802_11_rf_tx_power(wlan_private * priv, adapter->txpowerlevel = le16_to_cpu(rtp->currentlevel); - lbs_deb_cmd("Current TxPower Level = %d\n", adapter->txpowerlevel); - - lbs_deb_enter(LBS_DEB_CMD); - return 0; -} - -static int wlan_ret_802_11_rf_antenna(wlan_private * priv, - struct cmd_ds_command *resp) -{ - struct cmd_ds_802_11_rf_antenna *pAntenna = &resp->params.rant; - wlan_adapter *adapter = priv->adapter; - u16 action = le16_to_cpu(pAntenna->action); - - if (action == cmd_act_get_rx) - adapter->rxantennamode = le16_to_cpu(pAntenna->antennamode); - - if (action == cmd_act_get_tx) - adapter->txantennamode = le16_to_cpu(pAntenna->antennamode); - - lbs_deb_cmd("RF_ANT_RESP: action = 0x%x, mode = 0x%04x\n", - action, le16_to_cpu(pAntenna->antennamode)); + lbs_deb_cmd("TX power currently %d\n", adapter->txpowerlevel); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -415,12 +393,12 @@ static int wlan_ret_802_11_rate_adapt_rateset(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - if (rates->action == cmd_act_get) { + if (rates->action == CMD_ACT_GET) { adapter->enablehwauto = le16_to_cpu(rates->enablehwauto); adapter->ratebitmap = le16_to_cpu(rates->bitmap); } - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -429,21 +407,19 @@ static int wlan_ret_802_11_data_rate(wlan_private * priv, { struct cmd_ds_802_11_data_rate *pdatarate = &resp->params.drate; wlan_adapter *adapter = priv->adapter; - u8 dot11datarate; lbs_deb_enter(LBS_DEB_CMD); - lbs_dbg_hex("DATA_RATE_RESP: data_rate- ", - (u8 *) pdatarate, sizeof(struct cmd_ds_802_11_data_rate)); + lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) pdatarate, + sizeof(struct cmd_ds_802_11_data_rate)); - dot11datarate = pdatarate->datarate[0]; - if (pdatarate->action == cpu_to_le16(cmd_act_get_tx_rate)) { - memcpy(adapter->libertas_supported_rates, pdatarate->datarate, - sizeof(adapter->libertas_supported_rates)); - } - adapter->datarate = libertas_index_to_data_rate(dot11datarate); + /* FIXME: get actual rates FW can do if this command actually returns + * all data rates supported. 
+ */ + adapter->cur_rate = libertas_fw_index_to_data_rate(pdatarate->rates[0]); + lbs_deb_cmd("DATA_RATE: current rate 0x%02x\n", adapter->cur_rate); - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -457,9 +433,9 @@ static int wlan_ret_802_11_rf_channel(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - if (action == cmd_opt_802_11_rf_channel_get + if (action == CMD_OPT_802_11_RF_CHANNEL_GET && adapter->curbssparams.channel != newchannel) { - lbs_deb_cmd("channel Switch: %d to %d\n", + lbs_deb_cmd("channel switch from %d to %d\n", adapter->curbssparams.channel, newchannel); /* Update the channel again */ @@ -476,6 +452,8 @@ static int wlan_ret_802_11_rssi(wlan_private * priv, struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_CMD); + /* store the non average value */ adapter->SNR[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->SNR); adapter->NF[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->noisefloor); @@ -491,9 +469,11 @@ static int wlan_ret_802_11_rssi(wlan_private * priv, CAL_RSSI(adapter->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE, adapter->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE); - lbs_deb_cmd("Beacon RSSI value = 0x%x\n", + lbs_deb_cmd("RSSI: beacon %d, avg %d\n", + adapter->RSSI[TYPE_BEACON][TYPE_NOAVG], adapter->RSSI[TYPE_BEACON][TYPE_AVG]); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -504,11 +484,11 @@ static int wlan_ret_802_11_eeprom_access(wlan_private * priv, struct wlan_ioctl_regrdwr *pbuf; pbuf = (struct wlan_ioctl_regrdwr *) adapter->prdeeprom; - lbs_deb_cmd("eeprom read len=%x\n", + lbs_deb_enter_args(LBS_DEB_CMD, "len %d", le16_to_cpu(resp->params.rdeeprom.bytecount)); if (pbuf->NOB < le16_to_cpu(resp->params.rdeeprom.bytecount)) { pbuf->NOB = 0; - lbs_deb_cmd("eeprom read return length is too big\n"); + lbs_deb_cmd("EEPROM read length too big\n"); return -1; } pbuf->NOB = le16_to_cpu(resp->params.rdeeprom.bytecount); @@ -516,9 +496,10 @@ static int wlan_ret_802_11_eeprom_access(wlan_private * priv, memcpy(&pbuf->value, (u8 *) & resp->params.rdeeprom.value, le16_to_cpu(resp->params.rdeeprom.bytecount)); - lbs_dbg_hex("adapter", (char *)&pbuf->value, + lbs_deb_hex(LBS_DEB_CMD, "EEPROM", (char *)&pbuf->value, le16_to_cpu(resp->params.rdeeprom.bytecount)); } + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -533,7 +514,7 @@ static int wlan_ret_get_log(wlan_private * priv, /* Stored little-endian */ memcpy(&adapter->logmsg, logmessage, sizeof(struct cmd_ds_802_11_get_log)); - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -546,12 +527,12 @@ static int libertas_ret_802_11_enable_rsn(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - if (enable_rsn->action == cpu_to_le16(cmd_act_get)) { + if (enable_rsn->action == cpu_to_le16(CMD_ACT_GET)) { if (pdata_buf) *pdata_buf = (u32) le16_to_cpu(enable_rsn->enable); } - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -563,135 +544,134 @@ static inline int handle_cmd_response(u16 respcmd, unsigned long flags; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_HOST); + switch (respcmd) { - case cmd_ret_mac_reg_access: - case cmd_ret_bbp_reg_access: - case cmd_ret_rf_reg_access: + case CMD_RET(CMD_MAC_REG_ACCESS): + case CMD_RET(CMD_BBP_REG_ACCESS): + case CMD_RET(CMD_RF_REG_ACCESS): ret = wlan_ret_reg_access(priv, respcmd, resp); break; - case cmd_ret_hw_spec_info: + case CMD_RET(CMD_GET_HW_SPEC): ret = wlan_ret_get_hw_spec(priv, resp); break; - case cmd_ret_802_11_scan: + case 
CMD_RET(CMD_802_11_SCAN): ret = libertas_ret_80211_scan(priv, resp); break; - case cmd_ret_802_11_get_log: + case CMD_RET(CMD_802_11_GET_LOG): ret = wlan_ret_get_log(priv, resp); break; - case cmd_ret_802_11_associate: - case cmd_ret_802_11_reassociate: + case CMD_RET_802_11_ASSOCIATE: + case CMD_RET(CMD_802_11_ASSOCIATE): + case CMD_RET(CMD_802_11_REASSOCIATE): ret = libertas_ret_80211_associate(priv, resp); break; - case cmd_ret_802_11_disassociate: - case cmd_ret_802_11_deauthenticate: + case CMD_RET(CMD_802_11_DISASSOCIATE): + case CMD_RET(CMD_802_11_DEAUTHENTICATE): ret = libertas_ret_80211_disassociate(priv, resp); break; - case cmd_ret_802_11_ad_hoc_start: - case cmd_ret_802_11_ad_hoc_join: + case CMD_RET(CMD_802_11_AD_HOC_START): + case CMD_RET(CMD_802_11_AD_HOC_JOIN): ret = libertas_ret_80211_ad_hoc_start(priv, resp); break; - case cmd_ret_802_11_stat: + case CMD_RET(CMD_802_11_GET_STAT): ret = wlan_ret_802_11_stat(priv, resp); break; - case cmd_ret_802_11_snmp_mib: + case CMD_RET(CMD_802_11_SNMP_MIB): ret = wlan_ret_802_11_snmp_mib(priv, resp); break; - case cmd_ret_802_11_rf_tx_power: + case CMD_RET(CMD_802_11_RF_TX_POWER): ret = wlan_ret_802_11_rf_tx_power(priv, resp); break; - case cmd_ret_802_11_set_afc: - case cmd_ret_802_11_get_afc: + case CMD_RET(CMD_802_11_SET_AFC): + case CMD_RET(CMD_802_11_GET_AFC): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.afc, sizeof(struct cmd_ds_802_11_afc)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_rf_antenna: - ret = wlan_ret_802_11_rf_antenna(priv, resp); - break; - case cmd_ret_mac_multicast_adr: - case cmd_ret_mac_control: - case cmd_ret_802_11_set_wep: - case cmd_ret_802_11_reset: - case cmd_ret_802_11_authenticate: - case cmd_ret_802_11_radio_control: - case cmd_ret_802_11_beacon_stop: + case CMD_RET(CMD_MAC_MULTICAST_ADR): + case CMD_RET(CMD_MAC_CONTROL): + case CMD_RET(CMD_802_11_SET_WEP): + case CMD_RET(CMD_802_11_RESET): + case CMD_RET(CMD_802_11_AUTHENTICATE): + case CMD_RET(CMD_802_11_RADIO_CONTROL): + case CMD_RET(CMD_802_11_BEACON_STOP): break; - case cmd_ret_802_11_enable_rsn: + case CMD_RET(CMD_802_11_ENABLE_RSN): ret = libertas_ret_802_11_enable_rsn(priv, resp); break; - case cmd_ret_802_11_data_rate: + case CMD_RET(CMD_802_11_DATA_RATE): ret = wlan_ret_802_11_data_rate(priv, resp); break; - case cmd_ret_802_11_rate_adapt_rateset: + case CMD_RET(CMD_802_11_RATE_ADAPT_RATESET): ret = wlan_ret_802_11_rate_adapt_rateset(priv, resp); break; - case cmd_ret_802_11_rf_channel: + case CMD_RET(CMD_802_11_RF_CHANNEL): ret = wlan_ret_802_11_rf_channel(priv, resp); break; - case cmd_ret_802_11_rssi: + case CMD_RET(CMD_802_11_RSSI): ret = wlan_ret_802_11_rssi(priv, resp); break; - case cmd_ret_802_11_mac_address: + case CMD_RET(CMD_802_11_MAC_ADDRESS): ret = wlan_ret_802_11_mac_address(priv, resp); break; - case cmd_ret_802_11_ad_hoc_stop: + case CMD_RET(CMD_802_11_AD_HOC_STOP): ret = libertas_ret_80211_ad_hoc_stop(priv, resp); break; - case cmd_ret_802_11_key_material: - lbs_deb_cmd("CMD_RESP: KEY_MATERIAL command response\n"); + case CMD_RET(CMD_802_11_KEY_MATERIAL): ret = wlan_ret_802_11_key_material(priv, resp); break; - case cmd_ret_802_11_eeprom_access: + case CMD_RET(CMD_802_11_EEPROM_ACCESS): ret = wlan_ret_802_11_eeprom_access(priv, resp); break; - case cmd_ret_802_11d_domain_info: + case CMD_RET(CMD_802_11D_DOMAIN_INFO): ret = libertas_ret_802_11d_domain_info(priv, resp); break; - case cmd_ret_802_11_sleep_params: + case 
CMD_RET(CMD_802_11_SLEEP_PARAMS): ret = wlan_ret_802_11_sleep_params(priv, resp); break; - case cmd_ret_802_11_inactivity_timeout: + case CMD_RET(CMD_802_11_INACTIVITY_TIMEOUT): spin_lock_irqsave(&adapter->driver_lock, flags); *((u16 *) adapter->cur_cmd->pdata_buf) = le16_to_cpu(resp->params.inactivity_timeout.timeout); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_tpc_cfg: + case CMD_RET(CMD_802_11_TPC_CFG): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.tpccfg, sizeof(struct cmd_ds_802_11_tpc_cfg)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_led_gpio_ctrl: + case CMD_RET(CMD_802_11_LED_GPIO_CTRL): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.ledgpio, sizeof(struct cmd_ds_802_11_led_ctrl)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_pwr_cfg: + case CMD_RET(CMD_802_11_PWR_CFG): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.pwrcfg, sizeof(struct cmd_ds_802_11_pwr_cfg)); @@ -699,39 +679,37 @@ static inline int handle_cmd_response(u16 respcmd, break; - case cmd_ret_get_tsf: + case CMD_RET(CMD_GET_TSF): spin_lock_irqsave(&adapter->driver_lock, flags); memcpy(priv->adapter->cur_cmd->pdata_buf, &resp->params.gettsf.tsfvalue, sizeof(u64)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_bt_access: + case CMD_RET(CMD_BT_ACCESS): spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd->pdata_buf) memcpy(adapter->cur_cmd->pdata_buf, &resp->params.bt.addr1, 2 * ETH_ALEN); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_fwt_access: + case CMD_RET(CMD_FWT_ACCESS): spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd->pdata_buf) memcpy(adapter->cur_cmd->pdata_buf, &resp->params.fwt, sizeof(resp->params.fwt)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_mesh_access: + case CMD_RET(CMD_MESH_ACCESS): if (adapter->cur_cmd->pdata_buf) memcpy(adapter->cur_cmd->pdata_buf, &resp->params.mesh, sizeof(resp->params.mesh)); break; - case cmd_rte_802_11_tx_rate_query: - priv->adapter->txrate = resp->params.txrate.txrate; - break; default: - lbs_deb_cmd("CMD_RESP: Unknown command response %#x\n", + lbs_deb_host("CMD_RESP: unknown cmd response 0x%04x\n", resp->command); break; } + lbs_deb_leave(LBS_DEB_HOST); return ret; } @@ -744,9 +722,7 @@ int libertas_process_rx_command(wlan_private * priv) ulong flags; u16 result; - lbs_deb_enter(LBS_DEB_CMD); - - lbs_deb_cmd("CMD_RESP: @ %lu\n", jiffies); + lbs_deb_enter(LBS_DEB_HOST); /* Now we got response from FW, cancel the command timer */ del_timer(&adapter->command_timer); @@ -755,25 +731,23 @@ int libertas_process_rx_command(wlan_private * priv) spin_lock_irqsave(&adapter->driver_lock, flags); if (!adapter->cur_cmd) { - lbs_deb_cmd("CMD_RESP: NULL cur_cmd=%p\n", adapter->cur_cmd); + lbs_deb_host("CMD_RESP: cur_cmd is NULL\n"); ret = -1; spin_unlock_irqrestore(&adapter->driver_lock, flags); goto done; } resp = (struct cmd_ds_command *)(adapter->cur_cmd->bufvirtualaddr); - lbs_dbg_hex("CMD_RESP:", adapter->cur_cmd->bufvirtualaddr, - priv->upld_len); - respcmd = le16_to_cpu(resp->command); - result = le16_to_cpu(resp->result); - lbs_deb_cmd("CMD_RESP: %x result: %d length: %d\n", respcmd, - result, priv->upld_len); + lbs_deb_host("CMD_RESP: response 0x%04x, size %d, jiffies %lu\n", + 
respcmd, priv->upld_len, jiffies); + lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", adapter->cur_cmd->bufvirtualaddr, + priv->upld_len); if (!(respcmd & 0x8000)) { - lbs_deb_cmd("Invalid response to command!"); + lbs_deb_host("invalid response!\n"); adapter->cur_cmd_retcode = -1; __libertas_cleanup_and_insert_cmd(priv, adapter->cur_cmd); adapter->nr_cmd_pending--; @@ -786,16 +760,16 @@ int libertas_process_rx_command(wlan_private * priv) /* Store the response code to cur_cmd_retcode. */ adapter->cur_cmd_retcode = result;; - if (respcmd == cmd_ret_802_11_ps_mode) { + if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { struct cmd_ds_802_11_ps_mode *psmode = &resp->params.psmode; u16 action = le16_to_cpu(psmode->action); - lbs_deb_cmd( - "CMD_RESP: PS_MODE cmd reply result=%#x action=0x%X\n", + lbs_deb_host( + "CMD_RESP: PS_MODE cmd reply result 0x%x, action 0x%x\n", result, action); if (result) { - lbs_deb_cmd("CMD_RESP: PS command failed- %#x \n", + lbs_deb_host("CMD_RESP: PS command failed with 0x%x\n", result); /* * We should not re-try enter-ps command in @@ -803,20 +777,20 @@ int libertas_process_rx_command(wlan_private * priv) * libertas_execute_next_command(). */ if (adapter->mode == IW_MODE_ADHOC && - action == cmd_subcmd_enter_ps) - adapter->psmode = wlan802_11powermodecam; - } else if (action == cmd_subcmd_enter_ps) { + action == CMD_SUBCMD_ENTER_PS) + adapter->psmode = WLAN802_11POWERMODECAM; + } else if (action == CMD_SUBCMD_ENTER_PS) { adapter->needtowakeup = 0; adapter->psstate = PS_STATE_AWAKE; - lbs_deb_cmd("CMD_RESP: Enter_PS command response\n"); - if (adapter->connect_status != libertas_connected) { + lbs_deb_host("CMD_RESP: ENTER_PS command response\n"); + if (adapter->connect_status != LIBERTAS_CONNECTED) { /* * When Deauth Event received before Enter_PS command * response, We need to wake up the firmware. 
*/ - lbs_deb_cmd( - "Disconnected, Going to invoke libertas_ps_wakeup\n"); + lbs_deb_host( + "disconnected, invoking libertas_ps_wakeup\n"); spin_unlock_irqrestore(&adapter->driver_lock, flags); mutex_unlock(&adapter->lock); @@ -824,12 +798,12 @@ int libertas_process_rx_command(wlan_private * priv) mutex_lock(&adapter->lock); spin_lock_irqsave(&adapter->driver_lock, flags); } - } else if (action == cmd_subcmd_exit_ps) { + } else if (action == CMD_SUBCMD_EXIT_PS) { adapter->needtowakeup = 0; adapter->psstate = PS_STATE_FULL_POWER; - lbs_deb_cmd("CMD_RESP: Exit_PS command response\n"); + lbs_deb_host("CMD_RESP: EXIT_PS command response\n"); } else { - lbs_deb_cmd("CMD_RESP: PS- action=0x%X\n", action); + lbs_deb_host("CMD_RESP: PS action 0x%X\n", action); } __libertas_cleanup_and_insert_cmd(priv, adapter->cur_cmd); @@ -843,22 +817,22 @@ int libertas_process_rx_command(wlan_private * priv) if (adapter->cur_cmd->cmdflags & CMD_F_HOSTCMD) { /* Copy the response back to response buffer */ - memcpy(adapter->cur_cmd->pdata_buf, resp, resp->size); - + memcpy(adapter->cur_cmd->pdata_buf, resp, + le16_to_cpu(resp->size)); adapter->cur_cmd->cmdflags &= ~CMD_F_HOSTCMD; } /* If the command is not successful, cleanup and return failure */ if ((result != 0 || !(respcmd & 0x8000))) { - lbs_deb_cmd("CMD_RESP: command reply %#x result=%#x\n", - respcmd, result); + lbs_deb_host("CMD_RESP: error 0x%04x in command reply 0x%04x\n", + result, respcmd); /* * Handling errors here */ switch (respcmd) { - case cmd_ret_hw_spec_info: - case cmd_ret_802_11_reset: - lbs_deb_cmd("CMD_RESP: Reset command failed\n"); + case CMD_RET(CMD_GET_HW_SPEC): + case CMD_RET(CMD_802_11_RESET): + lbs_deb_host("CMD_RESP: reset failed\n"); break; } @@ -888,7 +862,7 @@ int libertas_process_rx_command(wlan_private * priv) done: mutex_unlock(&adapter->lock); - lbs_deb_enter_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } @@ -898,13 +872,13 @@ int libertas_process_event(wlan_private * priv) wlan_adapter *adapter = priv->adapter; u32 eventcause; + lbs_deb_enter(LBS_DEB_CMD); + spin_lock_irq(&adapter->driver_lock); eventcause = adapter->eventcause; spin_unlock_irq(&adapter->driver_lock); - lbs_deb_enter(LBS_DEB_CMD); - - lbs_deb_cmd("EVENT Cause %x\n", eventcause); + lbs_deb_cmd("event cause 0x%x\n", eventcause); switch (eventcause >> SBI_EVENT_CAUSE_SHIFT) { case MACREG_INT_CODE_LINK_SENSED: @@ -912,28 +886,27 @@ int libertas_process_event(wlan_private * priv) break; case MACREG_INT_CODE_DEAUTHENTICATED: - lbs_deb_cmd("EVENT: Deauthenticated\n"); + lbs_deb_cmd("EVENT: deauthenticated\n"); libertas_mac_event_disconnected(priv); break; case MACREG_INT_CODE_DISASSOCIATED: - lbs_deb_cmd("EVENT: Disassociated\n"); + lbs_deb_cmd("EVENT: disassociated\n"); libertas_mac_event_disconnected(priv); break; case MACREG_INT_CODE_LINK_LOSE_NO_SCAN: - lbs_deb_cmd("EVENT: Link lost\n"); + lbs_deb_cmd("EVENT: link lost\n"); libertas_mac_event_disconnected(priv); break; case MACREG_INT_CODE_PS_SLEEP: - lbs_deb_cmd("EVENT: SLEEP\n"); - lbs_deb_cmd("_"); + lbs_deb_cmd("EVENT: sleep\n"); /* handle unexpected PS SLEEP event */ if (adapter->psstate == PS_STATE_FULL_POWER) { lbs_deb_cmd( - "EVENT: In FULL POWER mode - ignore PS SLEEP\n"); + "EVENT: in FULL POWER mode, ignoreing PS_SLEEP\n"); break; } adapter->psstate = PS_STATE_PRE_SLEEP; @@ -943,8 +916,7 @@ int libertas_process_event(wlan_private * priv) break; case MACREG_INT_CODE_PS_AWAKE: - lbs_deb_cmd("EVENT: AWAKE \n"); - lbs_deb_cmd("|"); + lbs_deb_cmd("EVENT: 
awake\n"); /* handle unexpected PS AWAKE event */ if (adapter->psstate == PS_STATE_FULL_POWER) { @@ -962,7 +934,7 @@ int libertas_process_event(wlan_private * priv) * adapter->needtowakeup will be set to FALSE * in libertas_ps_wakeup() */ - lbs_deb_cmd("Waking up...\n"); + lbs_deb_cmd("waking up ...\n"); libertas_ps_wakeup(priv, 0); } break; @@ -981,38 +953,43 @@ int libertas_process_event(wlan_private * priv) break; case MACREG_INT_CODE_ADHOC_BCN_LOST: - lbs_deb_cmd("EVENT: HWAC - ADHOC BCN LOST\n"); + lbs_deb_cmd("EVENT: ADHOC beacon lost\n"); break; case MACREG_INT_CODE_RSSI_LOW: - lbs_pr_alert( "EVENT: RSSI_LOW\n"); + lbs_pr_alert("EVENT: rssi low\n"); break; case MACREG_INT_CODE_SNR_LOW: - lbs_pr_alert( "EVENT: SNR_LOW\n"); + lbs_pr_alert("EVENT: snr low\n"); break; case MACREG_INT_CODE_MAX_FAIL: - lbs_pr_alert( "EVENT: MAX_FAIL\n"); + lbs_pr_alert("EVENT: max fail\n"); break; case MACREG_INT_CODE_RSSI_HIGH: - lbs_pr_alert( "EVENT: RSSI_HIGH\n"); + lbs_pr_alert("EVENT: rssi high\n"); break; case MACREG_INT_CODE_SNR_HIGH: - lbs_pr_alert( "EVENT: SNR_HIGH\n"); + lbs_pr_alert("EVENT: snr high\n"); break; case MACREG_INT_CODE_MESH_AUTO_STARTED: - lbs_pr_alert( "EVENT: MESH_AUTO_STARTED\n"); - adapter->connect_status = libertas_connected ; + /* Ignore spurious autostart events if autostart is disabled */ + if (!priv->mesh_autostart_enabled) { + lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n"); + break; + } + lbs_pr_info("EVENT: MESH_AUTO_STARTED\n"); + adapter->connect_status = LIBERTAS_CONNECTED; if (priv->mesh_open == 1) { - netif_wake_queue(priv->mesh_dev) ; - netif_carrier_on(priv->mesh_dev) ; + netif_wake_queue(priv->mesh_dev); + netif_carrier_on(priv->mesh_dev); } - adapter->mode = IW_MODE_ADHOC ; + adapter->mode = IW_MODE_ADHOC; schedule_work(&priv->sync_channel); break; default: - lbs_pr_alert( "EVENT: unknown event id: %#x\n", + lbs_pr_alert("EVENT: unknown event id 0x%04x\n", eventcause >> SBI_EVENT_CAUSE_SHIFT); break; } @@ -1021,6 +998,6 @@ int libertas_process_event(wlan_private * priv) adapter->eventcause = 0; spin_unlock_irq(&adapter->driver_lock); - lbs_deb_enter_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; } diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 715cbdaa1d4b..0bda0b511910 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -3,6 +3,7 @@ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/mm.h> +#include <linux/string.h> #include <net/iw_handler.h> #include "dev.h" @@ -63,27 +64,27 @@ static ssize_t libertas_getscantable(struct file *file, char __user *userbuf, int numscansdone = 0, res; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; + DECLARE_MAC_BUF(mac); struct bss_descriptor * iter_bss; pos += snprintf(buf+pos, len-pos, - "# | ch | ss | bssid | cap | TSF | Qual | SSID \n"); + "# | ch | rssi | bssid | cap | Qual | SSID \n"); mutex_lock(&priv->adapter->lock); list_for_each_entry (iter_bss, &priv->adapter->network_list, list) { - u16 cap; + u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS); + u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY); + u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT); - memcpy(&cap, &iter_bss->cap, sizeof(cap)); pos += snprintf(buf+pos, len-pos, - "%02u| %03d | %03ld | " MAC_FMT " |", + "%02u| %03d | %04ld | %s |", numscansdone, iter_bss->channel, iter_bss->rssi, - MAC_ARG(iter_bss->bssid)); - 
pos += snprintf(buf+pos, len-pos, " %04x-", cap); + print_mac(mac, iter_bss->bssid)); + pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability); pos += snprintf(buf+pos, len-pos, "%c%c%c |", - iter_bss->cap.ibss ? 'A' : 'I', - iter_bss->cap.privacy ? 'P' : ' ', - iter_bss->cap.spectrummgmt ? 'S' : ' '); - pos += snprintf(buf+pos, len-pos, " %08llx |", iter_bss->networktsf); - pos += snprintf(buf+pos, len-pos, " %d |", SCAN_RSSI(iter_bss->rssi)); + ibss ? 'A' : 'I', privacy ? 'P' : ' ', + spectrum_mgmt ? 'S' : ' '); + pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi)); pos += snprintf(buf+pos, len-pos, " %s\n", escape_essid(iter_bss->ssid, iter_bss->ssid_len)); @@ -125,9 +126,9 @@ static ssize_t libertas_sleepparams_write(struct file *file, priv->adapter->sp.sp_reserved = p6; res = libertas_prepare_and_send_command(priv, - cmd_802_11_sleep_params, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_SLEEP_PARAMS, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (!res) res = count; @@ -150,9 +151,9 @@ static ssize_t libertas_sleepparams_read(struct file *file, char __user *userbuf char *buf = (char *)addr; res = libertas_prepare_and_send_command(priv, - cmd_802_11_sleep_params, - cmd_act_get, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_SLEEP_PARAMS, + CMD_ACT_GET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (res) { res = -EFAULT; goto out_unlock; @@ -205,7 +206,7 @@ static int libertas_parse_chan(char *buf, size_t count, if (!start) return -EINVAL; start += 5; - end = strstr(start, " "); + end = strchr(start, ' '); if (!end) end = buf + count; hold = kzalloc((end - start)+1, GFP_KERNEL); @@ -256,7 +257,7 @@ static void libertas_parse_ssid(char *buf, size_t count, if (!hold) return; hold += 5; - end = strstr(hold, " "); + end = strchr(hold, ' '); if (!end) end = buf + count - 1; @@ -386,7 +387,7 @@ static int libertas_event_initcmd(wlan_private *priv, void **response_buf, struct cmd_ctrl_node **cmdnode, struct cmd_ds_command **cmd) { - u16 wait_option = cmd_option_waitforrsp; + u16 wait_option = CMD_OPTION_WAITFORRSP; if (!(*cmdnode = libertas_get_free_cmd_ctrl_node(priv))) { lbs_deb_debugfs("failed libertas_get_free_cmd_ctrl_node\n"); @@ -402,7 +403,7 @@ static int libertas_event_initcmd(wlan_private *priv, void **response_buf, (*cmdnode)->cmdflags |= CMD_F_HOSTCMD; (*cmdnode)->cmdwaitqwoken = 0; *cmd = (struct cmd_ds_command *)(*cmdnode)->bufvirtualaddr; - (*cmd)->command = cpu_to_le16(cmd_802_11_subscribe_event); + (*cmd)->command = cpu_to_le16(CMD_802_11_SUBSCRIBE_EVENT); (*cmd)->seqnum = cpu_to_le16(++priv->adapter->seqnum); (*cmd)->result = 0; return 0; @@ -429,10 +430,10 @@ static ssize_t libertas_lowrssi_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -447,7 +448,7 @@ static ssize_t libertas_lowrssi_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -493,10 +494,10 @@ static u16 
libertas_get_events_bitmap(wlan_private *priv) return res; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -511,7 +512,7 @@ static u16 libertas_get_events_bitmap(wlan_private *priv) return 0; } - if (pcmdptr->command != cmd_ret_802_11_subscribe_event) { + if (le16_to_cpu(pcmdptr->command) != CMD_RET(CMD_802_11_SUBSCRIBE_EVENT)) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); return 0; @@ -559,7 +560,7 @@ static ssize_t libertas_lowrssi_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_rssithreshold)); @@ -575,7 +576,7 @@ static ssize_t libertas_lowrssi_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -591,7 +592,7 @@ static ssize_t libertas_lowrssi_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -625,10 +626,10 @@ static ssize_t libertas_lowsnr_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -644,7 +645,7 @@ static ssize_t libertas_lowsnr_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -712,7 +713,7 @@ static ssize_t libertas_lowsnr_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_snrthreshold)); @@ -727,7 +728,7 @@ static ssize_t libertas_lowsnr_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -743,7 +744,7 @@ static ssize_t libertas_lowsnr_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != 
cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -778,10 +779,10 @@ static ssize_t libertas_failcount_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -797,7 +798,7 @@ static ssize_t libertas_failcount_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -864,7 +865,7 @@ static ssize_t libertas_failcount_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_failurecount)); @@ -879,7 +880,7 @@ static ssize_t libertas_failcount_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -895,7 +896,7 @@ static ssize_t libertas_failcount_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -929,10 +930,10 @@ static ssize_t libertas_bcnmiss_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -948,7 +949,7 @@ static ssize_t libertas_bcnmiss_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); free_page(addr); kfree(response_buf); @@ -1015,7 +1016,7 @@ static ssize_t libertas_bcnmiss_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_beaconsmissed)); @@ -1029,7 +1030,7 @@ static ssize_t libertas_bcnmiss_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ 
wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1045,7 +1046,7 @@ static ssize_t libertas_bcnmiss_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); free_page(addr); kfree(response_buf); @@ -1079,10 +1080,10 @@ static ssize_t libertas_highrssi_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1098,7 +1099,7 @@ static ssize_t libertas_highrssi_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -1166,7 +1167,7 @@ static ssize_t libertas_highrssi_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_rssithreshold)); @@ -1181,7 +1182,7 @@ static ssize_t libertas_highrssi_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1196,7 +1197,7 @@ static ssize_t libertas_highrssi_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); return 0; @@ -1229,10 +1230,10 @@ static ssize_t libertas_highsnr_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1248,7 +1249,7 @@ static ssize_t libertas_highsnr_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -1316,7 +1317,7 @@ static ssize_t libertas_highsnr_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_snrthreshold)); @@ -1331,7 +1332,7 @@ static ssize_t libertas_highsnr_write(struct file *file, 
event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1347,7 +1348,7 @@ static ssize_t libertas_highsnr_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -1375,8 +1376,8 @@ static ssize_t libertas_rdmac_read(struct file *file, char __user *userbuf, offval.value = 0; ret = libertas_prepare_and_send_command(priv, - cmd_mac_reg_access, 0, - cmd_option_waitforrsp, 0, &offval); + CMD_MAC_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", priv->mac_offset, adapter->offsetvalue.value); @@ -1433,8 +1434,8 @@ static ssize_t libertas_wrmac_write(struct file *file, offval.offset = offset; offval.value = value; res = libertas_prepare_and_send_command(priv, - cmd_mac_reg_access, 1, - cmd_option_waitforrsp, 0, &offval); + CMD_MAC_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); res = count; @@ -1458,8 +1459,8 @@ static ssize_t libertas_rdbbp_read(struct file *file, char __user *userbuf, offval.value = 0; ret = libertas_prepare_and_send_command(priv, - cmd_bbp_reg_access, 0, - cmd_option_waitforrsp, 0, &offval); + CMD_BBP_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", priv->bbp_offset, adapter->offsetvalue.value); @@ -1517,8 +1518,8 @@ static ssize_t libertas_wrbbp_write(struct file *file, offval.offset = offset; offval.value = value; res = libertas_prepare_and_send_command(priv, - cmd_bbp_reg_access, 1, - cmd_option_waitforrsp, 0, &offval); + CMD_BBP_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); res = count; @@ -1542,8 +1543,8 @@ static ssize_t libertas_rdrf_read(struct file *file, char __user *userbuf, offval.value = 0; ret = libertas_prepare_and_send_command(priv, - cmd_rf_reg_access, 0, - cmd_option_waitforrsp, 0, &offval); + CMD_RF_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", priv->rf_offset, adapter->offsetvalue.value); @@ -1601,8 +1602,8 @@ static ssize_t libertas_wrrf_write(struct file *file, offval.offset = offset; offval.value = value; res = libertas_prepare_and_send_command(priv, - cmd_rf_reg_access, 1, - cmd_option_waitforrsp, 0, &offval); + CMD_RF_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); res = count; @@ -1839,7 +1840,7 @@ static ssize_t wlan_debugfs_write(struct file *f, const char __user *buf, char *p2; struct debug_data *d = (struct debug_data *)f->private_data; - pdata = (char *)kmalloc(cnt, GFP_KERNEL); + pdata = kmalloc(cnt, GFP_KERNEL); if (pdata == NULL) return 0; diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h index 40f56bb1eac8..87fea9d5b90a 100644 --- a/drivers/net/wireless/libertas/decl.h +++ b/drivers/net/wireless/libertas/decl.h @@ -15,14 +15,9 @@ struct wlan_private; struct sk_buff; struct net_device; -extern char *libertas_fw_name; - -void libertas_free_adapter(wlan_private * priv); int libertas_set_mac_packet_filter(wlan_private * priv); -int libertas_send_null_packet(wlan_private * priv, u8 pwr_mgmt); void 
libertas_send_tx_feedback(wlan_private * priv); -u8 libertas_check_last_packet_indication(wlan_private * priv); int libertas_free_cmd_buffer(wlan_private * priv); struct cmd_ctrl_node; @@ -44,8 +39,8 @@ int libertas_execute_next_command(wlan_private * priv); int libertas_process_event(wlan_private * priv); void libertas_interrupt(struct net_device *); int libertas_set_radio_control(wlan_private * priv); -u32 libertas_index_to_data_rate(u8 index); -u8 libertas_data_rate_to_index(u32 rate); +u32 libertas_fw_index_to_data_rate(u8 index); +u8 libertas_data_rate_to_fw_index(u32 rate); void libertas_get_fwversion(wlan_adapter * adapter, char *fwversion, int maxlen); void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb); @@ -53,8 +48,6 @@ void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb); /** The proc fs interface */ int libertas_process_rx_command(wlan_private * priv); int libertas_process_tx(wlan_private * priv, struct sk_buff *skb); -void libertas_cleanup_and_insert_cmd(wlan_private * priv, - struct cmd_ctrl_node *ptempcmd); void __libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node *ptempcmd); @@ -75,17 +68,15 @@ void libertas_mac_event_disconnected(wlan_private * priv); void libertas_send_iwevcustom_event(wlan_private * priv, s8 * str); -/* fw.c */ -int libertas_init_fw(wlan_private * priv, char *fw_name); - /* main.c */ struct chan_freq_power *libertas_get_region_cfp_table(u8 region, u8 band, int *cfp_no); wlan_private *libertas_add_card(void *card, struct device *dmdev); -int libertas_activate_card(wlan_private *priv, char *fw_name); int libertas_remove_card(wlan_private *priv); +int libertas_start_card(wlan_private *priv); +int libertas_stop_card(wlan_private *priv); int libertas_add_mesh(wlan_private *priv, struct device *dev); void libertas_remove_mesh(wlan_private *priv); - +int libertas_reset_device(wlan_private *priv); #endif /* _WLAN_DECL_H_ */ diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h index 4dd43e59bda0..7c5b7f7b45db 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/libertas/defs.h @@ -43,43 +43,43 @@ extern unsigned int libertas_debug; #ifdef DEBUG -#define LBS_DEB_LL(grp, fmt, args...) \ +#define LBS_DEB_LL(grp, grpnam, fmt, args...) \ do { if ((libertas_debug & (grp)) == (grp)) \ - printk(KERN_DEBUG DRV_NAME "%s: " fmt, \ + printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \ in_interrupt() ? " (INT)" : "", ## args); } while (0) #else -#define LBS_DEB_LL(grp, fmt, args...) do {} while (0) +#define LBS_DEB_LL(grp, grpnam, fmt, args...) do {} while (0) #endif #define lbs_deb_enter(grp) \ - LBS_DEB_LL(grp | LBS_DEB_ENTER, "%s():%d enter\n", __FUNCTION__, __LINE__); + LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s():%d\n", __FUNCTION__, __LINE__); #define lbs_deb_enter_args(grp, fmt, args...) \ - LBS_DEB_LL(grp | LBS_DEB_ENTER, "%s(" fmt "):%d\n", __FUNCTION__, ## args, __LINE__); + LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s(" fmt "):%d\n", __FUNCTION__, ## args, __LINE__); #define lbs_deb_leave(grp) \ - LBS_DEB_LL(grp | LBS_DEB_LEAVE, "%s():%d leave\n", __FUNCTION__, __LINE__); + LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s():%d\n", __FUNCTION__, __LINE__); #define lbs_deb_leave_args(grp, fmt, args...) \ - LBS_DEB_LL(grp | LBS_DEB_LEAVE, "%s():%d leave, " fmt "\n", \ + LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s():%d, " fmt "\n", \ __FUNCTION__, __LINE__, ##args); -#define lbs_deb_main(fmt, args...) 
LBS_DEB_LL(LBS_DEB_MAIN, fmt, ##args) -#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, fmt, ##args) -#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, fmt, ##args) -#define lbs_deb_wext(fmt, args...) LBS_DEB_LL(LBS_DEB_WEXT, fmt, ##args) -#define lbs_deb_ioctl(fmt, args...) LBS_DEB_LL(LBS_DEB_IOCTL, fmt, ##args) -#define lbs_deb_scan(fmt, args...) LBS_DEB_LL(LBS_DEB_SCAN, fmt, ##args) -#define lbs_deb_assoc(fmt, args...) LBS_DEB_LL(LBS_DEB_ASSOC, fmt, ##args) -#define lbs_deb_join(fmt, args...) LBS_DEB_LL(LBS_DEB_JOIN, fmt, ##args) -#define lbs_deb_11d(fmt, args...) LBS_DEB_LL(LBS_DEB_11D, fmt, ##args) -#define lbs_deb_debugfs(fmt, args...) LBS_DEB_LL(LBS_DEB_DEBUGFS, fmt, ##args) -#define lbs_deb_ethtool(fmt, args...) LBS_DEB_LL(LBS_DEB_ETHTOOL, fmt, ##args) -#define lbs_deb_host(fmt, args...) LBS_DEB_LL(LBS_DEB_HOST, fmt, ##args) -#define lbs_deb_cmd(fmt, args...) LBS_DEB_LL(LBS_DEB_CMD, fmt, ##args) -#define lbs_deb_rx(fmt, args...) LBS_DEB_LL(LBS_DEB_RX, fmt, ##args) -#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, fmt, ##args) -#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, fmt, ##args) -#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, fmt, ##args) -#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, "%s:" fmt, (dev)->bus_id, ##args) -#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, fmt, ##args) -#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, fmt, ##args) +#define lbs_deb_main(fmt, args...) LBS_DEB_LL(LBS_DEB_MAIN, " main", fmt, ##args) +#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, " net", fmt, ##args) +#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, " mesh", fmt, ##args) +#define lbs_deb_wext(fmt, args...) LBS_DEB_LL(LBS_DEB_WEXT, " wext", fmt, ##args) +#define lbs_deb_ioctl(fmt, args...) LBS_DEB_LL(LBS_DEB_IOCTL, " ioctl", fmt, ##args) +#define lbs_deb_scan(fmt, args...) LBS_DEB_LL(LBS_DEB_SCAN, " scan", fmt, ##args) +#define lbs_deb_assoc(fmt, args...) LBS_DEB_LL(LBS_DEB_ASSOC, " assoc", fmt, ##args) +#define lbs_deb_join(fmt, args...) LBS_DEB_LL(LBS_DEB_JOIN, " join", fmt, ##args) +#define lbs_deb_11d(fmt, args...) LBS_DEB_LL(LBS_DEB_11D, " 11d", fmt, ##args) +#define lbs_deb_debugfs(fmt, args...) LBS_DEB_LL(LBS_DEB_DEBUGFS, " debugfs", fmt, ##args) +#define lbs_deb_ethtool(fmt, args...) LBS_DEB_LL(LBS_DEB_ETHTOOL, " ethtool", fmt, ##args) +#define lbs_deb_host(fmt, args...) LBS_DEB_LL(LBS_DEB_HOST, " host", fmt, ##args) +#define lbs_deb_cmd(fmt, args...) LBS_DEB_LL(LBS_DEB_CMD, " cmd", fmt, ##args) +#define lbs_deb_rx(fmt, args...) LBS_DEB_LL(LBS_DEB_RX, " rx", fmt, ##args) +#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, " tx", fmt, ##args) +#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, " fw", fmt, ##args) +#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usb", fmt, ##args) +#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args) +#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) +#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) #define lbs_pr_info(format, args...) 
\ printk(KERN_INFO DRV_NAME": " format, ## args) @@ -89,22 +89,28 @@ do { if ((libertas_debug & (grp)) == (grp)) \ printk(KERN_ALERT DRV_NAME": " format, ## args) #ifdef DEBUG -static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) +static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len) { int i = 0; - if (!(libertas_debug & LBS_DEB_HEX)) - return; - - printk(KERN_DEBUG "%s: ", prompt); - for (i = 1; i <= len; i++) { - printk("%02x ", (u8) * buf); - buf++; + if (len && + (libertas_debug & LBS_DEB_HEX) && + (libertas_debug & grp)) + { + for (i = 1; i <= len; i++) { + if ((i & 0xf) == 1) { + if (i != 1) + printk("\n"); + printk(DRV_NAME " %s: ", prompt); + } + printk("%02x ", (u8) * buf); + buf++; + } + printk("\n"); } - printk("\n"); } #else -#define lbs_dbg_hex(x,y,z) do {} while (0) +#define lbs_deb_hex(grp,prompt,buf,len) do {} while (0) #endif @@ -149,17 +155,18 @@ static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) #define MRVDRV_CHANNELS_PER_SCAN 4 #define MRVDRV_MAX_CHANNELS_PER_SCAN 14 -#define MRVDRV_DEBUG_RX_PATH 0x00000001 -#define MRVDRV_DEBUG_TX_PATH 0x00000002 - #define MRVDRV_MIN_BEACON_INTERVAL 20 #define MRVDRV_MAX_BEACON_INTERVAL 1000 #define MRVDRV_BEACON_INTERVAL 100 +#define MARVELL_MESH_IE_LENGTH 9 + /** INT status Bit Definition*/ -#define his_cmddnldrdy 0x01 -#define his_cardevent 0x02 -#define his_cmdupldrdy 0x04 +#define MRVDRV_TX_DNLD_RDY 0x0001 +#define MRVDRV_RX_UPLD_RDY 0x0002 +#define MRVDRV_CMD_DNLD_RDY 0x0004 +#define MRVDRV_CMD_UPLD_RDY 0x0008 +#define MRVDRV_CARDEVENT 0x0010 #define SBI_EVENT_CAUSE_SHIFT 3 @@ -218,9 +225,6 @@ static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) #define CMD_F_HOSTCMD (1 << 0) #define FW_CAPINFO_WPA (1 << 0) -/** WPA key LENGTH*/ -#define MRVL_MAX_KEY_WPA_KEY_LENGTH 32 - #define KEY_LEN_WPA_AES 16 #define KEY_LEN_WPA_TKIP 32 #define KEY_LEN_WEP_104 13 @@ -247,10 +251,7 @@ static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) ((((int)(AVG) * (N -1)) + ((u16)(SNRNF) * \ AVG_SCALE)) / N)) -#define B_SUPPORTED_RATES 8 -#define G_SUPPORTED_RATES 14 - -#define WLAN_SUPPORTED_RATES 14 +#define MAX_RATES 14 #define MAX_LEDS 8 @@ -264,11 +265,7 @@ typedef struct _wlan_adapter wlan_adapter; extern const char libertas_driver_version[]; extern u16 libertas_region_code_to_index[MRVDRV_MAX_REGION_CODE]; -extern u8 libertas_supported_rates[G_SUPPORTED_RATES]; - -extern u8 libertas_adhoc_rates_g[G_SUPPORTED_RATES]; - -extern u8 libertas_adhoc_rates_b[4]; +extern u8 libertas_bg_rates[MAX_RATES]; /** ENUM definition*/ /** SNRNF_TYPE */ @@ -287,11 +284,11 @@ enum SNRNF_DATA { /** WLAN_802_11_POWER_MODE */ enum WLAN_802_11_POWER_MODE { - wlan802_11powermodecam, - wlan802_11powermodemax_psp, - wlan802_11Powermodefast_psp, + WLAN802_11POWERMODECAM, + WLAN802_11POWERMODEMAX_PSP, + WLAN802_11POWERMODEFAST_PSP, /*not a real mode, defined as an upper bound */ - wlan802_11powemodemax + WLAN802_11POWEMODEMAX }; /** PS_STATE */ @@ -311,14 +308,14 @@ enum DNLD_STATE { /** WLAN_MEDIA_STATE */ enum WLAN_MEDIA_STATE { - libertas_connected, - libertas_disconnected + LIBERTAS_CONNECTED, + LIBERTAS_DISCONNECTED }; /** WLAN_802_11_PRIVACY_FILTER */ enum WLAN_802_11_PRIVACY_FILTER { - wlan802_11privfilteracceptall, - wlan802_11privfilter8021xWEP + WLAN802_11PRIVFILTERACCEPTALL, + WLAN802_11PRIVFILTER8021XWEP }; /** mv_ms_type */ @@ -331,23 +328,23 @@ enum mv_ms_type { /** SNMP_MIB_INDEX_e */ enum SNMP_MIB_INDEX_e { - desired_bsstype_i = 0, - op_rateset_i, - bcnperiod_i, - dtimperiod_i, - 
assocrsp_timeout_i, - rtsthresh_i, - short_retrylim_i, - long_retrylim_i, - fragthresh_i, - dot11d_i, - dot11h_i, - manufid_i, - prodID_i, - manuf_oui_i, - manuf_name_i, - manuf_prodname_i, - manuf_prodver_i, + DESIRED_BSSTYPE_I = 0, + OP_RATESET_I, + BCNPERIOD_I, + DTIMPERIOD_I, + ASSOCRSP_TIMEOUT_I, + RTSTHRESH_I, + SHORT_RETRYLIM_I, + LONG_RETRYLIM_I, + FRAGTHRESH_I, + DOT11D_I, + DOT11H_I, + MANUFID_I, + PRODID_I, + MANUF_OUI_I, + MANUF_NAME_I, + MANUF_PRODNAME_I, + MANUF_PRODVER_I, }; /** KEY_TYPE_ID */ diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 785192b884bc..1fb807aa91b9 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -14,7 +14,6 @@ #include "defs.h" #include "scan.h" -#include "thread.h" extern struct ethtool_ops libertas_ethtool_ops; @@ -73,10 +72,8 @@ struct current_bss_params { u8 band; /** channel */ u8 channel; - /** number of rates supported */ - int numofrates; - /** supported rates*/ - u8 datarates[WLAN_SUPPORTED_RATES]; + /** zero-terminated array of supported data rates */ + u8 rates[MAX_RATES + 1]; }; /** sleep_params */ @@ -106,6 +103,8 @@ struct _wlan_private { int open; int mesh_open; int infra_open; + int mesh_autostart_enabled; + __le16 boot2_version; char name[DEV_NAME_LEN]; @@ -114,7 +113,9 @@ struct _wlan_private { struct net_device *dev; struct net_device_stats stats; - struct net_device *mesh_dev ; /* Virtual device */ + struct net_device *mesh_dev; /* Virtual device */ + struct net_device *rtap_net_dev; + struct ieee80211_device *ieee; struct iw_statistics wstats; struct wlan_mesh_stats mstats; @@ -142,20 +143,18 @@ struct _wlan_private { all other bits reserved 0 */ u8 dnld_sent; - const struct firmware *firmware; struct device *hotplug_device; /** thread to service interrupts */ - struct wlan_thread mainthread; + struct task_struct *main_thread; + wait_queue_head_t waitq; + struct workqueue_struct *work_thread; + struct delayed_work scan_work; struct delayed_work assoc_work; - struct workqueue_struct *assoc_thread; struct work_struct sync_channel; /** Hardware access */ - int (*hw_register_dev) (wlan_private * priv); - int (*hw_unregister_dev) (wlan_private *); - int (*hw_prog_firmware) (wlan_private *); int (*hw_host_to_card) (wlan_private * priv, u8 type, u8 * payload, u16 nb); int (*hw_get_int_status) (wlan_private * priv, u8 *); int (*hw_read_event_cause) (wlan_private *); @@ -188,12 +187,12 @@ struct assoc_request { u8 bssid[ETH_ALEN]; /** WEP keys */ - struct WLAN_802_11_KEY wep_keys[4]; + struct enc_key wep_keys[4]; u16 wep_tx_keyidx; /** WPA keys */ - struct WLAN_802_11_KEY wpa_mcast_key; - struct WLAN_802_11_KEY wpa_unicast_key; + struct enc_key wpa_mcast_key; + struct enc_key wpa_unicast_key; struct wlan_802_11_security secinfo; @@ -259,23 +258,15 @@ struct _wlan_adapter { /* IW_MODE_* */ u8 mode; - u8 prev_ssid[IW_ESSID_MAX_SIZE + 1]; - u8 prev_ssid_len; - u8 prev_bssid[ETH_ALEN]; - /* Scan results list */ struct list_head network_list; struct list_head network_free_list; struct bss_descriptor *networks; - u8 scantype; - u32 scanmode; - - u16 beaconperiod; u8 adhoccreate; /** capability Info used in Association, start, join */ - struct ieeetypes_capinfo capinfo; + u16 capability; /** MAC address information */ u8 current_addr[ETH_ALEN]; @@ -287,20 +278,10 @@ struct _wlan_adapter { u16 enablehwauto; u16 ratebitmap; - /** control G rates */ - u8 adhoc_grate_enabled; - - u32 txantenna; - u32 rxantenna; u32 fragthsd; u32 rtsthsd; - u32 datarate; - u8 
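
One detail in the dev.h hunk above: the numofrates/datarates[] pair in current_bss_params becomes a single zero-terminated rates[MAX_RATES + 1] array, so code walking the supported rates no longer has to carry a separate count (0 is not a valid 802.11 rate byte, so it can act as the terminator). A small sketch of how such an array might be consumed; count_rates() is an illustrative helper, not part of the driver.

#include <stdio.h>

#define MAX_RATES 14

/* Count the entries of a zero-terminated rate array (rates in 500 kbit/s units). */
static int count_rates(const unsigned char rates[MAX_RATES + 1])
{
        int n = 0;

        while (n < MAX_RATES && rates[n] != 0)
                n++;
        return n;
}

int main(void)
{
        /* 1, 2, 5.5 and 11 Mbit/s expressed in 500 kbit/s units */
        unsigned char rates[MAX_RATES + 1] = { 0x02, 0x04, 0x0b, 0x16, 0 };

        printf("%d rates supported\n", count_rates(rates));
        return 0;
}
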
is_datarate_auto; - - u16 listeninterval; - u16 prescan; u8 txretrycount; /** Tx-related variables (for single packet tx) */ @@ -311,22 +292,17 @@ struct _wlan_adapter { u16 currentpacketfilter; u32 connect_status; u16 regioncode; - u16 regiontableindex; u16 txpowerlevel; /** POWER MANAGEMENT AND PnP SUPPORT */ u8 surpriseremoved; - u16 atimwindow; u16 psmode; /* Wlan802_11PowermodeCAM=disable Wlan802_11PowermodeMAX_PSP=enable */ - u16 multipledtim; u32 psstate; u8 needtowakeup; struct PS_CMD_ConfirmSleep libertas_ps_confirm_sleep; - u16 locallisteninterval; - u16 nullpktinterval; struct assoc_request * pending_assoc_req; struct assoc_request * in_progress_assoc_req; @@ -335,23 +311,18 @@ struct _wlan_adapter { struct wlan_802_11_security secinfo; /** WEP keys */ - struct WLAN_802_11_KEY wep_keys[4]; + struct enc_key wep_keys[4]; u16 wep_tx_keyidx; /** WPA keys */ - struct WLAN_802_11_KEY wpa_mcast_key; - struct WLAN_802_11_KEY wpa_unicast_key; + struct enc_key wpa_mcast_key; + struct enc_key wpa_unicast_key; /** WPA Information Elements*/ u8 wpa_ie[MAX_WPA_IE_LEN]; u8 wpa_ie_len; - u16 rxantennamode; - u16 txantennamode; - /** Requested Signal Strength*/ - u16 bcn_avg_factor; - u16 data_avg_factor; u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; u16 NF[MAX_TYPE_B][MAX_TYPE_AVG]; u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG]; @@ -359,15 +330,13 @@ struct _wlan_adapter { u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; u16 nextSNRNF; u16 numSNRNF; - u16 rxpd_rate; u8 radioon; u32 preamble; - /** Multi bands Parameter*/ - u8 libertas_supported_rates[G_SUPPORTED_RATES]; - - /** Blue Tooth Co-existence Arbitration */ + /** data rate stuff */ + u8 cur_rate; + u8 auto_rate; /** sleep_params */ struct sleep_params sp; @@ -392,14 +361,8 @@ struct _wlan_adapter { struct wlan_offset_value offsetvalue; struct cmd_ds_802_11_get_log logmsg; - u16 scanprobes; - - u32 pkttxctrl; - u16 txrate; - u32 linkmode; - u32 radiomode; - u32 debugmode; + u32 monitormode; u8 fw_ready; u8 last_scanned_channel; diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index 96f1974685d4..3dae15211b6a 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c @@ -60,8 +60,7 @@ static int libertas_ethtool_get_eeprom(struct net_device *dev, // mutex_lock(&priv->mutex); - adapter->prdeeprom = - (char *)kmalloc(eeprom->len+sizeof(regctrl), GFP_KERNEL); + adapter->prdeeprom = kmalloc(eeprom->len+sizeof(regctrl), GFP_KERNEL); if (!adapter->prdeeprom) return -ENOMEM; memcpy(adapter->prdeeprom, ®ctrl, sizeof(regctrl)); @@ -72,9 +71,9 @@ static int libertas_ethtool_get_eeprom(struct net_device *dev, regctrl.action, regctrl.offset, regctrl.NOB); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_eeprom_access, + CMD_802_11_EEPROM_ACCESS, regctrl.action, - cmd_option_waitforrsp, 0, + CMD_OPTION_WAITFORRSP, 0, ®ctrl); if (ret) { @@ -110,56 +109,48 @@ static void libertas_ethtool_get_stats(struct net_device * dev, struct ethtool_stats * stats, u64 * data) { wlan_private *priv = dev->priv; + struct cmd_ds_mesh_access mesh_access; + int ret; lbs_deb_enter(LBS_DEB_ETHTOOL); - stats->cmd = ETHTOOL_GSTATS; - BUG_ON(stats->n_stats != MESH_STATS_NUM); - - data[0] = priv->mstats.fwd_drop_rbt; - data[1] = priv->mstats.fwd_drop_ttl; - data[2] = priv->mstats.fwd_drop_noroute; - data[3] = priv->mstats.fwd_drop_nobuf; - data[4] = priv->mstats.fwd_unicast_cnt; - data[5] = priv->mstats.fwd_bcast_cnt; - data[6] = priv->mstats.drop_blind; - data[7] = priv->mstats.tx_failed_cnt; + /* Get Mesh Statistics 
*/ + ret = libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS, + CMD_OPTION_WAITFORRSP, 0, &mesh_access); + + if (ret) + return; + + priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); + priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); + priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]); + priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]); + priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]); + priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]); + priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]); + priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]); + + data[0] = priv->mstats.fwd_drop_rbt; + data[1] = priv->mstats.fwd_drop_ttl; + data[2] = priv->mstats.fwd_drop_noroute; + data[3] = priv->mstats.fwd_drop_nobuf; + data[4] = priv->mstats.fwd_unicast_cnt; + data[5] = priv->mstats.fwd_bcast_cnt; + data[6] = priv->mstats.drop_blind; + data[7] = priv->mstats.tx_failed_cnt; lbs_deb_enter(LBS_DEB_ETHTOOL); } -static int libertas_ethtool_get_stats_count(struct net_device * dev) +static int libertas_ethtool_get_sset_count(struct net_device * dev, int sset) { - int ret; - wlan_private *priv = dev->priv; - struct cmd_ds_mesh_access mesh_access; - - lbs_deb_enter(LBS_DEB_ETHTOOL); - - /* Get Mesh Statistics */ - ret = libertas_prepare_and_send_command(priv, - cmd_mesh_access, cmd_act_mesh_get_stats, - cmd_option_waitforrsp, 0, &mesh_access); - - if (ret) { - ret = 0; - goto done; + switch (sset) { + case ETH_SS_STATS: + return MESH_STATS_NUM; + default: + return -EOPNOTSUPP; } - - priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); - priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); - priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]); - priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]); - priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]); - priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]); - priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]); - priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]); - - ret = MESH_STATS_NUM; - -done: - lbs_deb_enter_args(LBS_DEB_ETHTOOL, "ret %d", ret); - return ret; } static void libertas_ethtool_get_strings (struct net_device * dev, @@ -186,7 +177,7 @@ struct ethtool_ops libertas_ethtool_ops = { .get_drvinfo = libertas_ethtool_get_drvinfo, .get_eeprom = libertas_ethtool_get_eeprom, .get_eeprom_len = libertas_ethtool_get_eeprom_len, - .get_stats_count = libertas_ethtool_get_stats_count, + .get_sset_count = libertas_ethtool_get_sset_count, .get_ethtool_stats = libertas_ethtool_get_stats, .get_strings = libertas_ethtool_get_strings, }; diff --git a/drivers/net/wireless/libertas/fw.c b/drivers/net/wireless/libertas/fw.c deleted file mode 100644 index 2dc84ff7a54a..000000000000 --- a/drivers/net/wireless/libertas/fw.c +++ /dev/null @@ -1,349 +0,0 @@ -/** - * This file contains the initialization for FW and HW - */ -#include <linux/firmware.h> - -#include "host.h" -#include "defs.h" -#include "decl.h" -#include "dev.h" -#include "wext.h" -#include "if_usb.h" - -/** - * @brief This function checks the validity of Boot2/FW image. 
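
The ethtool.c hunk above follows the kernel-wide switch from .get_stats_count to .get_sset_count: the count callback now takes a string-set selector and only answers for ETH_SS_STATS, while the firmware query (CMD_MESH_ACCESS with CMD_ACT_MESH_GET_STATS) moves into get_ethtool_stats() itself. A stripped-down sketch of that pairing is shown below with a fixed, made-up stats array; none of the example_* names come from the driver.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_STATS_NUM 8

static void example_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        int i;

        /* a real driver would fetch these from hardware or firmware here */
        for (i = 0; i < EXAMPLE_STATS_NUM; i++)
                data[i] = i;
}

static int example_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return EXAMPLE_STATS_NUM;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct ethtool_ops example_ethtool_ops = {
        .get_sset_count    = example_get_sset_count,
        .get_ethtool_stats = example_get_ethtool_stats,
        /* .get_strings would normally supply EXAMPLE_STATS_NUM labels */
};
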
- * - * @param data pointer to image - * len image length - * @return 0 or -1 - */ -static int check_fwfile_format(u8 *data, u32 totlen) -{ - u32 bincmd, exit; - u32 blksize, offset, len; - int ret; - - ret = 1; - exit = len = 0; - - do { - struct fwheader *fwh = (void *)data; - - bincmd = le32_to_cpu(fwh->dnldcmd); - blksize = le32_to_cpu(fwh->datalength); - switch (bincmd) { - case FW_HAS_DATA_TO_RECV: - offset = sizeof(struct fwheader) + blksize; - data += offset; - len += offset; - if (len >= totlen) - exit = 1; - break; - case FW_HAS_LAST_BLOCK: - exit = 1; - ret = 0; - break; - default: - exit = 1; - break; - } - } while (!exit); - - if (ret) - lbs_pr_err("firmware file format check FAIL\n"); - else - lbs_deb_fw("firmware file format check PASS\n"); - - return ret; -} - -/** - * @brief This function downloads firmware image, gets - * HW spec from firmware and set basic parameters to - * firmware. - * - * @param priv A pointer to wlan_private structure - * @return 0 or -1 - */ -static int wlan_setup_station_hw(wlan_private * priv, char *fw_name) -{ - int ret = -1; - wlan_adapter *adapter = priv->adapter; - - lbs_deb_enter(LBS_DEB_FW); - - if ((ret = request_firmware(&priv->firmware, fw_name, - priv->hotplug_device)) < 0) { - lbs_pr_err("request_firmware() failed with %#x\n", ret); - lbs_pr_err("firmware %s not found\n", fw_name); - goto done; - } - - if (check_fwfile_format(priv->firmware->data, priv->firmware->size)) { - release_firmware(priv->firmware); - goto done; - } - - ret = priv->hw_prog_firmware(priv); - - release_firmware(priv->firmware); - - if (ret) { - lbs_deb_fw("bootloader in invalid state\n"); - ret = -1; - goto done; - } - - /* - * Read MAC address from HW - */ - memset(adapter->current_addr, 0xff, ETH_ALEN); - - ret = libertas_prepare_and_send_command(priv, cmd_get_hw_spec, - 0, cmd_option_waitforrsp, 0, NULL); - - if (ret) { - ret = -1; - goto done; - } - - libertas_set_mac_packet_filter(priv); - - /* Get the supported Data rates */ - ret = libertas_prepare_and_send_command(priv, cmd_802_11_data_rate, - cmd_act_get_tx_rate, - cmd_option_waitforrsp, 0, NULL); - - if (ret) { - ret = -1; - goto done; - } - - ret = 0; -done: - lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); - return ret; -} - -static int wlan_allocate_adapter(wlan_private * priv) -{ - size_t bufsize; - wlan_adapter *adapter = priv->adapter; - - /* Allocate buffer to store the BSSID list */ - bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor); - adapter->networks = kzalloc(bufsize, GFP_KERNEL); - if (!adapter->networks) { - lbs_pr_err("Out of memory allocating beacons\n"); - libertas_free_adapter(priv); - return -ENOMEM; - } - - /* Allocate the command buffers */ - libertas_allocate_cmd_buffer(priv); - - memset(&adapter->libertas_ps_confirm_sleep, 0, sizeof(struct PS_CMD_ConfirmSleep)); - adapter->libertas_ps_confirm_sleep.seqnum = cpu_to_le16(++adapter->seqnum); - adapter->libertas_ps_confirm_sleep.command = - cpu_to_le16(cmd_802_11_ps_mode); - adapter->libertas_ps_confirm_sleep.size = - cpu_to_le16(sizeof(struct PS_CMD_ConfirmSleep)); - adapter->libertas_ps_confirm_sleep.result = 0; - adapter->libertas_ps_confirm_sleep.action = - cpu_to_le16(cmd_subcmd_sleep_confirmed); - - return 0; -} - -static void wlan_init_adapter(wlan_private * priv) -{ - wlan_adapter *adapter = priv->adapter; - int i; - - adapter->scanprobes = 0; - - adapter->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR; - adapter->data_avg_factor = DEFAULT_DATA_AVG_FACTOR; - - /* ATIM params */ - adapter->atimwindow = 0; - - 
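
The check_fwfile_format() routine deleted above walks the firmware image header by header: an FW_HAS_DATA_TO_RECV block advances the cursor by sizeof(struct fwheader) plus the block's data length, and the image only passes if an FW_HAS_LAST_BLOCK marker appears before the cursor runs past the file size. A rough user-space sketch of that walk follows; the header layout is simplified, the constant values are assumed rather than taken from the driver, and byte-order conversion is omitted.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define FW_HAS_DATA_TO_RECV 0x00000001          /* assumed values */
#define FW_HAS_LAST_BLOCK   0x00000004

struct fw_header {
        uint32_t dnldcmd;                       /* block type */
        uint32_t baseaddr;                      /* not used by the check */
        uint32_t datalength;                    /* payload bytes after the header */
};

/* Return 0 if a LAST_BLOCK marker is found within 'totlen' bytes. */
static int check_fw_format(const uint8_t *data, size_t totlen)
{
        size_t off = 0;

        while (off + sizeof(struct fw_header) <= totlen) {
                const struct fw_header *fwh = (const void *)(data + off);

                if (fwh->dnldcmd == FW_HAS_LAST_BLOCK)
                        return 0;               /* valid image */
                if (fwh->dnldcmd != FW_HAS_DATA_TO_RECV)
                        return -1;              /* unknown block type */
                off += sizeof(struct fw_header) + fwh->datalength;
        }
        return -1;                              /* ran off the end of the file */
}

int main(void)
{
        uint8_t image[2 * sizeof(struct fw_header) + 4];
        struct fw_header h1 = { FW_HAS_DATA_TO_RECV, 0, 4 };
        struct fw_header h2 = { FW_HAS_LAST_BLOCK, 0, 0 };

        memcpy(image, &h1, sizeof(h1));
        memcpy(image + sizeof(h1) + 4, &h2, sizeof(h2));

        printf("image valid: %s\n",
               check_fw_format(image, sizeof(image)) == 0 ? "yes" : "no");
        return 0;
}
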
adapter->connect_status = libertas_disconnected; - memset(adapter->current_addr, 0xff, ETH_ALEN); - - /* scan type */ - adapter->scantype = cmd_scan_type_active; - - /* scan mode */ - adapter->scanmode = cmd_bss_type_any; - - /* 802.11 specific */ - adapter->secinfo.wep_enabled = 0; - for (i = 0; i < sizeof(adapter->wep_keys) / sizeof(adapter->wep_keys[0]); - i++) - memset(&adapter->wep_keys[i], 0, sizeof(struct WLAN_802_11_KEY)); - adapter->wep_tx_keyidx = 0; - adapter->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - adapter->mode = IW_MODE_INFRA; - - adapter->pending_assoc_req = NULL; - adapter->in_progress_assoc_req = NULL; - - /* Initialize scan result lists */ - INIT_LIST_HEAD(&adapter->network_free_list); - INIT_LIST_HEAD(&adapter->network_list); - for (i = 0; i < MAX_NETWORK_COUNT; i++) { - list_add_tail(&adapter->networks[i].list, - &adapter->network_free_list); - } - - mutex_init(&adapter->lock); - - adapter->prescan = 1; - - memset(&adapter->curbssparams, 0, sizeof(adapter->curbssparams)); - adapter->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; - - /* PnP and power profile */ - adapter->surpriseremoved = 0; - - adapter->currentpacketfilter = - cmd_act_mac_rx_on | cmd_act_mac_tx_on; - - adapter->radioon = RADIO_ON; - adapter->txantenna = RF_ANTENNA_2; - adapter->rxantenna = RF_ANTENNA_AUTO; - - adapter->is_datarate_auto = 1; - adapter->beaconperiod = MRVDRV_BEACON_INTERVAL; - - // set default value of capinfo. -#define SHORT_PREAMBLE_ALLOWED 1 - memset(&adapter->capinfo, 0, sizeof(adapter->capinfo)); - adapter->capinfo.shortpreamble = SHORT_PREAMBLE_ALLOWED; - - adapter->psmode = wlan802_11powermodecam; - adapter->multipledtim = MRVDRV_DEFAULT_MULTIPLE_DTIM; - - adapter->listeninterval = MRVDRV_DEFAULT_LISTEN_INTERVAL; - - adapter->psstate = PS_STATE_FULL_POWER; - adapter->needtowakeup = 0; - adapter->locallisteninterval = 0; /* default value in firmware will be used */ - - adapter->datarate = 0; // Initially indicate the rate as auto - - adapter->adhoc_grate_enabled = 0; - - adapter->intcounter = 0; - - adapter->currenttxskb = NULL; - adapter->pkttxctrl = 0; - - memset(&adapter->tx_queue_ps, 0, NR_TX_QUEUE*sizeof(struct sk_buff*)); - adapter->tx_queue_idx = 0; - spin_lock_init(&adapter->txqueue_lock); - - return; -} - -static void command_timer_fn(unsigned long data); - -int libertas_init_fw(wlan_private * priv, char *fw_name) -{ - int ret = -1; - wlan_adapter *adapter = priv->adapter; - - lbs_deb_enter(LBS_DEB_FW); - - /* Allocate adapter structure */ - if ((ret = wlan_allocate_adapter(priv)) != 0) - goto done; - - /* init adapter structure */ - wlan_init_adapter(priv); - - /* init timer etc. */ - setup_timer(&adapter->command_timer, command_timer_fn, - (unsigned long)priv); - - /* download fimrware etc. 
*/ - if ((ret = wlan_setup_station_hw(priv, fw_name)) != 0) { - del_timer_sync(&adapter->command_timer); - goto done; - } - - /* init 802.11d */ - libertas_init_11d(priv); - - ret = 0; -done: - lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); - return ret; -} - -void libertas_free_adapter(wlan_private * priv) -{ - wlan_adapter *adapter = priv->adapter; - - if (!adapter) { - lbs_deb_fw("why double free adapter?\n"); - return; - } - - lbs_deb_fw("free command buffer\n"); - libertas_free_cmd_buffer(priv); - - lbs_deb_fw("free command_timer\n"); - del_timer(&adapter->command_timer); - - lbs_deb_fw("free scan results table\n"); - kfree(adapter->networks); - adapter->networks = NULL; - - /* Free the adapter object itself */ - lbs_deb_fw("free adapter\n"); - kfree(adapter); - priv->adapter = NULL; -} - -/** - * This function handles the timeout of command sending. - * It will re-send the same command again. - */ -static void command_timer_fn(unsigned long data) -{ - wlan_private *priv = (wlan_private *)data; - wlan_adapter *adapter = priv->adapter; - struct cmd_ctrl_node *ptempnode; - struct cmd_ds_command *cmd; - unsigned long flags; - - ptempnode = adapter->cur_cmd; - if (ptempnode == NULL) { - lbs_deb_fw("ptempnode empty\n"); - return; - } - - cmd = (struct cmd_ds_command *)ptempnode->bufvirtualaddr; - if (!cmd) { - lbs_deb_fw("cmd is NULL\n"); - return; - } - - lbs_deb_fw("command_timer_fn fired, cmd %x\n", cmd->command); - - if (!adapter->fw_ready) - return; - - spin_lock_irqsave(&adapter->driver_lock, flags); - adapter->cur_cmd = NULL; - spin_unlock_irqrestore(&adapter->driver_lock, flags); - - lbs_deb_fw("re-sending same command because of timeout\n"); - libertas_queue_cmd(adapter, ptempnode, 0); - - wake_up_interruptible(&priv->mainthread.waitq); - - return; -} diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h index 7509cc10af3c..b37ddbca969f 100644 --- a/drivers/net/wireless/libertas/host.h +++ b/drivers/net/wireless/libertas/host.h @@ -20,224 +20,163 @@ #define OID_802_11_TX_RETRYCOUNT 0x0000801D #define OID_802_11D_ENABLE 0x00008020 -#define cmd_option_waitforrsp 0x0002 +#define CMD_OPTION_WAITFORRSP 0x0002 -/** Host command ID */ -#define cmd_code_dnld 0x0002 -#define cmd_get_hw_spec 0x0003 -#define cmd_eeprom_update 0x0004 -#define cmd_802_11_reset 0x0005 -#define cmd_802_11_scan 0x0006 -#define cmd_802_11_get_log 0x000b -#define cmd_mac_multicast_adr 0x0010 -#define cmd_802_11_authenticate 0x0011 -#define cmd_802_11_eeprom_access 0x0059 -#define cmd_802_11_associate 0x0050 -#define cmd_802_11_set_wep 0x0013 -#define cmd_802_11_get_stat 0x0014 -#define cmd_802_3_get_stat 0x0015 -#define cmd_802_11_snmp_mib 0x0016 -#define cmd_mac_reg_map 0x0017 -#define cmd_bbp_reg_map 0x0018 -#define cmd_mac_reg_access 0x0019 -#define cmd_bbp_reg_access 0x001a -#define cmd_rf_reg_access 0x001b -#define cmd_802_11_radio_control 0x001c -#define cmd_802_11_rf_channel 0x001d -#define cmd_802_11_rf_tx_power 0x001e -#define cmd_802_11_rssi 0x001f -#define cmd_802_11_rf_antenna 0x0020 +/** Host command IDs */ -#define cmd_802_11_ps_mode 0x0021 +/* Return command are almost always the same as the host command, but with + * bit 15 set high. There are a few exceptions, though... 
+ */ +#define CMD_RET(cmd) (0x8000 | cmd) -#define cmd_802_11_data_rate 0x0022 -#define cmd_rf_reg_map 0x0023 -#define cmd_802_11_deauthenticate 0x0024 -#define cmd_802_11_reassociate 0x0025 -#define cmd_802_11_disassociate 0x0026 -#define cmd_mac_control 0x0028 -#define cmd_802_11_ad_hoc_start 0x002b -#define cmd_802_11_ad_hoc_join 0x002c +/* Return command convention exceptions: */ +#define CMD_RET_802_11_ASSOCIATE 0x8012 -#define cmd_802_11_query_tkip_reply_cntrs 0x002e -#define cmd_802_11_enable_rsn 0x002f -#define cmd_802_11_pairwise_tsc 0x0036 -#define cmd_802_11_group_tsc 0x0037 -#define cmd_802_11_key_material 0x005e +/* Command codes */ +#define CMD_CODE_DNLD 0x0002 +#define CMD_GET_HW_SPEC 0x0003 +#define CMD_EEPROM_UPDATE 0x0004 +#define CMD_802_11_RESET 0x0005 +#define CMD_802_11_SCAN 0x0006 +#define CMD_802_11_GET_LOG 0x000b +#define CMD_MAC_MULTICAST_ADR 0x0010 +#define CMD_802_11_AUTHENTICATE 0x0011 +#define CMD_802_11_EEPROM_ACCESS 0x0059 +#define CMD_802_11_ASSOCIATE 0x0050 +#define CMD_802_11_SET_WEP 0x0013 +#define CMD_802_11_GET_STAT 0x0014 +#define CMD_802_3_GET_STAT 0x0015 +#define CMD_802_11_SNMP_MIB 0x0016 +#define CMD_MAC_REG_MAP 0x0017 +#define CMD_BBP_REG_MAP 0x0018 +#define CMD_MAC_REG_ACCESS 0x0019 +#define CMD_BBP_REG_ACCESS 0x001a +#define CMD_RF_REG_ACCESS 0x001b +#define CMD_802_11_RADIO_CONTROL 0x001c +#define CMD_802_11_RF_CHANNEL 0x001d +#define CMD_802_11_RF_TX_POWER 0x001e +#define CMD_802_11_RSSI 0x001f +#define CMD_802_11_RF_ANTENNA 0x0020 -#define cmd_802_11_set_afc 0x003c -#define cmd_802_11_get_afc 0x003d +#define CMD_802_11_PS_MODE 0x0021 -#define cmd_802_11_ad_hoc_stop 0x0040 +#define CMD_802_11_DATA_RATE 0x0022 +#define CMD_RF_REG_MAP 0x0023 +#define CMD_802_11_DEAUTHENTICATE 0x0024 +#define CMD_802_11_REASSOCIATE 0x0025 +#define CMD_802_11_DISASSOCIATE 0x0026 +#define CMD_MAC_CONTROL 0x0028 +#define CMD_802_11_AD_HOC_START 0x002b +#define CMD_802_11_AD_HOC_JOIN 0x002c -#define cmd_802_11_beacon_stop 0x0049 +#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e +#define CMD_802_11_ENABLE_RSN 0x002f +#define CMD_802_11_PAIRWISE_TSC 0x0036 +#define CMD_802_11_GROUP_TSC 0x0037 +#define CMD_802_11_KEY_MATERIAL 0x005e -#define cmd_802_11_mac_address 0x004D -#define cmd_802_11_eeprom_access 0x0059 +#define CMD_802_11_SET_AFC 0x003c +#define CMD_802_11_GET_AFC 0x003d -#define cmd_802_11_band_config 0x0058 +#define CMD_802_11_AD_HOC_STOP 0x0040 -#define cmd_802_11d_domain_info 0x005b +#define CMD_802_11_BEACON_STOP 0x0049 -#define cmd_802_11_sleep_params 0x0066 +#define CMD_802_11_MAC_ADDRESS 0x004D +#define CMD_802_11_EEPROM_ACCESS 0x0059 -#define cmd_802_11_inactivity_timeout 0x0067 +#define CMD_802_11_BAND_CONFIG 0x0058 -#define cmd_802_11_tpc_cfg 0x0072 -#define cmd_802_11_pwr_cfg 0x0073 +#define CMD_802_11D_DOMAIN_INFO 0x005b -#define cmd_802_11_led_gpio_ctrl 0x004e +#define CMD_802_11_SLEEP_PARAMS 0x0066 -#define cmd_802_11_subscribe_event 0x0075 +#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 -#define cmd_802_11_rate_adapt_rateset 0x0076 +#define CMD_802_11_TPC_CFG 0x0072 +#define CMD_802_11_PWR_CFG 0x0073 -#define cmd_802_11_tx_rate_query 0x007f +#define CMD_802_11_LED_GPIO_CTRL 0x004e -#define cmd_get_tsf 0x0080 +#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 -#define cmd_bt_access 0x0087 -#define cmd_ret_bt_access 0x8087 +#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 -#define cmd_fwt_access 0x0095 -#define cmd_ret_fwt_access 0x8095 +#define CMD_802_11_TX_RATE_QUERY 0x007f -#define cmd_mesh_access 0x009b -#define cmd_ret_mesh_access 0x809b +#define 
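
CMD_RET() encodes the response convention directly: a response code is the request code with bit 15 set, which is what allows the long list of hand-maintained cmd_ret_* constants further down to be dropped. The associate response (0x8012 for request 0x0050) is kept as an explicit exception. A tiny runnable check of the rule and the exception:

#include <assert.h>
#include <stdio.h>

#define CMD_RET(cmd)             (0x8000 | (cmd))
#define CMD_802_11_SCAN          0x0006
#define CMD_802_11_ASSOCIATE     0x0050
#define CMD_RET_802_11_ASSOCIATE 0x8012  /* exception: not CMD_RET(0x0050) */

int main(void)
{
        /* the common case: response = request | 0x8000 */
        assert(CMD_RET(CMD_802_11_SCAN) == 0x8006);

        /* the documented exception does not follow the rule */
        assert(CMD_RET(CMD_802_11_ASSOCIATE) != CMD_RET_802_11_ASSOCIATE);

        printf("scan response code: 0x%04x\n", CMD_RET(CMD_802_11_SCAN));
        return 0;
}
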
CMD_GET_TSF 0x0080 + +#define CMD_BT_ACCESS 0x0087 + +#define CMD_FWT_ACCESS 0x0095 + +#define CMD_802_11_MONITOR_MODE 0x0098 + +#define CMD_MESH_ACCESS 0x009b + +#define CMD_SET_BOOT2_VER 0x00a5 /* For the IEEE Power Save */ -#define cmd_subcmd_enter_ps 0x0030 -#define cmd_subcmd_exit_ps 0x0031 -#define cmd_subcmd_sleep_confirmed 0x0034 -#define cmd_subcmd_full_powerdown 0x0035 -#define cmd_subcmd_full_powerup 0x0036 +#define CMD_SUBCMD_ENTER_PS 0x0030 +#define CMD_SUBCMD_EXIT_PS 0x0031 +#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 +#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 +#define CMD_SUBCMD_FULL_POWERUP 0x0036 + +#define CMD_ENABLE_RSN 0x0001 +#define CMD_DISABLE_RSN 0x0000 + +#define CMD_ACT_SET 0x0001 +#define CMD_ACT_GET 0x0000 + +#define CMD_ACT_GET_AES (CMD_ACT_GET + 2) +#define CMD_ACT_SET_AES (CMD_ACT_SET + 2) +#define CMD_ACT_REMOVE_AES (CMD_ACT_SET + 3) + +/* Define action or option for CMD_802_11_SET_WEP */ +#define CMD_ACT_ADD 0x0002 +#define CMD_ACT_REMOVE 0x0004 +#define CMD_ACT_USE_DEFAULT 0x0008 + +#define CMD_TYPE_WEP_40_BIT 0x01 +#define CMD_TYPE_WEP_104_BIT 0x02 + +#define CMD_NUM_OF_WEP_KEYS 4 + +#define CMD_WEP_KEY_INDEX_MASK 0x3fff -/* command RET code, MSB is set to 1 */ -#define cmd_ret_hw_spec_info 0x8003 -#define cmd_ret_eeprom_update 0x8004 -#define cmd_ret_802_11_reset 0x8005 -#define cmd_ret_802_11_scan 0x8006 -#define cmd_ret_802_11_get_log 0x800b -#define cmd_ret_mac_control 0x8028 -#define cmd_ret_mac_multicast_adr 0x8010 -#define cmd_ret_802_11_authenticate 0x8011 -#define cmd_ret_802_11_deauthenticate 0x8024 -#define cmd_ret_802_11_associate 0x8012 -#define cmd_ret_802_11_reassociate 0x8025 -#define cmd_ret_802_11_disassociate 0x8026 -#define cmd_ret_802_11_set_wep 0x8013 -#define cmd_ret_802_11_stat 0x8014 -#define cmd_ret_802_3_stat 0x8015 -#define cmd_ret_802_11_snmp_mib 0x8016 -#define cmd_ret_mac_reg_map 0x8017 -#define cmd_ret_bbp_reg_map 0x8018 -#define cmd_ret_rf_reg_map 0x8023 -#define cmd_ret_mac_reg_access 0x8019 -#define cmd_ret_bbp_reg_access 0x801a -#define cmd_ret_rf_reg_access 0x801b -#define cmd_ret_802_11_radio_control 0x801c -#define cmd_ret_802_11_rf_channel 0x801d -#define cmd_ret_802_11_rssi 0x801f -#define cmd_ret_802_11_rf_tx_power 0x801e -#define cmd_ret_802_11_rf_antenna 0x8020 -#define cmd_ret_802_11_ps_mode 0x8021 -#define cmd_ret_802_11_data_rate 0x8022 - -#define cmd_ret_802_11_ad_hoc_start 0x802B -#define cmd_ret_802_11_ad_hoc_join 0x802C - -#define cmd_ret_802_11_query_tkip_reply_cntrs 0x802e -#define cmd_ret_802_11_enable_rsn 0x802f -#define cmd_ret_802_11_pairwise_tsc 0x8036 -#define cmd_ret_802_11_group_tsc 0x8037 -#define cmd_ret_802_11_key_material 0x805e - -#define cmd_enable_rsn 0x0001 -#define cmd_disable_rsn 0x0000 - -#define cmd_act_set 0x0001 -#define cmd_act_get 0x0000 - -#define cmd_act_get_AES (cmd_act_get + 2) -#define cmd_act_set_AES (cmd_act_set + 2) -#define cmd_act_remove_aes (cmd_act_set + 3) - -#define cmd_ret_802_11_set_afc 0x803c -#define cmd_ret_802_11_get_afc 0x803d - -#define cmd_ret_802_11_ad_hoc_stop 0x8040 - -#define cmd_ret_802_11_beacon_stop 0x8049 - -#define cmd_ret_802_11_mac_address 0x804D -#define cmd_ret_802_11_eeprom_access 0x8059 - -#define cmd_ret_802_11_band_config 0x8058 - -#define cmd_ret_802_11_sleep_params 0x8066 - -#define cmd_ret_802_11_inactivity_timeout 0x8067 - -#define cmd_ret_802_11d_domain_info (0x8000 | \ - cmd_802_11d_domain_info) - -#define cmd_ret_802_11_tpc_cfg (cmd_802_11_tpc_cfg | 0x8000) -#define cmd_ret_802_11_pwr_cfg (cmd_802_11_pwr_cfg | 0x8000) - -#define 
cmd_ret_802_11_led_gpio_ctrl 0x804e - -#define cmd_ret_802_11_subscribe_event (cmd_802_11_subscribe_event | 0x8000) - -#define cmd_ret_802_11_rate_adapt_rateset (cmd_802_11_rate_adapt_rateset | 0x8000) - -#define cmd_rte_802_11_tx_rate_query (cmd_802_11_tx_rate_query | 0x8000) - -#define cmd_ret_get_tsf 0x8080 - -/* Define action or option for cmd_802_11_set_wep */ -#define cmd_act_add 0x0002 -#define cmd_act_remove 0x0004 -#define cmd_act_use_default 0x0008 - -#define cmd_type_wep_40_bit 0x0001 -#define cmd_type_wep_104_bit 0x0002 - -#define cmd_NUM_OF_WEP_KEYS 4 - -#define cmd_WEP_KEY_INDEX_MASK 0x3fff - -/* Define action or option for cmd_802_11_reset */ -#define cmd_act_halt 0x0003 +/* Define action or option for CMD_802_11_RESET */ +#define CMD_ACT_HALT 0x0003 -/* Define action or option for cmd_802_11_scan */ -#define cmd_bss_type_bss 0x0001 -#define cmd_bss_type_ibss 0x0002 -#define cmd_bss_type_any 0x0003 +/* Define action or option for CMD_802_11_SCAN */ +#define CMD_BSS_TYPE_BSS 0x0001 +#define CMD_BSS_TYPE_IBSS 0x0002 +#define CMD_BSS_TYPE_ANY 0x0003 -/* Define action or option for cmd_802_11_scan */ -#define cmd_scan_type_active 0x0000 -#define cmd_scan_type_passive 0x0001 +/* Define action or option for CMD_802_11_SCAN */ +#define CMD_SCAN_TYPE_ACTIVE 0x0000 +#define CMD_SCAN_TYPE_PASSIVE 0x0001 -#define cmd_scan_radio_type_bg 0 +#define CMD_SCAN_RADIO_TYPE_BG 0 -#define cmd_scan_probe_delay_time 0 +#define CMD_SCAN_PROBE_DELAY_TIME 0 -/* Define action or option for cmd_mac_control */ -#define cmd_act_mac_rx_on 0x0001 -#define cmd_act_mac_tx_on 0x0002 -#define cmd_act_mac_loopback_on 0x0004 -#define cmd_act_mac_wep_enable 0x0008 -#define cmd_act_mac_int_enable 0x0010 -#define cmd_act_mac_multicast_enable 0x0020 -#define cmd_act_mac_broadcast_enable 0x0040 -#define cmd_act_mac_promiscuous_enable 0x0080 -#define cmd_act_mac_all_multicast_enable 0x0100 -#define cmd_act_mac_strict_protection_enable 0x0400 +/* Define action or option for CMD_MAC_CONTROL */ +#define CMD_ACT_MAC_RX_ON 0x0001 +#define CMD_ACT_MAC_TX_ON 0x0002 +#define CMD_ACT_MAC_LOOPBACK_ON 0x0004 +#define CMD_ACT_MAC_WEP_ENABLE 0x0008 +#define CMD_ACT_MAC_INT_ENABLE 0x0010 +#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 +#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 +#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 +#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 +#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 -/* Define action or option for cmd_802_11_radio_control */ -#define cmd_type_auto_preamble 0x0001 -#define cmd_type_short_preamble 0x0002 -#define cmd_type_long_preamble 0x0003 +/* Define action or option for CMD_802_11_RADIO_CONTROL */ +#define CMD_TYPE_AUTO_PREAMBLE 0x0001 +#define CMD_TYPE_SHORT_PREAMBLE 0x0002 +#define CMD_TYPE_LONG_PREAMBLE 0x0003 #define TURN_ON_RF 0x01 #define RADIO_ON 0x01 @@ -248,70 +187,80 @@ #define SET_LONG_PREAMBLE 0x01 /* Define action or option for CMD_802_11_RF_CHANNEL */ -#define cmd_opt_802_11_rf_channel_get 0x00 -#define cmd_opt_802_11_rf_channel_set 0x01 - -/* Define action or option for cmd_802_11_rf_tx_power */ -#define cmd_act_tx_power_opt_get 0x0000 -#define cmd_act_tx_power_opt_set_high 0x8007 -#define cmd_act_tx_power_opt_set_mid 0x8004 -#define cmd_act_tx_power_opt_set_low 0x8000 - -#define cmd_act_tx_power_index_high 0x0007 -#define cmd_act_tx_power_index_mid 0x0004 -#define cmd_act_tx_power_index_low 0x0000 - -/* Define action or option for cmd_802_11_data_rate */ -#define cmd_act_set_tx_auto 0x0000 -#define cmd_act_set_tx_fix_rate 0x0001 -#define cmd_act_get_tx_rate 
0x0002 - -#define cmd_act_set_rx 0x0001 -#define cmd_act_set_tx 0x0002 -#define cmd_act_set_both 0x0003 -#define cmd_act_get_rx 0x0004 -#define cmd_act_get_tx 0x0008 -#define cmd_act_get_both 0x000c - -/* Define action or option for cmd_802_11_ps_mode */ -#define cmd_type_cam 0x0000 -#define cmd_type_max_psp 0x0001 -#define cmd_type_fast_psp 0x0002 - -/* Define action or option for cmd_bt_access */ +#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 +#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 + +/* Define action or option for CMD_802_11_RF_TX_POWER */ +#define CMD_ACT_TX_POWER_OPT_GET 0x0000 +#define CMD_ACT_TX_POWER_OPT_SET_HIGH 0x8007 +#define CMD_ACT_TX_POWER_OPT_SET_MID 0x8004 +#define CMD_ACT_TX_POWER_OPT_SET_LOW 0x8000 + +#define CMD_ACT_TX_POWER_INDEX_HIGH 0x0007 +#define CMD_ACT_TX_POWER_INDEX_MID 0x0004 +#define CMD_ACT_TX_POWER_INDEX_LOW 0x0000 + +/* Define action or option for CMD_802_11_DATA_RATE */ +#define CMD_ACT_SET_TX_AUTO 0x0000 +#define CMD_ACT_SET_TX_FIX_RATE 0x0001 +#define CMD_ACT_GET_TX_RATE 0x0002 + +#define CMD_ACT_SET_RX 0x0001 +#define CMD_ACT_SET_TX 0x0002 +#define CMD_ACT_SET_BOTH 0x0003 +#define CMD_ACT_GET_RX 0x0004 +#define CMD_ACT_GET_TX 0x0008 +#define CMD_ACT_GET_BOTH 0x000c + +/* Define action or option for CMD_802_11_PS_MODE */ +#define CMD_TYPE_CAM 0x0000 +#define CMD_TYPE_MAX_PSP 0x0001 +#define CMD_TYPE_FAST_PSP 0x0002 + +/* Define action or option for CMD_BT_ACCESS */ enum cmd_bt_access_opts { /* The bt commands start at 5 instead of 1 because the old dft commands * are mapped to 1-4. These old commands are no longer maintained and * should not be called. */ - cmd_act_bt_access_add = 5, - cmd_act_bt_access_del, - cmd_act_bt_access_list, - cmd_act_bt_access_reset, - cmd_act_bt_access_set_invert, - cmd_act_bt_access_get_invert + CMD_ACT_BT_ACCESS_ADD = 5, + CMD_ACT_BT_ACCESS_DEL, + CMD_ACT_BT_ACCESS_LIST, + CMD_ACT_BT_ACCESS_RESET, + CMD_ACT_BT_ACCESS_SET_INVERT, + CMD_ACT_BT_ACCESS_GET_INVERT }; -/* Define action or option for cmd_fwt_access */ +/* Define action or option for CMD_FWT_ACCESS */ enum cmd_fwt_access_opts { - cmd_act_fwt_access_add = 1, - cmd_act_fwt_access_del, - cmd_act_fwt_access_lookup, - cmd_act_fwt_access_list, - cmd_act_fwt_access_list_route, - cmd_act_fwt_access_list_neighbor, - cmd_act_fwt_access_reset, - cmd_act_fwt_access_cleanup, - cmd_act_fwt_access_time, + CMD_ACT_FWT_ACCESS_ADD = 1, + CMD_ACT_FWT_ACCESS_DEL, + CMD_ACT_FWT_ACCESS_LOOKUP, + CMD_ACT_FWT_ACCESS_LIST, + CMD_ACT_FWT_ACCESS_LIST_route, + CMD_ACT_FWT_ACCESS_LIST_neighbor, + CMD_ACT_FWT_ACCESS_RESET, + CMD_ACT_FWT_ACCESS_CLEANUP, + CMD_ACT_FWT_ACCESS_TIME, }; -/* Define action or option for cmd_mesh_access */ +/* Define action or option for CMD_MESH_ACCESS */ enum cmd_mesh_access_opts { - cmd_act_mesh_get_ttl = 1, - cmd_act_mesh_set_ttl, - cmd_act_mesh_get_stats, - cmd_act_mesh_get_anycast, - cmd_act_mesh_set_anycast, + CMD_ACT_MESH_GET_TTL = 1, + CMD_ACT_MESH_SET_TTL, + CMD_ACT_MESH_GET_STATS, + CMD_ACT_MESH_GET_ANYCAST, + CMD_ACT_MESH_SET_ANYCAST, + CMD_ACT_MESH_SET_LINK_COSTS, + CMD_ACT_MESH_GET_LINK_COSTS, + CMD_ACT_MESH_SET_BCAST_RATE, + CMD_ACT_MESH_GET_BCAST_RATE, + CMD_ACT_MESH_SET_RREQ_DELAY, + CMD_ACT_MESH_GET_RREQ_DELAY, + CMD_ACT_MESH_SET_ROUTE_EXP, + CMD_ACT_MESH_GET_ROUTE_EXP, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_ACT_MESH_GET_AUTOSTART_ENABLED, }; /** Card Event definition */ diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h index 09b898f6719c..e1045dc02cce 100644 --- 
a/drivers/net/wireless/libertas/hostcmd.h +++ b/drivers/net/wireless/libertas/hostcmd.h @@ -83,23 +83,12 @@ struct cmd_ctrl_node { wait_queue_head_t cmdwait_q; }; -/* WLAN_802_11_KEY - * - * Generic structure to hold all key types. key type (WEP40, WEP104, TKIP, AES) - * is determined from the keylength field. - */ -struct WLAN_802_11_KEY { - __le32 len; - __le32 flags; /* KEY_INFO_* from wlan_defs.h */ - u8 key[MRVL_MAX_KEY_WPA_KEY_LENGTH]; - __le16 type; /* KEY_TYPE_* from wlan_defs.h */ -}; - -struct IE_WPA { - u8 elementid; - u8 len; - u8 oui[4]; - __le16 version; +/* Generic structure to hold all key types. */ +struct enc_key { + u16 len; + u16 flags; /* KEY_INFO_* from wlan_defs.h */ + u16 type; /* KEY_TYPE_* from wlan_defs.h */ + u8 key[32]; }; /* wlan_offset_value */ @@ -108,18 +97,6 @@ struct wlan_offset_value { u32 value; }; -struct WLAN_802_11_FIXED_IEs { - __le64 timestamp; - __le16 beaconinterval; - u16 capabilities; /* Actually struct ieeetypes_capinfo */ -}; - -struct WLAN_802_11_VARIABLE_IEs { - u8 elementid; - u8 length; - u8 data[1]; -}; - /* Define general data structure */ /* cmd_DS_GEN */ struct cmd_ds_gen { @@ -131,7 +108,7 @@ struct cmd_ds_gen { #define S_DS_GEN sizeof(struct cmd_ds_gen) /* - * Define data structure for cmd_get_hw_spec + * Define data structure for CMD_GET_HW_SPEC * This structure defines the response for the GET_HW_SPEC command */ struct cmd_ds_get_hw_spec { @@ -178,11 +155,11 @@ struct cmd_ds_802_11_subscribe_event { /* * This scan handle Country Information IE(802.11d compliant) - * Define data structure for cmd_802_11_scan + * Define data structure for CMD_802_11_SCAN */ struct cmd_ds_802_11_scan { u8 bsstype; - u8 BSSID[ETH_ALEN]; + u8 bssid[ETH_ALEN]; u8 tlvbuffer[1]; #if 0 mrvlietypes_ssidparamset_t ssidParamSet; @@ -237,7 +214,7 @@ struct cmd_ds_802_11_deauthenticate { struct cmd_ds_802_11_associate { u8 peerstaaddr[6]; - struct ieeetypes_capinfo capinfo; + __le16 capability; __le16 listeninterval; __le16 bcnperiod; u8 dtimperiod; @@ -260,8 +237,8 @@ struct cmd_ds_802_11_associate_rsp { }; struct cmd_ds_802_11_ad_hoc_result { - u8 PAD[3]; - u8 BSSID[ETH_ALEN]; + u8 pad[3]; + u8 bssid[ETH_ALEN]; }; struct cmd_ds_802_11_set_wep { @@ -428,6 +405,16 @@ struct cmd_ds_802_11_rf_antenna { }; +struct cmd_ds_802_11_monitor_mode { + u16 action; + u16 mode; +}; + +struct cmd_ds_set_boot2_ver { + u16 action; + u16 version; +}; + struct cmd_ds_802_11_ps_mode { __le16 action; __le16 nullpktinterval; @@ -451,8 +438,8 @@ struct PS_CMD_ConfirmSleep { struct cmd_ds_802_11_data_rate { __le16 action; - __le16 reserverd; - u8 datarate[G_SUPPORTED_RATES]; + __le16 reserved; + u8 rates[MAX_RATES]; }; struct cmd_ds_802_11_rate_adapt_rateset { @@ -462,30 +449,30 @@ struct cmd_ds_802_11_rate_adapt_rateset { }; struct cmd_ds_802_11_ad_hoc_start { - u8 SSID[IW_ESSID_MAX_SIZE]; + u8 ssid[IW_ESSID_MAX_SIZE]; u8 bsstype; __le16 beaconperiod; u8 dtimperiod; union IEEEtypes_ssparamset ssparamset; union ieeetypes_phyparamset phyparamset; __le16 probedelay; - struct ieeetypes_capinfo cap; - u8 datarate[G_SUPPORTED_RATES]; + __le16 capability; + u8 rates[MAX_RATES]; u8 tlv_memory_size_pad[100]; } __attribute__ ((packed)); struct adhoc_bssdesc { - u8 BSSID[6]; - u8 SSID[32]; - u8 bsstype; + u8 bssid[6]; + u8 ssid[32]; + u8 type; __le16 beaconperiod; u8 dtimperiod; __le64 timestamp; __le64 localtime; union ieeetypes_phyparamset phyparamset; union IEEEtypes_ssparamset ssparamset; - struct ieeetypes_capinfo cap; - u8 datarates[G_SUPPORTED_RATES]; + __le16 capability; + u8 
rates[MAX_RATES]; /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the * Adhoc join command and will cause a binary layout mismatch with @@ -494,7 +481,7 @@ struct adhoc_bssdesc { } __attribute__ ((packed)); struct cmd_ds_802_11_ad_hoc_join { - struct adhoc_bssdesc bssdescriptor; + struct adhoc_bssdesc bss; __le16 failtimeout; __le16 probedelay; @@ -646,6 +633,7 @@ struct cmd_ds_command { struct cmd_ds_802_11_snmp_mib smib; struct cmd_ds_802_11_rf_tx_power txp; struct cmd_ds_802_11_rf_antenna rant; + struct cmd_ds_802_11_monitor_mode monitor; struct cmd_ds_802_11_data_rate drate; struct cmd_ds_802_11_rate_adapt_rateset rateset; struct cmd_ds_mac_multicast_adr madr; @@ -677,6 +665,7 @@ struct cmd_ds_command { struct cmd_ds_bt_access bt; struct cmd_ds_fwt_access fwt; struct cmd_ds_mesh_access mesh; + struct cmd_ds_set_boot2_ver boot2_ver; struct cmd_ds_get_tsf gettsf; struct cmd_ds_802_11_subscribe_event subscribe_event; } params; diff --git a/drivers/net/wireless/libertas/if_bootcmd.c b/drivers/net/wireless/libertas/if_bootcmd.c deleted file mode 100644 index 8bca306ffad9..000000000000 --- a/drivers/net/wireless/libertas/if_bootcmd.c +++ /dev/null @@ -1,40 +0,0 @@ -/** - * This file contains functions used in USB Boot command - * and Boot2/FW update - */ - -#include <linux/delay.h> -#include <linux/firmware.h> -#include <linux/netdevice.h> -#include <linux/usb.h> - -#define DRV_NAME "usb8xxx" - -#include "defs.h" -#include "dev.h" -#include "if_usb.h" - -/** - * @brief This function issues Boot command to the Boot2 code - * @param ivalue 1:Boot from FW by USB-Download - * 2:Boot from FW in EEPROM - * @return 0 - */ -int if_usb_issue_boot_command(wlan_private *priv, int ivalue) -{ - struct usb_card_rec *cardp = priv->card; - struct bootcmdstr sbootcmd; - int i; - - /* Prepare command */ - sbootcmd.u32magicnumber = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER); - sbootcmd.u8cmd_tag = ivalue; - for (i=0; i<11; i++) - sbootcmd.au8dumy[i]=0x00; - memcpy(cardp->bulk_out_buffer, &sbootcmd, sizeof(struct bootcmdstr)); - - /* Issue command */ - usb_tx_block(priv, cardp->bulk_out_buffer, sizeof(struct bootcmdstr)); - - return 0; -} diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c new file mode 100644 index 000000000000..0360cad363a8 --- /dev/null +++ b/drivers/net/wireless/libertas/if_cs.c @@ -0,0 +1,969 @@ +/* + + Driver for the Marvell 8385 based compact flash WLAN cards. + + (C) 2007 by Holger Schurig <hs4233@mail.mn-solutions.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/moduleparam.h> +#include <linux/firmware.h> +#include <linux/netdevice.h> + +#include <pcmcia/cs_types.h> +#include <pcmcia/cs.h> +#include <pcmcia/cistpl.h> +#include <pcmcia/ds.h> + +#include <linux/io.h> + +#define DRV_NAME "libertas_cs" + +#include "decl.h" +#include "defs.h" +#include "dev.h" + + +/********************************************************************/ +/* Module stuff */ +/********************************************************************/ + +MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>"); +MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); +MODULE_LICENSE("GPL"); + + + +/********************************************************************/ +/* Data structures */ +/********************************************************************/ + +struct if_cs_card { + struct pcmcia_device *p_dev; + wlan_private *priv; + void __iomem *iobase; +}; + + + +/********************************************************************/ +/* Hardware access */ +/********************************************************************/ + +/* This define enables wrapper functions which allow you + to dump all register accesses. You normally won't this, + except for development */ +/* #define DEBUG_IO */ + +#ifdef DEBUG_IO +static int debug_output = 0; +#else +/* This way the compiler optimizes the printk's away */ +#define debug_output 0 +#endif + +static inline unsigned int if_cs_read8(struct if_cs_card *card, uint reg) +{ + unsigned int val = ioread8(card->iobase + reg); + if (debug_output) + printk(KERN_INFO "##inb %08x<%02x\n", reg, val); + return val; +} +static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg) +{ + unsigned int val = ioread16(card->iobase + reg); + if (debug_output) + printk(KERN_INFO "##inw %08x<%04x\n", reg, val); + return val; +} +static inline void if_cs_read16_rep( + struct if_cs_card *card, + uint reg, + void *buf, + unsigned long count) +{ + if (debug_output) + printk(KERN_INFO "##insw %08x<(0x%lx words)\n", + reg, count); + ioread16_rep(card->iobase + reg, buf, count); +} + +static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val) +{ + if (debug_output) + printk(KERN_INFO "##outb %08x>%02x\n", reg, val); + iowrite8(val, card->iobase + reg); +} + +static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val) +{ + if (debug_output) + printk(KERN_INFO "##outw %08x>%04x\n", reg, val); + iowrite16(val, card->iobase + reg); +} + +static inline void if_cs_write16_rep( + struct if_cs_card *card, + uint reg, + void *buf, + unsigned long count) +{ + if (debug_output) + printk(KERN_INFO "##outsw %08x>(0x%lx words)\n", + reg, count); + iowrite16_rep(card->iobase + reg, buf, count); +} + + +/* + * I know that polling/delaying is frowned upon. However, this procedure + * with polling is needed while downloading the firmware. At this stage, + * the hardware does unfortunately not create any interrupts. + * + * Fortunately, this function is never used once the firmware is in + * the card. :-) + * + * As a reference, see the "Firmware Specification v5.1", page 18 + * and 19. I did not follow their suggested timing to the word, + * but this works nice & fast anyway. 
+ */ +static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 reg) +{ + int i; + + for (i = 0; i < 500; i++) { + u8 val = if_cs_read8(card, addr); + if (val == reg) + return i; + udelay(100); + } + return -ETIME; +} + + + +/* Host control registers and their bit definitions */ + +#define IF_CS_H_STATUS 0x00000000 +#define IF_CS_H_STATUS_TX_OVER 0x0001 +#define IF_CS_H_STATUS_RX_OVER 0x0002 +#define IF_CS_H_STATUS_DNLD_OVER 0x0004 + +#define IF_CS_H_INT_CAUSE 0x00000002 +#define IF_CS_H_IC_TX_OVER 0x0001 +#define IF_CS_H_IC_RX_OVER 0x0002 +#define IF_CS_H_IC_DNLD_OVER 0x0004 +#define IF_CS_H_IC_HOST_EVENT 0x0008 +#define IF_CS_H_IC_MASK 0x001f + +#define IF_CS_H_INT_MASK 0x00000004 +#define IF_CS_H_IM_MASK 0x001f + +#define IF_CS_H_WRITE_LEN 0x00000014 + +#define IF_CS_H_WRITE 0x00000016 + +#define IF_CS_H_CMD_LEN 0x00000018 + +#define IF_CS_H_CMD 0x0000001A + +#define IF_CS_C_READ_LEN 0x00000024 + +#define IF_CS_H_READ 0x00000010 + +/* Card control registers and their bit definitions */ + +#define IF_CS_C_STATUS 0x00000020 +#define IF_CS_C_S_TX_DNLD_RDY 0x0001 +#define IF_CS_C_S_RX_UPLD_RDY 0x0002 +#define IF_CS_C_S_CMD_DNLD_RDY 0x0004 +#define IF_CS_C_S_CMD_UPLD_RDY 0x0008 +#define IF_CS_C_S_CARDEVENT 0x0010 +#define IF_CS_C_S_MASK 0x001f +#define IF_CS_C_S_STATUS_MASK 0x7f00 +/* The following definitions should be the same as the MRVDRV_ ones */ + +#if MRVDRV_CMD_DNLD_RDY != IF_CS_C_S_CMD_DNLD_RDY +#error MRVDRV_CMD_DNLD_RDY and IF_CS_C_S_CMD_DNLD_RDY not in sync +#endif +#if MRVDRV_CMD_UPLD_RDY != IF_CS_C_S_CMD_UPLD_RDY +#error MRVDRV_CMD_UPLD_RDY and IF_CS_C_S_CMD_UPLD_RDY not in sync +#endif +#if MRVDRV_CARDEVENT != IF_CS_C_S_CARDEVENT +#error MRVDRV_CARDEVENT and IF_CS_C_S_CARDEVENT not in sync +#endif + +#define IF_CS_C_INT_CAUSE 0x00000022 +#define IF_CS_C_IC_MASK 0x001f + +#define IF_CS_C_SQ_READ_LOW 0x00000028 +#define IF_CS_C_SQ_HELPER_OK 0x10 + +#define IF_CS_C_CMD_LEN 0x00000030 + +#define IF_CS_C_CMD 0x00000012 + +#define IF_CS_SCRATCH 0x0000003F + + + +/********************************************************************/ +/* Interrupts */ +/********************************************************************/ + +static inline void if_cs_enable_ints(struct if_cs_card *card) +{ + lbs_deb_enter(LBS_DEB_CS); + if_cs_write16(card, IF_CS_H_INT_MASK, 0); +} + +static inline void if_cs_disable_ints(struct if_cs_card *card) +{ + lbs_deb_enter(LBS_DEB_CS); + if_cs_write16(card, IF_CS_H_INT_MASK, IF_CS_H_IM_MASK); +} + +static irqreturn_t if_cs_interrupt(int irq, void *data) +{ + struct if_cs_card *card = (struct if_cs_card *)data; + u16 int_cause; + + lbs_deb_enter(LBS_DEB_CS); + + int_cause = if_cs_read16(card, IF_CS_C_INT_CAUSE); + if(int_cause == 0x0) { + /* Not for us */ + return IRQ_NONE; + + } else if(int_cause == 0xffff) { + /* Read in junk, the card has probably been removed */ + card->priv->adapter->surpriseremoved = 1; + + } else { + if(int_cause & IF_CS_H_IC_TX_OVER) { + card->priv->dnld_sent = DNLD_RES_RECEIVED; + if (!card->priv->adapter->cur_cmd) + wake_up_interruptible(&card->priv->waitq); + + if (card->priv->adapter->connect_status == LIBERTAS_CONNECTED) + netif_wake_queue(card->priv->dev); + } + + /* clear interrupt */ + if_cs_write16(card, IF_CS_C_INT_CAUSE, int_cause & IF_CS_C_IC_MASK); + } + + libertas_interrupt(card->priv->dev); + + return IRQ_HANDLED; +} + + + + +/********************************************************************/ +/* I/O */ +/********************************************************************/ + +/* + * Called from 
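
if_cs_poll_while_fw_download() above busy-waits on a register because, as its comment notes, the card raises no interrupts while the firmware is still being loaded; 500 iterations of udelay(100) bound the wait to roughly 50 ms, which matches the "polls ... for 50 ms" wording quoted from the firmware specification later in this file. The same shape as a generic user-space sketch, with the register read faked:

#include <stdio.h>
#include <unistd.h>

/* stand-in for a hardware register read */
static unsigned char fake_read_reg(void)
{
        static int calls;

        return ++calls > 3 ? 0x04 : 0x00;       /* "ready" on the fourth poll */
}

/* Poll until the register equals 'want'; cap the wait at 500 * 100 us ~ 50 ms. */
static int poll_reg(unsigned char want)
{
        int i;

        for (i = 0; i < 500; i++) {
                if (fake_read_reg() == want)
                        return i;               /* number of polls needed */
                usleep(100);
        }
        return -1;                              /* timed out */
}

int main(void)
{
        printf("ready after %d polls\n", poll_reg(0x04));
        return 0;
}
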
if_cs_host_to_card to send a command to the hardware + */ +static int if_cs_send_cmd(wlan_private *priv, u8 *buf, u16 nb) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + int ret = -1; + int loops = 0; + + lbs_deb_enter(LBS_DEB_CS); + + /* Is hardware ready? */ + while (1) { + u16 val = if_cs_read16(card, IF_CS_C_STATUS); + if (val & IF_CS_C_S_CMD_DNLD_RDY) + break; + if (++loops > 100) { + lbs_pr_err("card not ready for commands\n"); + goto done; + } + mdelay(1); + } + + if_cs_write16(card, IF_CS_H_CMD_LEN, nb); + + if_cs_write16_rep(card, IF_CS_H_CMD, buf, nb / 2); + /* Are we supposed to transfer an odd amount of bytes? */ + if (nb & 1) + if_cs_write8(card, IF_CS_H_CMD, buf[nb-1]); + + /* "Assert the download over interrupt command in the Host + * status register" */ + if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); + + /* "Assert the download over interrupt command in the Card + * interrupt case register" */ + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +/* + * Called from if_cs_host_to_card to send a data to the hardware + */ +static void if_cs_send_data(wlan_private *priv, u8 *buf, u16 nb) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + + lbs_deb_enter(LBS_DEB_CS); + + if_cs_write16(card, IF_CS_H_WRITE_LEN, nb); + + /* write even number of bytes, then odd byte if necessary */ + if_cs_write16_rep(card, IF_CS_H_WRITE, buf, nb / 2); + if (nb & 1) + if_cs_write8(card, IF_CS_H_WRITE, buf[nb-1]); + + if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_TX_OVER); + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_STATUS_TX_OVER); + + lbs_deb_leave(LBS_DEB_CS); +} + + +/* + * Get the command result out of the card. + */ +static int if_cs_receive_cmdres(wlan_private *priv, u8* data, u32 *len) +{ + int ret = -1; + u16 val; + + lbs_deb_enter(LBS_DEB_CS); + + /* is hardware ready? 
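
if_cs_send_cmd() and if_cs_send_data() above both push their buffer through a 16-bit port: write the length register, stream nb/2 words with the _rep helper, finish with a single 8-bit write if nb was odd, then raise the "download over" bits in the host status and interrupt-cause registers. The word/odd-byte split looks like this in isolation; the fake port below merely records bytes so the sketch can run anywhere:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t port_buf[64];                    /* stand-in for the card's window */
static size_t port_pos;

static void write16(uint16_t val)               /* fake iowrite16() */
{
        memcpy(port_buf + port_pos, &val, 2);
        port_pos += 2;
}

static void write8(uint8_t val)                 /* fake iowrite8() */
{
        port_buf[port_pos++] = val;
}

/* Send 'nb' bytes as 16-bit words, plus one trailing 8-bit write for odd sizes. */
static void send_block(const uint8_t *buf, uint16_t nb)
{
        uint16_t i, word;

        for (i = 0; i + 1 < nb; i += 2) {
                memcpy(&word, buf + i, 2);
                write16(word);
        }
        if (nb & 1)
                write8(buf[nb - 1]);
}

int main(void)
{
        const uint8_t cmd[5] = { 0x06, 0x00, 0x05, 0x00, 0xaa };

        send_block(cmd, sizeof(cmd));
        printf("wrote %zu bytes to the fake port\n", port_pos);
        return 0;
}
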
*/ + val = if_cs_read16(priv->card, IF_CS_C_STATUS); + if ((val & IF_CS_C_S_CMD_UPLD_RDY) == 0) { + lbs_pr_err("card not ready for CMD\n"); + goto out; + } + + *len = if_cs_read16(priv->card, IF_CS_C_CMD_LEN); + if ((*len == 0) || (*len > MRVDRV_SIZE_OF_CMD_BUFFER)) { + lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len); + goto out; + } + + /* read even number of bytes, then odd byte if necessary */ + if_cs_read16_rep(priv->card, IF_CS_C_CMD, data, *len/sizeof(u16)); + if (*len & 1) + data[*len-1] = if_cs_read8(priv->card, IF_CS_C_CMD); + + ret = 0; +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len); + return ret; +} + + +static struct sk_buff *if_cs_receive_data(wlan_private *priv) +{ + struct sk_buff *skb = NULL; + u16 len; + u8 *data; + + lbs_deb_enter(LBS_DEB_CS); + + len = if_cs_read16(priv->card, IF_CS_C_READ_LEN); + if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { + lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); + priv->stats.rx_dropped++; + printk(KERN_INFO "##HS %s:%d TODO\n", __FUNCTION__, __LINE__); + goto dat_err; + } + + //TODO: skb = dev_alloc_skb(len+ETH_FRAME_LEN+MRVDRV_SNAP_HEADER_LEN+EXTRA_LEN); + skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2); + if (!skb) + goto out; + skb_put(skb, len); + skb_reserve(skb, 2);/* 16 byte align */ + data = skb->data; + + /* read even number of bytes, then odd byte if necessary */ + if_cs_read16_rep(priv->card, IF_CS_H_READ, data, len/sizeof(u16)); + if (len & 1) + data[len-1] = if_cs_read8(priv->card, IF_CS_H_READ); + +dat_err: + if_cs_write16(priv->card, IF_CS_H_STATUS, IF_CS_H_STATUS_RX_OVER); + if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_RX_OVER); + +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb); + return skb; +} + + + +/********************************************************************/ +/* Firmware */ +/********************************************************************/ + +/* + * Tries to program the helper firmware. 
+ * + * Return 0 on success + */ +static int if_cs_prog_helper(struct if_cs_card *card) +{ + int ret = 0; + int sent = 0; + u8 scratch; + const struct firmware *fw; + + lbs_deb_enter(LBS_DEB_CS); + + scratch = if_cs_read8(card, IF_CS_SCRATCH); + + /* "If the value is 0x5a, the firmware is already + * downloaded successfully" + */ + if (scratch == 0x5a) + goto done; + + /* "If the value is != 00, it is invalid value of register */ + if (scratch != 0x00) { + ret = -ENODEV; + goto done; + } + + /* TODO: make firmware file configurable */ + ret = request_firmware(&fw, "libertas_cs_helper.fw", + &handle_to_dev(card->p_dev)); + if (ret) { + lbs_pr_err("can't load helper firmware\n"); + ret = -ENODEV; + goto done; + } + lbs_deb_cs("helper size %td\n", fw->size); + + /* "Set the 5 bytes of the helper image to 0" */ + /* Not needed, this contains an ARM branch instruction */ + + for (;;) { + /* "the number of bytes to send is 256" */ + int count = 256; + int remain = fw->size - sent; + + if (remain < count) + count = remain; + /* printk(KERN_INFO "//HS %d loading %d of %d bytes\n", + __LINE__, sent, fw->size); */ + + /* "write the number of bytes to be sent to the I/O Command + * write length register" */ + if_cs_write16(card, IF_CS_H_CMD_LEN, count); + + /* "write this to I/O Command port register as 16 bit writes */ + if (count) + if_cs_write16_rep(card, IF_CS_H_CMD, + &fw->data[sent], + count >> 1); + + /* "Assert the download over interrupt command in the Host + * status register" */ + if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); + + /* "Assert the download over interrupt command in the Card + * interrupt case register" */ + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); + + /* "The host polls the Card Status register ... 
for 50 ms before + declaring a failure */ + ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, + IF_CS_C_S_CMD_DNLD_RDY); + if (ret < 0) { + lbs_pr_err("can't download helper at 0x%x, ret %d\n", + sent, ret); + goto done; + } + + if (count == 0) + break; + + sent += count; + } + + release_firmware(fw); + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +static int if_cs_prog_real(struct if_cs_card *card) +{ + const struct firmware *fw; + int ret = 0; + int retry = 0; + int len = 0; + int sent; + + lbs_deb_enter(LBS_DEB_CS); + + /* TODO: make firmware file configurable */ + ret = request_firmware(&fw, "libertas_cs.fw", + &handle_to_dev(card->p_dev)); + if (ret) { + lbs_pr_err("can't load firmware\n"); + ret = -ENODEV; + goto done; + } + lbs_deb_cs("fw size %td\n", fw->size); + + ret = if_cs_poll_while_fw_download(card, IF_CS_C_SQ_READ_LOW, IF_CS_C_SQ_HELPER_OK); + if (ret < 0) { + int i; + lbs_pr_err("helper firmware doesn't answer\n"); + for (i = 0; i < 0x50; i += 2) + printk(KERN_INFO "## HS %02x: %04x\n", + i, if_cs_read16(card, i)); + goto err_release; + } + + for (sent = 0; sent < fw->size; sent += len) { + len = if_cs_read16(card, IF_CS_C_SQ_READ_LOW); + /* printk(KERN_INFO "//HS %d loading %d of %d bytes\n", + __LINE__, sent, fw->size); */ + if (len & 1) { + retry++; + lbs_pr_info("odd, need to retry this firmware block\n"); + } else { + retry = 0; + } + + if (retry > 20) { + lbs_pr_err("could not download firmware\n"); + ret = -ENODEV; + goto err_release; + } + if (retry) { + sent -= len; + } + + + if_cs_write16(card, IF_CS_H_CMD_LEN, len); + + if_cs_write16_rep(card, IF_CS_H_CMD, + &fw->data[sent], + (len+1) >> 1); + if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); + + ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, + IF_CS_C_S_CMD_DNLD_RDY); + if (ret < 0) { + lbs_pr_err("can't download firmware at 0x%x\n", sent); + goto err_release; + } + } + + ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); + if (ret < 0) { + lbs_pr_err("firmware download failed\n"); + goto err_release; + } + + ret = 0; + goto done; + + +err_release: + release_firmware(fw); + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + + +/********************************************************************/ +/* Callback functions for libertas.ko */ +/********************************************************************/ + +/* Send commands or data packets to the card */ +static int if_cs_host_to_card(wlan_private *priv, u8 type, u8 *buf, u16 nb) +{ + int ret = -1; + + lbs_deb_enter_args(LBS_DEB_CS, "type %d, bytes %d", type, nb); + + switch (type) { + case MVMS_DAT: + priv->dnld_sent = DNLD_DATA_SENT; + if_cs_send_data(priv, buf, nb); + ret = 0; + break; + case MVMS_CMD: + priv->dnld_sent = DNLD_CMD_SENT; + ret = if_cs_send_cmd(priv, buf, nb); + break; + default: + lbs_pr_err("%s: unsupported type %d\n", __FUNCTION__, type); + } + + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +static int if_cs_get_int_status(wlan_private *priv, u8 *ireg) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + //wlan_adapter *adapter = priv->adapter; + int ret = 0; + u16 int_cause; + u8 *cmdbuf; + *ireg = 0; + + lbs_deb_enter(LBS_DEB_CS); + + if (priv->adapter->surpriseremoved) + goto out; + + int_cause = if_cs_read16(card, IF_CS_C_INT_CAUSE) & IF_CS_C_IC_MASK; + if_cs_write16(card, IF_CS_C_INT_CAUSE, int_cause); + + *ireg = 
if_cs_read16(card, IF_CS_C_STATUS) & IF_CS_C_S_MASK; + + if (!*ireg) + goto sbi_get_int_status_exit; + +sbi_get_int_status_exit: + + /* is there a data packet for us? */ + if (*ireg & IF_CS_C_S_RX_UPLD_RDY) { + struct sk_buff *skb = if_cs_receive_data(priv); + libertas_process_rxed_packet(priv, skb); + *ireg &= ~IF_CS_C_S_RX_UPLD_RDY; + } + + if (*ireg & IF_CS_C_S_TX_DNLD_RDY) { + priv->dnld_sent = DNLD_RES_RECEIVED; + } + + /* Card has a command result for us */ + if (*ireg & IF_CS_C_S_CMD_UPLD_RDY) { + spin_lock(&priv->adapter->driver_lock); + if (!priv->adapter->cur_cmd) { + cmdbuf = priv->upld_buf; + priv->adapter->hisregcpy &= ~IF_CS_C_S_RX_UPLD_RDY; + } else { + cmdbuf = priv->adapter->cur_cmd->bufvirtualaddr; + } + + ret = if_cs_receive_cmdres(priv, cmdbuf, &priv->upld_len); + spin_unlock(&priv->adapter->driver_lock); + if (ret < 0) + lbs_pr_err("could not receive cmd from card\n"); + } + +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d, ireg 0x%x, hisregcpy 0x%x", ret, *ireg, priv->adapter->hisregcpy); + return ret; +} + + +static int if_cs_read_event_cause(wlan_private *priv) +{ + lbs_deb_enter(LBS_DEB_CS); + + priv->adapter->eventcause = (if_cs_read16(priv->card, IF_CS_C_STATUS) & IF_CS_C_S_STATUS_MASK) >> 5; + if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_HOST_EVENT); + + return 0; +} + + + +/********************************************************************/ +/* Card Services */ +/********************************************************************/ + +/* + * After a card is removed, if_cs_release() will unregister the + * device, and release the PCMCIA configuration. If the device is + * still open, this will be postponed until it is closed. + */ +static void if_cs_release(struct pcmcia_device *p_dev) +{ + struct if_cs_card *card = p_dev->priv; + + lbs_deb_enter(LBS_DEB_CS); + + pcmcia_disable_device(p_dev); + free_irq(p_dev->irq.AssignedIRQ, card); + if (card->iobase) + ioport_unmap(card->iobase); + + lbs_deb_leave(LBS_DEB_CS); +} + + +/* + * This creates an "instance" of the driver, allocating local data + * structures for one device. The device is registered with Card + * Services. + * + * The dev_link structure is initialized, but we don't actually + * configure the card at this point -- we wait until we receive a card + * insertion event. + */ +static int if_cs_probe(struct pcmcia_device *p_dev) +{ + int ret = -ENOMEM; + wlan_private *priv; + struct if_cs_card *card; + /* CIS parsing */ + tuple_t tuple; + cisparse_t parse; + cistpl_cftable_entry_t *cfg = &parse.cftable_entry; + cistpl_io_t *io = &cfg->io; + u_char buf[64]; + + lbs_deb_enter(LBS_DEB_CS); + + card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL); + if (!card) { + lbs_pr_err("error in kzalloc\n"); + goto out; + } + card->p_dev = p_dev; + p_dev->priv = card; + + p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + p_dev->irq.Handler = NULL; + p_dev->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID; + + p_dev->conf.Attributes = 0; + p_dev->conf.IntType = INT_MEMORY_AND_IO; + + tuple.Attributes = 0; + tuple.TupleData = buf; + tuple.TupleDataMax = sizeof(buf); + tuple.TupleOffset = 0; + + tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; + if ((ret = pcmcia_get_first_tuple(p_dev, &tuple)) != 0 || + (ret = pcmcia_get_tuple_data(p_dev, &tuple)) != 0 || + (ret = pcmcia_parse_tuple(p_dev, &tuple, &parse)) != 0) + { + lbs_pr_err("error in pcmcia_get_first_tuple etc\n"); + goto out1; + } + + p_dev->conf.ConfigIndex = cfg->index; + + /* Do we need to allocate an interrupt? 
*/ + if (cfg->irq.IRQInfo1) { + p_dev->conf.Attributes |= CONF_ENABLE_IRQ; + } + + /* IO window settings */ + if (cfg->io.nwin != 1) { + lbs_pr_err("wrong CIS (check number of IO windows)\n"); + ret = -ENODEV; + goto out1; + } + p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; + p_dev->io.BasePort1 = io->win[0].base; + p_dev->io.NumPorts1 = io->win[0].len; + + /* This reserves IO space but doesn't actually enable it */ + ret = pcmcia_request_io(p_dev, &p_dev->io); + if (ret) { + lbs_pr_err("error in pcmcia_request_io\n"); + goto out1; + } + + /* + * Allocate an interrupt line. Note that this does not assign + * a handler to the interrupt, unless the 'Handler' member of + * the irq structure is initialized. + */ + if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) { + ret = pcmcia_request_irq(p_dev, &p_dev->irq); + if (ret) { + lbs_pr_err("error in pcmcia_request_irq\n"); + goto out1; + } + } + + /* Initialize io access */ + card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1); + if (!card->iobase) { + lbs_pr_err("error in ioport_map\n"); + ret = -EIO; + goto out1; + } + + /* + * This actually configures the PCMCIA socket -- setting up + * the I/O windows and the interrupt mapping, and putting the + * card and host interface into "Memory and IO" mode. + */ + ret = pcmcia_request_configuration(p_dev, &p_dev->conf); + if (ret) { + lbs_pr_err("error in pcmcia_request_configuration\n"); + goto out2; + } + + /* Finally, report what we've done */ + lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n", + p_dev->irq.AssignedIRQ, p_dev->io.BasePort1, + p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1); + + + /* Load the firmware early, before calling into libertas.ko */ + ret = if_cs_prog_helper(card); + if (ret == 0) + ret = if_cs_prog_real(card); + if (ret) + goto out2; + + /* Make this card known to the libertas driver */ + priv = libertas_add_card(card, &p_dev->dev); + if (!priv) { + ret = -ENOMEM; + goto out2; + } + + /* Store pointers to our call-back functions */ + card->priv = priv; + priv->card = card; + priv->hw_host_to_card = if_cs_host_to_card; + priv->hw_get_int_status = if_cs_get_int_status; + priv->hw_read_event_cause = if_cs_read_event_cause; + + priv->adapter->fw_ready = 1; + + /* Now actually get the IRQ */ + ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt, + IRQF_SHARED, DRV_NAME, card); + if (ret) { + lbs_pr_err("error in request_irq\n"); + goto out3; + } + + if_cs_enable_ints(card); + + /* And finally bring the card up */ + if (libertas_start_card(priv) != 0) { + lbs_pr_err("could not activate card\n"); + goto out3; + } + + ret = 0; + goto out; + +out3: + libertas_remove_card(priv); +out2: + ioport_unmap(card->iobase); +out1: + pcmcia_disable_device(p_dev); +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +/* + * This deletes a driver "instance". The device is de-registered with + * Card Services. If it has been released, all local data structures + * are freed. Otherwise, the structures will be freed when the device + * is released. 
+ */ +static void if_cs_detach(struct pcmcia_device *p_dev) +{ + struct if_cs_card *card = p_dev->priv; + + lbs_deb_enter(LBS_DEB_CS); + + libertas_stop_card(card->priv); + libertas_remove_card(card->priv); + if_cs_disable_ints(card); + if_cs_release(p_dev); + kfree(card); + + lbs_deb_leave(LBS_DEB_CS); +} + + + +/********************************************************************/ +/* Module initialization */ +/********************************************************************/ + +static struct pcmcia_device_id if_cs_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x02df, 0x8103), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); + + +static struct pcmcia_driver libertas_driver = { + .owner = THIS_MODULE, + .drv = { + .name = DRV_NAME, + }, + .probe = if_cs_probe, + .remove = if_cs_detach, + .id_table = if_cs_ids, +}; + + +static int __init if_cs_init(void) +{ + int ret; + + lbs_deb_enter(LBS_DEB_CS); + ret = pcmcia_register_driver(&libertas_driver); + lbs_deb_leave(LBS_DEB_CS); + return ret; +} + + +static void __exit if_cs_exit(void) +{ + lbs_deb_enter(LBS_DEB_CS); + pcmcia_unregister_driver(&libertas_driver); + lbs_deb_leave(LBS_DEB_CS); +} + + +module_init(if_cs_init); +module_exit(if_cs_exit); diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 998317571ec2..cb59f46ed126 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -21,7 +21,7 @@ static const char usbdriver_name[] = "usb8xxx"; static u8 *default_fw_name = "usb8388.bin"; -char *libertas_fw_name = NULL; +static char *libertas_fw_name = NULL; module_param_named(fw_name, libertas_fw_name, charp, 0644); /* @@ -44,13 +44,14 @@ MODULE_DEVICE_TABLE(usb, if_usb_table); static void if_usb_receive(struct urb *urb); static void if_usb_receive_fwload(struct urb *urb); -static int if_usb_reset_device(wlan_private *priv); -static int if_usb_register_dev(wlan_private * priv); -static int if_usb_unregister_dev(wlan_private *); -static int if_usb_prog_firmware(wlan_private *); +static int if_usb_prog_firmware(struct usb_card_rec *cardp); static int if_usb_host_to_card(wlan_private * priv, u8 type, u8 * payload, u16 nb); static int if_usb_get_int_status(wlan_private * priv, u8 *); static int if_usb_read_event_cause(wlan_private *); +static int usb_tx_block(struct usb_card_rec *cardp, u8 *payload, u16 nb); +static void if_usb_free(struct usb_card_rec *cardp); +static int if_usb_submit_rx_urb(struct usb_card_rec *cardp); +static int if_usb_reset_device(struct usb_card_rec *cardp); /** * @brief call back function to handle the status of the URB @@ -59,29 +60,40 @@ static int if_usb_read_event_cause(wlan_private *); */ static void if_usb_write_bulk_callback(struct urb *urb) { - wlan_private *priv = (wlan_private *) (urb->context); - wlan_adapter *adapter = priv->adapter; - struct net_device *dev = priv->dev; + struct usb_card_rec *cardp = (struct usb_card_rec *) urb->context; /* handle the transmission complete validations */ - if (urb->status != 0) { - /* print the failure status number for debug */ - lbs_pr_info("URB in failure status: %d\n", urb->status); - } else { + if (urb->status == 0) { + wlan_private *priv = cardp->priv; + /* lbs_deb_usbd(&urb->dev->dev, "URB status is successfull\n"); lbs_deb_usbd(&urb->dev->dev, "Actual length transmitted %d\n", urb->actual_length); */ - priv->dnld_sent = DNLD_RES_RECEIVED; - /* Wake main thread if commands are pending */ - if (!adapter->cur_cmd) - wake_up_interruptible(&priv->mainthread.waitq); - if 
((adapter->connect_status == libertas_connected)) { - netif_wake_queue(dev); - netif_wake_queue(priv->mesh_dev); + + /* Used for both firmware TX and regular TX. priv isn't + * valid at firmware load time. + */ + if (priv) { + wlan_adapter *adapter = priv->adapter; + struct net_device *dev = priv->dev; + + priv->dnld_sent = DNLD_RES_RECEIVED; + + /* Wake main thread if commands are pending */ + if (!adapter->cur_cmd) + wake_up_interruptible(&priv->waitq); + + if ((adapter->connect_status == LIBERTAS_CONNECTED)) { + netif_wake_queue(dev); + netif_wake_queue(priv->mesh_dev); + } } + } else { + /* print the failure status number for debug */ + lbs_pr_info("URB in failure status: %d\n", urb->status); } return; @@ -92,7 +104,7 @@ static void if_usb_write_bulk_callback(struct urb *urb) * @param cardp pointer usb_card_rec * @return N/A */ -void if_usb_free(struct usb_card_rec *cardp) +static void if_usb_free(struct usb_card_rec *cardp) { lbs_deb_enter(LBS_DEB_USB); @@ -203,21 +215,35 @@ static int if_usb_probe(struct usb_interface *intf, } } + /* Upload firmware */ + cardp->rinfo.cardp = cardp; + if (if_usb_prog_firmware(cardp)) { + lbs_deb_usbd(&udev->dev, "FW upload failed"); + goto err_prog_firmware; + } + if (!(priv = libertas_add_card(cardp, &udev->dev))) - goto dealloc; + goto err_prog_firmware; + + cardp->priv = priv; if (libertas_add_mesh(priv, &udev->dev)) goto err_add_mesh; - priv->hw_register_dev = if_usb_register_dev; - priv->hw_unregister_dev = if_usb_unregister_dev; - priv->hw_prog_firmware = if_usb_prog_firmware; + cardp->eth_dev = priv->dev; + priv->hw_host_to_card = if_usb_host_to_card; priv->hw_get_int_status = if_usb_get_int_status; priv->hw_read_event_cause = if_usb_read_event_cause; + priv->boot2_version = udev->descriptor.bcdDevice; - if (libertas_activate_card(priv, libertas_fw_name)) - goto err_activate_card; + /* Delay 200 ms to waiting for the FW ready */ + if_usb_submit_rx_urb(cardp); + msleep_interruptible(200); + priv->adapter->fw_ready = 1; + + if (libertas_start_card(priv)) + goto err_start_card; list_add_tail(&cardp->list, &usb_devices); @@ -226,11 +252,12 @@ static int if_usb_probe(struct usb_interface *intf, return 0; -err_activate_card: +err_start_card: libertas_remove_mesh(priv); err_add_mesh: - free_netdev(priv->dev); - kfree(priv->adapter); + libertas_remove_card(priv); +err_prog_firmware: + if_usb_reset_device(cardp); dealloc: if_usb_free(cardp); @@ -247,21 +274,22 @@ static void if_usb_disconnect(struct usb_interface *intf) { struct usb_card_rec *cardp = usb_get_intfdata(intf); wlan_private *priv = (wlan_private *) cardp->priv; - wlan_adapter *adapter = NULL; - adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_MAIN); - /* - * Update Surprise removed to TRUE - */ - adapter->surpriseremoved = 1; + /* Update Surprise removed to TRUE */ + cardp->surprise_removed = 1; list_del(&cardp->list); - /* card is removed and we can call wlan_remove_card */ - lbs_deb_usbd(&cardp->udev->dev, "call remove card\n"); - libertas_remove_mesh(priv); - libertas_remove_card(priv); + if (priv) { + wlan_adapter *adapter = priv->adapter; + + adapter->surpriseremoved = 1; + libertas_stop_card(priv); + libertas_remove_mesh(priv); + libertas_remove_card(priv); + } /* Unlink and free urb */ if_usb_free(cardp); @@ -269,7 +297,7 @@ static void if_usb_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); - return; + lbs_deb_leave(LBS_DEB_MAIN); } /** @@ -277,12 +305,11 @@ static void if_usb_disconnect(struct usb_interface *intf) * @param 
priv pointer to wlan_private * @return 0 */ -static int if_prog_firmware(wlan_private * priv) +static int if_prog_firmware(struct usb_card_rec *cardp) { - struct usb_card_rec *cardp = priv->card; struct FWData *fwdata; struct fwheader *fwheader; - u8 *firmware = priv->firmware->data; + u8 *firmware = cardp->fw->data; fwdata = kmalloc(sizeof(struct FWData), GFP_ATOMIC); @@ -330,7 +357,7 @@ static int if_prog_firmware(wlan_private * priv) cardp->totalbytes); */ memcpy(cardp->bulk_out_buffer, fwheader, FW_DATA_XMIT_SIZE); - usb_tx_block(priv, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); + usb_tx_block(cardp, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); } else if (fwdata->fwheader.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) { /* @@ -340,7 +367,7 @@ static int if_prog_firmware(wlan_private * priv) "Donwloading FW JUMP BLOCK\n"); */ memcpy(cardp->bulk_out_buffer, fwheader, FW_DATA_XMIT_SIZE); - usb_tx_block(priv, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); + usb_tx_block(cardp, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); cardp->fwfinalblk = 1; } @@ -355,17 +382,20 @@ static int if_prog_firmware(wlan_private * priv) return 0; } -static int libertas_do_reset(wlan_private *priv) +static int if_usb_reset_device(struct usb_card_rec *cardp) { int ret; - struct usb_card_rec *cardp = priv->card; + wlan_private * priv = cardp->priv; lbs_deb_enter(LBS_DEB_USB); + /* Try a USB port reset first, if that fails send the reset + * command to the firmware. + */ ret = usb_reset_device(cardp->udev); - if (!ret) { + if (!ret && priv) { msleep(10); - if_usb_reset_device(priv); + ret = libertas_reset_device(priv); msleep(10); } @@ -381,14 +411,12 @@ static int libertas_do_reset(wlan_private *priv) * @param nb data length * @return 0 or -1 */ -int usb_tx_block(wlan_private * priv, u8 * payload, u16 nb) +static int usb_tx_block(struct usb_card_rec *cardp, u8 * payload, u16 nb) { - /* pointer to card structure */ - struct usb_card_rec *cardp = priv->card; int ret = -1; /* check if device is removed */ - if (priv->adapter->surpriseremoved) { + if (cardp->surprise_removed) { lbs_deb_usbd(&cardp->udev->dev, "Device removed\n"); goto tx_ret; } @@ -396,7 +424,7 @@ int usb_tx_block(wlan_private * priv, u8 * payload, u16 nb) usb_fill_bulk_urb(cardp->tx_urb, cardp->udev, usb_sndbulkpipe(cardp->udev, cardp->bulk_out_endpointAddr), - payload, nb, if_usb_write_bulk_callback, priv); + payload, nb, if_usb_write_bulk_callback, cardp); cardp->tx_urb->transfer_flags |= URB_ZERO_PACKET; @@ -413,11 +441,9 @@ tx_ret: return ret; } -static int __if_usb_submit_rx_urb(wlan_private * priv, - void (*callbackfn) - (struct urb *urb)) +static int __if_usb_submit_rx_urb(struct usb_card_rec *cardp, + void (*callbackfn)(struct urb *urb)) { - struct usb_card_rec *cardp = priv->card; struct sk_buff *skb; struct read_cb_info *rinfo = &cardp->rinfo; int ret = -1; @@ -453,22 +479,21 @@ rx_ret: return ret; } -static inline int if_usb_submit_rx_urb_fwload(wlan_private * priv) +static int if_usb_submit_rx_urb_fwload(struct usb_card_rec *cardp) { - return __if_usb_submit_rx_urb(priv, &if_usb_receive_fwload); + return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload); } -static inline int if_usb_submit_rx_urb(wlan_private * priv) +static int if_usb_submit_rx_urb(struct usb_card_rec *cardp) { - return __if_usb_submit_rx_urb(priv, &if_usb_receive); + return __if_usb_submit_rx_urb(cardp, &if_usb_receive); } static void if_usb_receive_fwload(struct urb *urb) { struct read_cb_info *rinfo = (struct read_cb_info *)urb->context; - wlan_private *priv = rinfo->priv; 
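/* Note: read_cb_info now carries the usb_card_rec (rinfo->cardp) instead of
 * a wlan_private pointer. The firmware is downloaded from if_usb_probe()
 * before libertas_add_card() is called, so no wlan_private exists yet when
 * these firmware-load URBs complete; the callbacks below therefore work from
 * the usb_card_rec alone and only reach priv through cardp->priv once it has
 * been set up. */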
struct sk_buff *skb = rinfo->skb; - struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; + struct usb_card_rec *cardp = (struct usb_card_rec *)rinfo->cardp; struct fwsyncheader *syncfwheader; struct bootcmdrespStr bootcmdresp; @@ -484,7 +509,7 @@ static void if_usb_receive_fwload(struct urb *urb) sizeof(bootcmdresp)); if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { kfree_skb(skb); - if_usb_submit_rx_urb_fwload(priv); + if_usb_submit_rx_urb_fwload(cardp); cardp->bootcmdresp = 1; lbs_deb_usbd(&cardp->udev->dev, "Received valid boot command response\n"); @@ -508,7 +533,7 @@ static void if_usb_receive_fwload(struct urb *urb) "Received valid boot command response\n"); } kfree_skb(skb); - if_usb_submit_rx_urb_fwload(priv); + if_usb_submit_rx_urb_fwload(cardp); return; } @@ -544,9 +569,9 @@ static void if_usb_receive_fwload(struct urb *urb) goto exit; } - if_prog_firmware(priv); + if_prog_firmware(cardp); - if_usb_submit_rx_urb_fwload(priv); + if_usb_submit_rx_urb_fwload(cardp); exit: kfree(syncfwheader); @@ -596,11 +621,11 @@ static inline void process_cmdrequest(int recvlength, u8 *recvbuff, * data to clear the interrupt */ if (!priv->adapter->cur_cmd) { cmdbuf = priv->upld_buf; - priv->adapter->hisregcpy &= ~his_cmdupldrdy; + priv->adapter->hisregcpy &= ~MRVDRV_CMD_UPLD_RDY; } else cmdbuf = priv->adapter->cur_cmd->bufvirtualaddr; - cardp->usb_int_cause |= his_cmdupldrdy; + cardp->usb_int_cause |= MRVDRV_CMD_UPLD_RDY; priv->upld_len = (recvlength - MESSAGE_HEADER_LEN); memcpy(cmdbuf, recvbuff + MESSAGE_HEADER_LEN, priv->upld_len); @@ -625,17 +650,19 @@ static inline void process_cmdrequest(int recvlength, u8 *recvbuff, static void if_usb_receive(struct urb *urb) { struct read_cb_info *rinfo = (struct read_cb_info *)urb->context; - wlan_private *priv = rinfo->priv; struct sk_buff *skb = rinfo->skb; - struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; + struct usb_card_rec *cardp = (struct usb_card_rec *) rinfo->cardp; + wlan_private * priv = cardp->priv; int recvlength = urb->actual_length; u8 *recvbuff = NULL; - u32 recvtype; + u32 recvtype = 0; lbs_deb_enter(LBS_DEB_USB); if (recvlength) { + __le32 tmp; + if (urb->status) { lbs_deb_usbd(&cardp->udev->dev, "URB status is failed\n"); @@ -644,18 +671,14 @@ static void if_usb_receive(struct urb *urb) } recvbuff = skb->data + IPFIELD_ALIGN_OFFSET; - memcpy(&recvtype, recvbuff, sizeof(u32)); - lbs_deb_usbd(&cardp->udev->dev, - "Recv length = 0x%x\n", recvlength); - lbs_deb_usbd(&cardp->udev->dev, - "Receive type = 0x%X\n", recvtype); - recvtype = le32_to_cpu(recvtype); + memcpy(&tmp, recvbuff, sizeof(u32)); + recvtype = le32_to_cpu(tmp); lbs_deb_usbd(&cardp->udev->dev, - "Receive type after = 0x%X\n", recvtype); + "Recv length = 0x%x, Recv type = 0x%X\n", + recvlength, recvtype); } else if (urb->status) goto rx_exit; - switch (recvtype) { case CMD_TYPE_DATA: process_cmdtypedata(recvlength, skb, cardp, priv); @@ -677,18 +700,20 @@ static void if_usb_receive(struct urb *urb) break; } cardp->usb_event_cause <<= 3; - cardp->usb_int_cause |= his_cardevent; + cardp->usb_int_cause |= MRVDRV_CARDEVENT; kfree_skb(skb); libertas_interrupt(priv->dev); spin_unlock(&priv->adapter->driver_lock); goto rx_exit; default: + lbs_deb_usbd(&cardp->udev->dev, "Unknown command type 0x%X\n", + recvtype); kfree_skb(skb); break; } setup_for_next: - if_usb_submit_rx_urb(priv); + if_usb_submit_rx_urb(cardp); rx_exit: lbs_deb_leave(LBS_DEB_USB); } @@ -703,21 +728,19 @@ rx_exit: */ static int if_usb_host_to_card(wlan_private * priv, u8 
type, u8 * payload, u16 nb) { - int ret = -1; - u32 tmp; struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; lbs_deb_usbd(&cardp->udev->dev,"*** type = %u\n", type); lbs_deb_usbd(&cardp->udev->dev,"size after = %d\n", nb); if (type == MVMS_CMD) { - tmp = cpu_to_le32(CMD_TYPE_REQUEST); + __le32 tmp = cpu_to_le32(CMD_TYPE_REQUEST); priv->dnld_sent = DNLD_CMD_SENT; memcpy(cardp->bulk_out_buffer, (u8 *) & tmp, MESSAGE_HEADER_LEN); } else { - tmp = cpu_to_le32(CMD_TYPE_DATA); + __le32 tmp = cpu_to_le32(CMD_TYPE_DATA); priv->dnld_sent = DNLD_DATA_SENT; memcpy(cardp->bulk_out_buffer, (u8 *) & tmp, MESSAGE_HEADER_LEN); @@ -725,10 +748,8 @@ static int if_usb_host_to_card(wlan_private * priv, u8 type, u8 * payload, u16 n memcpy((cardp->bulk_out_buffer + MESSAGE_HEADER_LEN), payload, nb); - ret = - usb_tx_block(priv, cardp->bulk_out_buffer, nb + MESSAGE_HEADER_LEN); - - return ret; + return usb_tx_block(cardp, cardp->bulk_out_buffer, + nb + MESSAGE_HEADER_LEN); } /* called with adapter->driver_lock held */ @@ -747,80 +768,109 @@ static int if_usb_get_int_status(wlan_private * priv, u8 * ireg) static int if_usb_read_event_cause(wlan_private * priv) { struct usb_card_rec *cardp = priv->card; + priv->adapter->eventcause = cardp->usb_event_cause; /* Re-submit rx urb here to avoid event lost issue */ - if_usb_submit_rx_urb(priv); + if_usb_submit_rx_urb(cardp); return 0; } -static int if_usb_reset_device(wlan_private *priv) +/** + * @brief This function issues Boot command to the Boot2 code + * @param ivalue 1:Boot from FW by USB-Download + * 2:Boot from FW in EEPROM + * @return 0 + */ +static int if_usb_issue_boot_command(struct usb_card_rec *cardp, int ivalue) { - int ret; - - lbs_deb_enter(LBS_DEB_USB); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_reset, - cmd_act_halt, 0, 0, NULL); - msleep_interruptible(10); - - lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); - return ret; -} + struct bootcmdstr sbootcmd; + int i; -static int if_usb_unregister_dev(wlan_private * priv) -{ - int ret = 0; + /* Prepare command */ + sbootcmd.u32magicnumber = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER); + sbootcmd.u8cmd_tag = ivalue; + for (i=0; i<11; i++) + sbootcmd.au8dumy[i]=0x00; + memcpy(cardp->bulk_out_buffer, &sbootcmd, sizeof(struct bootcmdstr)); - /* Need to send a Reset command to device before USB resources freed - * and wlan_remove_card() called, then device can handle FW download - * again. - */ - if (priv) - if_usb_reset_device(priv); + /* Issue command */ + usb_tx_block(cardp, cardp->bulk_out_buffer, sizeof(struct bootcmdstr)); - return ret; + return 0; } /** - * @brief This function register usb device and initialize parameter - * @param priv pointer to wlan_private - * @return 0 or -1 + * @brief This function checks the validity of Boot2/FW image. 
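 * The image is expected to be a sequence of struct fwheader blocks, each
 * followed by the payload announced in its datalength field; the walk below
 * advances header by header and only reports success if it reaches a block
 * whose dnldcmd is FW_HAS_LAST_BLOCK before running past the end of the
 * file.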
+ * + * @param data pointer to image + * len image length + * @return 0 or -1 */ -static int if_usb_register_dev(wlan_private * priv) +static int check_fwfile_format(u8 *data, u32 totlen) { - struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; + u32 bincmd, exit; + u32 blksize, offset, len; + int ret; - lbs_deb_enter(LBS_DEB_USB); + ret = 1; + exit = len = 0; - cardp->priv = priv; - cardp->eth_dev = priv->dev; - priv->hotplug_device = &(cardp->udev->dev); + do { + struct fwheader *fwh = (void *)data; + + bincmd = le32_to_cpu(fwh->dnldcmd); + blksize = le32_to_cpu(fwh->datalength); + switch (bincmd) { + case FW_HAS_DATA_TO_RECV: + offset = sizeof(struct fwheader) + blksize; + data += offset; + len += offset; + if (len >= totlen) + exit = 1; + break; + case FW_HAS_LAST_BLOCK: + exit = 1; + ret = 0; + break; + default: + exit = 1; + break; + } + } while (!exit); - lbs_deb_usbd(&cardp->udev->dev, "udev pointer is at %p\n", - cardp->udev); + if (ret) + lbs_pr_err("firmware file format check FAIL\n"); + else + lbs_deb_fw("firmware file format check PASS\n"); - lbs_deb_leave(LBS_DEB_USB); - return 0; + return ret; } - -static int if_usb_prog_firmware(wlan_private * priv) +static int if_usb_prog_firmware(struct usb_card_rec *cardp) { - struct usb_card_rec *cardp = priv->card; int i = 0; static int reset_count = 10; int ret = 0; lbs_deb_enter(LBS_DEB_USB); - cardp->rinfo.priv = priv; + if ((ret = request_firmware(&cardp->fw, libertas_fw_name, + &cardp->udev->dev)) < 0) { + lbs_pr_err("request_firmware() failed with %#x\n", ret); + lbs_pr_err("firmware %s not found\n", libertas_fw_name); + goto done; + } + + if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) + goto release_fw; restart: - if (if_usb_submit_rx_urb_fwload(priv) < 0) { + if (if_usb_submit_rx_urb_fwload(cardp) < 0) { lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n"); ret = -1; - goto done; + goto release_fw; } cardp->bootcmdresp = 0; @@ -828,7 +878,7 @@ restart: int j = 0; i++; /* Issue Boot command = 1, Boot from Download-FW */ - if_usb_issue_boot_command(priv, BOOT_CMD_FW_BY_USB); + if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB); /* wait for command response */ do { j++; @@ -838,14 +888,13 @@ restart: if (cardp->bootcmdresp == 0) { if (--reset_count >= 0) { - libertas_do_reset(priv); + if_usb_reset_device(cardp); goto restart; } return -1; } i = 0; - priv->adapter->fw_ready = 0; cardp->totalbytes = 0; cardp->fwlastblksent = 0; @@ -855,40 +904,38 @@ restart: cardp->totalbytes = 0; cardp->fwfinalblk = 0; - if_prog_firmware(priv); + if_prog_firmware(cardp); do { lbs_deb_usbd(&cardp->udev->dev,"Wlan sched timeout\n"); i++; msleep_interruptible(100); - if (priv->adapter->surpriseremoved || i >= 20) + if (cardp->surprise_removed || i >= 20) break; } while (!cardp->fwdnldover); if (!cardp->fwdnldover) { lbs_pr_info("failed to load fw, resetting device!\n"); if (--reset_count >= 0) { - libertas_do_reset(priv); + if_usb_reset_device(cardp); goto restart; } lbs_pr_info("FW download failure, time = %d ms\n", i * 100); ret = -1; - goto done; + goto release_fw; } - if_usb_submit_rx_urb(priv); - - /* Delay 200 ms to waiting for the FW ready */ - msleep_interruptible(200); - - priv->adapter->fw_ready = 1; +release_fw: + release_firmware(cardp->fw); + cardp->fw = NULL; done: lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); return ret; } + #ifdef CONFIG_PM static int if_usb_suspend(struct usb_interface *intf, pm_message_t message) { @@ -900,6 +947,19 @@ static int if_usb_suspend(struct usb_interface *intf, 
pm_message_t message) if (priv->adapter->psstate != PS_STATE_FULL_POWER) return -1; + if (priv->mesh_dev && !priv->mesh_autostart_enabled) { + /* Mesh autostart must be activated while sleeping + * On resume it will go back to the current state + */ + struct cmd_ds_mesh_access mesh_access; + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(1); + libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + } + netif_device_detach(cardp->eth_dev); netif_device_detach(priv->mesh_dev); @@ -927,6 +987,19 @@ static int if_usb_resume(struct usb_interface *intf) netif_device_attach(cardp->eth_dev); netif_device_attach(priv->mesh_dev); + if (priv->mesh_dev && !priv->mesh_autostart_enabled) { + /* Mesh autostart was activated while sleeping + * Disable it if appropriate + */ + struct cmd_ds_mesh_access mesh_access; + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(0); + libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + } + lbs_deb_leave(LBS_DEB_USB); return 0; } @@ -970,8 +1043,10 @@ static void if_usb_exit_module(void) lbs_deb_enter(LBS_DEB_MAIN); - list_for_each_entry_safe(cardp, cardp_temp, &usb_devices, list) - if_usb_reset_device((wlan_private *) cardp->priv); + list_for_each_entry_safe(cardp, cardp_temp, &usb_devices, list) { + libertas_prepare_and_send_command(cardp->priv, CMD_802_11_RESET, + CMD_ACT_HALT, 0, 0, NULL); + } /* API unregisters the driver from USB subsystem */ usb_deregister(&if_usb_driver); diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h index 156bb485e1a6..e07a10ed28b5 100644 --- a/drivers/net/wireless/libertas/if_usb.h +++ b/drivers/net/wireless/libertas/if_usb.h @@ -38,7 +38,7 @@ struct bootcmdrespStr /* read callback private data */ struct read_cb_info { - wlan_private *priv; + struct usb_card_rec *cardp; struct sk_buff *skb; }; @@ -58,6 +58,7 @@ struct usb_card_rec { int bulk_out_size; u8 bulk_out_endpointAddr; + const struct firmware *fw; u8 CRC_OK; u32 fwseqnum; u32 lastseqnum; @@ -65,6 +66,7 @@ struct usb_card_rec { u32 fwlastblksent; u8 fwdnldover; u8 fwfinalblk; + u8 surprise_removed; u32 usb_event_cause; u8 usb_int_cause; @@ -102,8 +104,4 @@ struct fwsyncheader { #define FW_DATA_XMIT_SIZE \ sizeof(struct fwheader) + le32_to_cpu(fwdata->fwheader.datalength) + sizeof(u32) -int usb_tx_block(wlan_private *priv, u8 *payload, u16 nb); -void if_usb_free(struct usb_card_rec *cardp); -int if_usb_issue_boot_command(wlan_private *priv, int ivalue); - #endif diff --git a/drivers/net/wireless/libertas/join.c b/drivers/net/wireless/libertas/join.c index 78ac3064a0bd..dc24a05c9447 100644 --- a/drivers/net/wireless/libertas/join.c +++ b/drivers/net/wireless/libertas/join.c @@ -17,10 +17,13 @@ #include "dev.h" #include "assoc.h" -#define AD_HOC_CAP_PRIVACY_ON 1 +/* The firmware needs certain bits masked out of the beacon-derived capability + * field when associating/joining to BSSs. + */ +#define CAPINFO_MASK (~(0xda00)) /** - * @brief This function finds out the common rates between rate1 and rate2. + * @brief This function finds common rates between rate1 and card rates. * * It will fill common rates in rate1 as output if found.
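 *
 * For example, if the AP advertises 1, 2, 5.5, 11 and 24 Mb/s, i.e.
 * { 0x02, 0x04, 0x0b, 0x16, 0x30 } in 500 kb/s units once the basic-rate
 * MSB has been stripped by libertas_unset_basic_rate_flags(), the buffer
 * comes back as { 0x02, 0x04, 0x0b, 0x16, 0x30 } (ordered as in
 * libertas_bg_rates) with the size parameter set to 5; the caller is then
 * expected to mark the basic rates again (0x82 0x84 0x8b 0x96) with
 * libertas_set_basic_rate_flags().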
* @@ -29,75 +32,87 @@ * * @param adapter A pointer to wlan_adapter structure * @param rate1 the buffer which keeps input and output - * @param rate1_size the size of rate1 buffer - * @param rate2 the buffer which keeps rate2 - * @param rate2_size the size of rate2 buffer. + * @param rate1_size the size of rate1 buffer; new size of buffer on return * * @return 0 or -1 */ -static int get_common_rates(wlan_adapter * adapter, u8 * rate1, - int rate1_size, u8 * rate2, int rate2_size) +static int get_common_rates(wlan_adapter * adapter, u8 * rates, u16 *rates_size) { - u8 *ptr = rate1; - int ret = 0; + u8 *card_rates = libertas_bg_rates; + size_t num_card_rates = sizeof(libertas_bg_rates); + int ret = 0, i, j; u8 tmp[30]; - int i; + size_t tmp_size = 0; - memset(&tmp, 0, sizeof(tmp)); - memcpy(&tmp, rate1, min_t(size_t, rate1_size, sizeof(tmp))); - memset(rate1, 0, rate1_size); - - /* Mask the top bit of the original values */ - for (i = 0; tmp[i] && i < sizeof(tmp); i++) - tmp[i] &= 0x7F; - - for (i = 0; rate2[i] && i < rate2_size; i++) { - /* Check for Card Rate in tmp, excluding the top bit */ - if (strchr(tmp, rate2[i] & 0x7F)) { - /* values match, so copy the Card Rate to rate1 */ - *rate1++ = rate2[i]; + /* For each rate in card_rates that exists in rate1, copy to tmp */ + for (i = 0; card_rates[i] && (i < num_card_rates); i++) { + for (j = 0; rates[j] && (j < *rates_size); j++) { + if (rates[j] == card_rates[i]) + tmp[tmp_size++] = card_rates[i]; } } - lbs_dbg_hex("rate1 (AP) rates:", tmp, sizeof(tmp)); - lbs_dbg_hex("rate2 (Card) rates:", rate2, rate2_size); - lbs_dbg_hex("Common rates:", ptr, rate1_size); - lbs_deb_join("Tx datarate is set to 0x%X\n", adapter->datarate); + lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); + lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates); + lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); + lbs_deb_join("Tx datarate is currently 0x%X\n", adapter->cur_rate); - if (!adapter->is_datarate_auto) { - while (*ptr) { - if ((*ptr & 0x7f) == adapter->datarate) { - ret = 0; + if (!adapter->auto_rate) { + for (i = 0; i < tmp_size; i++) { + if (tmp[i] == adapter->cur_rate) goto done; - } - ptr++; } - lbs_pr_alert( "Previously set fixed data rate %#x isn't " - "compatible with the network.\n", adapter->datarate); - + lbs_pr_alert("Previously set fixed data rate %#x isn't " + "compatible with the network.\n", adapter->cur_rate); ret = -1; goto done; } - ret = 0; + done: + memset(rates, 0, *rates_size); + *rates_size = min_t(int, tmp_size, *rates_size); + memcpy(rates, tmp, *rates_size); return ret; } -int libertas_send_deauth(wlan_private * priv) + +/** + * @brief Sets the MSB on basic rates as the firmware requires + * + * Scan through an array and set the MSB for basic data rates. + * + * @param rates buffer of data rates + * @param len size of buffer + */ +static void libertas_set_basic_rate_flags(u8 * rates, size_t len) { - wlan_adapter *adapter = priv->adapter; - int ret = 0; + int i; - if (adapter->mode == IW_MODE_INFRA && - adapter->connect_status == libertas_connected) - ret = libertas_send_deauthentication(priv); - else - ret = -ENOTSUPP; + for (i = 0; i < len; i++) { + if (rates[i] == 0x02 || rates[i] == 0x04 || + rates[i] == 0x0b || rates[i] == 0x16) + rates[i] |= 0x80; + } +} - return ret; +/** + * @brief Unsets the MSB on basic rates + * + * Scan through an array and unset the MSB for basic data rates. 
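 * (For example, basic rates advertised in a beacon as 0x82, 0x84, 0x8b and
 * 0x96 become 0x02, 0x04, 0x0b and 0x16, the form that libertas_bg_rates
 * and get_common_rates() work with.)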
+ * + * @param rates buffer of data rates + * @param len size of buffer + */ +void libertas_unset_basic_rate_flags(u8 * rates, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + rates[i] &= 0x7f; } + /** * @brief Associate to a specific BSS discovered in a scan * @@ -113,23 +128,24 @@ int wlan_associate(wlan_private * priv, struct assoc_request * assoc_req) lbs_deb_enter(LBS_DEB_JOIN); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_authenticate, - 0, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE, + 0, CMD_OPTION_WAITFORRSP, 0, assoc_req->bss.bssid); if (ret) goto done; /* set preamble to firmware */ - if (adapter->capinfo.shortpreamble && assoc_req->bss.cap.shortpreamble) - adapter->preamble = cmd_type_short_preamble; + if ( (adapter->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + && (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) + adapter->preamble = CMD_TYPE_SHORT_PREAMBLE; else - adapter->preamble = cmd_type_long_preamble; + adapter->preamble = CMD_TYPE_LONG_PREAMBLE; libertas_set_radio_control(priv); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_associate, - 0, cmd_option_waitforrsp, 0, assoc_req); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE, + 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); done: lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); @@ -150,12 +166,12 @@ int libertas_start_adhoc_network(wlan_private * priv, struct assoc_request * ass adapter->adhoccreate = 1; - if (!adapter->capinfo.shortpreamble) { - lbs_deb_join("AdhocStart: Long preamble\n"); - adapter->preamble = cmd_type_long_preamble; - } else { + if (adapter->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { lbs_deb_join("AdhocStart: Short preamble\n"); - adapter->preamble = cmd_type_short_preamble; + adapter->preamble = CMD_TYPE_SHORT_PREAMBLE; + } else { + lbs_deb_join("AdhocStart: Long preamble\n"); + adapter->preamble = CMD_TYPE_LONG_PREAMBLE; } libertas_set_radio_control(priv); @@ -163,8 +179,8 @@ int libertas_start_adhoc_network(wlan_private * priv, struct assoc_request * ass lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel); lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_ad_hoc_start, - 0, cmd_option_waitforrsp, 0, assoc_req); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START, + 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); return ret; } @@ -194,25 +210,37 @@ int libertas_join_adhoc_network(wlan_private * priv, struct assoc_request * asso bss->ssid_len); /* check if the requested SSID is already joined */ - if (adapter->curbssparams.ssid_len + if ( adapter->curbssparams.ssid_len && !libertas_ssid_cmp(adapter->curbssparams.ssid, adapter->curbssparams.ssid_len, bss->ssid, bss->ssid_len) - && (adapter->mode == IW_MODE_ADHOC)) { - lbs_deb_join( - "ADHOC_J_CMD: New ad-hoc SSID is the same as current, " - "not attempting to re-join"); - return -1; + && (adapter->mode == IW_MODE_ADHOC) + && (adapter->connect_status == LIBERTAS_CONNECTED)) { + union iwreq_data wrqu; + + lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as " + "current, not attempting to re-join"); + + /* Send the re-association event though, because the association + * request really was successful, even if just a null-op. 
+ */ + memset(&wrqu, 0, sizeof(wrqu)); + memcpy(wrqu.ap_addr.sa_data, adapter->curbssparams.bssid, + ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + goto out; } - /*Use shortpreamble only when both creator and card supports + /* Use shortpreamble only when both creator and card supports short preamble */ - if (!bss->cap.shortpreamble || !adapter->capinfo.shortpreamble) { + if ( !(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + || !(adapter->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) { lbs_deb_join("AdhocJoin: Long preamble\n"); - adapter->preamble = cmd_type_long_preamble; + adapter->preamble = CMD_TYPE_LONG_PREAMBLE; } else { lbs_deb_join("AdhocJoin: Short preamble\n"); - adapter->preamble = cmd_type_short_preamble; + adapter->preamble = CMD_TYPE_SHORT_PREAMBLE; } libertas_set_radio_control(priv); @@ -222,17 +250,18 @@ int libertas_join_adhoc_network(wlan_private * priv, struct assoc_request * asso adapter->adhoccreate = 0; - ret = libertas_prepare_and_send_command(priv, cmd_802_11_ad_hoc_join, - 0, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN, + 0, CMD_OPTION_WAITFORRSP, OID_802_11_SSID, assoc_req); +out: return ret; } int libertas_stop_adhoc_network(wlan_private * priv) { - return libertas_prepare_and_send_command(priv, cmd_802_11_ad_hoc_stop, - 0, cmd_option_waitforrsp, 0, NULL); + return libertas_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); } /** @@ -243,8 +272,8 @@ int libertas_stop_adhoc_network(wlan_private * priv) */ int libertas_send_deauthentication(wlan_private * priv) { - return libertas_prepare_and_send_command(priv, cmd_802_11_deauthenticate, - 0, cmd_option_waitforrsp, 0, NULL); + return libertas_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); } /** @@ -264,10 +293,11 @@ int libertas_cmd_80211_authenticate(wlan_private * priv, struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth; int ret = -1; u8 *bssid = pdata_buf; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_JOIN); - cmd->command = cpu_to_le16(cmd_802_11_authenticate); + cmd->command = cpu_to_le16(CMD_802_11_AUTHENTICATE); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_authenticate) + S_DS_GEN); @@ -290,8 +320,8 @@ int libertas_cmd_80211_authenticate(wlan_private * priv, memcpy(pauthenticate->macaddr, bssid, ETH_ALEN); - lbs_deb_join("AUTH_CMD: BSSID is : " MAC_FMT " auth=0x%X\n", - MAC_ARG(bssid), pauthenticate->authtype); + lbs_deb_join("AUTH_CMD: BSSID is : %s auth=0x%X\n", + print_mac(mac, bssid), pauthenticate->authtype); ret = 0; out: @@ -307,7 +337,7 @@ int libertas_cmd_80211_deauthenticate(wlan_private * priv, lbs_deb_enter(LBS_DEB_JOIN); - cmd->command = cpu_to_le16(cmd_802_11_deauthenticate); + cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) + S_DS_GEN); @@ -330,9 +360,7 @@ int libertas_cmd_80211_associate(wlan_private * priv, int ret = 0; struct assoc_request * assoc_req = pdata_buf; struct bss_descriptor * bss = &assoc_req->bss; - u8 *card_rates; u8 *pos; - int card_rates_size; u16 tmpcap, tmplen; struct mrvlietypes_ssidparamset *ssid; struct mrvlietypes_phyparamset *phy; @@ -349,15 +377,15 @@ int libertas_cmd_80211_associate(wlan_private * priv, goto done; } - cmd->command = cpu_to_le16(cmd_802_11_associate); + cmd->command = cpu_to_le16(CMD_802_11_ASSOCIATE); memcpy(passo->peerstaaddr, bss->bssid, 
sizeof(passo->peerstaaddr)); pos += sizeof(passo->peerstaaddr); /* set the listen interval */ - passo->listeninterval = cpu_to_le16(adapter->listeninterval); + passo->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL); - pos += sizeof(passo->capinfo); + pos += sizeof(passo->capability); pos += sizeof(passo->listeninterval); pos += sizeof(passo->bcnperiod); pos += sizeof(passo->dtimperiod); @@ -386,23 +414,24 @@ int libertas_cmd_80211_associate(wlan_private * priv, rates = (struct mrvlietypes_ratesparamset *) pos; rates->header.type = cpu_to_le16(TLV_TYPE_RATES); - - memcpy(&rates->rates, &bss->libertas_supported_rates, WLAN_SUPPORTED_RATES); - - card_rates = libertas_supported_rates; - card_rates_size = sizeof(libertas_supported_rates); - - if (get_common_rates(adapter, rates->rates, WLAN_SUPPORTED_RATES, - card_rates, card_rates_size)) { + memcpy(&rates->rates, &bss->rates, MAX_RATES); + tmplen = MAX_RATES; + if (get_common_rates(adapter, rates->rates, &tmplen)) { ret = -1; goto done; } - - tmplen = min_t(size_t, strlen(rates->rates), WLAN_SUPPORTED_RATES); - adapter->curbssparams.numofrates = tmplen; - pos += sizeof(rates->header) + tmplen; rates->header.len = cpu_to_le16(tmplen); + lbs_deb_join("ASSOC_CMD: num rates = %u\n", tmplen); + + /* Copy the infra. association rates into Current BSS state structure */ + memset(&adapter->curbssparams.rates, 0, sizeof(adapter->curbssparams.rates)); + memcpy(&adapter->curbssparams.rates, &rates->rates, tmplen); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. + */ + libertas_set_basic_rate_flags(rates->rates, tmplen); if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) { rsn = (struct mrvlietypes_rsnparamset *) pos; @@ -411,7 +440,7 @@ int libertas_cmd_80211_associate(wlan_private * priv, tmplen = (u16) assoc_req->wpa_ie[1]; rsn->header.len = cpu_to_le16(tmplen); memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen); - lbs_dbg_hex("ASSOC_CMD: RSN IE", (u8 *) rsn, + lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: RSN IE", (u8 *) rsn, sizeof(rsn->header) + tmplen); pos += sizeof(rsn->header) + tmplen; } @@ -419,20 +448,6 @@ int libertas_cmd_80211_associate(wlan_private * priv, /* update curbssparams */ adapter->curbssparams.channel = bss->phyparamset.dsparamset.currentchan; - /* Copy the infra. 
association rates into Current BSS state structure */ - memcpy(&adapter->curbssparams.datarates, &rates->rates, - min_t(size_t, sizeof(adapter->curbssparams.datarates), - cpu_to_le16(rates->header.len))); - - lbs_deb_join("ASSOC_CMD: rates->header.len = %d\n", - cpu_to_le16(rates->header.len)); - - /* set IBSS field */ - if (bss->mode == IW_MODE_INFRA) { -#define CAPINFO_ESS_MODE 1 - passo->capinfo.ess = CAPINFO_ESS_MODE; - } - if (libertas_parse_dnld_countryinfo_11d(priv, bss)) { ret = -1; goto done; @@ -440,12 +455,13 @@ int libertas_cmd_80211_associate(wlan_private * priv, cmd->size = cpu_to_le16((u16) (pos - (u8 *) passo) + S_DS_GEN); - /* set the capability info at last */ - memcpy(&tmpcap, &bss->cap, sizeof(passo->capinfo)); - tmpcap &= CAPINFO_MASK; - lbs_deb_join("ASSOC_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", + /* set the capability info */ + tmpcap = (bss->capability & CAPINFO_MASK); + if (bss->mode == IW_MODE_INFRA) + tmpcap |= WLAN_CAPABILITY_ESS; + passo->capability = cpu_to_le16(tmpcap); + lbs_deb_join("ASSOC_CMD: capability=%4X CAPINFO_MASK=%4X\n", tmpcap, CAPINFO_MASK); - memcpy(&passo->capinfo, &tmpcap, sizeof(passo->capinfo)); done: lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); @@ -459,8 +475,9 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads; int ret = 0; int cmdappendsize = 0; - int i; struct assoc_request * assoc_req = pdata_buf; + u16 tmpcap = 0; + size_t ratesize = 0; lbs_deb_enter(LBS_DEB_JOIN); @@ -469,7 +486,7 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, goto done; } - cmd->command = cpu_to_le16(cmd_802_11_ad_hoc_start); + cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START); /* * Fill in the parameters for 2 data structures: @@ -483,17 +500,17 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, * and operational rates. 
*/ - memset(adhs->SSID, 0, IW_ESSID_MAX_SIZE); - memcpy(adhs->SSID, assoc_req->ssid, assoc_req->ssid_len); + memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE); + memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len); lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n", escape_essid(assoc_req->ssid, assoc_req->ssid_len), assoc_req->ssid_len); /* set the BSS type */ - adhs->bsstype = cmd_bss_type_ibss; + adhs->bsstype = CMD_BSS_TYPE_IBSS; adapter->mode = IW_MODE_ADHOC; - adhs->beaconperiod = cpu_to_le16(adapter->beaconperiod); + adhs->beaconperiod = cpu_to_le16(MRVDRV_BEACON_INTERVAL); /* set Physical param set */ #define DS_PARA_IE_ID 3 @@ -515,45 +532,36 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID; adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN; - adhs->ssparamset.ibssparamset.atimwindow = cpu_to_le16(adapter->atimwindow); + adhs->ssparamset.ibssparamset.atimwindow = 0; /* set capability info */ - adhs->cap.ess = 0; - adhs->cap.ibss = 1; - - /* probedelay */ - adhs->probedelay = cpu_to_le16(cmd_scan_probe_delay_time); - - /* set up privacy in adapter->scantable[i] */ + tmpcap = WLAN_CAPABILITY_IBSS; if (assoc_req->secinfo.wep_enabled) { lbs_deb_join("ADHOC_S_CMD: WEP enabled, setting privacy on\n"); - adhs->cap.privacy = AD_HOC_CAP_PRIVACY_ON; + tmpcap |= WLAN_CAPABILITY_PRIVACY; } else { lbs_deb_join("ADHOC_S_CMD: WEP disabled, setting privacy off\n"); } + adhs->capability = cpu_to_le16(tmpcap); - memset(adhs->datarate, 0, sizeof(adhs->datarate)); - - if (adapter->adhoc_grate_enabled) { - memcpy(adhs->datarate, libertas_adhoc_rates_g, - min(sizeof(adhs->datarate), sizeof(libertas_adhoc_rates_g))); - } else { - memcpy(adhs->datarate, libertas_adhoc_rates_b, - min(sizeof(adhs->datarate), sizeof(libertas_adhoc_rates_b))); - } - - /* Find the last non zero */ - for (i = 0; i < sizeof(adhs->datarate) && adhs->datarate[i]; i++) ; + /* probedelay */ + adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); - adapter->curbssparams.numofrates = i; + memset(adhs->rates, 0, sizeof(adhs->rates)); + ratesize = min(sizeof(adhs->rates), sizeof(libertas_bg_rates)); + memcpy(adhs->rates, libertas_bg_rates, ratesize); /* Copy the ad-hoc creating rates into Current BSS state structure */ - memcpy(&adapter->curbssparams.datarates, - &adhs->datarate, adapter->curbssparams.numofrates); + memset(&adapter->curbssparams.rates, 0, sizeof(adapter->curbssparams.rates)); + memcpy(&adapter->curbssparams.rates, &adhs->rates, ratesize); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. 
+ */ + libertas_set_basic_rate_flags(adhs->rates, ratesize); lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n", - adhs->datarate[0], adhs->datarate[1], - adhs->datarate[2], adhs->datarate[3]); + adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]); lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n"); @@ -575,7 +583,7 @@ done: int libertas_cmd_80211_ad_hoc_stop(wlan_private * priv, struct cmd_ds_command *cmd) { - cmd->command = cpu_to_le16(cmd_802_11_ad_hoc_stop); + cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP); cmd->size = cpu_to_le16(S_DS_GEN); return 0; @@ -585,101 +593,84 @@ int libertas_cmd_80211_ad_hoc_join(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf) { wlan_adapter *adapter = priv->adapter; - struct cmd_ds_802_11_ad_hoc_join *padhocjoin = &cmd->params.adj; + struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj; struct assoc_request * assoc_req = pdata_buf; struct bss_descriptor *bss = &assoc_req->bss; int cmdappendsize = 0; int ret = 0; - u8 *card_rates; - int card_rates_size; - u16 tmpcap; - int i; + u16 ratesize = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_JOIN); - cmd->command = cpu_to_le16(cmd_802_11_ad_hoc_join); + cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN); - padhocjoin->bssdescriptor.bsstype = cmd_bss_type_ibss; + join_cmd->bss.type = CMD_BSS_TYPE_IBSS; + join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod); - padhocjoin->bssdescriptor.beaconperiod = cpu_to_le16(bss->beaconperiod); + memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN); + memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len); - memcpy(&padhocjoin->bssdescriptor.BSSID, &bss->bssid, ETH_ALEN); - memcpy(&padhocjoin->bssdescriptor.SSID, &bss->ssid, bss->ssid_len); + memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset, + sizeof(union ieeetypes_phyparamset)); - memcpy(&padhocjoin->bssdescriptor.phyparamset, - &bss->phyparamset, sizeof(union ieeetypes_phyparamset)); - - memcpy(&padhocjoin->bssdescriptor.ssparamset, - &bss->ssparamset, sizeof(union IEEEtypes_ssparamset)); - - memcpy(&tmpcap, &bss->cap, sizeof(struct ieeetypes_capinfo)); - tmpcap &= CAPINFO_MASK; + memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset, + sizeof(union IEEEtypes_ssparamset)); + join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK); lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", - tmpcap, CAPINFO_MASK); - memcpy(&padhocjoin->bssdescriptor.cap, &tmpcap, - sizeof(struct ieeetypes_capinfo)); + bss->capability, CAPINFO_MASK); /* information on BSSID descriptor passed to FW */ lbs_deb_join( - "ADHOC_J_CMD: BSSID = " MAC_FMT ", SSID = '%s'\n", - MAC_ARG(padhocjoin->bssdescriptor.BSSID), - padhocjoin->bssdescriptor.SSID); + "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n", + print_mac(mac, join_cmd->bss.bssid), + join_cmd->bss.ssid); /* failtimeout */ - padhocjoin->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); + join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); /* probedelay */ - padhocjoin->probedelay = cpu_to_le16(cmd_scan_probe_delay_time); - - /* Copy Data rates from the rates recorded in scan response */ - memset(padhocjoin->bssdescriptor.datarates, 0, - sizeof(padhocjoin->bssdescriptor.datarates)); - memcpy(padhocjoin->bssdescriptor.datarates, bss->datarates, - min(sizeof(padhocjoin->bssdescriptor.datarates), - sizeof(bss->datarates))); - - card_rates = libertas_supported_rates; - card_rates_size = sizeof(libertas_supported_rates); + join_cmd->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); 
adapter->curbssparams.channel = bss->channel; - if (get_common_rates(adapter, padhocjoin->bssdescriptor.datarates, - sizeof(padhocjoin->bssdescriptor.datarates), - card_rates, card_rates_size)) { + /* Copy Data rates from the rates recorded in scan response */ + memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates)); + ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES); + memcpy(join_cmd->bss.rates, bss->rates, ratesize); + if (get_common_rates(adapter, join_cmd->bss.rates, &ratesize)) { lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n"); ret = -1; goto done; } - /* Find the last non zero */ - for (i = 0; i < sizeof(padhocjoin->bssdescriptor.datarates) - && padhocjoin->bssdescriptor.datarates[i]; i++) ; - - adapter->curbssparams.numofrates = i; + /* Copy the ad-hoc creating rates into Current BSS state structure */ + memset(&adapter->curbssparams.rates, 0, sizeof(adapter->curbssparams.rates)); + memcpy(&adapter->curbssparams.rates, join_cmd->bss.rates, ratesize); - /* - * Copy the adhoc joining rates to Current BSS State structure + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. */ - memcpy(adapter->curbssparams.datarates, - padhocjoin->bssdescriptor.datarates, - adapter->curbssparams.numofrates); + libertas_set_basic_rate_flags(join_cmd->bss.rates, ratesize); - padhocjoin->bssdescriptor.ssparamset.ibssparamset.atimwindow = + join_cmd->bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow); if (assoc_req->secinfo.wep_enabled) { - padhocjoin->bssdescriptor.cap.privacy = AD_HOC_CAP_PRIVACY_ON; + u16 tmp = le16_to_cpu(join_cmd->bss.capability); + tmp |= WLAN_CAPABILITY_PRIVACY; + join_cmd->bss.capability = cpu_to_le16(tmp); } - if (adapter->psmode == wlan802_11powermodemax_psp) { + if (adapter->psmode == WLAN802_11POWERMODEMAX_PSP) { /* wake up first */ __le32 Localpsmode; - Localpsmode = cpu_to_le32(wlan802_11powermodecam); + Localpsmode = cpu_to_le32(WLAN802_11POWERMODECAM); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_ps_mode, - cmd_act_set, + CMD_802_11_PS_MODE, + CMD_ACT_SET, 0, 0, &Localpsmode); if (ret) { @@ -709,6 +700,7 @@ int libertas_ret_80211_associate(wlan_private * priv, union iwreq_data wrqu; struct ieeetypes_assocrsp *passocrsp; struct bss_descriptor * bss; + u16 status_code; lbs_deb_enter(LBS_DEB_JOIN); @@ -721,21 +713,65 @@ int libertas_ret_80211_associate(wlan_private * priv, passocrsp = (struct ieeetypes_assocrsp *) & resp->params; - if (le16_to_cpu(passocrsp->statuscode)) { - libertas_mac_event_disconnected(priv); + /* + * Older FW versions map the IEEE 802.11 Status Code in the association + * response to the following values returned in passocrsp->statuscode: + * + * IEEE Status Code Marvell Status Code + * 0 -> 0x0000 ASSOC_RESULT_SUCCESS + * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * others -> 0x0003 ASSOC_RESULT_REFUSED + * + * Other response codes: + * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused) + * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for + * association response from the AP) + */ - lbs_deb_join("ASSOC_RESP: Association failed, status code = %d\n", - le16_to_cpu(passocrsp->statuscode)); + status_code = le16_to_cpu(passocrsp->statuscode); + switch (status_code) { + case 0x00: + lbs_deb_join("ASSOC_RESP: Association succeeded\n"); + break; + case 0x01: + lbs_deb_join("ASSOC_RESP: Association failed; 
invalid " + "parameters (status code %d)\n", status_code); + break; + case 0x02: + lbs_deb_join("ASSOC_RESP: Association failed; internal timer " + "expired while waiting for the AP (status code %d)" + "\n", status_code); + break; + case 0x03: + lbs_deb_join("ASSOC_RESP: Association failed; association " + "was refused by the AP (status code %d)\n", + status_code); + break; + case 0x04: + lbs_deb_join("ASSOC_RESP: Association failed; authentication " + "was refused by the AP (status code %d)\n", + status_code); + break; + default: + lbs_deb_join("ASSOC_RESP: Association failed; reason unknown " + "(status code %d)\n", status_code); + break; + } + if (status_code) { + libertas_mac_event_disconnected(priv); ret = -1; goto done; } - lbs_dbg_hex("ASSOC_RESP:", (void *)&resp->params, + lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_RESP", (void *)&resp->params, le16_to_cpu(resp->size) - S_DS_GEN); /* Send a Media Connected event, according to the Spec */ - adapter->connect_status = libertas_connected; + adapter->connect_status = LIBERTAS_CONNECTED; lbs_deb_join("ASSOC_RESP: assocated to '%s'\n", escape_essid(bss->ssid, bss->ssid_len)); @@ -759,10 +795,10 @@ int libertas_ret_80211_associate(wlan_private * priv, netif_carrier_on(priv->dev); netif_wake_queue(priv->dev); - netif_carrier_on(priv->mesh_dev); - netif_wake_queue(priv->mesh_dev); - - lbs_deb_join("ASSOC_RESP: Associated \n"); + if (priv->mesh_dev) { + netif_carrier_on(priv->mesh_dev); + netif_wake_queue(priv->mesh_dev); + } memcpy(wrqu.ap_addr.sa_data, adapter->curbssparams.bssid, ETH_ALEN); wrqu.ap_addr.sa_family = ARPHRD_ETHER; @@ -794,6 +830,7 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_802_11_ad_hoc_result *padhocresult; union iwreq_data wrqu; struct bss_descriptor *bss; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_JOIN); @@ -815,7 +852,7 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, */ if (result) { lbs_deb_join("ADHOC_RESP: failed\n"); - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { libertas_mac_event_disconnected(priv); } ret = -1; @@ -830,11 +867,11 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, escape_essid(bss->ssid, bss->ssid_len)); /* Send a Media Connected event, according to the Spec */ - adapter->connect_status = libertas_connected; + adapter->connect_status = LIBERTAS_CONNECTED; - if (command == cmd_ret_802_11_ad_hoc_start) { + if (command == CMD_RET(CMD_802_11_AD_HOC_START)) { /* Update the created network descriptor with the new BSSID */ - memcpy(bss->bssid, padhocresult->BSSID, ETH_ALEN); + memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN); } /* Set the BSSID from the joined/started descriptor */ @@ -847,8 +884,10 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, netif_carrier_on(priv->dev); netif_wake_queue(priv->dev); - netif_carrier_on(priv->mesh_dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) { + netif_carrier_on(priv->mesh_dev); + netif_wake_queue(priv->mesh_dev); + } memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.ap_addr.sa_data, adapter->curbssparams.bssid, ETH_ALEN); @@ -857,8 +896,8 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n"); lbs_deb_join("ADHOC_RESP: channel = %d\n", adapter->curbssparams.channel); - lbs_deb_join("ADHOC_RESP: BSSID = " MAC_FMT "\n", - MAC_ARG(padhocresult->BSSID)); + lbs_deb_join("ADHOC_RESP: BSSID = %s\n", + print_mac(mac, padhocresult->bssid)); done: lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", 
ret); diff --git a/drivers/net/wireless/libertas/join.h b/drivers/net/wireless/libertas/join.h index d522630ff8cf..894a072b9f8d 100644 --- a/drivers/net/wireless/libertas/join.h +++ b/drivers/net/wireless/libertas/join.h @@ -12,45 +12,42 @@ #include "dev.h" struct cmd_ds_command; -extern int libertas_cmd_80211_authenticate(wlan_private * priv, +int libertas_cmd_80211_authenticate(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_cmd_80211_ad_hoc_join(wlan_private * priv, +int libertas_cmd_80211_ad_hoc_join(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_cmd_80211_ad_hoc_stop(wlan_private * priv, +int libertas_cmd_80211_ad_hoc_stop(wlan_private * priv, struct cmd_ds_command *cmd); -extern int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, +int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_cmd_80211_deauthenticate(wlan_private * priv, +int libertas_cmd_80211_deauthenticate(wlan_private * priv, struct cmd_ds_command *cmd); -extern int libertas_cmd_80211_associate(wlan_private * priv, +int libertas_cmd_80211_associate(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_ret_80211_ad_hoc_start(wlan_private * priv, +int libertas_ret_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_ret_80211_ad_hoc_stop(wlan_private * priv, +int libertas_ret_80211_ad_hoc_stop(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_ret_80211_disassociate(wlan_private * priv, +int libertas_ret_80211_disassociate(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_ret_80211_associate(wlan_private * priv, +int libertas_ret_80211_associate(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_reassociation_thread(void *data); - -extern int libertas_start_adhoc_network(wlan_private * priv, +int libertas_start_adhoc_network(wlan_private * priv, struct assoc_request * assoc_req); -extern int libertas_join_adhoc_network(wlan_private * priv, +int libertas_join_adhoc_network(wlan_private * priv, struct assoc_request * assoc_req); -extern int libertas_stop_adhoc_network(wlan_private * priv); - -extern int libertas_send_deauthentication(wlan_private * priv); -extern int libertas_send_deauth(wlan_private * priv); +int libertas_stop_adhoc_network(wlan_private * priv); -extern int libertas_do_adhocstop_ioctl(wlan_private * priv); +int libertas_send_deauthentication(wlan_private * priv); int wlan_associate(wlan_private * priv, struct assoc_request * assoc_req); +void libertas_unset_basic_rate_flags(u8 * rates, size_t len); + #endif diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 9f366242c392..5ead08312e1e 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -10,6 +10,7 @@ #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/if_arp.h> +#include <linux/kthread.h> #include <net/iw_handler.h> #include <net/ieee80211.h> @@ -20,8 +21,9 @@ #include "wext.h" #include "debugfs.h" #include "assoc.h" +#include "join.h" -#define DRIVER_RELEASE_VERSION "322.p0" +#define DRIVER_RELEASE_VERSION "323.p0" const char libertas_driver_version[] = "COMM-USB8388-" DRIVER_RELEASE_VERSION #ifdef DEBUG "-dbg" @@ -121,57 +123,88 @@ struct region_cfp_table { static struct region_cfp_table region_cfp_table[] = { {0x10, /*US FCC */ channel_freq_power_US_BG, - 
sizeof(channel_freq_power_US_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_US_BG), } , {0x20, /*CANADA IC */ channel_freq_power_US_BG, - sizeof(channel_freq_power_US_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_US_BG), } , {0x30, /*EU*/ channel_freq_power_EU_BG, - sizeof(channel_freq_power_EU_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_EU_BG), } , {0x31, /*SPAIN*/ channel_freq_power_SPN_BG, - sizeof(channel_freq_power_SPN_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_SPN_BG), } , {0x32, /*FRANCE*/ channel_freq_power_FR_BG, - sizeof(channel_freq_power_FR_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_FR_BG), } , {0x40, /*JAPAN*/ channel_freq_power_JPN_BG, - sizeof(channel_freq_power_JPN_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_JPN_BG), } , /*Add new region here */ }; /** - * the rates supported + * the table to keep region code */ -u8 libertas_supported_rates[G_SUPPORTED_RATES] = - { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, -0 }; +u16 libertas_region_code_to_index[MRVDRV_MAX_REGION_CODE] = + { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; /** - * the rates supported for ad-hoc G mode + * 802.11b/g supported bitrates (in 500Kb/s units) */ -u8 libertas_adhoc_rates_g[G_SUPPORTED_RATES] = - { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, -0 }; +u8 libertas_bg_rates[MAX_RATES] = + { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, +0x00, 0x00 }; /** - * the rates supported for ad-hoc B mode + * FW rate table. FW refers to rates by their index in this table, not by the + * rate value itself. Values of 0x00 are + * reserved positions. */ -u8 libertas_adhoc_rates_b[4] = { 0x82, 0x84, 0x8b, 0x96 }; +static u8 fw_data_rates[MAX_RATES] = + { 0x02, 0x04, 0x0B, 0x16, 0x00, 0x0C, 0x12, + 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x00 +}; /** - * the table to keep region code + * @brief use index to get the data rate + * + * @param idx The index of data rate + * @return data rate or 0 */ -u16 libertas_region_code_to_index[MRVDRV_MAX_REGION_CODE] = - { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; +u32 libertas_fw_index_to_data_rate(u8 idx) +{ + if (idx >= sizeof(fw_data_rates)) + idx = 0; + return fw_data_rates[idx]; +} + +/** + * @brief use rate to get the index + * + * @param rate data rate + * @return index or 0 + */ +u8 libertas_data_rate_to_fw_index(u32 rate) +{ + u8 i; + + if (!rate) + return 0; + + for (i = 0; i < sizeof(fw_data_rates); i++) { + if (rate == fw_data_rates[i]) + return i; + } + return 0; +} /** * Attributes exported through sysfs @@ -187,9 +220,9 @@ static ssize_t libertas_anycast_get(struct device * dev, memset(&mesh_access, 0, sizeof(mesh_access)); libertas_prepare_and_send_command(to_net_dev(dev)->priv, - cmd_mesh_access, - cmd_act_mesh_get_anycast, - cmd_option_waitforrsp, 0, (void *)&mesh_access); + CMD_MESH_ACCESS, + CMD_ACT_MESH_GET_ANYCAST, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0])); } @@ -208,18 +241,127 @@ static ssize_t libertas_anycast_set(struct device * dev, mesh_access.data[0] = cpu_to_le32(datum); libertas_prepare_and_send_command((to_net_dev(dev))->priv, - cmd_mesh_access, - cmd_act_mesh_set_anycast, - cmd_option_waitforrsp, 0, (void *)&mesh_access); + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_ANYCAST, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + return strlen(buf); +} + +int 
libertas_add_rtap(wlan_private *priv); +void libertas_remove_rtap(wlan_private *priv); + +/** + * Get function for sysfs attribute rtap + */ +static ssize_t libertas_rtap_get(struct device * dev, + struct device_attribute *attr, char * buf) +{ + wlan_private *priv = (wlan_private *) (to_net_dev(dev))->priv; + wlan_adapter *adapter = priv->adapter; + return snprintf(buf, 5, "0x%X\n", adapter->monitormode); +} + +/** + * Set function for sysfs attribute rtap + */ +static ssize_t libertas_rtap_set(struct device * dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + int monitor_mode; + wlan_private *priv = (wlan_private *) (to_net_dev(dev))->priv; + wlan_adapter *adapter = priv->adapter; + + sscanf(buf, "%x", &monitor_mode); + if (monitor_mode != WLAN_MONITOR_OFF) { + if(adapter->monitormode == monitor_mode) + return strlen(buf); + if (adapter->monitormode == WLAN_MONITOR_OFF) { + if (adapter->mode == IW_MODE_INFRA) + libertas_send_deauthentication(priv); + else if (adapter->mode == IW_MODE_ADHOC) + libertas_stop_adhoc_network(priv); + libertas_add_rtap(priv); + } + adapter->monitormode = monitor_mode; + } + + else { + if(adapter->monitormode == WLAN_MONITOR_OFF) + return strlen(buf); + adapter->monitormode = WLAN_MONITOR_OFF; + libertas_remove_rtap(priv); + netif_wake_queue(priv->dev); + netif_wake_queue(priv->mesh_dev); + } + + libertas_prepare_and_send_command(priv, + CMD_802_11_MONITOR_MODE, CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, &adapter->monitormode); return strlen(buf); } /** + * libertas_rtap attribute to be exported per mshX interface + * through sysfs (/sys/class/net/mshX/libertas-rtap) + */ +static DEVICE_ATTR(libertas_rtap, 0644, libertas_rtap_get, + libertas_rtap_set ); + +/** * anycast_mask attribute to be exported per mshX interface * through sysfs (/sys/class/net/mshX/anycast_mask) */ static DEVICE_ATTR(anycast_mask, 0644, libertas_anycast_get, libertas_anycast_set); +static ssize_t libertas_autostart_enabled_get(struct device * dev, + struct device_attribute *attr, char * buf) +{ + struct cmd_ds_mesh_access mesh_access; + + memset(&mesh_access, 0, sizeof(mesh_access)); + libertas_prepare_and_send_command(to_net_dev(dev)->priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_GET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + + return sprintf(buf, "%d\n", le32_to_cpu(mesh_access.data[0])); +} + +static ssize_t libertas_autostart_enabled_set(struct device * dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + struct cmd_ds_mesh_access mesh_access; + uint32_t datum; + wlan_private * priv = (to_net_dev(dev))->priv; + int ret; + + memset(&mesh_access, 0, sizeof(mesh_access)); + sscanf(buf, "%d", &datum); + mesh_access.data[0] = cpu_to_le32(datum); + + ret = libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + if (ret == 0) + priv->mesh_autostart_enabled = datum ? 1 : 0; + + return strlen(buf); +} + +static DEVICE_ATTR(autostart_enabled, 0644, + libertas_autostart_enabled_get, libertas_autostart_enabled_set); + +static struct attribute *libertas_mesh_sysfs_entries[] = { + &dev_attr_anycast_mask.attr, + &dev_attr_autostart_enabled.attr, + NULL, +}; + +static struct attribute_group libertas_mesh_attr_group = { + .attrs = libertas_mesh_sysfs_entries, +}; + /** * @brief Check if the device can be open and wait if necessary. 
* @@ -255,7 +397,7 @@ static int pre_open_check(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int wlan_dev_open(struct net_device *dev) +static int libertas_dev_open(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; wlan_adapter *adapter = priv->adapter; @@ -264,12 +406,14 @@ static int wlan_dev_open(struct net_device *dev) priv->open = 1; - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { netif_carrier_on(priv->dev); - netif_carrier_on(priv->mesh_dev); + if (priv->mesh_dev) + netif_carrier_on(priv->mesh_dev); } else { netif_carrier_off(priv->dev); - netif_carrier_off(priv->mesh_dev); + if (priv->mesh_dev) + netif_carrier_off(priv->mesh_dev); } lbs_deb_leave(LBS_DEB_NET); @@ -281,7 +425,7 @@ static int wlan_dev_open(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int mesh_open(struct net_device *dev) +static int libertas_mesh_open(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv ; @@ -290,7 +434,7 @@ static int mesh_open(struct net_device *dev) priv->mesh_open = 1 ; netif_wake_queue(priv->mesh_dev); if (priv->infra_open == 0) - return wlan_dev_open(priv->dev) ; + return libertas_dev_open(priv->dev) ; return 0; } @@ -300,7 +444,7 @@ static int mesh_open(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int wlan_open(struct net_device *dev) +static int libertas_open(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv ; @@ -309,11 +453,11 @@ static int wlan_open(struct net_device *dev) priv->infra_open = 1 ; netif_wake_queue(priv->dev); if (priv->open == 0) - return wlan_dev_open(priv->dev) ; + return libertas_dev_open(priv->dev) ; return 0; } -static int wlan_dev_close(struct net_device *dev) +static int libertas_dev_close(struct net_device *dev) { wlan_private *priv = dev->priv; @@ -332,14 +476,14 @@ static int wlan_dev_close(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int mesh_close(struct net_device *dev) +static int libertas_mesh_close(struct net_device *dev) { wlan_private *priv = (wlan_private *) (dev->priv); priv->mesh_open = 0; netif_stop_queue(priv->mesh_dev); if (priv->infra_open == 0) - return wlan_dev_close(dev); + return libertas_dev_close(dev); else return 0; } @@ -350,20 +494,20 @@ static int mesh_close(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int wlan_close(struct net_device *dev) +static int libertas_close(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; netif_stop_queue(dev); priv->infra_open = 0; if (priv->mesh_open == 0) - return wlan_dev_close(dev); + return libertas_dev_close(dev); else return 0; } -static int wlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int libertas_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int ret = 0; wlan_private *priv = dev->priv; @@ -376,7 +520,8 @@ static int wlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } netif_stop_queue(priv->dev); - netif_stop_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); if (libertas_process_tx(priv, skb) == 0) dev->trans_start = jiffies; @@ -386,41 +531,52 @@ done: } /** - * @brief Mark mesh packets and handover them to wlan_hard_start_xmit + * @brief Mark mesh packets and handover them to libertas_hard_start_xmit * */ -static int 
mesh_pre_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int libertas_mesh_pre_start_xmit(struct sk_buff *skb, + struct net_device *dev) { wlan_private *priv = dev->priv; int ret; lbs_deb_enter(LBS_DEB_MESH); + if(priv->adapter->monitormode != WLAN_MONITOR_OFF) { + netif_stop_queue(dev); + return -EOPNOTSUPP; + } SET_MESH_FRAME(skb); - ret = wlan_hard_start_xmit(skb, priv->dev); + ret = libertas_hard_start_xmit(skb, priv->dev); lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); return ret; } /** - * @brief Mark non-mesh packets and handover them to wlan_hard_start_xmit + * @brief Mark non-mesh packets and handover them to libertas_hard_start_xmit * */ -static int wlan_pre_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int libertas_pre_start_xmit(struct sk_buff *skb, struct net_device *dev) { + wlan_private *priv = dev->priv; int ret; lbs_deb_enter(LBS_DEB_NET); + if(priv->adapter->monitormode != WLAN_MONITOR_OFF) { + netif_stop_queue(dev); + return -EOPNOTSUPP; + } + UNSET_MESH_FRAME(skb); - ret = wlan_hard_start_xmit(skb, dev); + ret = libertas_hard_start_xmit(skb, dev); lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); return ret; } -static void wlan_tx_timeout(struct net_device *dev) +static void libertas_tx_timeout(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; @@ -432,16 +588,17 @@ static void wlan_tx_timeout(struct net_device *dev) dev->trans_start = jiffies; if (priv->adapter->currenttxskb) { - if (priv->adapter->radiomode == WLAN_RADIOMODE_RADIOTAP) { + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) { /* If we are here, we have not received feedback from the previous packet. Assume TX_FAIL and move on. */ priv->adapter->eventcause = 0x01000000; libertas_send_tx_feedback(priv); } else - wake_up_interruptible(&priv->mainthread.waitq); - } else if (priv->adapter->connect_status == libertas_connected) { + wake_up_interruptible(&priv->waitq); + } else if (priv->adapter->connect_status == LIBERTAS_CONNECTED) { netif_wake_queue(priv->dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_wake_queue(priv->mesh_dev); } lbs_deb_leave(LBS_DEB_TX); @@ -453,14 +610,14 @@ static void wlan_tx_timeout(struct net_device *dev) * @param dev A pointer to wlan_private structure * @return A pointer to net_device_stats structure */ -static struct net_device_stats *wlan_get_stats(struct net_device *dev) +static struct net_device_stats *libertas_get_stats(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; return &priv->stats; } -static int wlan_set_mac_address(struct net_device *dev, void *addr) +static int libertas_set_mac_address(struct net_device *dev, void *addr) { int ret = 0; wlan_private *priv = (wlan_private *) dev->priv; @@ -475,14 +632,14 @@ static int wlan_set_mac_address(struct net_device *dev, void *addr) memset(adapter->current_addr, 0, ETH_ALEN); /* dev->dev_addr is 8 bytes */ - lbs_dbg_hex("dev->dev_addr:", dev->dev_addr, ETH_ALEN); + lbs_deb_hex(LBS_DEB_NET, "dev->dev_addr", dev->dev_addr, ETH_ALEN); - lbs_dbg_hex("addr:", phwaddr->sa_data, ETH_ALEN); + lbs_deb_hex(LBS_DEB_NET, "addr", phwaddr->sa_data, ETH_ALEN); memcpy(adapter->current_addr, phwaddr->sa_data, ETH_ALEN); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_mac_address, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_MAC_ADDRESS, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (ret) { lbs_deb_net("set MAC address failed\n"); @@ -490,7 +647,7 @@ static 
int wlan_set_mac_address(struct net_device *dev, void *addr) goto done; } - lbs_dbg_hex("adapter->macaddr:", adapter->current_addr, ETH_ALEN); + lbs_deb_hex(LBS_DEB_NET, "adapter->macaddr", adapter->current_addr, ETH_ALEN); memcpy(dev->dev_addr, adapter->current_addr, ETH_ALEN); if (priv->mesh_dev) memcpy(priv->mesh_dev->dev_addr, adapter->current_addr, ETH_ALEN); @@ -500,7 +657,7 @@ done: return ret; } -static int wlan_copy_multicast_address(wlan_adapter * adapter, +static int libertas_copy_multicast_address(wlan_adapter * adapter, struct net_device *dev) { int i = 0; @@ -515,11 +672,12 @@ static int wlan_copy_multicast_address(wlan_adapter * adapter, } -static void wlan_set_multicast_list(struct net_device *dev) +static void libertas_set_multicast_list(struct net_device *dev) { wlan_private *priv = dev->priv; wlan_adapter *adapter = priv->adapter; int oldpacketfilter; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_NET); @@ -528,57 +686,52 @@ static void wlan_set_multicast_list(struct net_device *dev) if (dev->flags & IFF_PROMISC) { lbs_deb_net("enable promiscuous mode\n"); adapter->currentpacketfilter |= - cmd_act_mac_promiscuous_enable; + CMD_ACT_MAC_PROMISCUOUS_ENABLE; adapter->currentpacketfilter &= - ~(cmd_act_mac_all_multicast_enable | - cmd_act_mac_multicast_enable); + ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE | + CMD_ACT_MAC_MULTICAST_ENABLE); } else { /* Multicast */ adapter->currentpacketfilter &= - ~cmd_act_mac_promiscuous_enable; + ~CMD_ACT_MAC_PROMISCUOUS_ENABLE; if (dev->flags & IFF_ALLMULTI || dev->mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) { lbs_deb_net( "enabling all multicast\n"); adapter->currentpacketfilter |= - cmd_act_mac_all_multicast_enable; + CMD_ACT_MAC_ALL_MULTICAST_ENABLE; adapter->currentpacketfilter &= - ~cmd_act_mac_multicast_enable; + ~CMD_ACT_MAC_MULTICAST_ENABLE; } else { adapter->currentpacketfilter &= - ~cmd_act_mac_all_multicast_enable; + ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE; if (!dev->mc_count) { lbs_deb_net("no multicast addresses, " "disabling multicast\n"); adapter->currentpacketfilter &= - ~cmd_act_mac_multicast_enable; + ~CMD_ACT_MAC_MULTICAST_ENABLE; } else { int i; adapter->currentpacketfilter |= - cmd_act_mac_multicast_enable; + CMD_ACT_MAC_MULTICAST_ENABLE; adapter->nr_of_multicastmacaddr = - wlan_copy_multicast_address(adapter, dev); + libertas_copy_multicast_address(adapter, dev); lbs_deb_net("multicast addresses: %d\n", dev->mc_count); for (i = 0; i < dev->mc_count; i++) { - lbs_deb_net("Multicast address %d:" - MAC_FMT "\n", i, - adapter->multicastlist[i][0], - adapter->multicastlist[i][1], - adapter->multicastlist[i][2], - adapter->multicastlist[i][3], - adapter->multicastlist[i][4], - adapter->multicastlist[i][5]); + lbs_deb_net("Multicast address %d:%s\n", + i, print_mac(mac, + adapter->multicastlist[i])); } /* send multicast addresses to firmware */ libertas_prepare_and_send_command(priv, - cmd_mac_multicast_adr, - cmd_act_set, 0, 0, + CMD_MAC_MULTICAST_ADR, + CMD_ACT_SET, 0, 0, NULL); } } @@ -599,18 +752,16 @@ static void wlan_set_multicast_list(struct net_device *dev) * @param data A pointer to wlan_thread structure * @return 0 */ -static int wlan_service_main_thread(void *data) +static int libertas_thread(void *data) { - struct wlan_thread *thread = data; - wlan_private *priv = thread->priv; + struct net_device *dev = data; + wlan_private *priv = dev->priv; wlan_adapter *adapter = priv->adapter; wait_queue_t wait; u8 ireg = 0; lbs_deb_enter(LBS_DEB_THREAD); - wlan_activate_thread(thread); - init_waitqueue_entry(&wait, current); 
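/*
 * Editor's sketch, not part of the patch above: the main.c hunks replace the
 * driver's private wlan_thread wrapper with the stock kthread API plus a wait
 * queue hanging off wlan_private (priv->waitq, priv->main_thread).  A minimal
 * version of that pattern, using made-up names (struct example_priv,
 * example_thread, example_start), looks roughly like this; the real driver
 * adds freezer support, interrupt bookkeeping and power-save handling on top.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct example_priv {
	wait_queue_head_t waitq;	/* woken from the interrupt path */
	int pending;			/* "there is work to do" flag */
	struct task_struct *main_thread;
};

static int example_thread(void *data)
{
	struct example_priv *priv = data;
	DECLARE_WAITQUEUE(wait, current);

	while (!kthread_should_stop()) {
		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!priv->pending)
			schedule();	/* sleep until woken or stopped */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		/* ... service interrupts, command responses, events ... */
		priv->pending = 0;
	}
	return 0;
}

static int example_start(struct example_priv *priv)
{
	init_waitqueue_head(&priv->waitq);
	priv->main_thread = kthread_run(example_thread, priv, "example_main");
	if (IS_ERR(priv->main_thread))
		return PTR_ERR(priv->main_thread);
	return 0;
}

/* Producers do: priv->pending = 1; wake_up_interruptible(&priv->waitq);
 * teardown does: kthread_stop(priv->main_thread); */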
set_freezable(); @@ -620,7 +771,7 @@ static int wlan_service_main_thread(void *data) adapter->intcounter, adapter->currenttxskb, priv->dnld_sent); - add_wait_queue(&thread->waitq, &wait); + add_wait_queue(&priv->waitq, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&adapter->driver_lock); if ((adapter->psstate == PS_STATE_SLEEP) || @@ -636,14 +787,13 @@ static int wlan_service_main_thread(void *data) } else spin_unlock_irq(&adapter->driver_lock); - lbs_deb_thread( "main-thread 222 (waking up): intcounter=%d currenttxskb=%p " "dnld_sent=%d\n", adapter->intcounter, adapter->currenttxskb, priv->dnld_sent); set_current_state(TASK_RUNNING); - remove_wait_queue(&thread->waitq, &wait); + remove_wait_queue(&priv->waitq, &wait); try_to_freeze(); lbs_deb_thread("main-thread 333: intcounter=%d currenttxskb=%p " @@ -681,20 +831,20 @@ static int wlan_service_main_thread(void *data) adapter->currenttxskb, priv->dnld_sent); /* command response? */ - if (adapter->hisregcpy & his_cmdupldrdy) { + if (adapter->hisregcpy & MRVDRV_CMD_UPLD_RDY) { lbs_deb_thread("main-thread: cmd response ready\n"); - adapter->hisregcpy &= ~his_cmdupldrdy; + adapter->hisregcpy &= ~MRVDRV_CMD_UPLD_RDY; spin_unlock_irq(&adapter->driver_lock); libertas_process_rx_command(priv); spin_lock_irq(&adapter->driver_lock); } /* Any Card Event */ - if (adapter->hisregcpy & his_cardevent) { + if (adapter->hisregcpy & MRVDRV_CARDEVENT) { lbs_deb_thread("main-thread: Card Event Activity\n"); - adapter->hisregcpy &= ~his_cardevent; + adapter->hisregcpy &= ~MRVDRV_CARDEVENT; if (priv->hw_read_event_cause(priv)) { lbs_pr_alert( @@ -711,7 +861,7 @@ static int wlan_service_main_thread(void *data) if (adapter->psstate == PS_STATE_PRE_SLEEP) { if (!priv->dnld_sent && !adapter->cur_cmd) { if (adapter->connect_status == - libertas_connected) { + LIBERTAS_CONNECTED) { lbs_deb_thread( "main_thread: PRE_SLEEP--intcounter=%d currenttxskb=%p " "dnld_sent=%d cur_cmd=%p, confirm now\n", @@ -758,13 +908,214 @@ static int wlan_service_main_thread(void *data) del_timer(&adapter->command_timer); adapter->nr_cmd_pending = 0; wake_up_all(&adapter->cmd_pending); - wlan_deactivate_thread(thread); lbs_deb_leave(LBS_DEB_THREAD); return 0; } /** + * @brief This function downloads firmware image, gets + * HW spec from firmware and set basic parameters to + * firmware. 
+ * + * @param priv A pointer to wlan_private structure + * @return 0 or -1 + */ +static int wlan_setup_firmware(wlan_private * priv) +{ + int ret = -1; + wlan_adapter *adapter = priv->adapter; + struct cmd_ds_mesh_access mesh_access; + + lbs_deb_enter(LBS_DEB_FW); + + /* + * Read MAC address from HW + */ + memset(adapter->current_addr, 0xff, ETH_ALEN); + + ret = libertas_prepare_and_send_command(priv, CMD_GET_HW_SPEC, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); + + if (ret) { + ret = -1; + goto done; + } + + libertas_set_mac_packet_filter(priv); + + /* Get the supported Data rates */ + ret = libertas_prepare_and_send_command(priv, CMD_802_11_DATA_RATE, + CMD_ACT_GET_TX_RATE, + CMD_OPTION_WAITFORRSP, 0, NULL); + + if (ret) { + ret = -1; + goto done; + } + + /* Disable mesh autostart */ + if (priv->mesh_dev) { + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(0); + ret = libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + if (ret) { + ret = -1; + goto done; + } + priv->mesh_autostart_enabled = 0; + } + + /* Set the boot2 version in firmware */ + ret = libertas_prepare_and_send_command(priv, CMD_SET_BOOT2_VER, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); + + ret = 0; +done: + lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); + return ret; +} + +/** + * This function handles the timeout of command sending. + * It will re-send the same command again. + */ +static void command_timer_fn(unsigned long data) +{ + wlan_private *priv = (wlan_private *)data; + wlan_adapter *adapter = priv->adapter; + struct cmd_ctrl_node *ptempnode; + struct cmd_ds_command *cmd; + unsigned long flags; + + ptempnode = adapter->cur_cmd; + if (ptempnode == NULL) { + lbs_deb_fw("ptempnode empty\n"); + return; + } + + cmd = (struct cmd_ds_command *)ptempnode->bufvirtualaddr; + if (!cmd) { + lbs_deb_fw("cmd is NULL\n"); + return; + } + + lbs_deb_fw("command_timer_fn fired, cmd %x\n", cmd->command); + + if (!adapter->fw_ready) + return; + + spin_lock_irqsave(&adapter->driver_lock, flags); + adapter->cur_cmd = NULL; + spin_unlock_irqrestore(&adapter->driver_lock, flags); + + lbs_deb_fw("re-sending same command because of timeout\n"); + libertas_queue_cmd(adapter, ptempnode, 0); + + wake_up_interruptible(&priv->waitq); + + return; +} + +static int libertas_init_adapter(wlan_private * priv) +{ + wlan_adapter *adapter = priv->adapter; + size_t bufsize; + int i, ret = 0; + + /* Allocate buffer to store the BSSID list */ + bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor); + adapter->networks = kzalloc(bufsize, GFP_KERNEL); + if (!adapter->networks) { + lbs_pr_err("Out of memory allocating beacons\n"); + ret = -1; + goto out; + } + + /* Initialize scan result lists */ + INIT_LIST_HEAD(&adapter->network_free_list); + INIT_LIST_HEAD(&adapter->network_list); + for (i = 0; i < MAX_NETWORK_COUNT; i++) { + list_add_tail(&adapter->networks[i].list, + &adapter->network_free_list); + } + + adapter->libertas_ps_confirm_sleep.seqnum = cpu_to_le16(++adapter->seqnum); + adapter->libertas_ps_confirm_sleep.command = + cpu_to_le16(CMD_802_11_PS_MODE); + adapter->libertas_ps_confirm_sleep.size = + cpu_to_le16(sizeof(struct PS_CMD_ConfirmSleep)); + adapter->libertas_ps_confirm_sleep.action = + cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED); + + memset(adapter->current_addr, 0xff, ETH_ALEN); + + adapter->connect_status = LIBERTAS_DISCONNECTED; + adapter->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + adapter->mode = IW_MODE_INFRA; 
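/*
 * Editor's sketch, not part of the patch: command_timer_fn() above follows
 * the standard kernel timer pattern for a command timeout -- arm the timer
 * when a command is handed to the firmware, delete it when the response
 * arrives, and requeue/retry from the callback if it fires.  Stripped to its
 * bones, with made-up names (struct example_priv, example_cmd_timeout):
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_priv {
	struct timer_list cmd_timer;
};

static void example_cmd_timeout(unsigned long data)
{
	struct example_priv *priv = (struct example_priv *)data;

	/* the firmware never answered: requeue the command and wake the
	 * main thread so it is resent (as command_timer_fn() does) */
	(void)priv;
}

static void example_init(struct example_priv *priv)
{
	setup_timer(&priv->cmd_timer, example_cmd_timeout,
		    (unsigned long)priv);
}

static void example_cmd_sent(struct example_priv *priv)
{
	/* give the firmware five seconds to respond (value is illustrative) */
	mod_timer(&priv->cmd_timer, jiffies + msecs_to_jiffies(5000));
}

static void example_cmd_done(struct example_priv *priv)
{
	del_timer(&priv->cmd_timer);
}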
+ adapter->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; + adapter->currentpacketfilter = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; + adapter->radioon = RADIO_ON; + adapter->auto_rate = 1; + adapter->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; + adapter->psmode = WLAN802_11POWERMODECAM; + adapter->psstate = PS_STATE_FULL_POWER; + + mutex_init(&adapter->lock); + + memset(&adapter->tx_queue_ps, 0, NR_TX_QUEUE*sizeof(struct sk_buff*)); + adapter->tx_queue_idx = 0; + spin_lock_init(&adapter->txqueue_lock); + + setup_timer(&adapter->command_timer, command_timer_fn, + (unsigned long)priv); + + INIT_LIST_HEAD(&adapter->cmdfreeq); + INIT_LIST_HEAD(&adapter->cmdpendingq); + + spin_lock_init(&adapter->driver_lock); + init_waitqueue_head(&adapter->cmd_pending); + adapter->nr_cmd_pending = 0; + + /* Allocate the command buffers */ + if (libertas_allocate_cmd_buffer(priv)) { + lbs_pr_err("Out of memory allocating command buffers\n"); + ret = -1; + } + +out: + return ret; +} + +static void libertas_free_adapter(wlan_private * priv) +{ + wlan_adapter *adapter = priv->adapter; + + if (!adapter) { + lbs_deb_fw("why double free adapter?\n"); + return; + } + + lbs_deb_fw("free command buffer\n"); + libertas_free_cmd_buffer(priv); + + lbs_deb_fw("free command_timer\n"); + del_timer(&adapter->command_timer); + + lbs_deb_fw("free scan results table\n"); + kfree(adapter->networks); + adapter->networks = NULL; + + /* Free the adapter object itself */ + lbs_deb_fw("free adapter\n"); + kfree(adapter); + priv->adapter = NULL; +} + +/** * @brief This function adds the card. it will probe the * card, allocate the wlan_priv and initialize the device. * @@ -781,7 +1132,7 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) /* Allocate an Ethernet device and register it */ if (!(dev = alloc_etherdev(sizeof(wlan_private)))) { lbs_pr_err("init ethX device failed\n"); - return NULL; + goto done; } priv = dev->priv; @@ -791,20 +1142,24 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) goto err_kzalloc; } + if (libertas_init_adapter(priv)) { + lbs_pr_err("failed to initialize adapter structure.\n"); + goto err_init_adapter; + } + priv->dev = dev; priv->card = card; priv->mesh_open = 0; priv->infra_open = 0; - - SET_MODULE_OWNER(dev); + priv->hotplug_device = dmdev; /* Setup the OS Interface to our functions */ - dev->open = wlan_open; - dev->hard_start_xmit = wlan_pre_start_xmit; - dev->stop = wlan_close; - dev->set_mac_address = wlan_set_mac_address; - dev->tx_timeout = wlan_tx_timeout; - dev->get_stats = wlan_get_stats; + dev->open = libertas_open; + dev->hard_start_xmit = libertas_pre_start_xmit; + dev->stop = libertas_close; + dev->set_mac_address = libertas_set_mac_address; + dev->tx_timeout = libertas_tx_timeout; + dev->get_stats = libertas_get_stats; dev->watchdog_timeo = 5 * HZ; dev->ethtool_ops = &libertas_ethtool_ops; #ifdef WIRELESS_EXT @@ -813,84 +1168,148 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) #define NETIF_F_DYNALLOC 16 dev->features |= NETIF_F_DYNALLOC; dev->flags |= IFF_BROADCAST | IFF_MULTICAST; - dev->set_multicast_list = wlan_set_multicast_list; + dev->set_multicast_list = libertas_set_multicast_list; SET_NETDEV_DEV(dev, dmdev); - INIT_LIST_HEAD(&priv->adapter->cmdfreeq); - INIT_LIST_HEAD(&priv->adapter->cmdpendingq); + priv->rtap_net_dev = NULL; + if (device_create_file(dmdev, &dev_attr_libertas_rtap)) + goto err_init_adapter; + + lbs_deb_thread("Starting main thread...\n"); + init_waitqueue_head(&priv->waitq); + priv->main_thread = 
kthread_run(libertas_thread, dev, "libertas_main"); + if (IS_ERR(priv->main_thread)) { + lbs_deb_thread("Error creating main thread.\n"); + goto err_kthread_run; + } + + priv->work_thread = create_singlethread_workqueue("libertas_worker"); + INIT_DELAYED_WORK(&priv->assoc_work, libertas_association_worker); + INIT_DELAYED_WORK(&priv->scan_work, libertas_scan_worker); + INIT_WORK(&priv->sync_channel, libertas_sync_channel); - spin_lock_init(&priv->adapter->driver_lock); - init_waitqueue_head(&priv->adapter->cmd_pending); - priv->adapter->nr_cmd_pending = 0; goto done; +err_kthread_run: + device_remove_file(dmdev, &dev_attr_libertas_rtap); + +err_init_adapter: + libertas_free_adapter(priv); + err_kzalloc: free_netdev(dev); priv = NULL; + done: lbs_deb_leave_args(LBS_DEB_NET, "priv %p", priv); return priv; } EXPORT_SYMBOL_GPL(libertas_add_card); -int libertas_activate_card(wlan_private *priv, char *fw_name) + +int libertas_remove_card(wlan_private *priv) { + wlan_adapter *adapter = priv->adapter; struct net_device *dev = priv->dev; - int ret = -1; + union iwreq_data wrqu; lbs_deb_enter(LBS_DEB_MAIN); - lbs_deb_thread("Starting kthread...\n"); - priv->mainthread.priv = priv; - wlan_create_thread(wlan_service_main_thread, - &priv->mainthread, "wlan_main_service"); + libertas_remove_rtap(priv); - priv->assoc_thread = - create_singlethread_workqueue("libertas_assoc"); - INIT_DELAYED_WORK(&priv->assoc_work, libertas_association_worker); - INIT_WORK(&priv->sync_channel, libertas_sync_channel); + dev = priv->dev; + device_remove_file(priv->hotplug_device, &dev_attr_libertas_rtap); - /* - * Register the device. Fillup the private data structure with - * relevant information from the card and request for the required - * IRQ. - */ - if (priv->hw_register_dev(priv) < 0) { - lbs_pr_err("failed to register WLAN device\n"); - goto err_registerdev; - } + cancel_delayed_work(&priv->scan_work); + cancel_delayed_work(&priv->assoc_work); + destroy_workqueue(priv->work_thread); - /* init FW and HW */ - if (fw_name && libertas_init_fw(priv, fw_name)) { - lbs_pr_err("firmware init failed\n"); - goto err_registerdev; + if (adapter->psmode == WLAN802_11POWERMODEMAX_PSP) { + adapter->psmode = WLAN802_11POWERMODECAM; + libertas_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); } + memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + + /* Stop the thread servicing the interrupts */ + adapter->surpriseremoved = 1; + kthread_stop(priv->main_thread); + + libertas_free_adapter(priv); + + priv->dev = NULL; + free_netdev(dev); + + lbs_deb_leave(LBS_DEB_MAIN); + return 0; +} +EXPORT_SYMBOL_GPL(libertas_remove_card); + + +int libertas_start_card(wlan_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = -1; + + lbs_deb_enter(LBS_DEB_MAIN); + + /* poke the firmware */ + ret = wlan_setup_firmware(priv); + if (ret) + goto done; + + /* init 802.11d */ + libertas_init_11d(priv); + if (register_netdev(dev)) { lbs_pr_err("cannot register ethX device\n"); - goto err_init_fw; + goto done; } - lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); - libertas_debugfs_init_one(priv, dev); + lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); + ret = 0; - goto done; -err_init_fw: - priv->hw_unregister_dev(priv); -err_registerdev: - destroy_workqueue(priv->assoc_thread); - /* Stop the thread servicing the interrupts */ - wake_up_interruptible(&priv->mainthread.waitq); - wlan_terminate_thread(&priv->mainthread); done: - 
lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); return ret; } -EXPORT_SYMBOL_GPL(libertas_activate_card); +EXPORT_SYMBOL_GPL(libertas_start_card); + + +int libertas_stop_card(wlan_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = -1; + struct cmd_ctrl_node *cmdnode; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_MAIN); + + netif_stop_queue(priv->dev); + netif_carrier_off(priv->dev); + + libertas_debugfs_remove_one(priv); + + /* Flush pending command nodes */ + spin_lock_irqsave(&priv->adapter->driver_lock, flags); + list_for_each_entry(cmdnode, &priv->adapter->cmdpendingq, list) { + cmdnode->cmdwaitqwoken = 1; + wake_up_interruptible(&cmdnode->cmdwait_q); + } + spin_unlock_irqrestore(&priv->adapter->driver_lock, flags); + + unregister_netdev(dev); + + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(libertas_stop_card); /** @@ -915,13 +1334,11 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) mesh_dev->priv = priv; priv->mesh_dev = mesh_dev; - SET_MODULE_OWNER(mesh_dev); - - mesh_dev->open = mesh_open; - mesh_dev->hard_start_xmit = mesh_pre_start_xmit; - mesh_dev->stop = mesh_close; - mesh_dev->get_stats = wlan_get_stats; - mesh_dev->set_mac_address = wlan_set_mac_address; + mesh_dev->open = libertas_mesh_open; + mesh_dev->hard_start_xmit = libertas_mesh_pre_start_xmit; + mesh_dev->stop = libertas_mesh_close; + mesh_dev->get_stats = libertas_get_stats; + mesh_dev->set_mac_address = libertas_set_mac_address; mesh_dev->ethtool_ops = &libertas_ethtool_ops; memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, sizeof(priv->dev->dev_addr)); @@ -940,7 +1357,7 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) goto err_free; } - ret = device_create_file(&(mesh_dev->dev), &dev_attr_anycast_mask); + ret = sysfs_create_group(&(mesh_dev->dev.kobj), &libertas_mesh_attr_group); if (ret) goto err_unregister; @@ -948,7 +1365,6 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) ret = 0; goto done; - err_unregister: unregister_netdev(mesh_dev); @@ -961,86 +1377,12 @@ done: } EXPORT_SYMBOL_GPL(libertas_add_mesh); -static void wake_pending_cmdnodes(wlan_private *priv) -{ - struct cmd_ctrl_node *cmdnode; - unsigned long flags; - - lbs_deb_enter(LBS_DEB_CMD); - - spin_lock_irqsave(&priv->adapter->driver_lock, flags); - list_for_each_entry(cmdnode, &priv->adapter->cmdpendingq, list) { - cmdnode->cmdwaitqwoken = 1; - wake_up_interruptible(&cmdnode->cmdwait_q); - } - spin_unlock_irqrestore(&priv->adapter->driver_lock, flags); -} - - -int libertas_remove_card(wlan_private *priv) -{ - wlan_adapter *adapter; - struct net_device *dev; - union iwreq_data wrqu; - - lbs_deb_enter(LBS_DEB_NET); - - if (!priv) - goto out; - - adapter = priv->adapter; - - if (!adapter) - goto out; - - dev = priv->dev; - - netif_stop_queue(priv->dev); - netif_carrier_off(priv->dev); - - wake_pending_cmdnodes(priv); - - unregister_netdev(dev); - - cancel_delayed_work(&priv->assoc_work); - destroy_workqueue(priv->assoc_thread); - - if (adapter->psmode == wlan802_11powermodemax_psp) { - adapter->psmode = wlan802_11powermodecam; - libertas_ps_wakeup(priv, cmd_option_waitforrsp); - } - - memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); - - adapter->surpriseremoved = 1; - - /* Stop the thread servicing the interrupts */ - wlan_terminate_thread(&priv->mainthread); - - libertas_debugfs_remove_one(priv); - - 
lbs_deb_net("free adapter\n"); - libertas_free_adapter(priv); - - lbs_deb_net("unregister finish\n"); - - priv->dev = NULL; - free_netdev(dev); - -out: - lbs_deb_leave(LBS_DEB_NET); - return 0; -} -EXPORT_SYMBOL_GPL(libertas_remove_card); - void libertas_remove_mesh(wlan_private *priv) { struct net_device *mesh_dev; - lbs_deb_enter(LBS_DEB_NET); + lbs_deb_enter(LBS_DEB_MAIN); if (!priv) goto out; @@ -1050,14 +1392,14 @@ void libertas_remove_mesh(wlan_private *priv) netif_stop_queue(mesh_dev); netif_carrier_off(priv->mesh_dev); - device_remove_file(&(mesh_dev->dev), &dev_attr_anycast_mask); + sysfs_remove_group(&(mesh_dev->dev.kobj), &libertas_mesh_attr_group); unregister_netdev(mesh_dev); priv->mesh_dev = NULL ; free_netdev(mesh_dev); out: - lbs_deb_leave(LBS_DEB_NET); + lbs_deb_leave(LBS_DEB_MAIN); } EXPORT_SYMBOL_GPL(libertas_remove_mesh); @@ -1076,7 +1418,7 @@ struct chan_freq_power *libertas_get_region_cfp_table(u8 region, u8 band, int *c lbs_deb_enter(LBS_DEB_MAIN); - end = sizeof(region_cfp_table)/sizeof(struct region_cfp_table); + end = ARRAY_SIZE(region_cfp_table); for (i = 0; i < end ; i++) { lbs_deb_main("region_cfp_table[i].region=%d\n", @@ -1148,15 +1490,30 @@ void libertas_interrupt(struct net_device *dev) if (priv->adapter->psstate == PS_STATE_SLEEP) { priv->adapter->psstate = PS_STATE_AWAKE; netif_wake_queue(dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_wake_queue(priv->mesh_dev); } - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); lbs_deb_leave(LBS_DEB_THREAD); } EXPORT_SYMBOL_GPL(libertas_interrupt); +int libertas_reset_device(wlan_private *priv) +{ + int ret; + + lbs_deb_enter(LBS_DEB_MAIN); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_RESET, + CMD_ACT_HALT, 0, 0, NULL); + msleep_interruptible(10); + + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(libertas_reset_device); + static int libertas_init_module(void) { lbs_deb_enter(LBS_DEB_MAIN); @@ -1174,6 +1531,81 @@ static void libertas_exit_module(void) lbs_deb_leave(LBS_DEB_MAIN); } +/* + * rtap interface support fuctions + */ + +static int libertas_rtap_open(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_stop_queue(dev); + return 0; +} + +static int libertas_rtap_stop(struct net_device *dev) +{ + return 0; +} + +static int libertas_rtap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + netif_stop_queue(dev); + return -EOPNOTSUPP; +} + +static struct net_device_stats *libertas_rtap_get_stats(struct net_device *dev) +{ + wlan_private *priv = dev->priv; + return &priv->ieee->stats; +} + + +void libertas_remove_rtap(wlan_private *priv) +{ + if (priv->rtap_net_dev == NULL) + return; + unregister_netdev(priv->rtap_net_dev); + free_ieee80211(priv->rtap_net_dev); + priv->rtap_net_dev = NULL; +} + +int libertas_add_rtap(wlan_private *priv) +{ + int rc = 0; + + if (priv->rtap_net_dev) + return -EPERM; + + priv->rtap_net_dev = alloc_ieee80211(0); + if (priv->rtap_net_dev == NULL) + return -ENOMEM; + + + priv->ieee = netdev_priv(priv->rtap_net_dev); + + strcpy(priv->rtap_net_dev->name, "rtap%d"); + + priv->rtap_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; + priv->rtap_net_dev->open = libertas_rtap_open; + priv->rtap_net_dev->stop = libertas_rtap_stop; + priv->rtap_net_dev->get_stats = libertas_rtap_get_stats; + priv->rtap_net_dev->hard_start_xmit = libertas_rtap_hard_start_xmit; + priv->rtap_net_dev->set_multicast_list = libertas_set_multicast_list; + priv->rtap_net_dev->priv = priv; + + 
priv->ieee->iw_mode = IW_MODE_MONITOR; + + rc = register_netdev(priv->rtap_net_dev); + if (rc) { + free_ieee80211(priv->rtap_net_dev); + priv->rtap_net_dev = NULL; + return rc; + } + + return 0; +} + + module_init(libertas_init_module); module_exit(libertas_exit_module); diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 769c86fb9509..0420e5b9ff9b 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -85,12 +85,12 @@ static u8 wlan_getavgnf(wlan_private * priv) static void wlan_save_rawSNRNF(wlan_private * priv, struct rxpd *p_rx_pd) { wlan_adapter *adapter = priv->adapter; - if (adapter->numSNRNF < adapter->data_avg_factor) + if (adapter->numSNRNF < DEFAULT_DATA_AVG_FACTOR) adapter->numSNRNF++; adapter->rawSNR[adapter->nextSNRNF] = p_rx_pd->snr; adapter->rawNF[adapter->nextSNRNF] = p_rx_pd->nf; adapter->nextSNRNF++; - if (adapter->nextSNRNF >= adapter->data_avg_factor) + if (adapter->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR) adapter->nextSNRNF = 0; return; } @@ -117,8 +117,6 @@ static void wlan_compute_rssi(wlan_private * priv, struct rxpd *p_rx_pd) adapter->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf; wlan_save_rawSNRNF(priv, p_rx_pd); - adapter->rxpd_rate = p_rx_pd->rx_rate; - adapter->SNR[TYPE_RXPD][TYPE_AVG] = wlan_getavgsnr(priv) * AVG_SCALE; adapter->NF[TYPE_RXPD][TYPE_AVG] = wlan_getavgnf(priv) * AVG_SCALE; lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n", @@ -140,12 +138,15 @@ void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb) { lbs_deb_rx("skb->data %p\n", skb->data); - if (priv->mesh_dev && IS_MESH_FRAME(skb)) - skb->protocol = eth_type_trans(skb, priv->mesh_dev); - else - skb->protocol = eth_type_trans(skb, priv->dev); + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) { + skb->protocol = eth_type_trans(skb, priv->rtap_net_dev); + } else { + if (priv->mesh_dev && IS_MESH_FRAME(skb)) + skb->protocol = eth_type_trans(skb, priv->mesh_dev); + else + skb->protocol = eth_type_trans(skb, priv->dev); + } skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_rx(skb); } @@ -172,11 +173,7 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) lbs_deb_enter(LBS_DEB_RX); - if (priv->adapter->debugmode & MRVDRV_DEBUG_RX_PATH) - lbs_dbg_hex("RX packet: ", skb->data, - min_t(unsigned int, skb->len, 100)); - - if (priv->adapter->linkmode == WLAN_LINKMODE_802_11) + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) return process_rxed_802_11_packet(priv, skb); p_rx_pkt = (struct rxpackethdr *) skb->data; @@ -186,7 +183,7 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) else UNSET_MESH_FRAME(skb); - lbs_dbg_hex("RX Data: Before chop rxpd", skb->data, + lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min_t(unsigned int, skb->len, 100)); if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { @@ -210,9 +207,9 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n", skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd)); - lbs_dbg_hex("RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr, + lbs_deb_hex(LBS_DEB_RX, "RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr, sizeof(p_rx_pkt->eth803_hdr.dest_addr)); - lbs_dbg_hex("RX Data: Src", p_rx_pkt->eth803_hdr.src_addr, + lbs_deb_hex(LBS_DEB_RX, "RX Data: Src", p_rx_pkt->eth803_hdr.src_addr, sizeof(p_rx_pkt->eth803_hdr.src_addr)); if (memcmp(&p_rx_pkt->rfc1042_hdr, @@ -244,7 +241,7 @@ int 
libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) */ hdrchop = (u8 *) p_ethhdr - (u8 *) p_rx_pkt; } else { - lbs_dbg_hex("RX Data: LLC/SNAP", + lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP", (u8 *) & p_rx_pkt->rfc1042_hdr, sizeof(p_rx_pkt->rfc1042_hdr)); @@ -260,8 +257,8 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) /* Take the data rate from the rxpd structure * only if the rate is auto */ - if (adapter->is_datarate_auto) - adapter->datarate = libertas_index_to_data_rate(p_rx_pd->rx_rate); + if (adapter->auto_rate) + adapter->cur_rate = libertas_fw_index_to_data_rate(p_rx_pd->rx_rate); wlan_compute_rssi(priv, p_rx_pd); @@ -296,21 +293,22 @@ static u8 convert_mv_rate_to_radiotap(u8 rate) return 11; case 3: /* 11 Mbps */ return 22; - case 4: /* 6 Mbps */ + /* case 4: reserved */ + case 5: /* 6 Mbps */ return 12; - case 5: /* 9 Mbps */ + case 6: /* 9 Mbps */ return 18; - case 6: /* 12 Mbps */ + case 7: /* 12 Mbps */ return 24; - case 7: /* 18 Mbps */ + case 8: /* 18 Mbps */ return 36; - case 8: /* 24 Mbps */ + case 9: /* 24 Mbps */ return 48; - case 9: /* 36 Mbps */ + case 10: /* 36 Mbps */ return 72; - case 10: /* 48 Mbps */ + case 11: /* 48 Mbps */ return 96; - case 11: /* 54 Mbps */ + case 12: /* 54 Mbps */ return 108; } lbs_pr_alert("Invalid Marvell WLAN rate %i\n", rate); @@ -340,7 +338,7 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) p_rx_pkt = (struct rx80211packethdr *) skb->data; prxpd = &p_rx_pkt->rx_pd; - // lbs_dbg_hex("RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); + // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { lbs_deb_rx("rx err: frame received wit bad length\n"); @@ -361,20 +359,19 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd)); /* create the exported radio header */ - switch (priv->adapter->radiomode) { - case WLAN_RADIOMODE_NONE: + if(priv->adapter->monitormode == WLAN_MONITOR_OFF) { /* no radio header */ /* chop the rxpd */ skb_pull(skb, sizeof(struct rxpd)); - break; + } - case WLAN_RADIOMODE_RADIOTAP: + else { /* radiotap header */ radiotap_hdr.hdr.it_version = 0; /* XXX must check this value for pad */ radiotap_hdr.hdr.it_pad = 0; - radiotap_hdr.hdr.it_len = sizeof(struct rx_radiotap_hdr); - radiotap_hdr.hdr.it_present = RX_RADIOTAP_PRESENT; + radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); + radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); /* unknown values */ radiotap_hdr.flags = 0; radiotap_hdr.chan_freq = 0; @@ -389,8 +386,6 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) radiotap_hdr.rx_flags |= IEEE80211_RADIOTAP_F_RX_BADFCS; //memset(radiotap_hdr.pad, 0x11, IEEE80211_RADIOTAP_HDRLEN - 18); - // lbs_dbg_hex1("RX radiomode packet BEF: ", skb->data, min(skb->len, 100)); - /* chop the rxpd */ skb_pull(skb, sizeof(struct rxpd)); @@ -408,25 +403,13 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) rx_radiotap_hdr)); memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr)); - //lbs_dbg_hex1("RX radiomode packet AFT: ", skb->data, min(skb->len, 100)); - break; - - default: - /* unknown header */ - lbs_pr_alert("Unknown radiomode %i\n", - priv->adapter->radiomode); - /* don't export any header */ - /* chop the rxpd */ - skb_pull(skb, sizeof(struct rxpd)); - break; 
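/*
 * Editor's sketch, not part of the patch: the monitor-mode path above
 * prepends a radiotap header in front of the received 802.11 frame.  The
 * bare pattern, using only the generic ieee80211_radiotap_header (the
 * driver's rx_radiotap_hdr adds rate/antenna/flags fields on top) and
 * assuming the skb has enough headroom:
 */
#include <linux/skbuff.h>
#include <net/ieee80211_radiotap.h>

static void example_add_radiotap(struct sk_buff *skb)
{
	struct ieee80211_radiotap_header *hdr;

	hdr = (struct ieee80211_radiotap_header *)skb_push(skb, sizeof(*hdr));
	hdr->it_version = 0;				/* always 0 for radiotap v0 */
	hdr->it_pad = 0;
	hdr->it_len = cpu_to_le16(sizeof(*hdr));	/* header length, little endian */
	hdr->it_present = cpu_to_le32(0);		/* no optional fields present */
}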
} /* Take the data rate from the rxpd structure * only if the rate is auto */ - if (adapter->is_datarate_auto) { - adapter->datarate = libertas_index_to_data_rate(prxpd->rx_rate); - } + if (adapter->auto_rate) + adapter->cur_rate = libertas_fw_index_to_data_rate(prxpd->rx_rate); wlan_compute_rssi(priv, prxpd); diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index c3043dcb541e..ad1e67d984ce 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c @@ -13,10 +13,13 @@ #include <net/ieee80211.h> #include <net/iw_handler.h> +#include <asm/unaligned.h> + #include "host.h" #include "decl.h" #include "dev.h" #include "scan.h" +#include "join.h" //! Approximate amount of data needed to pass a scan result back to iwlist #define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ @@ -62,6 +65,15 @@ static const u8 zeromac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 bcastmac[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + + + +/*********************************************************************/ +/* */ +/* Misc helper functions */ +/* */ +/*********************************************************************/ + static inline void clear_bss_descriptor (struct bss_descriptor * bss) { /* Don't blow away ->list, just BSS data */ @@ -74,9 +86,9 @@ static inline int match_bss_no_security(struct wlan_802_11_security * secinfo, if ( !secinfo->wep_enabled && !secinfo->WPAenabled && !secinfo->WPA2enabled - && match_bss->wpa_ie[0] != WPA_IE - && match_bss->rsn_ie[0] != WPA2_IE - && !match_bss->privacy) { + && match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC + && match_bss->rsn_ie[0] != MFIE_TYPE_RSN + && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { return 1; } return 0; @@ -88,7 +100,7 @@ static inline int match_bss_static_wep(struct wlan_802_11_security * secinfo, if ( secinfo->wep_enabled && !secinfo->WPAenabled && !secinfo->WPA2enabled - && match_bss->privacy) { + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { return 1; } return 0; @@ -99,9 +111,10 @@ static inline int match_bss_wpa(struct wlan_802_11_security * secinfo, { if ( !secinfo->wep_enabled && secinfo->WPAenabled - && (match_bss->wpa_ie[0] == WPA_IE) + && (match_bss->wpa_ie[0] == MFIE_TYPE_GENERIC) /* privacy bit may NOT be set in some APs like LinkSys WRT54G - && bss->privacy */ + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { + */ ) { return 1; } @@ -113,9 +126,10 @@ static inline int match_bss_wpa2(struct wlan_802_11_security * secinfo, { if ( !secinfo->wep_enabled && secinfo->WPA2enabled - && (match_bss->rsn_ie[0] == WPA2_IE) + && (match_bss->rsn_ie[0] == MFIE_TYPE_RSN) /* privacy bit may NOT be set in some APs like LinkSys WRT54G - && bss->privacy */ + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { + */ ) { return 1; } @@ -128,9 +142,9 @@ static inline int match_bss_dynamic_wep(struct wlan_802_11_security * secinfo, if ( !secinfo->wep_enabled && !secinfo->WPAenabled && !secinfo->WPA2enabled - && (match_bss->wpa_ie[0] != WPA_IE) - && (match_bss->rsn_ie[0] != WPA2_IE) - && match_bss->privacy) { + && (match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC) + && (match_bss->rsn_ie[0] != MFIE_TYPE_RSN) + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { return 1; } return 0; @@ -160,7 +174,7 @@ static int is_network_compatible(wlan_adapter * adapter, { int matched = 0; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter(LBS_DEB_SCAN); if (bss->mode != mode) goto done; @@ -177,7 +191,7 @@ static int is_network_compatible(wlan_adapter * 
adapter, adapter->secinfo.wep_enabled ? "e" : "d", adapter->secinfo.WPAenabled ? "e" : "d", adapter->secinfo.WPA2enabled ? "e" : "d", - bss->privacy); + (bss->capability & WLAN_CAPABILITY_PRIVACY)); goto done; } else if ((matched = match_bss_wpa2(&adapter->secinfo, bss))) { lbs_deb_scan( @@ -187,15 +201,14 @@ static int is_network_compatible(wlan_adapter * adapter, adapter->secinfo.wep_enabled ? "e" : "d", adapter->secinfo.WPAenabled ? "e" : "d", adapter->secinfo.WPA2enabled ? "e" : "d", - bss->privacy); + (bss->capability & WLAN_CAPABILITY_PRIVACY)); goto done; } else if ((matched = match_bss_dynamic_wep(&adapter->secinfo, bss))) { lbs_deb_scan( "is_network_compatible() dynamic WEP: " "wpa_ie=%#x wpa2_ie=%#x privacy=%#x\n", - bss->wpa_ie[0], - bss->rsn_ie[0], - bss->privacy); + bss->wpa_ie[0], bss->rsn_ie[0], + (bss->capability & WLAN_CAPABILITY_PRIVACY)); goto done; } @@ -207,16 +220,44 @@ static int is_network_compatible(wlan_adapter * adapter, adapter->secinfo.wep_enabled ? "e" : "d", adapter->secinfo.WPAenabled ? "e" : "d", adapter->secinfo.WPA2enabled ? "e" : "d", - bss->privacy); + (bss->capability & WLAN_CAPABILITY_PRIVACY)); done: - lbs_deb_leave(LBS_DEB_SCAN); + lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched); return matched; } /** + * @brief Compare two SSIDs + * + * @param ssid1 A pointer to ssid to compare + * @param ssid2 A pointer to ssid to compare + * + * @return 0--ssid is same, otherwise is different + */ +int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len) +{ + if (ssid1_len != ssid2_len) + return -1; + + return memcmp(ssid1, ssid2, ssid1_len); +} + + + + +/*********************************************************************/ +/* */ +/* Main scanning support */ +/* */ +/*********************************************************************/ + + +/** * @brief Create a channel list for the driver to scan based on region info * + * Only used from wlan_scan_setup_scan_config() + * * Use the driver region/band information to construct a comprehensive list * of channels to scan. This routine is used for any scan that is not * provided a specific channel list to scan. 
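/*
 * Editor's sketch, not part of the patch: besides rebuilding the channel
 * list (hunk below), the scan rework splits a full scan into partial scans
 * that reschedule themselves -- see libertas_scan_worker() and the
 * queue_delayed_work() call further down.  The underlying workqueue pattern,
 * with made-up names (struct example_priv, example_scan_worker):
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_priv {
	struct workqueue_struct *wq;
	struct delayed_work scan_work;
	int last_channel;		/* 0 once every channel has been scanned */
};

static void example_scan_worker(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, scan_work.work);

	/* scan the next few channels here, then reschedule if any remain */
	if (priv->last_channel)
		queue_delayed_work(priv->wq, &priv->scan_work,
				   msecs_to_jiffies(300));
}

static int example_init(struct example_priv *priv)
{
	priv->wq = create_singlethread_workqueue("example_worker");
	if (!priv->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&priv->scan_work, example_scan_worker);
	return 0;
}

/* teardown: cancel_delayed_work(&priv->scan_work); destroy_workqueue(priv->wq); */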
@@ -244,17 +285,19 @@ static void wlan_scan_create_channel_list(wlan_private * priv, int nextchan; u8 scantype; + lbs_deb_enter_args(LBS_DEB_SCAN, "filteredscan %d", filteredscan); + chanidx = 0; /* Set the default scan type to the user specified type, will later * be changed to passive on a per channel basis if restricted by * regulatory requirements (11d or 11h) */ - scantype = adapter->scantype; + scantype = CMD_SCAN_TYPE_ACTIVE; for (rgnidx = 0; rgnidx < ARRAY_SIZE(adapter->region_channel); rgnidx++) { if (priv->adapter->enable11d && - adapter->connect_status != libertas_connected) { + adapter->connect_status != LIBERTAS_CONNECTED) { /* Scan all the supported chan for the first scan */ if (!adapter->universal_channel[rgnidx].valid) continue; @@ -286,11 +329,11 @@ static void wlan_scan_create_channel_list(wlan_private * priv, case BAND_G: default: scanchanlist[chanidx].radiotype = - cmd_scan_radio_type_bg; + CMD_SCAN_RADIO_TYPE_BG; break; } - if (scantype == cmd_scan_type_passive) { + if (scantype == CMD_SCAN_TYPE_PASSIVE) { scanchanlist[chanidx].maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME); scanchanlist[chanidx].chanscanmode.passivescan = @@ -312,6 +355,16 @@ static void wlan_scan_create_channel_list(wlan_private * priv, } } + +/* Delayed partial scan worker */ +void libertas_scan_worker(struct work_struct *work) +{ + wlan_private *priv = container_of(work, wlan_private, scan_work.work); + + wlan_scan_networks(priv, NULL, 0); +} + + /** * @brief Construct a wlan_scan_cmd_config structure to use in issue scan cmds * @@ -359,7 +412,6 @@ wlan_scan_setup_scan_config(wlan_private * priv, u8 * pfilteredscan, u8 * pscancurrentonly) { - wlan_adapter *adapter = priv->adapter; struct mrvlietypes_numprobes *pnumprobestlv; struct mrvlietypes_ssidparamset *pssidtlv; struct wlan_scan_cmd_config * pscancfgout = NULL; @@ -371,6 +423,8 @@ wlan_scan_setup_scan_config(wlan_private * priv, int channel; int radiotype; + lbs_deb_enter(LBS_DEB_SCAN); + pscancfgout = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); if (pscancfgout == NULL) goto out; @@ -407,15 +461,12 @@ wlan_scan_setup_scan_config(wlan_private * priv, *pscancurrentonly = 0; if (puserscanin) { - /* Set the bss type scan filter, use adapter setting if unset */ pscancfgout->bsstype = - (puserscanin->bsstype ? puserscanin->bsstype : adapter-> - scanmode); + puserscanin->bsstype ? puserscanin->bsstype : CMD_BSS_TYPE_ANY; /* Set the number of probes to send, use adapter setting if unset */ - numprobes = (puserscanin->numprobes ? puserscanin->numprobes : - adapter->scanprobes); + numprobes = puserscanin->numprobes ? 
puserscanin->numprobes : 0; /* * Set the BSSID filter to the incoming configuration, @@ -447,8 +498,8 @@ wlan_scan_setup_scan_config(wlan_private * priv, *pfilteredscan = 1; } } else { - pscancfgout->bsstype = adapter->scanmode; - numprobes = adapter->scanprobes; + pscancfgout->bsstype = CMD_BSS_TYPE_ANY; + numprobes = 0; } /* If the input config or adapter has the number of Probes set, add tlv */ @@ -469,59 +520,56 @@ wlan_scan_setup_scan_config(wlan_private * priv, */ *ppchantlvout = (struct mrvlietypes_chanlistparamset *) ptlvpos; - if (puserscanin && puserscanin->chanlist[0].channumber) { - - lbs_deb_scan("Scan: Using supplied channel list\n"); + if (!puserscanin || !puserscanin->chanlist[0].channumber) { + /* Create a default channel scan list */ + lbs_deb_scan("creating full region channel list\n"); + wlan_scan_create_channel_list(priv, pscanchanlist, + *pfilteredscan); + goto out; + } - for (chanidx = 0; - chanidx < WLAN_IOCTL_USER_SCAN_CHAN_MAX - && puserscanin->chanlist[chanidx].channumber; chanidx++) { + for (chanidx = 0; + chanidx < WLAN_IOCTL_USER_SCAN_CHAN_MAX + && puserscanin->chanlist[chanidx].channumber; chanidx++) { - channel = puserscanin->chanlist[chanidx].channumber; - (pscanchanlist + chanidx)->channumber = channel; + channel = puserscanin->chanlist[chanidx].channumber; + (pscanchanlist + chanidx)->channumber = channel; - radiotype = puserscanin->chanlist[chanidx].radiotype; - (pscanchanlist + chanidx)->radiotype = radiotype; + radiotype = puserscanin->chanlist[chanidx].radiotype; + (pscanchanlist + chanidx)->radiotype = radiotype; - scantype = puserscanin->chanlist[chanidx].scantype; + scantype = puserscanin->chanlist[chanidx].scantype; - if (scantype == cmd_scan_type_passive) { - (pscanchanlist + - chanidx)->chanscanmode.passivescan = 1; - } else { - (pscanchanlist + - chanidx)->chanscanmode.passivescan = 0; - } + if (scantype == CMD_SCAN_TYPE_PASSIVE) { + (pscanchanlist + + chanidx)->chanscanmode.passivescan = 1; + } else { + (pscanchanlist + + chanidx)->chanscanmode.passivescan = 0; + } - if (puserscanin->chanlist[chanidx].scantime) { - scandur = - puserscanin->chanlist[chanidx].scantime; + if (puserscanin->chanlist[chanidx].scantime) { + scandur = puserscanin->chanlist[chanidx].scantime; + } else { + if (scantype == CMD_SCAN_TYPE_PASSIVE) { + scandur = MRVDRV_PASSIVE_SCAN_CHAN_TIME; } else { - if (scantype == cmd_scan_type_passive) { - scandur = MRVDRV_PASSIVE_SCAN_CHAN_TIME; - } else { - scandur = MRVDRV_ACTIVE_SCAN_CHAN_TIME; - } + scandur = MRVDRV_ACTIVE_SCAN_CHAN_TIME; } - - (pscanchanlist + chanidx)->minscantime = - cpu_to_le16(scandur); - (pscanchanlist + chanidx)->maxscantime = - cpu_to_le16(scandur); } - /* Check if we are only scanning the current channel */ - if ((chanidx == 1) && (puserscanin->chanlist[0].channumber - == - priv->adapter->curbssparams.channel)) { - *pscancurrentonly = 1; - lbs_deb_scan("Scan: Scanning current channel only"); - } + (pscanchanlist + chanidx)->minscantime = + cpu_to_le16(scandur); + (pscanchanlist + chanidx)->maxscantime = + cpu_to_le16(scandur); + } - } else { - lbs_deb_scan("Scan: Creating full region channel list\n"); - wlan_scan_create_channel_list(priv, pscanchanlist, - *pfilteredscan); + /* Check if we are only scanning the current channel */ + if ((chanidx == 1) && + (puserscanin->chanlist[0].channumber == + priv->adapter->curbssparams.channel)) { + *pscancurrentonly = 1; + lbs_deb_scan("scanning current channel only"); } out: @@ -531,6 +579,8 @@ out: /** * @brief Construct and send multiple scan config commands to 
the firmware * + * Only used from wlan_scan_networks() + * * Previous routines have created a wlan_scan_cmd_config with any requested * TLVs. This function splits the channel TLV into maxchanperscan lists * and sends the portion of the channel TLV along with the other TLVs @@ -568,12 +618,14 @@ static int wlan_scan_channel_list(wlan_private * priv, int scanned = 0; union iwreq_data wrqu; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter_args(LBS_DEB_SCAN, "maxchanperscan %d, filteredscan %d, " + "full_scan %d", maxchanperscan, filteredscan, full_scan); - if (pscancfgout == 0 || pchantlvout == 0 || pscanchanlist == 0) { - lbs_deb_scan("Scan: Null detect: %p, %p, %p\n", - pscancfgout, pchantlvout, pscanchanlist); - return -1; + if (!pscancfgout || !pchantlvout || !pscanchanlist) { + lbs_deb_scan("pscancfgout, pchantlvout or " + "pscanchanlist is NULL\n"); + ret = -1; + goto out; } pchantlvout->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); @@ -605,12 +657,13 @@ static int wlan_scan_channel_list(wlan_private * priv, while (tlvidx < maxchanperscan && ptmpchan->channumber && !doneearly && scanned < 2) { - lbs_deb_scan( - "Scan: Chan(%3d), Radio(%d), mode(%d,%d), Dur(%d)\n", - ptmpchan->channumber, ptmpchan->radiotype, - ptmpchan->chanscanmode.passivescan, - ptmpchan->chanscanmode.disablechanfilt, - ptmpchan->maxscantime); + lbs_deb_scan("channel %d, radio %d, passive %d, " + "dischanflt %d, maxscantime %d\n", + ptmpchan->channumber, + ptmpchan->radiotype, + ptmpchan->chanscanmode.passivescan, + ptmpchan->chanscanmode.disablechanfilt, + ptmpchan->maxscantime); /* Copy the current channel TLV to the command being prepared */ memcpy(pchantlvout->chanscanparam + tlvidx, @@ -667,7 +720,7 @@ static int wlan_scan_channel_list(wlan_private * priv, } /* Send the scan command to the firmware with the specified cfg */ - ret = libertas_prepare_and_send_command(priv, cmd_802_11_scan, 0, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SCAN, 0, 0, 0, pscancfgout); if (scanned >= 2 && !full_scan) { ret = 0; @@ -679,24 +732,38 @@ static int wlan_scan_channel_list(wlan_private * priv, done: priv->adapter->last_scanned_channel = ptmpchan->channumber; - /* Tell userspace the scan table has been updated */ - memset(&wrqu, 0, sizeof(union iwreq_data)); - wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL); + if (priv->adapter->last_scanned_channel) { + /* Schedule the next part of the partial scan */ + if (!full_scan && !priv->adapter->surpriseremoved) { + cancel_delayed_work(&priv->scan_work); + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(300)); + } + } else { + /* All done, tell userspace the scan table has been updated */ + memset(&wrqu, 0, sizeof(union iwreq_data)); + wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL); + } +out: lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } -static void -clear_selected_scan_list_entries(wlan_adapter * adapter, - const struct wlan_ioctl_user_scan_cfg * scan_cfg) +/* + * Only used from wlan_scan_networks() +*/ +static void clear_selected_scan_list_entries(wlan_adapter *adapter, + const struct wlan_ioctl_user_scan_cfg *scan_cfg) { - struct bss_descriptor * bss; - struct bss_descriptor * safe; + struct bss_descriptor *bss; + struct bss_descriptor *safe; u32 clear_ssid_flag = 0, clear_bssid_flag = 0; + lbs_deb_enter(LBS_DEB_SCAN); + if (!scan_cfg) - return; + goto out; if (scan_cfg->clear_ssid && scan_cfg->ssid_len) clear_ssid_flag = 1; @@ -708,7 +775,7 @@ clear_selected_scan_list_entries(wlan_adapter * adapter, } if 
(!clear_ssid_flag && !clear_bssid_flag) - return; + goto out; mutex_lock(&adapter->lock); list_for_each_entry_safe (bss, safe, &adapter->network_list, list) { @@ -731,12 +798,16 @@ clear_selected_scan_list_entries(wlan_adapter * adapter, } } mutex_unlock(&adapter->lock); +out: + lbs_deb_leave(LBS_DEB_SCAN); } /** * @brief Internal function used to start a scan based on an input config * + * Also used from debugfs + * * Use the input user scan configuration information when provided in * order to send the appropriate scan commands to firmware to populate or * update the internal driver scan table @@ -744,12 +815,13 @@ clear_selected_scan_list_entries(wlan_adapter * adapter, * @param priv A pointer to wlan_private structure * @param puserscanin Pointer to the input configuration for the requested * scan. + * @param full_scan ??? * * @return 0 or < 0 if error */ int wlan_scan_networks(wlan_private * priv, - const struct wlan_ioctl_user_scan_cfg * puserscanin, - int full_scan) + const struct wlan_ioctl_user_scan_cfg * puserscanin, + int full_scan) { wlan_adapter * adapter = priv->adapter; struct mrvlietypes_chanlistparamset *pchantlvout; @@ -762,9 +834,16 @@ int wlan_scan_networks(wlan_private * priv, #ifdef CONFIG_LIBERTAS_DEBUG struct bss_descriptor * iter_bss; int i = 0; + DECLARE_MAC_BUF(mac); #endif - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan); + + /* Cancel any partial outstanding partial scans if this scan + * is a full scan. + */ + if (full_scan && delayed_work_pending(&priv->scan_work)) + cancel_delayed_work(&priv->scan_work); scan_chan_list = kzalloc(sizeof(struct chanscanparamset) * WLAN_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL); @@ -791,8 +870,10 @@ int wlan_scan_networks(wlan_private * priv, if (!scancurrentchanonly) { netif_stop_queue(priv->dev); netif_carrier_off(priv->dev); - netif_stop_queue(priv->mesh_dev); - netif_carrier_off(priv->mesh_dev); + if (priv->mesh_dev) { + netif_stop_queue(priv->mesh_dev); + netif_carrier_off(priv->mesh_dev); + } } ret = wlan_scan_channel_list(priv, @@ -807,19 +888,22 @@ int wlan_scan_networks(wlan_private * priv, #ifdef CONFIG_LIBERTAS_DEBUG /* Dump the scan table */ mutex_lock(&adapter->lock); + lbs_deb_scan("The scan table contains:\n"); list_for_each_entry (iter_bss, &adapter->network_list, list) { - lbs_deb_scan("Scan:(%02d) " MAC_FMT ", RSSI[%03d], SSID[%s]\n", - i++, MAC_ARG(iter_bss->bssid), (s32) iter_bss->rssi, + lbs_deb_scan("scan %02d, %s, RSSI, %d, SSID '%s'\n", + i++, print_mac(mac, iter_bss->bssid), (s32) iter_bss->rssi, escape_essid(iter_bss->ssid, iter_bss->ssid_len)); } mutex_unlock(&adapter->lock); #endif - if (priv->adapter->connect_status == libertas_connected) { + if (priv->adapter->connect_status == LIBERTAS_CONNECTED) { netif_carrier_on(priv->dev); netif_wake_queue(priv->dev); - netif_carrier_on(priv->mesh_dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) { + netif_carrier_on(priv->mesh_dev); + netif_wake_queue(priv->mesh_dev); + } } out: @@ -834,58 +918,6 @@ out: } /** - * @brief Inspect the scan response buffer for pointers to expected TLVs - * - * TLVs can be included at the end of the scan response BSS information. 
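/*
 * The partial-scan flow above (libertas_scan_worker, the requeue in
 * wlan_scan_channel_list and the cancel in wlan_scan_networks) follows the
 * standard delayed-work pattern: scan one slice of channels, then let the
 * workqueue call back later for the rest.  A minimal sketch of that pattern,
 * using illustrative names (example_priv, example_scan_worker) rather than
 * the driver's own:
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_priv {
	struct workqueue_struct *work_thread;	/* driver-owned workqueue */
	struct delayed_work scan_work;		/* set up with INIT_DELAYED_WORK() */
	int last_scanned_channel;		/* nonzero: scan not finished yet */
};

static void example_scan_worker(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, scan_work.work);

	/* ... issue the scan command for the next batch of channels ... */

	if (priv->last_scanned_channel)		/* channels left: run again later */
		queue_delayed_work(priv->work_thread, &priv->scan_work,
				   msecs_to_jiffies(300));
}

/* A full scan cancels any pending slice first: cancel_delayed_work(&priv->scan_work); */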
- * Parse the data in the buffer for pointers to TLVs that can potentially - * be passed back in the response - * - * @param ptlv Pointer to the start of the TLV buffer to parse - * @param tlvbufsize size of the TLV buffer - * @param ptsftlv Output parameter: Pointer to the TSF TLV if found - * - * @return void - */ -static -void wlan_ret_802_11_scan_get_tlv_ptrs(struct mrvlietypes_data * ptlv, - int tlvbufsize, - struct mrvlietypes_tsftimestamp ** ptsftlv) -{ - struct mrvlietypes_data *pcurrenttlv; - int tlvbufleft; - u16 tlvtype; - u16 tlvlen; - - pcurrenttlv = ptlv; - tlvbufleft = tlvbufsize; - *ptsftlv = NULL; - - lbs_deb_scan("SCAN_RESP: tlvbufsize = %d\n", tlvbufsize); - lbs_dbg_hex("SCAN_RESP: TLV Buf", (u8 *) ptlv, tlvbufsize); - - while (tlvbufleft >= sizeof(struct mrvlietypesheader)) { - tlvtype = le16_to_cpu(pcurrenttlv->header.type); - tlvlen = le16_to_cpu(pcurrenttlv->header.len); - - switch (tlvtype) { - case TLV_TYPE_TSFTIMESTAMP: - *ptsftlv = (struct mrvlietypes_tsftimestamp *) pcurrenttlv; - break; - - default: - lbs_deb_scan("SCAN_RESP: Unhandled TLV = %d\n", - tlvtype); - /* Give up, this seems corrupted */ - return; - } /* switch */ - - tlvbufleft -= (sizeof(ptlv->header) + tlvlen); - pcurrenttlv = - (struct mrvlietypes_data *) (pcurrenttlv->Data + tlvlen); - } /* while */ -} - -/** * @brief Interpret a BSS scan response returned from the firmware * * Parse the various fixed fields and IEs passed back for a a BSS probe @@ -899,67 +931,49 @@ void wlan_ret_802_11_scan_get_tlv_ptrs(struct mrvlietypes_data * ptlv, static int libertas_process_bss(struct bss_descriptor * bss, u8 ** pbeaconinfo, int *bytesleft) { - enum ieeetypes_elementid elemID; struct ieeetypes_fhparamset *pFH; struct ieeetypes_dsparamset *pDS; struct ieeetypes_cfparamset *pCF; struct ieeetypes_ibssparamset *pibss; - struct ieeetypes_capinfo *pcap; - struct WLAN_802_11_FIXED_IEs fixedie; - u8 *pcurrentptr; - u8 *pRate; - u8 elemlen; - u8 bytestocopy; - u8 ratesize; - u16 beaconsize; - u8 founddatarateie; - int bytesleftforcurrentbeacon; - int ret; - - struct IE_WPA *pIe; - const u8 oui01[4] = { 0x00, 0x50, 0xf2, 0x01 }; - + DECLARE_MAC_BUF(mac); struct ieeetypes_countryinfoset *pcountryinfo; + u8 *pos, *end, *p; + u8 n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; + u16 beaconsize = 0; + int ret; - lbs_deb_enter(LBS_DEB_ASSOC); - - founddatarateie = 0; - ratesize = 0; - beaconsize = 0; + lbs_deb_enter(LBS_DEB_SCAN); if (*bytesleft >= sizeof(beaconsize)) { /* Extract & convert beacon size from the command buffer */ - beaconsize = le16_to_cpup((void *)*pbeaconinfo); + beaconsize = le16_to_cpu(get_unaligned((u16 *)*pbeaconinfo)); *bytesleft -= sizeof(beaconsize); *pbeaconinfo += sizeof(beaconsize); } if (beaconsize == 0 || beaconsize > *bytesleft) { - *pbeaconinfo += *bytesleft; *bytesleft = 0; - - return -1; + ret = -1; + goto done; } /* Initialize the current working beacon pointer for this BSS iteration */ - pcurrentptr = *pbeaconinfo; + pos = *pbeaconinfo; + end = pos + beaconsize; /* Advance the return beacon pointer past the current beacon */ *pbeaconinfo += beaconsize; *bytesleft -= beaconsize; - bytesleftforcurrentbeacon = beaconsize; - - memcpy(bss->bssid, pcurrentptr, ETH_ALEN); - lbs_deb_scan("process_bss: AP BSSID " MAC_FMT "\n", MAC_ARG(bss->bssid)); + memcpy(bss->bssid, pos, ETH_ALEN); + lbs_deb_scan("process_bss: AP BSSID %s\n", print_mac(mac, bss->bssid)); + pos += ETH_ALEN; - pcurrentptr += ETH_ALEN; - bytesleftforcurrentbeacon -= ETH_ALEN; - - if (bytesleftforcurrentbeacon < 12) { + 
if ((end - pos) < 12) { lbs_deb_scan("process_bss: Not enough bytes left\n"); - return -1; + ret = -1; + goto done; } /* @@ -968,85 +982,61 @@ static int libertas_process_bss(struct bss_descriptor * bss, */ /* RSSI is 1 byte long */ - bss->rssi = *pcurrentptr; - lbs_deb_scan("process_bss: RSSI=%02X\n", *pcurrentptr); - pcurrentptr += 1; - bytesleftforcurrentbeacon -= 1; + bss->rssi = *pos; + lbs_deb_scan("process_bss: RSSI=%02X\n", *pos); + pos++; /* time stamp is 8 bytes long */ - fixedie.timestamp = bss->timestamp = le64_to_cpup((void *)pcurrentptr); - pcurrentptr += 8; - bytesleftforcurrentbeacon -= 8; + pos += 8; /* beacon interval is 2 bytes long */ - fixedie.beaconinterval = bss->beaconperiod = le16_to_cpup((void *)pcurrentptr); - pcurrentptr += 2; - bytesleftforcurrentbeacon -= 2; + bss->beaconperiod = le16_to_cpup((void *) pos); + pos += 2; /* capability information is 2 bytes long */ - memcpy(&fixedie.capabilities, pcurrentptr, 2); - lbs_deb_scan("process_bss: fixedie.capabilities=0x%X\n", - fixedie.capabilities); - pcap = (struct ieeetypes_capinfo *) & fixedie.capabilities; - memcpy(&bss->cap, pcap, sizeof(struct ieeetypes_capinfo)); - pcurrentptr += 2; - bytesleftforcurrentbeacon -= 2; - - /* rest of the current buffer are IE's */ - lbs_deb_scan("process_bss: IE length for this AP = %d\n", - bytesleftforcurrentbeacon); + bss->capability = le16_to_cpup((void *) pos); + lbs_deb_scan("process_bss: capabilities = 0x%4X\n", bss->capability); + pos += 2; - lbs_dbg_hex("process_bss: IE info", (u8 *) pcurrentptr, - bytesleftforcurrentbeacon); - - if (pcap->privacy) { + if (bss->capability & WLAN_CAPABILITY_PRIVACY) lbs_deb_scan("process_bss: AP WEP enabled\n"); - bss->privacy = wlan802_11privfilter8021xWEP; - } else { - bss->privacy = wlan802_11privfilteracceptall; - } - - if (pcap->ibss == 1) { + if (bss->capability & WLAN_CAPABILITY_IBSS) bss->mode = IW_MODE_ADHOC; - } else { + else bss->mode = IW_MODE_INFRA; - } + + /* rest of the current buffer are IE's */ + lbs_deb_scan("process_bss: IE length for this AP = %zd\n", end - pos); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos); /* process variable IE */ - while (bytesleftforcurrentbeacon >= 2) { - elemID = (enum ieeetypes_elementid) (*((u8 *) pcurrentptr)); - elemlen = *((u8 *) pcurrentptr + 1); + while (pos <= end - 2) { + struct ieee80211_info_element * elem = + (struct ieee80211_info_element *) pos; - if (bytesleftforcurrentbeacon < elemlen) { + if (pos + elem->len > end) { lbs_deb_scan("process_bss: error in processing IE, " "bytes left < IE length\n"); - bytesleftforcurrentbeacon = 0; - continue; + break; } - switch (elemID) { - case SSID: - bss->ssid_len = elemlen; - memcpy(bss->ssid, (pcurrentptr + 2), elemlen); + switch (elem->id) { + case MFIE_TYPE_SSID: + bss->ssid_len = elem->len; + memcpy(bss->ssid, elem->data, elem->len); lbs_deb_scan("ssid '%s', ssid length %u\n", escape_essid(bss->ssid, bss->ssid_len), bss->ssid_len); break; - case SUPPORTED_RATES: - memcpy(bss->datarates, (pcurrentptr + 2), elemlen); - memmove(bss->libertas_supported_rates, (pcurrentptr + 2), - elemlen); - ratesize = elemlen; - founddatarateie = 1; + case MFIE_TYPE_RATES: + n_basic_rates = min_t(u8, MAX_RATES, elem->len); + memcpy(bss->rates, elem->data, n_basic_rates); + got_basic_rates = 1; break; - case EXTRA_IE: - lbs_deb_scan("process_bss: EXTRA_IE Found!\n"); - break; - - case FH_PARAM_SET: - pFH = (struct ieeetypes_fhparamset *) pcurrentptr; + case MFIE_TYPE_FH_SET: + pFH = (struct ieeetypes_fhparamset *) pos; 
memmove(&bss->phyparamset.fhparamset, pFH, sizeof(struct ieeetypes_fhparamset)); #if 0 /* I think we can store these LE */ @@ -1055,21 +1045,21 @@ static int libertas_process_bss(struct bss_descriptor * bss, #endif break; - case DS_PARAM_SET: - pDS = (struct ieeetypes_dsparamset *) pcurrentptr; + case MFIE_TYPE_DS_SET: + pDS = (struct ieeetypes_dsparamset *) pos; bss->channel = pDS->currentchan; memcpy(&bss->phyparamset.dsparamset, pDS, sizeof(struct ieeetypes_dsparamset)); break; - case CF_PARAM_SET: - pCF = (struct ieeetypes_cfparamset *) pcurrentptr; + case MFIE_TYPE_CF_SET: + pCF = (struct ieeetypes_cfparamset *) pos; memcpy(&bss->ssparamset.cfparamset, pCF, sizeof(struct ieeetypes_cfparamset)); break; - case IBSS_PARAM_SET: - pibss = (struct ieeetypes_ibssparamset *) pcurrentptr; + case MFIE_TYPE_IBSS_SET: + pibss = (struct ieeetypes_ibssparamset *) pos; bss->atimwindow = le32_to_cpu(pibss->atimwindow); memmove(&bss->ssparamset.ibssparamset, pibss, sizeof(struct ieeetypes_ibssparamset)); @@ -1079,9 +1069,8 @@ static int libertas_process_bss(struct bss_descriptor * bss, #endif break; - /* Handle Country Info IE */ - case COUNTRY_INFO: - pcountryinfo = (struct ieeetypes_countryinfoset *) pcurrentptr; + case MFIE_TYPE_COUNTRY: + pcountryinfo = (struct ieeetypes_countryinfoset *) pos; if (pcountryinfo->len < sizeof(pcountryinfo->countrycode) || pcountryinfo->len > 254) { lbs_deb_scan("process_bss: 11D- Err " @@ -1094,70 +1083,63 @@ static int libertas_process_bss(struct bss_descriptor * bss, memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2); - lbs_dbg_hex("process_bss: 11D- CountryInfo:", + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo", (u8 *) pcountryinfo, (u32) (pcountryinfo->len + 2)); break; - case EXTENDED_SUPPORTED_RATES: - /* - * only process extended supported rate - * if data rate is already found. - * data rate IE should come before + case MFIE_TYPE_RATES_EX: + /* only process extended supported rate if data rate is + * already found. 
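/*
 * The loop above walks the variable part of the beacon as generic 802.11
 * information elements: a 1-byte element ID, a 1-byte length, then the
 * payload.  A self-contained sketch of that walk using the standard element
 * IDs (0 = SSID, 1 = Supported Rates); the struct name and buffer contents
 * here are illustrative only, not the driver's:
 */
#include <stdint.h>
#include <stdio.h>

struct info_element {
	uint8_t id;
	uint8_t len;
	uint8_t data[];
};

static void walk_ies(const uint8_t *pos, const uint8_t *end)
{
	while (end - pos >= 2) {
		const struct info_element *elem = (const void *)pos;

		if (elem->len > end - pos - 2)	/* truncated element: stop */
			break;

		if (elem->id == 0)
			printf("SSID, %u bytes\n", elem->len);
		else if (elem->id == 1)		/* rates, 500 kb/s units, bit 7 = basic */
			printf("%u supported rates\n", elem->len);

		pos += 2 + elem->len;		/* skip header and payload */
	}
}

int main(void)
{
	/* SSID "ap" followed by two basic rates (1 and 2 Mb/s) */
	const uint8_t beacon_ies[] = { 0, 2, 'a', 'p', 1, 2, 0x82, 0x84 };

	walk_ies(beacon_ies, beacon_ies + sizeof(beacon_ies));
	return 0;
}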
Data rate IE should come before * extended supported rate IE */ - if (founddatarateie) { - if ((elemlen + ratesize) > WLAN_SUPPORTED_RATES) { - bytestocopy = - (WLAN_SUPPORTED_RATES - ratesize); - } else { - bytestocopy = elemlen; - } - - pRate = (u8 *) bss->datarates; - pRate += ratesize; - memmove(pRate, (pcurrentptr + 2), bytestocopy); - pRate = (u8 *) bss->libertas_supported_rates; - pRate += ratesize; - memmove(pRate, (pcurrentptr + 2), bytestocopy); - } - break; - - case VENDOR_SPECIFIC_221: -#define IE_ID_LEN_FIELDS_BYTES 2 - pIe = (struct IE_WPA *)pcurrentptr; - - if (memcmp(pIe->oui, oui01, sizeof(oui01))) + if (!got_basic_rates) break; - bss->wpa_ie_len = min(elemlen + IE_ID_LEN_FIELDS_BYTES, - MAX_WPA_IE_LEN); - memcpy(bss->wpa_ie, pcurrentptr, bss->wpa_ie_len); - lbs_dbg_hex("process_bss: WPA IE", bss->wpa_ie, elemlen); + n_ex_rates = elem->len; + if (n_basic_rates + n_ex_rates > MAX_RATES) + n_ex_rates = MAX_RATES - n_basic_rates; + + p = bss->rates + n_basic_rates; + memcpy(p, elem->data, n_ex_rates); break; - case WPA2_IE: - pIe = (struct IE_WPA *)pcurrentptr; - bss->rsn_ie_len = min(elemlen + IE_ID_LEN_FIELDS_BYTES, - MAX_WPA_IE_LEN); - memcpy(bss->rsn_ie, pcurrentptr, bss->rsn_ie_len); - lbs_dbg_hex("process_bss: RSN_IE", bss->rsn_ie, elemlen); + + case MFIE_TYPE_GENERIC: + if (elem->len >= 4 && + elem->data[0] == 0x00 && + elem->data[1] == 0x50 && + elem->data[2] == 0xf2 && + elem->data[3] == 0x01) { + bss->wpa_ie_len = min(elem->len + 2, + MAX_WPA_IE_LEN); + memcpy(bss->wpa_ie, elem, bss->wpa_ie_len); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: WPA IE", bss->wpa_ie, + elem->len); + } else if (elem->len >= MARVELL_MESH_IE_LENGTH && + elem->data[0] == 0x00 && + elem->data[1] == 0x50 && + elem->data[2] == 0x43 && + elem->data[3] == 0x04) { + bss->mesh = 1; + } break; - case TIM: + + case MFIE_TYPE_RSN: + bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); + memcpy(bss->rsn_ie, elem, bss->rsn_ie_len); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", bss->rsn_ie, elem->len); break; - case CHALLENGE_TEXT: + default: break; } - pcurrentptr += elemlen + 2; - - /* need to account for IE ID and IE len */ - bytesleftforcurrentbeacon -= (elemlen + 2); - - } /* while (bytesleftforcurrentbeacon > 2) */ + pos += elem->len + 2; + } /* Timestamp */ bss->last_scanned = jiffies; + libertas_unset_basic_rate_flags(bss->rates, sizeof(bss->rates)); ret = 0; @@ -1167,40 +1149,28 @@ done: } /** - * @brief Compare two SSIDs - * - * @param ssid1 A pointer to ssid to compare - * @param ssid2 A pointer to ssid to compare - * - * @return 0--ssid is same, otherwise is different - */ -int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len) -{ - if (ssid1_len != ssid2_len) - return -1; - - return memcmp(ssid1, ssid2, ssid1_len); -} - -/** * @brief This function finds a specific compatible BSSID in the scan list * + * Used in association code + * * @param adapter A pointer to wlan_adapter * @param bssid BSSID to find in the scan list * @param mode Network mode: Infrastructure or IBSS * * @return index in BSSID list, or error return code (< 0) */ -struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, +struct bss_descriptor *libertas_find_bssid_in_list(wlan_adapter * adapter, u8 * bssid, u8 mode) { struct bss_descriptor * iter_bss; struct bss_descriptor * found_bss = NULL; + lbs_deb_enter(LBS_DEB_SCAN); + if (!bssid) - return NULL; + goto out; - lbs_dbg_hex("libertas_find_BSSID_in_list: looking for ", + lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN); /* 
Look through the scan table for a compatible match. The loop will @@ -1225,12 +1195,16 @@ struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, } mutex_unlock(&adapter->lock); +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); return found_bss; } /** * @brief This function finds ssid in ssid list. * + * Used in association code + * * @param adapter A pointer to wlan_adapter * @param ssid SSID to find in the list * @param bssid BSSID to qualify the SSID selection (if provided) @@ -1247,6 +1221,8 @@ struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, struct bss_descriptor * found_bss = NULL; struct bss_descriptor * tmp_oldest = NULL; + lbs_deb_enter(LBS_DEB_SCAN); + mutex_lock(&adapter->lock); list_for_each_entry (iter_bss, &adapter->network_list, list) { @@ -1291,6 +1267,7 @@ struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, out: mutex_unlock(&adapter->lock); + lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); return found_bss; } @@ -1304,13 +1281,15 @@ out: * * @return index in BSSID list */ -struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, +static struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, u8 mode) { u8 bestrssi = 0; struct bss_descriptor * iter_bss; struct bss_descriptor * best_bss = NULL; + lbs_deb_enter(LBS_DEB_SCAN); + mutex_lock(&adapter->lock); list_for_each_entry (iter_bss, &adapter->network_list, list) { @@ -1335,12 +1314,15 @@ struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, } mutex_unlock(&adapter->lock); + lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss); return best_bss; } /** * @brief Find the AP with specific ssid in the scan list * + * Used from association worker. + * * @param priv A pointer to wlan_private structure * @param pSSID A pointer to AP's ssid * @@ -1353,11 +1335,11 @@ int libertas_find_best_network_ssid(wlan_private * priv, int ret = -1; struct bss_descriptor * found; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter(LBS_DEB_SCAN); wlan_scan_networks(priv, NULL, 1); if (adapter->surpriseremoved) - return -1; + goto out; wait_event_interruptible(adapter->cmd_pending, !adapter->nr_cmd_pending); @@ -1369,6 +1351,7 @@ int libertas_find_best_network_ssid(wlan_private * priv, ret = 0; } +out: lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } @@ -1391,7 +1374,10 @@ int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_SCAN); - wlan_scan_networks(priv, NULL, 0); + if (!delayed_work_pending(&priv->scan_work)) { + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(50)); + } if (adapter->surpriseremoved) return -1; @@ -1400,10 +1386,17 @@ int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, return 0; } + /** * @brief Send a scan command for all available channels filtered on a spec * + * Used in association code and from debugfs + * * @param priv A pointer to wlan_private structure + * @param ssid A pointer to the SSID to scan for + * @param ssid_len Length of the SSID + * @param clear_ssid Should existing scan results with this SSID + * be cleared? 
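/*
 * libertas_find_best_ssid_in_list() above shows the usual pattern for
 * selecting an entry from a kernel linked list that is shared with the
 * scan-response path: take the mutex, walk the list with
 * list_for_each_entry(), and remember the best candidate.  A small sketch of
 * the same pattern with an illustrative type (example_bss); a higher rssi
 * value is simply taken as "better" here, and the caller must ensure the
 * entry stays valid once the lock is dropped:
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct example_bss {
	long rssi;
	struct list_head list;
};

static struct example_bss *pick_strongest(struct list_head *networks,
					  struct mutex *lock)
{
	struct example_bss *bss, *best = NULL;

	mutex_lock(lock);
	list_for_each_entry(bss, networks, list) {
		if (!best || bss->rssi > best->rssi)
			best = bss;
	}
	mutex_unlock(lock);

	return best;
}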
* @param prequestedssid A pointer to AP's ssid * @param keeppreviousscan Flag used to save/clear scan table before scan * @@ -1416,7 +1409,8 @@ int libertas_send_specific_ssid_scan(wlan_private * priv, struct wlan_ioctl_user_scan_cfg scancfg; int ret = 0; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s', clear %d", + escape_essid(ssid, ssid_len), clear_ssid); if (!ssid_len) goto out; @@ -1427,47 +1421,27 @@ int libertas_send_specific_ssid_scan(wlan_private * priv, scancfg.clear_ssid = clear_ssid; wlan_scan_networks(priv, &scancfg, 1); - if (adapter->surpriseremoved) - return -1; + if (adapter->surpriseremoved) { + ret = -1; + goto out; + } wait_event_interruptible(adapter->cmd_pending, !adapter->nr_cmd_pending); out: - lbs_deb_leave(LBS_DEB_ASSOC); + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } -/** - * @brief scan an AP with specific BSSID - * - * @param priv A pointer to wlan_private structure - * @param bssid A pointer to AP's bssid - * @param keeppreviousscan Flag used to save/clear scan table before scan - * - * @return 0-success, otherwise fail - */ -int libertas_send_specific_bssid_scan(wlan_private * priv, u8 * bssid, u8 clear_bssid) -{ - struct wlan_ioctl_user_scan_cfg scancfg; - - lbs_deb_enter(LBS_DEB_ASSOC); - if (bssid == NULL) - goto out; - memset(&scancfg, 0x00, sizeof(scancfg)); - memcpy(scancfg.bssid, bssid, ETH_ALEN); - scancfg.clear_bssid = clear_bssid; - wlan_scan_networks(priv, &scancfg, 1); - if (priv->adapter->surpriseremoved) - return -1; - wait_event_interruptible(priv->adapter->cmd_pending, - !priv->adapter->nr_cmd_pending); +/*********************************************************************/ +/* */ +/* Support for Wireless Extensions */ +/* */ +/*********************************************************************/ -out: - lbs_deb_leave(LBS_DEB_ASSOC); - return 0; -} +#define MAX_CUSTOM_LEN 64 static inline char *libertas_translate_scan(wlan_private *priv, char *start, char *stop, @@ -1483,10 +1457,13 @@ static inline char *libertas_translate_scan(wlan_private *priv, #define RSSI_DIFF ((u8)(PERFECT_RSSI - WORST_RSSI)) u8 rssi; + lbs_deb_enter(LBS_DEB_SCAN); + cfp = libertas_find_cfp_by_band_and_channel(adapter, 0, bss->channel); if (!cfp) { lbs_deb_scan("Invalid channel number %d\n", bss->channel); - return NULL; + start = NULL; + goto out; } /* First entry *MUST* be the AP BSSID */ @@ -1550,7 +1527,7 @@ static inline char *libertas_translate_scan(wlan_private *priv, /* Add encryption capability */ iwe.cmd = SIOCGIWENCODE; - if (bss->privacy) { + if (bss->capability & WLAN_CAPABILITY_PRIVACY) { iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; } else { iwe.u.data.flags = IW_ENCODE_DISABLED; @@ -1565,12 +1542,9 @@ static inline char *libertas_translate_scan(wlan_private *priv, iwe.u.bitrate.disabled = 0; iwe.u.bitrate.value = 0; - for (j = 0; j < sizeof(bss->libertas_supported_rates); j++) { - u8 rate = bss->libertas_supported_rates[j]; - if (rate == 0) - break; /* no more rates */ - /* Bit rate given in 500 kb/s units (+ 0x80) */ - iwe.u.bitrate.value = (rate & 0x7f) * 500000; + for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { + /* Bit rate given in 500 kb/s units */ + iwe.u.bitrate.value = bss->rates[j] * 500000; current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); } @@ -1605,11 +1579,25 @@ static inline char *libertas_translate_scan(wlan_private *priv, start = iwe_stream_add_point(start, stop, &iwe, buf); } + if (bss->mesh) { + char custom[MAX_CUSTOM_LEN]; + 
char *p = custom; + + iwe.cmd = IWEVCUSTOM; + p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + "mesh-type: olpc"); + iwe.u.data.length = p - custom; + if (iwe.u.data.length) + start = iwe_stream_add_point(start, stop, &iwe, custom); + } + +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start); return start; } /** - * @brief Retrieve the scan table entries via wireless tools IOCTL call + * @brief Handle Retrieve scan table ioctl * * @param dev A pointer to net_device structure * @param info A pointer to iw_request_info structure @@ -1630,16 +1618,12 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, struct bss_descriptor * iter_bss; struct bss_descriptor * safe; - lbs_deb_enter(LBS_DEB_ASSOC); - - /* If we've got an uncompleted scan, schedule the next part */ - if (!adapter->nr_cmd_pending && adapter->last_scanned_channel) - wlan_scan_networks(priv, NULL, 0); + lbs_deb_enter(LBS_DEB_SCAN); /* Update RSSI if current BSS is a locally created ad-hoc BSS */ if ((adapter->mode == IW_MODE_ADHOC) && adapter->adhoccreate) { - libertas_prepare_and_send_command(priv, cmd_802_11_rssi, 0, - cmd_option_waitforrsp, 0, NULL); + libertas_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, + CMD_OPTION_WAITFORRSP, 0, NULL); } mutex_lock(&adapter->lock); @@ -1652,6 +1636,10 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, break; } + /* For mesh device, list only mesh networks */ + if (dev == priv->mesh_dev && !iter_bss->mesh) + continue; + /* Prune old an old scan result */ stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; if (time_after(jiffies, stale_time)) { @@ -1672,19 +1660,27 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, dwrq->length = (ev - extra); dwrq->flags = 0; - lbs_deb_leave(LBS_DEB_ASSOC); + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", err); return err; } + + + +/*********************************************************************/ +/* */ +/* Command execution */ +/* */ +/*********************************************************************/ + + /** * @brief Prepare a scan command to be sent to the firmware * - * Use the wlan_scan_cmd_config sent to the command processing module in - * the libertas_prepare_and_send_command to configure a cmd_ds_802_11_scan command - * struct to send to firmware. + * Called from libertas_prepare_and_send_command() in cmd.c * - * The fixed fields specifying the BSS type and BSSID filters as well as a - * variable number/length of TLVs are sent in the command to firmware. + * Sends a fixed lenght data part (specifying the BSS type and BSSID filters) + * as well as a variable number/length of TLVs to the firmware. 
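/*
 * libertas_cmd_80211_scan() below copies a fixed part (BSS type plus a BSSID
 * filter) followed by a pre-built buffer of type/length/value elements, and
 * the command size is the fixed part plus the TLV buffer plus the command
 * header.  A standalone sketch of building such a buffer with explicit
 * little-endian TLV headers; the type code 0x0102 and the field sizes are
 * made up for the example:
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;		/* little-endian, host-order independent */
	p[1] = v >> 8;
}

static uint8_t *append_tlv(uint8_t *pos, uint16_t type,
			   const void *data, uint16_t len)
{
	put_le16(pos, type);		/* 2-byte type */
	put_le16(pos + 2, len);		/* 2-byte length */
	memcpy(pos + 4, data, len);	/* payload */
	return pos + 4 + len;
}

int main(void)
{
	uint8_t payload[128];
	uint8_t *pos = payload;
	uint8_t numprobes = 2;

	memset(payload, 0, sizeof(payload));
	pos += 1 + 6;			/* fixed part: BSS type + BSSID filter */
	pos = append_tlv(pos, 0x0102, &numprobes, sizeof(numprobes));

	printf("scan command payload: %zu bytes\n", (size_t)(pos - payload));
	return 0;
}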
* * @param priv A pointer to wlan_private structure * @param cmd A pointer to cmd_ds_command structure to be sent to @@ -1693,36 +1689,31 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, * to set the fields/TLVs for the command sent to firmware * * @return 0 or -1 - * - * @sa wlan_scan_create_channel_list */ int libertas_cmd_80211_scan(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf) { struct cmd_ds_802_11_scan *pscan = &cmd->params.scan; - struct wlan_scan_cmd_config *pscancfg; + struct wlan_scan_cmd_config *pscancfg = pdata_buf; - lbs_deb_enter(LBS_DEB_ASSOC); - - pscancfg = pdata_buf; + lbs_deb_enter(LBS_DEB_SCAN); /* Set fixed field variables in scan command */ pscan->bsstype = pscancfg->bsstype; - memcpy(pscan->BSSID, pscancfg->bssid, sizeof(pscan->BSSID)); + memcpy(pscan->bssid, pscancfg->bssid, ETH_ALEN); memcpy(pscan->tlvbuffer, pscancfg->tlvbuffer, pscancfg->tlvbufferlen); - cmd->command = cpu_to_le16(cmd_802_11_scan); + cmd->command = cpu_to_le16(CMD_802_11_SCAN); /* size is equal to the sizeof(fixed portions) + the TLV len + header */ - cmd->size = cpu_to_le16(sizeof(pscan->bsstype) - + sizeof(pscan->BSSID) - + pscancfg->tlvbufferlen + S_DS_GEN); + cmd->size = cpu_to_le16(sizeof(pscan->bsstype) + ETH_ALEN + + pscancfg->tlvbufferlen + S_DS_GEN); - lbs_deb_scan("SCAN_CMD: command=%x, size=%x, seqnum=%x\n", + lbs_deb_scan("SCAN_CMD: command 0x%04x, size %d, seqnum %d\n", le16_to_cpu(cmd->command), le16_to_cpu(cmd->size), le16_to_cpu(cmd->seqnum)); - lbs_deb_leave(LBS_DEB_ASSOC); + lbs_deb_leave(LBS_DEB_SCAN); return 0; } @@ -1741,6 +1732,8 @@ static inline int is_same_network(struct bss_descriptor *src, /** * @brief This function handles the command response of scan * + * Called from handle_cmd_response() in cmdrespc. + * * The response buffer for the scan command has the following * memory layout: * @@ -1766,8 +1759,6 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) { wlan_adapter *adapter = priv->adapter; struct cmd_ds_802_11_scan_rsp *pscan; - struct mrvlietypes_data *ptlv; - struct mrvlietypes_tsftimestamp *ptsftlv; struct bss_descriptor * iter_bss; struct bss_descriptor * safe; u8 *pbssinfo; @@ -1777,7 +1768,7 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) int tlvbufsize; int ret; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter(LBS_DEB_SCAN); /* Prune old entries from scan table */ list_for_each_entry_safe (iter_bss, safe, &adapter->network_list, list) { @@ -1798,10 +1789,10 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) goto done; } - bytesleft = le16_to_cpu(pscan->bssdescriptsize); + bytesleft = le16_to_cpu(get_unaligned((u16*)&pscan->bssdescriptsize)); lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft); - scanrespsize = le16_to_cpu(resp->size); + scanrespsize = le16_to_cpu(get_unaligned((u16*)&resp->size)); lbs_deb_scan("SCAN_RESP: returned %d AP before parsing\n", pscan->nr_sets); @@ -1816,11 +1807,6 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) + sizeof(pscan->nr_sets) + S_DS_GEN); - ptlv = (struct mrvlietypes_data *) (pscan->bssdesc_and_tlvbuffer + bytesleft); - - /* Search the TLV buffer space in the scan response for any valid TLVs */ - wlan_ret_802_11_scan_get_tlv_ptrs(ptlv, tlvbufsize, &ptsftlv); - /* * Process each scan response returned (pscan->nr_sets). 
Save * the information in the newbssentry and then insert into the @@ -1831,6 +1817,7 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) struct bss_descriptor new; struct bss_descriptor * found = NULL; struct bss_descriptor * oldest = NULL; + DECLARE_MAC_BUF(mac); /* Process the data fields and IEs returned for this BSS */ memset(&new, 0, sizeof (struct bss_descriptor)); @@ -1869,19 +1856,8 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) continue; } - lbs_deb_scan("SCAN_RESP: BSSID = " MAC_FMT "\n", - new.bssid[0], new.bssid[1], new.bssid[2], - new.bssid[3], new.bssid[4], new.bssid[5]); - - /* - * If the TSF TLV was appended to the scan results, save the - * this entries TSF value in the networktsf field. The - * networktsf is the firmware's TSF value at the time the - * beacon or probe response was received. - */ - if (ptsftlv) { - new.networktsf = le64_to_cpup(&ptsftlv->tsftable[idx]); - } + lbs_deb_scan("SCAN_RESP: BSSID = %s\n", + print_mac(mac, new.bssid)); /* Copy the locally created newbssentry to the scan table */ memcpy(found, &new, offsetof(struct bss_descriptor, list)); diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h index bd019e5ff1eb..c29c031bef8c 100644 --- a/drivers/net/wireless/libertas/scan.h +++ b/drivers/net/wireless/libertas/scan.h @@ -140,8 +140,7 @@ struct bss_descriptor { u8 ssid[IW_ESSID_MAX_SIZE + 1]; u8 ssid_len; - /* WEP encryption requirement */ - u32 privacy; + u16 capability; /* receive signal strength in dBm */ long rssi; @@ -152,18 +151,16 @@ struct bss_descriptor { u32 atimwindow; + /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */ u8 mode; - u8 libertas_supported_rates[WLAN_SUPPORTED_RATES]; - __le64 timestamp; //!< TSF value included in the beacon/probe response + /* zero-terminated array of supported data rates */ + u8 rates[MAX_RATES + 1]; + unsigned long last_scanned; union ieeetypes_phyparamset phyparamset; union IEEEtypes_ssparamset ssparamset; - struct ieeetypes_capinfo cap; - u8 datarates[WLAN_SUPPORTED_RATES]; - - u64 networktsf; //!< TSF timestamp from the current firmware TSF struct ieeetypes_countryinfofullset countryinfo; @@ -172,34 +169,31 @@ struct bss_descriptor { u8 rsn_ie[MAX_WPA_IE_LEN]; size_t rsn_ie_len; + u8 mesh; + struct list_head list; }; -extern int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); +int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, u8 *ssid, u8 ssid_len, u8 * bssid, u8 mode, int channel); -struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, - u8 mode); - -extern struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, +struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, u8 * bssid, u8 mode); int libertas_find_best_network_ssid(wlan_private * priv, u8 *out_ssid, u8 *out_ssid_len, u8 preferred_mode, u8 *out_mode); -extern int libertas_send_specific_ssid_scan(wlan_private * priv, u8 *ssid, +int libertas_send_specific_ssid_scan(wlan_private * priv, u8 *ssid, u8 ssid_len, u8 clear_ssid); -extern int libertas_send_specific_bssid_scan(wlan_private * priv, - u8 * bssid, u8 clear_bssid); -extern int libertas_cmd_80211_scan(wlan_private * priv, +int libertas_cmd_80211_scan(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_ret_80211_scan(wlan_private * priv, +int libertas_ret_80211_scan(wlan_private * 
priv, struct cmd_ds_command *resp); int wlan_scan_networks(wlan_private * priv, @@ -211,9 +205,11 @@ struct ifreq; struct iw_point; struct iw_param; struct iw_request_info; -extern int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, +int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra); -extern int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, +int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra); +void libertas_scan_worker(struct work_struct *work); + #endif /* _WLAN_SCAN_H */ diff --git a/drivers/net/wireless/libertas/thread.h b/drivers/net/wireless/libertas/thread.h deleted file mode 100644 index b1f34d92ff3e..000000000000 --- a/drivers/net/wireless/libertas/thread.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef __WLAN_THREAD_H_ -#define __WLAN_THREAD_H_ - -#include <linux/kthread.h> - -struct wlan_thread { - struct task_struct *task; - wait_queue_head_t waitq; - pid_t pid; - void *priv; -}; - -static inline void wlan_activate_thread(struct wlan_thread * thr) -{ - /** Record the thread pid */ - thr->pid = current->pid; - - /** Initialize the wait queue */ - init_waitqueue_head(&thr->waitq); -} - -static inline void wlan_deactivate_thread(struct wlan_thread * thr) -{ - lbs_deb_enter(LBS_DEB_THREAD); - - thr->pid = 0; - - lbs_deb_leave(LBS_DEB_THREAD); -} - -static inline void wlan_create_thread(int (*wlanfunc) (void *), - struct wlan_thread * thr, char *name) -{ - thr->task = kthread_run(wlanfunc, thr, "%s", name); -} - -static inline int wlan_terminate_thread(struct wlan_thread * thr) -{ - lbs_deb_enter(LBS_DEB_THREAD); - - /* Check if the thread is active or not */ - if (!thr->pid) { - printk(KERN_ERR "Thread does not exist\n"); - return -1; - } - kthread_stop(thr->task); - - lbs_deb_leave(LBS_DEB_THREAD); - return 0; -} - -#endif diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c index 17c437635a00..fbec06c10dd7 100644 --- a/drivers/net/wireless/libertas/tx.c +++ b/drivers/net/wireless/libertas/tx.c @@ -58,7 +58,6 @@ static u32 convert_radiotap_rate_to_mv(u8 rate) */ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) { - wlan_adapter *adapter = priv->adapter; int ret = 0; struct txpd localtxpd; struct txpd *plocaltxpd = &localtxpd; @@ -72,10 +71,6 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) if (priv->adapter->surpriseremoved) return -1; - if ((priv->adapter->debugmode & MRVDRV_DEBUG_TX_PATH) != 0) - lbs_dbg_hex("TX packet: ", skb->data, - min_t(unsigned int, skb->len, 100)); - if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) { lbs_deb_tx("tx err: skb length %d 0 or > %zd\n", skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE); @@ -90,11 +85,8 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) /* offset of actual data */ plocaltxpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); - /* TxCtrl set by user or default */ - plocaltxpd->tx_control = cpu_to_le32(adapter->pkttxctrl); - p802x_hdr = skb->data; - if (priv->adapter->radiomode == WLAN_RADIOMODE_RADIOTAP) { + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) { /* locate radiotap header */ pradiotap_hdr = (struct tx_radiotap_hdr *)skb->data; @@ -103,7 +95,6 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) new_rate = convert_radiotap_rate_to_mv(pradiotap_hdr->rate); if (new_rate != 0) { /* use new tx_control[4:0] */ - new_rate |= 
(adapter->pkttxctrl & ~0x1f); plocaltxpd->tx_control = cpu_to_le32(new_rate); } @@ -115,12 +106,12 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) } /* copy destination address from 802.3 or 802.11 header */ - if (priv->adapter->linkmode == WLAN_LINKMODE_802_11) + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) memcpy(plocaltxpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); else memcpy(plocaltxpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); - lbs_dbg_hex("txpd", (u8 *) plocaltxpd, sizeof(struct txpd)); + lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) plocaltxpd, sizeof(struct txpd)); if (IS_MESH_FRAME(skb)) { plocaltxpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); @@ -130,7 +121,7 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) ptr += sizeof(struct txpd); - lbs_dbg_hex("Tx Data", (u8 *) p802x_hdr, le16_to_cpu(plocaltxpd->tx_packet_length)); + lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(plocaltxpd->tx_packet_length)); memcpy(ptr, p802x_hdr, le16_to_cpu(plocaltxpd->tx_packet_length)); ret = priv->hw_host_to_card(priv, MVMS_DAT, priv->adapter->tmptxbuf, @@ -153,13 +144,14 @@ done: priv->stats.tx_errors++; } - if (!ret && priv->adapter->radiomode == WLAN_RADIOMODE_RADIOTAP) { + if (!ret && priv->adapter->monitormode != WLAN_MONITOR_OFF) { /* Keep the skb to echo it back once Tx feedback is received from FW */ skb_orphan(skb); /* stop processing outgoing pkts */ netif_stop_queue(priv->dev); - netif_stop_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); /* freeze any packets already in our queues */ priv->adapter->TxLockFlag = 1; } else { @@ -198,10 +190,12 @@ static void wlan_tx_queue(wlan_private *priv, struct sk_buff *skb) adapter->tx_queue_ps[adapter->tx_queue_idx++] = skb; if (adapter->tx_queue_idx == NR_TX_QUEUE) { netif_stop_queue(priv->dev); - netif_stop_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); } else { netif_start_queue(priv->dev); - netif_start_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_start_queue(priv->mesh_dev); } spin_unlock(&adapter->txqueue_lock); @@ -219,7 +213,7 @@ int libertas_process_tx(wlan_private * priv, struct sk_buff *skb) int ret = -1; lbs_deb_enter(LBS_DEB_TX); - lbs_dbg_hex("TX Data", skb->data, min_t(unsigned int, skb->len, 100)); + lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100)); if (priv->dnld_sent) { lbs_pr_alert( "TX error: dnld_sent = %d, not sending\n", @@ -258,16 +252,12 @@ void libertas_send_tx_feedback(wlan_private * priv) int txfail; int try_count; - if (adapter->radiomode != WLAN_RADIOMODE_RADIOTAP || + if (adapter->monitormode == WLAN_MONITOR_OFF || adapter->currenttxskb == NULL) return; radiotap_hdr = (struct tx_radiotap_hdr *)adapter->currenttxskb->data; - if ((adapter->debugmode & MRVDRV_DEBUG_TX_PATH) != 0) - lbs_dbg_hex("TX feedback: ", (u8 *) radiotap_hdr, - min_t(unsigned int, adapter->currenttxskb->len, 100)); - txfail = (status >> 24); #if 0 @@ -283,9 +273,10 @@ void libertas_send_tx_feedback(wlan_private * priv) libertas_upload_rx_packet(priv, adapter->currenttxskb); adapter->currenttxskb = NULL; priv->adapter->TxLockFlag = 0; - if (priv->adapter->connect_status == libertas_connected) { + if (priv->adapter->connect_status == LIBERTAS_CONNECTED) { netif_wake_queue(priv->dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_wake_queue(priv->mesh_dev); } } EXPORT_SYMBOL_GPL(libertas_send_tx_feedback); diff --git a/drivers/net/wireless/libertas/types.h 
b/drivers/net/wireless/libertas/types.h index 028e2f3b53d6..a43a5f63c879 100644 --- a/drivers/net/wireless/libertas/types.h +++ b/drivers/net/wireless/libertas/types.h @@ -7,71 +7,6 @@ #include <linux/if_ether.h> #include <asm/byteorder.h> -/** IEEE type definitions */ -enum ieeetypes_elementid { - SSID = 0, - SUPPORTED_RATES, - FH_PARAM_SET, - DS_PARAM_SET, - CF_PARAM_SET, - TIM, - IBSS_PARAM_SET, - COUNTRY_INFO = 7, - - CHALLENGE_TEXT = 16, - - EXTENDED_SUPPORTED_RATES = 50, - - VENDOR_SPECIFIC_221 = 221, - - WPA_IE = 221, - WPA2_IE = 48, - - EXTRA_IE = 133, -} __attribute__ ((packed)); - -#ifdef __BIG_ENDIAN -#define CAPINFO_MASK (~(0xda00)) -#else -#define CAPINFO_MASK (~(0x00da)) -#endif - -struct ieeetypes_capinfo { -#ifdef __BIG_ENDIAN_BITFIELD - u8 chanagility:1; - u8 pbcc:1; - u8 shortpreamble:1; - u8 privacy:1; - u8 cfpollrqst:1; - u8 cfpollable:1; - u8 ibss:1; - u8 ess:1; - u8 rsrvd1:2; - u8 dsssofdm:1; - u8 rsvrd2:1; - u8 apsd:1; - u8 shortslottime:1; - u8 rsrvd3:1; - u8 spectrummgmt:1; -#else - u8 ess:1; - u8 ibss:1; - u8 cfpollable:1; - u8 cfpollrqst:1; - u8 privacy:1; - u8 shortpreamble:1; - u8 pbcc:1; - u8 chanagility:1; - u8 spectrummgmt:1; - u8 rsrvd3:1; - u8 shortslottime:1; - u8 apsd:1; - u8 rsvrd2:1; - u8 dsssofdm:1; - u8 rsrvd1:2; -#endif -} __attribute__ ((packed)); - struct ieeetypes_cfparamset { u8 elementid; u8 len; @@ -114,7 +49,7 @@ union ieeetypes_phyparamset { } __attribute__ ((packed)); struct ieeetypes_assocrsp { - struct ieeetypes_capinfo capability; + __le16 capability; __le16 statuscode; __le16 aid; u8 iebuffer[1]; diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index 2fcc3bf21081..c6f5aa3cb465 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c @@ -22,60 +22,6 @@ /** - * the rates supported by the card - */ -static u8 libertas_wlan_data_rates[WLAN_SUPPORTED_RATES] = - { 0x02, 0x04, 0x0B, 0x16, 0x00, 0x0C, 0x12, - 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x00 -}; - -/** - * @brief Convert mw value to dbm value - * - * @param mw the value of mw - * @return the value of dbm - */ -static int mw_to_dbm(int mw) -{ - if (mw < 2) - return 0; - else if (mw < 3) - return 3; - else if (mw < 4) - return 5; - else if (mw < 6) - return 7; - else if (mw < 7) - return 8; - else if (mw < 8) - return 9; - else if (mw < 10) - return 10; - else if (mw < 13) - return 11; - else if (mw < 16) - return 12; - else if (mw < 20) - return 13; - else if (mw < 25) - return 14; - else if (mw < 32) - return 15; - else if (mw < 40) - return 16; - else if (mw < 50) - return 17; - else if (mw < 63) - return 18; - else if (mw < 79) - return 19; - else if (mw < 100) - return 20; - else - return 21; -} - -/** * @brief Find the channel frequency power info with specific channel * * @param adapter A pointer to wlan_adapter structure @@ -165,7 +111,7 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(wlan_adapter * adapter, * @option Radio Option * @return 0 --success, otherwise fail */ -int wlan_radio_ioctl(wlan_private * priv, u8 option) +static int wlan_radio_ioctl(wlan_private * priv, u8 option) { int ret = 0; wlan_adapter *adapter = priv->adapter; @@ -177,9 +123,9 @@ int wlan_radio_ioctl(wlan_private * priv, u8 option) adapter->radioon = option; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_radio_control, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RADIO_CONTROL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); } lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); @@ -187,84 
+133,31 @@ int wlan_radio_ioctl(wlan_private * priv, u8 option) } /** - * @brief Copy rates - * - * @param dest A pointer to Dest Buf - * @param src A pointer to Src Buf - * @param len The len of Src Buf - * @return Number of rates copyed - */ -static inline int copyrates(u8 * dest, int pos, u8 * src, int len) -{ - int i; - - for (i = 0; i < len && src[i]; i++, pos++) { - if (pos >= sizeof(u8) * WLAN_SUPPORTED_RATES) - break; - dest[pos] = src[i]; - } - - return pos; -} - -/** - * @brief Get active data rates + * @brief Copy active data rates based on adapter mode and status * * @param adapter A pointer to wlan_adapter structure * @param rate The buf to return the active rates - * @return The number of rates */ -static int get_active_data_rates(wlan_adapter * adapter, - u8* rates) +static void copy_active_data_rates(wlan_adapter * adapter, u8 * rates) { - int k = 0; - lbs_deb_enter(LBS_DEB_WEXT); - if (adapter->connect_status != libertas_connected) { - if (adapter->mode == IW_MODE_INFRA) { - lbs_deb_wext("infra\n"); - k = copyrates(rates, k, libertas_supported_rates, - sizeof(libertas_supported_rates)); - } else { - lbs_deb_wext("Adhoc G\n"); - k = copyrates(rates, k, libertas_adhoc_rates_g, - sizeof(libertas_adhoc_rates_g)); - } - } else { - k = copyrates(rates, 0, adapter->curbssparams.datarates, - adapter->curbssparams.numofrates); - } + if (adapter->connect_status != LIBERTAS_CONNECTED) + memcpy(rates, libertas_bg_rates, MAX_RATES); + else + memcpy(rates, adapter->curbssparams.rates, MAX_RATES); - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", k); - return k; + lbs_deb_leave(LBS_DEB_WEXT); } static int wlan_get_name(struct net_device *dev, struct iw_request_info *info, char *cwrq, char *extra) { - const char *cp; - char comm[6] = { "COMM-" }; - char mrvl[6] = { "MRVL-" }; - int cnt; lbs_deb_enter(LBS_DEB_WEXT); - strcpy(cwrq, mrvl); - - cp = strstr(libertas_driver_version, comm); - if (cp == libertas_driver_version) //skip leading "COMM-" - cp = libertas_driver_version + strlen(comm); - else - cp = libertas_driver_version; - - cnt = strlen(mrvl); - cwrq += cnt; - while (cnt < 16 && (*cp != '-')) { - *cwrq++ = toupper(*cp++); - cnt++; - } - *cwrq = '\0'; + /* We could add support for 802.11n here as needed. Jean II */ + snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g"); lbs_deb_leave(LBS_DEB_WEXT); return 0; @@ -305,7 +198,7 @@ static int wlan_get_wap(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { memcpy(awrq->sa_data, adapter->curbssparams.bssid, ETH_ALEN); } else { memset(awrq->sa_data, 0, ETH_ALEN); @@ -349,24 +242,11 @@ static int wlan_get_nick(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - /* - * Get the Nick Name saved - */ - - mutex_lock(&adapter->lock); - strncpy(extra, adapter->nodename, 16); - mutex_unlock(&adapter->lock); - - extra[16] = '\0'; + dwrq->length = strlen(adapter->nodename); + memcpy(extra, adapter->nodename, dwrq->length); + extra[dwrq->length] = '\0'; - /* - * If none, we may want to get the one that was set - */ - - /* - * Push it out ! 
- */ - dwrq->length = strlen(extra) + 1; + dwrq->flags = 1; /* active */ lbs_deb_leave(LBS_DEB_WEXT); return 0; @@ -382,20 +262,21 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, /* Use nickname to indicate that mesh is on */ - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { strncpy(extra, "Mesh", 12); extra[12] = '\0'; - dwrq->length = strlen(extra) + 1; + dwrq->length = strlen(extra); } else { extra[0] = '\0'; - dwrq->length = 1 ; + dwrq->length = 0; } lbs_deb_leave(LBS_DEB_WEXT); return 0; } + static int wlan_set_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { @@ -414,8 +295,8 @@ static int wlan_set_rts(struct net_device *dev, struct iw_request_info *info, adapter->rtsthsd = rthr; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_set, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_SET, CMD_OPTION_WAITFORRSP, OID_802_11_RTS_THRESHOLD, &rthr); lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); @@ -432,8 +313,8 @@ static int wlan_get_rts(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); adapter->rtsthsd = 0; - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_get, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_GET, CMD_OPTION_WAITFORRSP, OID_802_11_RTS_THRESHOLD, NULL); if (ret) goto out; @@ -467,8 +348,8 @@ static int wlan_set_frag(struct net_device *dev, struct iw_request_info *info, adapter->fragthsd = fthr; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_set, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_SET, CMD_OPTION_WAITFORRSP, OID_802_11_FRAGMENTATION_THRESHOLD, &fthr); lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); @@ -486,8 +367,8 @@ static int wlan_get_frag(struct net_device *dev, struct iw_request_info *info, adapter->fragthsd = 0; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - cmd_act_get, cmd_option_waitforrsp, + CMD_802_11_SNMP_MIB, + CMD_ACT_GET, CMD_OPTION_WAITFORRSP, OID_802_11_FRAGMENTATION_THRESHOLD, NULL); if (ret) goto out; @@ -539,9 +420,9 @@ static int wlan_get_txpow(struct net_device *dev, lbs_deb_enter(LBS_DEB_WEXT); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_rf_tx_power, - cmd_act_tx_power_opt_get, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RF_TX_POWER, + CMD_ACT_TX_POWER_OPT_GET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (ret) goto out; @@ -581,9 +462,9 @@ static int wlan_set_retry(struct net_device *dev, struct iw_request_info *info, /* Adding 1 to convert retry count to try count */ adapter->txretrycount = vwrq->value + 1; - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_set, - cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, OID_802_11_TX_RETRYCOUNT, NULL); if (ret) @@ -608,8 +489,8 @@ static int wlan_get_retry(struct net_device *dev, struct iw_request_info *info, adapter->txretrycount = 0; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - cmd_act_get, cmd_option_waitforrsp, + CMD_802_11_SNMP_MIB, + CMD_ACT_GET, CMD_OPTION_WAITFORRSP, OID_802_11_TX_RETRYCOUNT, NULL); if (ret) goto out; @@ -673,7 +554,7 @@ static int wlan_get_range(struct net_device *dev, struct 
iw_request_info *info, wlan_adapter *adapter = priv->adapter; struct iw_range *range = (struct iw_range *)extra; struct chan_freq_power *cfp; - u8 rates[WLAN_SUPPORTED_RATES]; + u8 rates[MAX_RATES + 1]; u8 flag = 0; @@ -686,19 +567,17 @@ static int wlan_get_range(struct net_device *dev, struct iw_request_info *info, range->max_nwid = 0; memset(rates, 0, sizeof(rates)); - range->num_bitrates = get_active_data_rates(adapter, rates); - - for (i = 0; i < min_t(__u8, range->num_bitrates, IW_MAX_BITRATES) && rates[i]; - i++) { - range->bitrate[i] = (rates[i] & 0x7f) * 500000; - } + copy_active_data_rates(adapter, rates); + range->num_bitrates = strnlen(rates, IW_MAX_BITRATES); + for (i = 0; i < range->num_bitrates; i++) + range->bitrate[i] = rates[i] * 500000; range->num_bitrates = i; lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES, range->num_bitrates); range->num_frequency = 0; if (priv->adapter->enable11d && - adapter->connect_status == libertas_connected) { + adapter->connect_status == LIBERTAS_CONNECTED) { u8 chan_no; u8 band; @@ -858,9 +737,9 @@ static int wlan_set_power(struct net_device *dev, struct iw_request_info *info, */ if (vwrq->disabled) { - adapter->psmode = wlan802_11powermodecam; + adapter->psmode = WLAN802_11POWERMODECAM; if (adapter->psstate != PS_STATE_FULL_POWER) { - libertas_ps_wakeup(priv, cmd_option_waitforrsp); + libertas_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); } return 0; @@ -875,14 +754,14 @@ static int wlan_set_power(struct net_device *dev, struct iw_request_info *info, return -EINVAL; } - if (adapter->psmode != wlan802_11powermodecam) { + if (adapter->psmode != WLAN802_11POWERMODECAM) { return 0; } - adapter->psmode = wlan802_11powermodemax_psp; + adapter->psmode = WLAN802_11POWERMODEMAX_PSP; - if (adapter->connect_status == libertas_connected) { - libertas_ps_sleep(priv, cmd_option_waitforrsp); + if (adapter->connect_status == LIBERTAS_CONNECTED) { + libertas_ps_sleep(priv, CMD_OPTION_WAITFORRSP); } lbs_deb_leave(LBS_DEB_WEXT); @@ -900,8 +779,8 @@ static int wlan_get_power(struct net_device *dev, struct iw_request_info *info, mode = adapter->psmode; - if ((vwrq->disabled = (mode == wlan802_11powermodecam)) - || adapter->connect_status == libertas_disconnected) + if ((vwrq->disabled = (mode == WLAN802_11POWERMODECAM)) + || adapter->connect_status == LIBERTAS_DISCONNECTED) { goto out; } @@ -937,7 +816,7 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) priv->wstats.status = adapter->mode; /* If we're not associated, all quality values are meaningless */ - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) goto out; /* Quality by RSSI */ @@ -973,7 +852,7 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) /* Quality by TX errors */ priv->wstats.discard.retries = priv->stats.tx_errors; - tx_retries = le16_to_cpu(adapter->logmsg.retry); + tx_retries = le32_to_cpu(adapter->logmsg.retry); if (tx_retries > 75) tx_qual = (90 - tx_retries) * POOR / 15; @@ -989,20 +868,20 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; quality = min(quality, tx_qual); - priv->wstats.discard.code = le16_to_cpu(adapter->logmsg.wepundecryptable); - priv->wstats.discard.fragment = le16_to_cpu(adapter->logmsg.rxfrag); + priv->wstats.discard.code = le32_to_cpu(adapter->logmsg.wepundecryptable); + priv->wstats.discard.fragment = le32_to_cpu(adapter->logmsg.rxfrag); priv->wstats.discard.retries = 
tx_retries; - priv->wstats.discard.misc = le16_to_cpu(adapter->logmsg.ackfailure); + priv->wstats.discard.misc = le32_to_cpu(adapter->logmsg.ackfailure); /* Calculate quality */ - priv->wstats.qual.qual = max(quality, (u32)100); + priv->wstats.qual.qual = min_t(u8, quality, 100); priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; stats_valid = 1; /* update stats asynchronously for future calls */ - libertas_prepare_and_send_command(priv, cmd_802_11_rssi, 0, + libertas_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 0, 0, NULL); - libertas_prepare_and_send_command(priv, cmd_802_11_get_log, 0, + libertas_prepare_and_send_command(priv, CMD_802_11_GET_LOG, 0, 0, 0, NULL); out: if (!stats_valid) { @@ -1080,88 +959,46 @@ out: return ret; } -/** - * @brief use index to get the data rate - * - * @param index The index of data rate - * @return data rate or 0 - */ -u32 libertas_index_to_data_rate(u8 index) -{ - if (index >= sizeof(libertas_wlan_data_rates)) - index = 0; - - return libertas_wlan_data_rates[index]; -} - -/** - * @brief use rate to get the index - * - * @param rate data rate - * @return index or 0 - */ -u8 libertas_data_rate_to_index(u32 rate) -{ - u8 *ptr; - - if (rate) - if ((ptr = memchr(libertas_wlan_data_rates, (u8) rate, - sizeof(libertas_wlan_data_rates)))) - return (ptr - libertas_wlan_data_rates); - - return 0; -} - static int wlan_set_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { wlan_private *priv = dev->priv; wlan_adapter *adapter = priv->adapter; - u32 data_rate; + u32 new_rate; u16 action; - int ret = 0; - u8 rates[WLAN_SUPPORTED_RATES]; - u8 *rate; + int ret = -EINVAL; + u8 rates[MAX_RATES + 1]; lbs_deb_enter(LBS_DEB_WEXT); - lbs_deb_wext("vwrq->value %d\n", vwrq->value); + /* Auto rate? 
*/ if (vwrq->value == -1) { - action = cmd_act_set_tx_auto; // Auto - adapter->is_datarate_auto = 1; - adapter->datarate = 0; + action = CMD_ACT_SET_TX_AUTO; + adapter->auto_rate = 1; + adapter->cur_rate = 0; } else { - if (vwrq->value % 100000) { - return -EINVAL; - } - - data_rate = vwrq->value / 500000; + if (vwrq->value % 100000) + goto out; memset(rates, 0, sizeof(rates)); - get_active_data_rates(adapter, rates); - rate = rates; - while (*rate) { - lbs_deb_wext("rate=0x%X, wanted data_rate 0x%X\n", *rate, - data_rate); - if ((*rate & 0x7f) == (data_rate & 0x7f)) - break; - rate++; - } - if (!*rate) { - lbs_pr_alert("fixed data rate 0x%X out " - "of range\n", data_rate); - return -EINVAL; + copy_active_data_rates(adapter, rates); + new_rate = vwrq->value / 500000; + if (!memchr(rates, new_rate, sizeof(rates))) { + lbs_pr_alert("fixed data rate 0x%X out of range\n", + new_rate); + goto out; } - adapter->datarate = data_rate; - action = cmd_act_set_tx_fix_rate; - adapter->is_datarate_auto = 0; + adapter->cur_rate = new_rate; + action = CMD_ACT_SET_TX_FIX_RATE; + adapter->auto_rate = 0; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11_data_rate, - action, cmd_option_waitforrsp, 0, NULL); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_DATA_RATE, + action, CMD_OPTION_WAITFORRSP, 0, NULL); +out: lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); return ret; } @@ -1174,14 +1011,19 @@ static int wlan_get_rate(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - if (adapter->is_datarate_auto) { - vwrq->fixed = 0; + if (adapter->connect_status == LIBERTAS_CONNECTED) { + vwrq->value = adapter->cur_rate * 500000; + + if (adapter->auto_rate) + vwrq->fixed = 0; + else + vwrq->fixed = 1; + } else { - vwrq->fixed = 1; + vwrq->fixed = 0; + vwrq->value = 0; } - vwrq->value = adapter->datarate * 500000; - lbs_deb_leave(LBS_DEB_WEXT); return 0; } @@ -1298,7 +1140,7 @@ static int wlan_get_encode(struct net_device *dev, dwrq->flags |= IW_ENCODE_NOKEY; - lbs_deb_wext("key: " MAC_FMT ", keylen %d\n", + lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n", extra[0], extra[1], extra[2], extra[3], extra[4], extra[5], dwrq->length); @@ -1325,7 +1167,7 @@ static int wlan_set_wep_key(struct assoc_request *assoc_req, int set_tx_key) { int ret = 0; - struct WLAN_802_11_KEY *pkey; + struct enc_key *pkey; lbs_deb_enter(LBS_DEB_WEXT); @@ -1344,7 +1186,7 @@ static int wlan_set_wep_key(struct assoc_request *assoc_req, pkey = &assoc_req->wep_keys[index]; if (key_length > 0) { - memset(pkey, 0, sizeof(struct WLAN_802_11_KEY)); + memset(pkey, 0, sizeof(struct enc_key)); pkey->type = KEY_TYPE_ID_WEP; /* Standardize the key length */ @@ -1412,11 +1254,11 @@ static void disable_wpa(struct assoc_request *assoc_req) { lbs_deb_enter(LBS_DEB_WEXT); - memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct WLAN_802_11_KEY)); + memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key)); assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST; set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); - memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct WLAN_802_11_KEY)); + memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key)); assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST; set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); @@ -1567,7 +1409,7 @@ static int wlan_get_encodeext(struct net_device *dev, && (adapter->secinfo.WPAenabled || adapter->secinfo.WPA2enabled)) { /* WPA */ - struct WLAN_802_11_KEY * pkey = NULL; + struct enc_key * pkey = NULL; if ( 
adapter->wpa_mcast_key.len && (adapter->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED)) @@ -1679,7 +1521,7 @@ static int wlan_set_encodeext(struct net_device *dev, if (set_tx_key) set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) { - struct WLAN_802_11_KEY * pkey; + struct enc_key * pkey; /* validate key length */ if (((alg == IW_ENCODE_ALG_TKIP) @@ -1702,7 +1544,7 @@ static int wlan_set_encodeext(struct net_device *dev, set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); } - memset(pkey, 0, sizeof (struct WLAN_802_11_KEY)); + memset(pkey, 0, sizeof (struct enc_key)); memcpy(pkey->key, ext->key, ext->key_len); pkey->len = ext->key_len; if (pkey->len) @@ -1976,12 +1818,14 @@ static int wlan_set_txpow(struct net_device *dev, struct iw_request_info *info, return 0; } - adapter->preamble = cmd_type_auto_preamble; + adapter->preamble = CMD_TYPE_AUTO_PREAMBLE; wlan_radio_ioctl(priv, RADIO_ON); + /* Userspace check in iwrange if it should use dBm or mW, + * therefore this should never happen... Jean II */ if ((vwrq->flags & IW_TXPOW_TYPE) == IW_TXPOW_MWATT) { - dbm = (u16) mw_to_dbm(vwrq->value); + return -EOPNOTSUPP; } else dbm = (u16) vwrq->value; @@ -1993,9 +1837,9 @@ static int wlan_set_txpow(struct net_device *dev, struct iw_request_info *info, lbs_deb_wext("txpower set %d dbm\n", dbm); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_rf_tx_power, - cmd_act_tx_power_opt_set_low, - cmd_option_waitforrsp, 0, (void *)&dbm); + CMD_802_11_RF_TX_POWER, + CMD_ACT_TX_POWER_OPT_SET_LOW, + CMD_OPTION_WAITFORRSP, 0, (void *)&dbm); lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); return ret; @@ -2017,7 +1861,7 @@ static int wlan_get_essid(struct net_device *dev, struct iw_request_info *info, /* * Get the current SSID */ - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { memcpy(extra, adapter->curbssparams.ssid, adapter->curbssparams.ssid_len); extra[adapter->curbssparams.ssid_len] = '\0'; @@ -2029,12 +1873,7 @@ static int wlan_get_essid(struct net_device *dev, struct iw_request_info *info, * If none, we may want to get the one that was set */ - /* To make the driver backward compatible with WPA supplicant v0.2.4 */ - if (dwrq->length == 32) /* check with WPA supplicant buffer size */ - dwrq->length = min_t(size_t, adapter->curbssparams.ssid_len, - IW_ESSID_MAX_SIZE); - else - dwrq->length = adapter->curbssparams.ssid_len + 1; + dwrq->length = adapter->curbssparams.ssid_len; dwrq->flags = 1; /* active */ @@ -2055,14 +1894,6 @@ static int wlan_set_essid(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - /* - * WE-20 and earlier NULL pad the end of the SSID and increment - * SSID length so it can be used like a string. WE-21 and later don't, - * but some userspace tools aren't able to cope with the change. 
- */ - if ((in_ssid_len > 0) && (extra[in_ssid_len - 1] == '\0')) - in_ssid_len--; - /* Check the size of the string */ if (in_ssid_len > IW_ESSID_MAX_SIZE) { ret = -E2BIG; @@ -2129,13 +1960,14 @@ static int wlan_set_wap(struct net_device *dev, struct iw_request_info *info, wlan_adapter *adapter = priv->adapter; struct assoc_request * assoc_req; int ret = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_WEXT); if (awrq->sa_family != ARPHRD_ETHER) return -EINVAL; - lbs_deb_wext("ASSOC: WAP: sa_data " MAC_FMT "\n", MAC_ARG(awrq->sa_data)); + lbs_deb_wext("ASSOC: WAP: sa_data %s\n", print_mac(mac, awrq->sa_data)); mutex_lock(&adapter->lock); @@ -2298,13 +2130,13 @@ static const iw_handler mesh_wlan_handler[] = { (iw_handler) NULL, /* SIOCSIWPMKSA */ }; struct iw_handler_def libertas_handler_def = { - .num_standard = sizeof(wlan_handler) / sizeof(iw_handler), + .num_standard = ARRAY_SIZE(wlan_handler), .standard = (iw_handler *) wlan_handler, .get_wireless_stats = wlan_get_wireless_stats, }; struct iw_handler_def mesh_handler_def = { - .num_standard = sizeof(mesh_wlan_handler) / sizeof(iw_handler), + .num_standard = ARRAY_SIZE(mesh_wlan_handler), .standard = (iw_handler *) mesh_wlan_handler, .get_wireless_stats = wlan_get_wireless_stats, }; diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h index 3d5196c9553a..6aa444c7de8d 100644 --- a/drivers/net/wireless/libertas/wext.h +++ b/drivers/net/wireless/libertas/wext.h @@ -4,9 +4,6 @@ #ifndef _WLAN_WEXT_H_ #define _WLAN_WEXT_H_ -#define SUBCMD_OFFSET 4 -#define SUBCMD_DATA(x) *((int *)(x->u.name + SUBCMD_OFFSET)) - /** wlan_ioctl_regrdwr */ struct wlan_ioctl_regrdwr { /** Which register to access */ @@ -18,13 +15,9 @@ struct wlan_ioctl_regrdwr { u32 value; }; -#define WLAN_LINKMODE_802_3 0 -#define WLAN_LINKMODE_802_11 2 -#define WLAN_RADIOMODE_NONE 0 -#define WLAN_RADIOMODE_RADIOTAP 2 +#define WLAN_MONITOR_OFF 0 extern struct iw_handler_def libertas_handler_def; extern struct iw_handler_def mesh_handler_def; -int wlan_radio_ioctl(wlan_private * priv, u8 option); #endif /* _WLAN_WEXT_H_ */ diff --git a/drivers/net/wireless/net2280.h b/drivers/net/wireless/net2280.h new file mode 100644 index 000000000000..120eb831b287 --- /dev/null +++ b/drivers/net/wireless/net2280.h @@ -0,0 +1,452 @@ +#ifndef NET2280_H +#define NET2280_H +/* + * NetChip 2280 high/full speed USB device controller. + * Unlike many such controllers, this one talks PCI. + */ + +/* + * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) + * Copyright (C) 2003 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/*-------------------------------------------------------------------------*/ + +/* NET2280 MEMORY MAPPED REGISTERS + * + * The register layout came from the chip documentation, and the bit + * number definitions were extracted from chip specification. + * + * Use the shift operator ('<<') to build bit masks, with readl/writel + * to access the registers through PCI. + */ + +/* main registers, BAR0 + 0x0000 */ +struct net2280_regs { + // offset 0x0000 + __le32 devinit; +#define LOCAL_CLOCK_FREQUENCY 8 +#define FORCE_PCI_RESET 7 +#define PCI_ID 6 +#define PCI_ENABLE 5 +#define FIFO_SOFT_RESET 4 +#define CFG_SOFT_RESET 3 +#define PCI_SOFT_RESET 2 +#define USB_SOFT_RESET 1 +#define M8051_RESET 0 + __le32 eectl; +#define EEPROM_ADDRESS_WIDTH 23 +#define EEPROM_CHIP_SELECT_ACTIVE 22 +#define EEPROM_PRESENT 21 +#define EEPROM_VALID 20 +#define EEPROM_BUSY 19 +#define EEPROM_CHIP_SELECT_ENABLE 18 +#define EEPROM_BYTE_READ_START 17 +#define EEPROM_BYTE_WRITE_START 16 +#define EEPROM_READ_DATA 8 +#define EEPROM_WRITE_DATA 0 + __le32 eeclkfreq; + u32 _unused0; + // offset 0x0010 + + __le32 pciirqenb0; /* interrupt PCI master ... */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + __le32 pciirqenb1; +#define PCI_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + __le32 cpu_irqenb0; /* ... 
or onboard 8051 */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + __le32 cpu_irqenb1; +#define CPU_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + + // offset 0x0020 + u32 _unused1; + __le32 usbirqenb1; +#define USB_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + __le32 irqstat0; +#define INTA_ASSERTED 12 +#define SETUP_PACKET_INTERRUPT 7 +#define ENDPOINT_F_INTERRUPT 6 +#define ENDPOINT_E_INTERRUPT 5 +#define ENDPOINT_D_INTERRUPT 4 +#define ENDPOINT_C_INTERRUPT 3 +#define ENDPOINT_B_INTERRUPT 2 +#define ENDPOINT_A_INTERRUPT 1 +#define ENDPOINT_0_INTERRUPT 0 + __le32 irqstat1; +#define POWER_STATE_CHANGE_INTERRUPT 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT 26 +#define PCI_PARITY_ERROR_INTERRUPT 25 +#define PCI_INTA_INTERRUPT 24 +#define PCI_PME_INTERRUPT 23 +#define PCI_SERR_INTERRUPT 22 +#define PCI_PERR_INTERRUPT 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19 +#define PCI_RETRY_ABORT_INTERRUPT 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16 +#define GPIO_INTERRUPT 13 +#define DMA_D_INTERRUPT 12 +#define DMA_C_INTERRUPT 11 +#define DMA_B_INTERRUPT 10 +#define DMA_A_INTERRUPT 9 +#define EEPROM_DONE_INTERRUPT 8 +#define VBUS_INTERRUPT 7 +#define CONTROL_STATUS_INTERRUPT 6 +#define ROOT_PORT_RESET_INTERRUPT 4 +#define SUSPEND_REQUEST_INTERRUPT 3 +#define 
SUSPEND_REQUEST_CHANGE_INTERRUPT 2 +#define RESUME_INTERRUPT 1 +#define SOF_INTERRUPT 0 + // offset 0x0030 + __le32 idxaddr; + __le32 idxdata; + __le32 fifoctl; +#define PCI_BASE2_RANGE 16 +#define IGNORE_FIFO_AVAILABILITY 3 +#define PCI_BASE2_SELECT 2 +#define FIFO_CONFIGURATION_SELECT 0 + u32 _unused2; + // offset 0x0040 + __le32 memaddr; +#define START 28 +#define DIRECTION 27 +#define FIFO_DIAGNOSTIC_SELECT 24 +#define MEMORY_ADDRESS 0 + __le32 memdata0; + __le32 memdata1; + u32 _unused3; + // offset 0x0050 + __le32 gpioctl; +#define GPIO3_LED_SELECT 12 +#define GPIO3_INTERRUPT_ENABLE 11 +#define GPIO2_INTERRUPT_ENABLE 10 +#define GPIO1_INTERRUPT_ENABLE 9 +#define GPIO0_INTERRUPT_ENABLE 8 +#define GPIO3_OUTPUT_ENABLE 7 +#define GPIO2_OUTPUT_ENABLE 6 +#define GPIO1_OUTPUT_ENABLE 5 +#define GPIO0_OUTPUT_ENABLE 4 +#define GPIO3_DATA 3 +#define GPIO2_DATA 2 +#define GPIO1_DATA 1 +#define GPIO0_DATA 0 + __le32 gpiostat; +#define GPIO3_INTERRUPT 3 +#define GPIO2_INTERRUPT 2 +#define GPIO1_INTERRUPT 1 +#define GPIO0_INTERRUPT 0 +} __attribute__ ((packed)); + +/* usb control, BAR0 + 0x0080 */ +struct net2280_usb_regs { + // offset 0x0080 + __le32 stdrsp; +#define STALL_UNSUPPORTED_REQUESTS 31 +#define SET_TEST_MODE 16 +#define GET_OTHER_SPEED_CONFIGURATION 15 +#define GET_DEVICE_QUALIFIER 14 +#define SET_ADDRESS 13 +#define ENDPOINT_SET_CLEAR_HALT 12 +#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11 +#define GET_STRING_DESCRIPTOR_2 10 +#define GET_STRING_DESCRIPTOR_1 9 +#define GET_STRING_DESCRIPTOR_0 8 +#define GET_SET_INTERFACE 6 +#define GET_SET_CONFIGURATION 5 +#define GET_CONFIGURATION_DESCRIPTOR 4 +#define GET_DEVICE_DESCRIPTOR 3 +#define GET_ENDPOINT_STATUS 2 +#define GET_INTERFACE_STATUS 1 +#define GET_DEVICE_STATUS 0 + __le32 prodvendid; +#define PRODUCT_ID 16 +#define VENDOR_ID 0 + __le32 relnum; + __le32 usbctl; +#define SERIAL_NUMBER_INDEX 16 +#define PRODUCT_ID_STRING_ENABLE 13 +#define VENDOR_ID_STRING_ENABLE 12 +#define USB_ROOT_PORT_WAKEUP_ENABLE 11 +#define VBUS_PIN 10 +#define TIMED_DISCONNECT 9 +#define SUSPEND_IMMEDIATELY 7 +#define SELF_POWERED_USB_DEVICE 6 +#define REMOTE_WAKEUP_SUPPORT 5 +#define PME_POLARITY 4 +#define USB_DETECT_ENABLE 3 +#define PME_WAKEUP_ENABLE 2 +#define DEVICE_REMOTE_WAKEUP_ENABLE 1 +#define SELF_POWERED_STATUS 0 + // offset 0x0090 + __le32 usbstat; +#define HIGH_SPEED 7 +#define FULL_SPEED 6 +#define GENERATE_RESUME 5 +#define GENERATE_DEVICE_REMOTE_WAKEUP 4 + __le32 xcvrdiag; +#define FORCE_HIGH_SPEED_MODE 31 +#define FORCE_FULL_SPEED_MODE 30 +#define USB_TEST_MODE 24 +#define LINE_STATE 16 +#define TRANSCEIVER_OPERATION_MODE 2 +#define TRANSCEIVER_SELECT 1 +#define TERMINATION_SELECT 0 + __le32 setup0123; + __le32 setup4567; + // offset 0x0090 + u32 _unused0; + __le32 ouraddr; +#define FORCE_IMMEDIATE 7 +#define OUR_USB_ADDRESS 0 + __le32 ourconfig; +} __attribute__ ((packed)); + +/* pci control, BAR0 + 0x0100 */ +struct net2280_pci_regs { + // offset 0x0100 + __le32 pcimstctl; +#define PCI_ARBITER_PARK_SELECT 13 +#define PCI_MULTI LEVEL_ARBITER 12 +#define PCI_RETRY_ABORT_ENABLE 11 +#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10 +#define DMA_READ_MULTIPLE_ENABLE 9 +#define DMA_READ_LINE_ENABLE 8 +#define PCI_MASTER_COMMAND_SELECT 6 +#define MEM_READ_OR_WRITE 0 +#define IO_READ_OR_WRITE 1 +#define CFG_READ_OR_WRITE 2 +#define PCI_MASTER_START 5 +#define PCI_MASTER_READ_WRITE 4 +#define PCI_MASTER_WRITE 0 +#define PCI_MASTER_READ 1 +#define PCI_MASTER_BYTE_WRITE_ENABLES 0 + __le32 pcimstaddr; + __le32 pcimstdata; + __le32 pcimststat; +#define 
PCI_ARBITER_CLEAR 2 +#define PCI_EXTERNAL_ARBITER 1 +#define PCI_HOST_MODE 0 +} __attribute__ ((packed)); + +/* dma control, BAR0 + 0x0180 ... array of four structs like this, + * for channels 0..3. see also struct net2280_dma: descriptor + * that can be loaded into some of these registers. + */ +struct net2280_dma_regs { /* [11.7] */ + // offset 0x0180, 0x01a0, 0x01c0, 0x01e0, + __le32 dmactl; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25 +#define DMA_CLEAR_COUNT_ENABLE 21 +#define DESCRIPTOR_POLLING_RATE 19 +#define POLL_CONTINUOUS 0 +#define POLL_1_USEC 1 +#define POLL_100_USEC 2 +#define POLL_1_MSEC 3 +#define DMA_VALID_BIT_POLLING_ENABLE 18 +#define DMA_VALID_BIT_ENABLE 17 +#define DMA_SCATTER_GATHER_ENABLE 16 +#define DMA_OUT_AUTO_START_ENABLE 4 +#define DMA_PREEMPT_ENABLE 3 +#define DMA_FIFO_VALIDATE 2 +#define DMA_ENABLE 1 +#define DMA_ADDRESS_HOLD 0 + __le32 dmastat; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25 +#define DMA_TRANSACTION_DONE_INTERRUPT 24 +#define DMA_ABORT 1 +#define DMA_START 0 + u32 _unused0[2]; + // offset 0x0190, 0x01b0, 0x01d0, 0x01f0, + __le32 dmacount; +#define VALID_BIT 31 +#define DMA_DIRECTION 30 +#define DMA_DONE_INTERRUPT_ENABLE 29 +#define END_OF_CHAIN 28 +#define DMA_BYTE_COUNT_MASK ((1<<24)-1) +#define DMA_BYTE_COUNT 0 + __le32 dmaaddr; + __le32 dmadesc; + u32 _unused1; +} __attribute__ ((packed)); + +/* dedicated endpoint registers, BAR0 + 0x0200 */ + +struct net2280_dep_regs { /* [11.8] */ + // offset 0x0200, 0x0210, 0x220, 0x230, 0x240 + __le32 dep_cfg; + // offset 0x0204, 0x0214, 0x224, 0x234, 0x244 + __le32 dep_rsp; + u32 _unused[2]; +} __attribute__ ((packed)); + +/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs + * like this, for ep0 then the configurable endpoints A..F + * ep0 reserved for control; E and F have only 64 bytes of fifo + */ +struct net2280_ep_regs { /* [11.9] */ + // offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 + __le32 ep_cfg; +#define ENDPOINT_BYTE_COUNT 16 +#define ENDPOINT_ENABLE 10 +#define ENDPOINT_TYPE 8 +#define ENDPOINT_DIRECTION 7 +#define ENDPOINT_NUMBER 0 + __le32 ep_rsp; +#define SET_NAK_OUT_PACKETS 15 +#define SET_EP_HIDE_STATUS_PHASE 14 +#define SET_EP_FORCE_CRC_ERROR 13 +#define SET_INTERRUPT_MODE 12 +#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11 +#define SET_NAK_OUT_PACKETS_MODE 10 +#define SET_ENDPOINT_TOGGLE 9 +#define SET_ENDPOINT_HALT 8 +#define CLEAR_NAK_OUT_PACKETS 7 +#define CLEAR_EP_HIDE_STATUS_PHASE 6 +#define CLEAR_EP_FORCE_CRC_ERROR 5 +#define CLEAR_INTERRUPT_MODE 4 +#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3 +#define CLEAR_NAK_OUT_PACKETS_MODE 2 +#define CLEAR_ENDPOINT_TOGGLE 1 +#define CLEAR_ENDPOINT_HALT 0 + __le32 ep_irqenb; +#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5 +#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 +#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1 +#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 + __le32 ep_stat; +#define FIFO_VALID_COUNT 24 +#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22 +#define TIMEOUT 21 +#define USB_STALL_SENT 20 +#define USB_IN_NAK_SENT 19 +#define USB_IN_ACK_RCVD 18 +#define USB_OUT_PING_NAK_SENT 17 +#define USB_OUT_ACK_SENT 16 +#define FIFO_OVERFLOW 13 +#define FIFO_UNDERFLOW 12 +#define FIFO_FULL 11 +#define FIFO_EMPTY 10 +#define FIFO_FLUSH 9 +#define SHORT_PACKET_OUT_DONE_INTERRUPT 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5 +#define NAK_OUT_PACKETS 4 +#define DATA_PACKET_RECEIVED_INTERRUPT 3 
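/*
 * [Editorial aside, not part of the patch] The net2280.h macros above are
 * bit *numbers*, not ready-made masks; as the header's intro comment says,
 * callers shift them themselves and use readl()/writel() on the mapped
 * BAR0 registers. A minimal hedged sketch of that idiom (the helper name
 * and the ep_stat pointer are illustrative assumptions, and <linux/io.h>
 * is assumed to be included):
 */
static inline int net2280_ep_fifo_empty(void __iomem *ep_stat)
{
	/* FIFO_EMPTY is defined above as bit 10 of the ep_stat register */
	return (readl(ep_stat) & (1 << FIFO_EMPTY)) != 0;
}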
+#define DATA_PACKET_TRANSMITTED_INTERRUPT 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT 1 +#define DATA_IN_TOKEN_INTERRUPT 0 + // offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 + __le32 ep_avail; + __le32 ep_data; + u32 _unused0[2]; +} __attribute__ ((packed)); + +struct net2280_reg_write { + __le16 port; + __le32 addr; + __le32 val; +} __attribute__ ((packed)); + +struct net2280_reg_read { + __le16 port; + __le32 addr; +} __attribute__ ((packed)); +#endif /* NET2280_H */ diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index 45b00e13ab2b..c2d71afd57e5 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c @@ -412,7 +412,6 @@ static int netwave_probe(struct pcmcia_device *link) spin_lock_init(&priv->spinlock); /* Netwave specific entries in the device structure */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &netwave_start_xmit; dev->get_stats = &netwave_get_stats; dev->set_multicast_list = &set_multicast_list; @@ -710,9 +709,9 @@ static const iw_handler netwave_private_handler[] = static const struct iw_handler_def netwave_handler_def = { - .num_standard = sizeof(netwave_handler)/sizeof(iw_handler), - .num_private = sizeof(netwave_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(netwave_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(netwave_handler), + .num_private = ARRAY_SIZE(netwave_private_handler), + .num_private_args = ARRAY_SIZE(netwave_private_args), .standard = (iw_handler *) netwave_handler, .private = (iw_handler *) netwave_private_handler, .private_args = (struct iw_priv_args *) netwave_private_args, @@ -738,6 +737,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { win_req_t req; memreq_t mem; u_char __iomem *ramBase = NULL; + DECLARE_MAC_BUF(mac); DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link); @@ -806,12 +806,13 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) { for (i = 0; i < 6; i++) dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i); - printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx id " - "%c%c, hw_addr ", dev->name, dev->base_addr, dev->irq, - (u_long) ramBase, (int) readb(ramBase+NETWAVE_EREG_NI), - (int) readb(ramBase+NETWAVE_EREG_NI+1)); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? 
":" : "\n")); + printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx" + "id %c%c, hw_addr %s\n", + dev->name, dev->base_addr, dev->irq, + (u_long) ramBase, + (int) readb(ramBase+NETWAVE_EREG_NI), + (int) readb(ramBase+NETWAVE_EREG_NI+1), + print_mac(mac, dev->dev_addr)); /* get revision words */ printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n", diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 062286dc8e15..ca6c2da7bc5d 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -2232,6 +2232,7 @@ static int orinoco_init(struct net_device *dev) struct hermes_idstring nickbuf; u16 reclen; int len; + DECLARE_MAC_BUF(mac); /* No need to lock, the hw_unavailable flag is already set in * alloc_orinocodev() */ @@ -2274,10 +2275,8 @@ static int orinoco_init(struct net_device *dev) goto out; } - printk(KERN_DEBUG "%s: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", - dev->name, dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5]); + printk(KERN_DEBUG "%s: MAC address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); /* Get the station name */ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index d1e502236b2a..8b7f5768a103 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c @@ -313,7 +313,6 @@ orinoco_cs_config(struct pcmcia_device *link) /* Ok, we have the configuration, prepare to register the netdev */ dev->base_addr = link->io.BasePort1; dev->irq = link->irq.AssignedIRQ; - SET_MODULE_OWNER(dev); card->node.major = card->node.minor = 0; SET_NETDEV_DEV(dev, &handle_to_dev(link)); diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c index eaf3d13b851c..35ec5fcf81a6 100644 --- a/drivers/net/wireless/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco_nortel.c @@ -193,7 +193,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev, card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c index 97a8b4ff32bd..2547d5dac0d3 100644 --- a/drivers/net/wireless/orinoco_pci.c +++ b/drivers/net/wireless/orinoco_pci.c @@ -148,7 +148,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, priv = netdev_priv(dev); card = priv->card; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING); diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c index 31162ac25a92..98fe165337d1 100644 --- a/drivers/net/wireless/orinoco_plx.c +++ b/drivers/net/wireless/orinoco_plx.c @@ -232,7 +232,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c index 7c7b960c91df..df493185a4af 100644 --- a/drivers/net/wireless/orinoco_tmd.c +++ b/drivers/net/wireless/orinoco_tmd.c @@ -134,7 +134,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, priv = netdev_priv(dev); card = priv->card; card->bridge_io = bridge_io; - 
SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); @@ -190,7 +189,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev, static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); - struct orinoco_private *priv = dev->priv; + struct orinoco_private *priv = netdev_priv(dev); struct orinoco_pci_card *card = priv->card; unregister_netdev(dev); diff --git a/drivers/net/wireless/p54.h b/drivers/net/wireless/p54.h new file mode 100644 index 000000000000..744c866066c5 --- /dev/null +++ b/drivers/net/wireless/p54.h @@ -0,0 +1,81 @@ +#ifndef PRISM54_H +#define PRISM54_H + +/* + * Shared defines for all mac80211 Prism54 code + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +enum control_frame_types { + P54_CONTROL_TYPE_FILTER_SET = 0, + P54_CONTROL_TYPE_CHANNEL_CHANGE, + P54_CONTROL_TYPE_FREQDONE, + P54_CONTROL_TYPE_DCFINIT, + P54_CONTROL_TYPE_FREEQUEUE = 7, + P54_CONTROL_TYPE_TXDONE, + P54_CONTROL_TYPE_PING, + P54_CONTROL_TYPE_STAT_READBACK, + P54_CONTROL_TYPE_BBP, + P54_CONTROL_TYPE_EEPROM_READBACK, + P54_CONTROL_TYPE_LED +}; + +struct p54_control_hdr { + __le16 magic1; + __le16 len; + __le32 req_id; + __le16 type; /* enum control_frame_types */ + u8 retry1; + u8 retry2; + u8 data[0]; +} __attribute__ ((packed)); + +#define EEPROM_READBACK_LEN (sizeof(struct p54_control_hdr) + 4 /* p54_eeprom_lm86 */) +#define MAX_RX_SIZE (IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct p54_control_hdr) + 20 /* length of struct p54_rx_hdr */ + 16 ) + +#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000 + +struct p54_common { + u32 rx_start; + u32 rx_end; + struct sk_buff_head tx_queue; + void (*tx)(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx); + int (*open)(struct ieee80211_hw *dev); + void (*stop)(struct ieee80211_hw *dev); + int mode; + u8 mac_addr[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + struct pda_iq_autocal_entry *iq_autocal; + unsigned int iq_autocal_len; + struct pda_channel_output_limit *output_limit; + unsigned int output_limit_len; + struct pda_pa_curve_data *curve_data; + __le16 rxhw; + u8 version; + unsigned int tx_hdr_len; + void *cached_vdcf; + unsigned int fw_var; + /* FIXME: this channels/modes/rates stuff sucks */ + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_hw_mode modes[2]; + struct ieee80211_tx_queue_stats tx_stats; +}; + +int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); +void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw); +int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len); +void p54_fill_eeprom_readback(struct p54_control_hdr *hdr); +struct ieee80211_hw *p54_init_common(size_t priv_data_len); +void p54_free_common(struct ieee80211_hw *dev); + +#endif /* PRISM54_H */ diff --git a/drivers/net/wireless/p54common.c b/drivers/net/wireless/p54common.c new file mode 100644 index 000000000000..2c63cf0ad2cd --- /dev/null +++ b/drivers/net/wireless/p54common.c @@ -0,0 +1,1019 @@ + +/* + * Common code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * 
Copyright (c) 2007, Christian Lamparter <chunkeey@web.de> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> + +#include <net/mac80211.h> + +#include "p54.h" +#include "p54common.h" + +MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); +MODULE_DESCRIPTION("Softmac Prism54 common code"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54common"); + +void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) +{ + struct p54_common *priv = dev->priv; + struct bootrec_exp_if *exp_if; + struct bootrec *bootrec; + u32 *data = (u32 *)fw->data; + u32 *end_data = (u32 *)fw->data + (fw->size >> 2); + u8 *fw_version = NULL; + size_t len; + int i; + + if (priv->rx_start) + return; + + while (data < end_data && *data) + data++; + + while (data < end_data && !*data) + data++; + + bootrec = (struct bootrec *) data; + + while (bootrec->data <= end_data && + (bootrec->data + (len = le32_to_cpu(bootrec->len))) <= end_data) { + u32 code = le32_to_cpu(bootrec->code); + switch (code) { + case BR_CODE_COMPONENT_ID: + switch (be32_to_cpu(*bootrec->data)) { + case FW_FMAC: + printk(KERN_INFO "p54: FreeMAC firmware\n"); + break; + case FW_LM20: + printk(KERN_INFO "p54: LM20 firmware\n"); + break; + case FW_LM86: + printk(KERN_INFO "p54: LM86 firmware\n"); + break; + case FW_LM87: + printk(KERN_INFO "p54: LM87 firmware - not supported yet!\n"); + break; + default: + printk(KERN_INFO "p54: unknown firmware\n"); + break; + } + break; + case BR_CODE_COMPONENT_VERSION: + /* 24 bytes should be enough for all firmwares */ + if (strnlen((unsigned char*)bootrec->data, 24) < 24) + fw_version = (unsigned char*)bootrec->data; + break; + case BR_CODE_DESCR: + priv->rx_start = le32_to_cpu(bootrec->data[1]); + /* FIXME add sanity checking */ + priv->rx_end = le32_to_cpu(bootrec->data[2]) - 0x3500; + break; + case BR_CODE_EXPOSED_IF: + exp_if = (struct bootrec_exp_if *) bootrec->data; + for (i = 0; i < (len * sizeof(*exp_if) / 4); i++) + if (exp_if[i].if_id == 0x1a) + priv->fw_var = le16_to_cpu(exp_if[i].variant); + break; + case BR_CODE_DEPENDENT_IF: + break; + case BR_CODE_END_OF_BRA: + case LEGACY_BR_CODE_END_OF_BRA: + end_data = NULL; + break; + default: + break; + } + bootrec = (struct bootrec *)&bootrec->data[len]; + } + + if (fw_version) + printk(KERN_INFO "p54: FW rev %s - Softmac protocol %x.%x\n", + fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); + + if (priv->fw_var >= 0x300) { + /* Firmware supports QoS, use it! 
*/ + priv->tx_stats.data[0].limit = 3; + priv->tx_stats.data[1].limit = 4; + priv->tx_stats.data[2].limit = 3; + priv->tx_stats.data[3].limit = 1; + dev->queues = 4; + } +} +EXPORT_SYMBOL_GPL(p54_parse_firmware); + +static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev, + struct pda_pa_curve_data *curve_data) +{ + struct p54_common *priv = dev->priv; + struct pda_pa_curve_data_sample_rev1 *rev1; + struct pda_pa_curve_data_sample_rev0 *rev0; + size_t cd_len = sizeof(*curve_data) + + (curve_data->points_per_channel*sizeof(*rev1) + 2) * + curve_data->channels; + unsigned int i, j; + void *source, *target; + + priv->curve_data = kmalloc(cd_len, GFP_KERNEL); + if (!priv->curve_data) + return -ENOMEM; + + memcpy(priv->curve_data, curve_data, sizeof(*curve_data)); + source = curve_data->data; + target = priv->curve_data->data; + for (i = 0; i < curve_data->channels; i++) { + __le16 *freq = source; + source += sizeof(__le16); + *((__le16 *)target) = *freq; + target += sizeof(__le16); + for (j = 0; j < curve_data->points_per_channel; j++) { + rev1 = target; + rev0 = source; + + rev1->rf_power = rev0->rf_power; + rev1->pa_detector = rev0->pa_detector; + rev1->data_64qam = rev0->pcv; + /* "invent" the points for the other modulations */ +#define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y) + rev1->data_16qam = SUB(rev0->pcv, 12); + rev1->data_qpsk = SUB(rev1->data_16qam, 12); + rev1->data_bpsk = SUB(rev1->data_qpsk, 12); + rev1->data_barker= SUB(rev1->data_bpsk, 14); +#undef SUB + target += sizeof(*rev1); + source += sizeof(*rev0); + } + } + + return 0; +} + +int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) +{ + struct p54_common *priv = dev->priv; + struct eeprom_pda_wrap *wrap = NULL; + struct pda_entry *entry; + int i = 0; + unsigned int data_len, entry_len; + void *tmp; + int err; + + wrap = (struct eeprom_pda_wrap *) eeprom; + entry = (void *)wrap->data + wrap->len; + i += 2; + i += le16_to_cpu(entry->len)*2; + while (i < len) { + entry_len = le16_to_cpu(entry->len); + data_len = ((entry_len - 1) << 1); + switch (le16_to_cpu(entry->code)) { + case PDR_MAC_ADDRESS: + SET_IEEE80211_PERM_ADDR(dev, entry->data); + break; + case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: + if (data_len < 2) { + err = -EINVAL; + goto err; + } + + if (2 + entry->data[1]*sizeof(*priv->output_limit) > data_len) { + err = -EINVAL; + goto err; + } + + priv->output_limit = kmalloc(entry->data[1] * + sizeof(*priv->output_limit), GFP_KERNEL); + + if (!priv->output_limit) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->output_limit, &entry->data[2], + entry->data[1]*sizeof(*priv->output_limit)); + priv->output_limit_len = entry->data[1]; + break; + case PDR_PRISM_PA_CAL_CURVE_DATA: + if (data_len < sizeof(struct pda_pa_curve_data)) { + err = -EINVAL; + goto err; + } + + if (((struct pda_pa_curve_data *)entry->data)->cal_method_rev) { + priv->curve_data = kmalloc(data_len, GFP_KERNEL); + if (!priv->curve_data) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->curve_data, entry->data, data_len); + } else { + err = p54_convert_rev0_to_rev1(dev, (struct pda_pa_curve_data *)entry->data); + if (err) + goto err; + } + + break; + case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: + priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); + if (!priv->iq_autocal) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->iq_autocal, entry->data, data_len); + priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); + break; + case PDR_INTERFACE_LIST: + tmp = entry->data; + while ((u8 *)tmp < entry->data + data_len) { + 
struct bootrec_exp_if *exp_if = tmp; + if (le16_to_cpu(exp_if->if_id) == 0xF) + priv->rxhw = exp_if->variant & cpu_to_le16(0x07); + tmp += sizeof(struct bootrec_exp_if); + } + break; + case PDR_HARDWARE_PLATFORM_COMPONENT_ID: + priv->version = *(u8 *)(entry->data + 1); + break; + case PDR_END: + i = len; + break; + } + + entry = (void *)entry + (entry_len + 1)*2; + i += 2; + i += entry_len*2; + } + + if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { + printk(KERN_ERR "p54: not all required entries found in eeprom!\n"); + err = -EINVAL; + goto err; + } + + return 0; + + err: + if (priv->iq_autocal) { + kfree(priv->iq_autocal); + priv->iq_autocal = NULL; + } + + if (priv->output_limit) { + kfree(priv->output_limit); + priv->output_limit = NULL; + } + + if (priv->curve_data) { + kfree(priv->curve_data); + priv->curve_data = NULL; + } + + printk(KERN_ERR "p54: eeprom parse failed!\n"); + return err; +} +EXPORT_SYMBOL_GPL(p54_parse_eeprom); + +void p54_fill_eeprom_readback(struct p54_control_hdr *hdr) +{ + struct p54_eeprom_lm86 *eeprom_hdr; + + hdr->magic1 = cpu_to_le16(0x8000); + hdr->len = cpu_to_le16(sizeof(*eeprom_hdr) + 0x2000); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK); + hdr->retry1 = hdr->retry2 = 0; + eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data; + eeprom_hdr->offset = 0x0; + eeprom_hdr->len = cpu_to_le16(0x2000); +} +EXPORT_SYMBOL_GPL(p54_fill_eeprom_readback); + +static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data; + struct ieee80211_rx_status rx_status = {0}; + u16 freq = le16_to_cpu(hdr->freq); + + rx_status.ssi = hdr->rssi; + rx_status.rate = hdr->rate & 0x1f; /* report short preambles & CCK too */ + rx_status.channel = freq == 2484 ? 14 : (freq - 2407)/5; + rx_status.freq = freq; + rx_status.phymode = MODE_IEEE80211G; + rx_status.antenna = hdr->antenna; + rx_status.mactime = le64_to_cpu(hdr->timestamp); + + skb_pull(skb, sizeof(*hdr)); + skb_trim(skb, le16_to_cpu(hdr->len)); + + ieee80211_rx_irqsafe(dev, skb, &rx_status); +} + +static void inline p54_wake_free_queues(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + int i; + + /* ieee80211_start_queues is great if all queues are really empty. + * But, what if some are full? 
*/ + + for (i = 0; i < dev->queues; i++) + if (priv->tx_stats.data[i].len < priv->tx_stats.data[i].limit) + ieee80211_wake_queue(dev, i); +} + +static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; + struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data; + struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next; + u32 addr = le32_to_cpu(hdr->req_id) - 0x70; + struct memrecord *range = NULL; + u32 freed = 0; + u32 last_addr = priv->rx_start; + + while (entry != (struct sk_buff *)&priv->tx_queue) { + range = (struct memrecord *)&entry->cb; + if (range->start_addr == addr) { + struct ieee80211_tx_status status = {{0}}; + struct p54_control_hdr *entry_hdr; + struct p54_tx_control_allocdata *entry_data; + int pad = 0; + + if (entry->next != (struct sk_buff *)&priv->tx_queue) + freed = ((struct memrecord *)&entry->next->cb)->start_addr - last_addr; + else + freed = priv->rx_end - last_addr; + + last_addr = range->end_addr; + __skb_unlink(entry, &priv->tx_queue); + if (!range->control) { + kfree_skb(entry); + break; + } + memcpy(&status.control, range->control, + sizeof(status.control)); + kfree(range->control); + priv->tx_stats.data[status.control.queue].len--; + + entry_hdr = (struct p54_control_hdr *) entry->data; + entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; + if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) + pad = entry_data->align[0]; + + if (!status.control.flags & IEEE80211_TXCTL_NO_ACK) { + if (!(payload->status & 0x01)) + status.flags |= IEEE80211_TX_STATUS_ACK; + else + status.excessive_retries = 1; + } + status.retry_count = payload->retries - 1; + status.ack_signal = le16_to_cpu(payload->ack_rssi); + skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); + ieee80211_tx_status_irqsafe(dev, entry, &status); + break; + } else + last_addr = range->end_addr; + entry = entry->next; + } + + if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 + + sizeof(struct p54_control_hdr)) + p54_wake_free_queues(dev); +} + +static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; + + switch (le16_to_cpu(hdr->type)) { + case P54_CONTROL_TYPE_TXDONE: + p54_rx_frame_sent(dev, skb); + break; + case P54_CONTROL_TYPE_BBP: + break; + default: + printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", + wiphy_name(dev->wiphy), le16_to_cpu(hdr->type)); + break; + } +} + +/* returns zero if skb can be reused */ +int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8; + switch (type) { + case 0x00: + case 0x01: + p54_rx_data(dev, skb); + return -1; + case 0x4d: + /* TODO: do something better... but then again, I've never seen this happen */ + printk(KERN_ERR "%s: Received fault. Probably need to restart hardware now..\n", + wiphy_name(dev->wiphy)); + break; + case 0x80: + p54_rx_control(dev, skb); + break; + default: + printk(KERN_ERR "%s: unknown frame RXed (0x%02x)\n", + wiphy_name(dev->wiphy), type); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(p54_rx); + +/* + * So, the firmware is somewhat stupid and doesn't know what places in its + * memory incoming data should go to. By poking around in the firmware, we + * can find some unused memory to upload our packets to. 
However, data that we + * want the card to TX needs to stay intact until the card has told us that + * it is done with it. This function finds empty places we can upload to and + * marks allocated areas as reserved if necessary. p54_rx_frame_sent frees + * allocated areas. + */ +static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, + struct p54_control_hdr *data, u32 len, + struct ieee80211_tx_control *control) +{ + struct p54_common *priv = dev->priv; + struct sk_buff *entry = priv->tx_queue.next; + struct sk_buff *target_skb = NULL; + struct memrecord *range; + u32 last_addr = priv->rx_start; + u32 largest_hole = 0; + u32 target_addr = priv->rx_start; + unsigned long flags; + unsigned int left; + len = (len + 0x170 + 3) & ~0x3; /* 0x70 headroom, 0x100 tailroom */ + + spin_lock_irqsave(&priv->tx_queue.lock, flags); + left = skb_queue_len(&priv->tx_queue); + while (left--) { + u32 hole_size; + range = (struct memrecord *)&entry->cb; + hole_size = range->start_addr - last_addr; + if (!target_skb && hole_size >= len) { + target_skb = entry->prev; + hole_size -= len; + target_addr = last_addr; + } + largest_hole = max(largest_hole, hole_size); + last_addr = range->end_addr; + entry = entry->next; + } + if (!target_skb && priv->rx_end - last_addr >= len) { + target_skb = priv->tx_queue.prev; + largest_hole = max(largest_hole, priv->rx_end - last_addr - len); + if (!skb_queue_empty(&priv->tx_queue)) { + range = (struct memrecord *)&target_skb->cb; + target_addr = range->end_addr; + } + } else + largest_hole = max(largest_hole, priv->rx_end - last_addr); + + if (skb) { + range = (struct memrecord *)&skb->cb; + range->start_addr = target_addr; + range->end_addr = target_addr + len; + range->control = control; + __skb_queue_after(&priv->tx_queue, target_skb, skb); + if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 + + sizeof(struct p54_control_hdr)) + ieee80211_stop_queues(dev); + } + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + + data->req_id = cpu_to_le32(target_addr + 0x70); +} + +static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct ieee80211_tx_queue_stats_data *current_queue; + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_allocdata *txhdr; + struct ieee80211_tx_control *control_copy; + size_t padding, len; + u8 rate; + + current_queue = &priv->tx_stats.data[control->queue]; + if (unlikely(current_queue->len > current_queue->limit)) + return NETDEV_TX_BUSY; + current_queue->len++; + current_queue->count++; + if (current_queue->len == current_queue->limit) + ieee80211_stop_queue(dev, control->queue); + + padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; + len = skb->len; + + control_copy = kmalloc(sizeof(*control), GFP_ATOMIC); + if (control_copy) + memcpy(control_copy, control, sizeof(*control)); + + txhdr = (struct p54_tx_control_allocdata *) + skb_push(skb, sizeof(*txhdr) + padding); + hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr)); + + if (padding) + hdr->magic1 = cpu_to_le16(0x4010); + else + hdr->magic1 = cpu_to_le16(0x0010); + hdr->len = cpu_to_le16(len); + hdr->type = (control->flags & IEEE80211_TXCTL_NO_ACK) ? 
0 : cpu_to_le16(1); + hdr->retry1 = hdr->retry2 = control->retry_limit; + p54_assign_address(dev, skb, hdr, skb->len, control_copy); + + memset(txhdr->wep_key, 0x0, 16); + txhdr->padding = 0; + txhdr->padding2 = 0; + + /* TODO: add support for alternate retry TX rates */ + rate = control->tx_rate; + if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) + rate |= 0x40; + else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + rate |= 0x20; + memset(txhdr->rateset, rate, 8); + txhdr->wep_key_present = 0; + txhdr->wep_key_len = 0; + txhdr->frame_type = cpu_to_le32(control->queue + 4); + txhdr->magic4 = 0; + txhdr->antenna = (control->antenna_sel_tx == 0) ? + 2 : control->antenna_sel_tx - 1; + txhdr->output_power = 0x7f; // HW Maximum + txhdr->magic5 = (control->flags & IEEE80211_TXCTL_NO_ACK) ? + 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); + if (padding) + txhdr->align[0] = padding; + + priv->tx(dev, hdr, skb->len, 0); + return 0; +} + +static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type, + const u8 *dst, const u8 *src, u8 antenna, + u32 magic3, u32 magic8, u32 magic9) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_filter *filter; + + hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) + + priv->tx_hdr_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr = (void *)hdr + priv->tx_hdr_len; + + filter = (struct p54_tx_control_filter *) hdr->data; + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*filter)); + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter), NULL); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); + + filter->filter_type = cpu_to_le16(filter_type); + memcpy(filter->dst, dst, ETH_ALEN); + if (!src) + memset(filter->src, ~0, ETH_ALEN); + else + memcpy(filter->src, src, ETH_ALEN); + filter->antenna = antenna; + filter->magic3 = cpu_to_le32(magic3); + filter->rx_addr = cpu_to_le32(priv->rx_end); + filter->max_rx = cpu_to_le16(0x0620); /* FIXME: for usb ver 1.. 
maybe */ + filter->rxhw = priv->rxhw; + filter->magic8 = cpu_to_le16(magic8); + filter->magic9 = cpu_to_le16(magic9); + + priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*filter), 1); + return 0; +} + +static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_channel *chan; + unsigned int i; + size_t payload_len = sizeof(*chan) + sizeof(u32)*2 + + sizeof(*chan->curve_data) * + priv->curve_data->points_per_channel; + void *entry; + + hdr = kzalloc(sizeof(*hdr) + payload_len + + priv->tx_hdr_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr = (void *)hdr + priv->tx_hdr_len; + + chan = (struct p54_tx_control_channel *) hdr->data; + + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*chan)); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE); + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len, NULL); + + chan->magic1 = cpu_to_le16(0x1); + chan->magic2 = cpu_to_le16(0x0); + + for (i = 0; i < priv->iq_autocal_len; i++) { + if (priv->iq_autocal[i].freq != freq) + continue; + + memcpy(&chan->iq_autocal, &priv->iq_autocal[i], + sizeof(*priv->iq_autocal)); + break; + } + if (i == priv->iq_autocal_len) + goto err; + + for (i = 0; i < priv->output_limit_len; i++) { + if (priv->output_limit[i].freq != freq) + continue; + + chan->val_barker = 0x38; + chan->val_bpsk = priv->output_limit[i].val_bpsk; + chan->val_qpsk = priv->output_limit[i].val_qpsk; + chan->val_16qam = priv->output_limit[i].val_16qam; + chan->val_64qam = priv->output_limit[i].val_64qam; + break; + } + if (i == priv->output_limit_len) + goto err; + + chan->pa_points_per_curve = priv->curve_data->points_per_channel; + + entry = priv->curve_data->data; + for (i = 0; i < priv->curve_data->channels; i++) { + if (*((__le16 *)entry) != freq) { + entry += sizeof(__le16); + entry += sizeof(struct pda_pa_curve_data_sample_rev1) * + chan->pa_points_per_curve; + continue; + } + + entry += sizeof(__le16); + memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) * + chan->pa_points_per_curve); + break; + } + + memcpy(hdr->data + payload_len - 4, &chan->val_bpsk, 4); + + priv->tx(dev, hdr, sizeof(*hdr) + payload_len, 1); + return 0; + + err: + printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); + kfree(hdr); + return -EINVAL; +} + +static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_led *led; + + hdr = kzalloc(sizeof(*hdr) + sizeof(*led) + + priv->tx_hdr_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr = (void *)hdr + priv->tx_hdr_len; + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*led)); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED); + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led), NULL); + + led = (struct p54_tx_control_led *) hdr->data; + led->mode = cpu_to_le16(mode); + led->led_permanent = cpu_to_le16(link); + led->led_temporary = cpu_to_le16(act); + led->duration = cpu_to_le16(1000); + + priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*led), 1); + + return 0; +} + +#define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, burst) \ +do { \ + queue.aifs = cpu_to_le16(ai_fs); \ + queue.cwmin = cpu_to_le16(cw_min); \ + queue.cwmax = cpu_to_le16(cw_max); \ + queue.txop = (burst == 0) ? 
\ + 0 : cpu_to_le16((burst * 100) / 32 + 1); \ +} while(0) + +static void p54_init_vdcf(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_vdcf *vdcf; + + /* all USB V1 adapters need a extra headroom */ + hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*vdcf)); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_DCFINIT); + hdr->req_id = cpu_to_le32(priv->rx_start); + + vdcf = (struct p54_tx_control_vdcf *) hdr->data; + + P54_SET_QUEUE(vdcf->queue[0], 0x0002, 0x0003, 0x0007, 0x000f); + P54_SET_QUEUE(vdcf->queue[1], 0x0002, 0x0007, 0x000f, 0x001e); + P54_SET_QUEUE(vdcf->queue[2], 0x0002, 0x000f, 0x03ff, 0x0014); + P54_SET_QUEUE(vdcf->queue[3], 0x0007, 0x000f, 0x03ff, 0x0000); +} + +static void p54_set_vdcf(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_vdcf *vdcf; + + hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; + + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf), NULL); + + vdcf = (struct p54_tx_control_vdcf *) hdr->data; + + if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) { + vdcf->slottime = 9; + vdcf->magic1 = 0x00; + vdcf->magic2 = 0x10; + } else { + vdcf->slottime = 20; + vdcf->magic1 = 0x0a; + vdcf->magic2 = 0x06; + } + + /* (see prism54/isl_oid.h for further details) */ + vdcf->frameburst = cpu_to_le16(0); + + priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*vdcf), 0); +} + +static int p54_start(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + int err; + + err = priv->open(dev); + if (!err) + priv->mode = IEEE80211_IF_TYPE_MNTR; + + return err; +} + +static void p54_stop(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct sk_buff *skb; + while ((skb = skb_dequeue(&priv->tx_queue))) { + struct memrecord *range = (struct memrecord *)&skb->cb; + if (range->control) + kfree(range->control); + kfree_skb(skb); + } + priv->stop(dev); + priv->mode = IEEE80211_IF_TYPE_INVALID; +} + +static int p54_add_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct p54_common *priv = dev->priv; + + if (priv->mode != IEEE80211_IF_TYPE_MNTR) + return -EOPNOTSUPP; + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + priv->mode = conf->type; + break; + default: + return -EOPNOTSUPP; + } + + memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); + + p54_set_filter(dev, 0, priv->mac_addr, NULL, 0, 1, 0, 0xF642); + p54_set_filter(dev, 0, priv->mac_addr, NULL, 1, 0, 0, 0xF642); + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + p54_set_filter(dev, 1, priv->mac_addr, NULL, 0, 0x15F, 0x1F4, 0); + break; + default: + BUG(); /* impossible */ + break; + } + + p54_set_leds(dev, 1, 0, 0); + + return 0; +} + +static void p54_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct p54_common *priv = dev->priv; + priv->mode = IEEE80211_IF_TYPE_MNTR; + memset(priv->mac_addr, 0, ETH_ALEN); + p54_set_filter(dev, 0, priv->mac_addr, NULL, 2, 0, 0, 0); +} + +static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) +{ + int ret; + + ret = p54_set_freq(dev, cpu_to_le16(conf->freq)); + p54_set_vdcf(dev); + return ret; +} + +static int p54_config_interface(struct ieee80211_hw *dev, int if_id, + struct ieee80211_if_conf *conf) +{ + struct p54_common *priv = dev->priv; + + p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642); + 
p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0); + p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + return 0; +} + +static void p54_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_mc_list *mclist) +{ + struct p54_common *priv = dev->priv; + + *total_flags &= FIF_BCN_PRBRESP_PROMISC; + + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + p54_set_filter(dev, 0, priv->mac_addr, + NULL, 2, 0, 0, 0); + else + p54_set_filter(dev, 0, priv->mac_addr, + priv->bssid, 2, 0, 0, 0); + } +} + +static int p54_conf_tx(struct ieee80211_hw *dev, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct p54_common *priv = dev->priv; + struct p54_tx_control_vdcf *vdcf; + + vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *) + ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data); + + if ((params) && !((queue < 0) || (queue > 4))) { + P54_SET_QUEUE(vdcf->queue[queue], params->aifs, + params->cw_min, params->cw_max, params->burst_time); + } else + return -EINVAL; + + p54_set_vdcf(dev); + + return 0; +} + +static int p54_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + /* TODO */ + return 0; +} + +static int p54_get_tx_stats(struct ieee80211_hw *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct p54_common *priv = dev->priv; + unsigned int i; + + for (i = 0; i < dev->queues; i++) + memcpy(&stats->data[i], &priv->tx_stats.data[i], + sizeof(stats->data[i])); + + return 0; +} + +static const struct ieee80211_ops p54_ops = { + .tx = p54_tx, + .start = p54_start, + .stop = p54_stop, + .add_interface = p54_add_interface, + .remove_interface = p54_remove_interface, + .config = p54_config, + .config_interface = p54_config_interface, + .configure_filter = p54_configure_filter, + .conf_tx = p54_conf_tx, + .get_stats = p54_get_stats, + .get_tx_stats = p54_get_tx_stats +}; + +struct ieee80211_hw *p54_init_common(size_t priv_data_len) +{ + struct ieee80211_hw *dev; + struct p54_common *priv; + int i; + + dev = ieee80211_alloc_hw(priv_data_len, &p54_ops); + if (!dev) + return NULL; + + priv = dev->priv; + priv->mode = IEEE80211_IF_TYPE_INVALID; + skb_queue_head_init(&priv->tx_queue); + memcpy(priv->channels, p54_channels, sizeof(p54_channels)); + memcpy(priv->rates, p54_rates, sizeof(p54_rates)); + priv->modes[1].mode = MODE_IEEE80211B; + priv->modes[1].num_rates = 4; + priv->modes[1].rates = priv->rates; + priv->modes[1].num_channels = ARRAY_SIZE(p54_channels); + priv->modes[1].channels = priv->channels; + priv->modes[0].mode = MODE_IEEE80211G; + priv->modes[0].num_rates = ARRAY_SIZE(p54_rates); + priv->modes[0].rates = priv->rates; + priv->modes[0].num_channels = ARRAY_SIZE(p54_channels); + priv->modes[0].channels = priv->channels; + dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ + IEEE80211_HW_RX_INCLUDES_FCS; + dev->channel_change_time = 1000; /* TODO: find actual value */ + dev->max_rssi = 127; + + priv->tx_stats.data[0].limit = 5; + dev->queues = 1; + + dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + + sizeof(struct p54_tx_control_allocdata); + + priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) + + priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL); + + if (!priv->cached_vdcf) { + ieee80211_free_hw(dev); + return NULL; + } + + p54_init_vdcf(dev); + + for (i = 0; i < 2; i++) { + if 
(ieee80211_register_hwmode(dev, &priv->modes[i])) { + kfree(priv->cached_vdcf); + ieee80211_free_hw(dev); + return NULL; + } + } + + return dev; +} +EXPORT_SYMBOL_GPL(p54_init_common); + +void p54_free_common(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + kfree(priv->iq_autocal); + kfree(priv->output_limit); + kfree(priv->curve_data); + kfree(priv->cached_vdcf); +} +EXPORT_SYMBOL_GPL(p54_free_common); + +static int __init p54_init(void) +{ + return 0; +} + +static void __exit p54_exit(void) +{ +} + +module_init(p54_init); +module_exit(p54_exit); diff --git a/drivers/net/wireless/p54common.h b/drivers/net/wireless/p54common.h new file mode 100644 index 000000000000..a721334e20d9 --- /dev/null +++ b/drivers/net/wireless/p54common.h @@ -0,0 +1,329 @@ +#ifndef PRISM54COMMON_H +#define PRISM54COMMON_H + +/* + * Common code specific definitions for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * Copyright (c) 2007, Christian Lamparter <chunkeey@web.de> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +struct bootrec { + __le32 code; + __le32 len; + u32 data[0]; +} __attribute__((packed)); + +struct bootrec_exp_if { + __le16 role; + __le16 if_id; + __le16 variant; + __le16 btm_compat; + __le16 top_compat; +} __attribute__((packed)); + +#define BR_CODE_MIN 0x80000000 +#define BR_CODE_COMPONENT_ID 0x80000001 +#define BR_CODE_COMPONENT_VERSION 0x80000002 +#define BR_CODE_DEPENDENT_IF 0x80000003 +#define BR_CODE_EXPOSED_IF 0x80000004 +#define BR_CODE_DESCR 0x80000101 +#define BR_CODE_MAX 0x8FFFFFFF +#define BR_CODE_END_OF_BRA 0xFF0000FF +#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF + +#define FW_FMAC 0x464d4143 +#define FW_LM86 0x4c4d3836 +#define FW_LM87 0x4c4d3837 +#define FW_LM20 0x4c4d3230 + +/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */ + +struct pda_entry { + __le16 len; /* includes both code and data */ + __le16 code; + u8 data[0]; +} __attribute__ ((packed)); + +struct eeprom_pda_wrap { + u32 magic; + u16 pad; + u16 len; + u32 arm_opcode; + u8 data[0]; +} __attribute__ ((packed)); + +struct pda_iq_autocal_entry { + __le16 freq; + __le16 iq_param[4]; +} __attribute__ ((packed)); + +struct pda_channel_output_limit { + __le16 freq; + u8 val_bpsk; + u8 val_qpsk; + u8 val_16qam; + u8 val_64qam; + u8 rate_set_mask; + u8 rate_set_size; +} __attribute__ ((packed)); + +struct pda_pa_curve_data_sample_rev0 { + u8 rf_power; + u8 pa_detector; + u8 pcv; +} __attribute__ ((packed)); + +struct pda_pa_curve_data_sample_rev1 { + u8 rf_power; + u8 pa_detector; + u8 data_barker; + u8 data_bpsk; + u8 data_qpsk; + u8 data_16qam; + u8 data_64qam; + u8 padding; +} __attribute__ ((packed)); + +struct pda_pa_curve_data { + u8 cal_method_rev; + u8 channels; + u8 points_per_channel; + u8 padding; + u8 data[0]; +} __attribute__ ((packed)); + +/* + * this defines the PDR codes used to build PDAs as defined in document + * number 553155. The current implementation mirrors version 1.1 of the + * document and lists only PDRs supported by the ARM platform. 
+ */ + +/* common and choice range (0x0000 - 0x0fff) */ +#define PDR_END 0x0000 +#define PDR_MANUFACTURING_PART_NUMBER 0x0001 +#define PDR_PDA_VERSION 0x0002 +#define PDR_NIC_SERIAL_NUMBER 0x0003 + +#define PDR_MAC_ADDRESS 0x0101 +#define PDR_REGULATORY_DOMAIN_LIST 0x0103 +#define PDR_TEMPERATURE_TYPE 0x0107 + +#define PDR_PRISM_PCI_IDENTIFIER 0x0402 + +/* ARM range (0x1000 - 0x1fff) */ +#define PDR_COUNTRY_INFORMATION 0x1000 +#define PDR_INTERFACE_LIST 0x1001 +#define PDR_HARDWARE_PLATFORM_COMPONENT_ID 0x1002 +#define PDR_OEM_NAME 0x1003 +#define PDR_PRODUCT_NAME 0x1004 +#define PDR_UTF8_OEM_NAME 0x1005 +#define PDR_UTF8_PRODUCT_NAME 0x1006 +#define PDR_COUNTRY_LIST 0x1007 +#define PDR_DEFAULT_COUNTRY 0x1008 + +#define PDR_ANTENNA_GAIN 0x1100 + +#define PDR_PRISM_INDIGO_PA_CALIBRATION_DATA 0x1901 +#define PDR_RSSI_LINEAR_APPROXIMATION 0x1902 +#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS 0x1903 +#define PDR_PRISM_PA_CAL_CURVE_DATA 0x1904 +#define PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND 0x1905 +#define PDR_PRISM_ZIF_TX_IQ_CALIBRATION 0x1906 +#define PDR_REGULATORY_POWER_LIMITS 0x1907 +#define PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED 0x1908 +#define PDR_RADIATED_TRANSMISSION_CORRECTION 0x1909 +#define PDR_PRISM_TX_IQ_CALIBRATION 0x190a + +/* reserved range (0x2000 - 0x7fff) */ + +/* customer range (0x8000 - 0xffff) */ +#define PDR_BASEBAND_REGISTERS 0x8000 +#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001 + +/* stored in skb->cb */ +struct memrecord { + u32 start_addr; + u32 end_addr; + struct ieee80211_tx_control *control; +}; + +struct p54_eeprom_lm86 { + __le16 offset; + __le16 len; + u8 data[0]; +} __attribute__ ((packed)); + +struct p54_rx_hdr { + __le16 magic; + __le16 len; + __le16 freq; + u8 antenna; + u8 rate; + u8 rssi; + u8 quality; + u16 unknown2; + __le64 timestamp; + u8 data[0]; +} __attribute__ ((packed)); + +struct p54_frame_sent_hdr { + u8 status; + u8 retries; + __le16 ack_rssi; + __le16 seq; + u16 rate; +} __attribute__ ((packed)); + +struct p54_tx_control_allocdata { + u8 rateset[8]; + u16 padding; + u8 wep_key_present; + u8 wep_key_len; + u8 wep_key[16]; + __le32 frame_type; + u32 padding2; + __le16 magic4; + u8 antenna; + u8 output_power; + __le32 magic5; + u8 align[0]; +} __attribute__ ((packed)); + +struct p54_tx_control_filter { + __le16 filter_type; + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN]; + u8 antenna; + u8 debug; + __le32 magic3; + u8 rates[8]; // FIXME: what's this for? 
+ __le32 rx_addr; + __le16 max_rx; + __le16 rxhw; + __le16 magic8; + __le16 magic9; +} __attribute__ ((packed)); + +struct p54_tx_control_channel { + __le16 magic1; + __le16 magic2; + u8 padding1[20]; + struct pda_iq_autocal_entry iq_autocal; + u8 pa_points_per_curve; + u8 val_barker; + u8 val_bpsk; + u8 val_qpsk; + u8 val_16qam; + u8 val_64qam; + struct pda_pa_curve_data_sample_rev1 curve_data[0]; + /* additional padding/data after curve_data */ +} __attribute__ ((packed)); + +struct p54_tx_control_led { + __le16 mode; + __le16 led_temporary; + __le16 led_permanent; + __le16 duration; +} __attribute__ ((packed)); + +struct p54_tx_vdcf_queues { + __le16 aifs; + __le16 cwmin; + __le16 cwmax; + __le16 txop; +} __attribute__ ((packed)); + +struct p54_tx_control_vdcf { + u8 padding; + u8 slottime; + u8 magic1; + u8 magic2; + struct p54_tx_vdcf_queues queue[8]; + u8 pad2[4]; + __le16 frameburst; +} __attribute__ ((packed)); + +static const struct ieee80211_rate p54_rates[] = { + { .rate = 10, + .val = 0, + .val2 = 0x10, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 20, + .val = 1, + .val2 = 0x11, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 55, + .val = 2, + .val2 = 0x12, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 110, + .val = 3, + .val2 = 0x13, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 60, + .val = 4, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 90, + .val = 5, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 120, + .val = 6, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 180, + .val = 7, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 240, + .val = 8, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 360, + .val = 9, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 480, + .val = 10, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 540, + .val = 11, + .flags = IEEE80211_RATE_OFDM }, +}; + +// TODO: just generate this.. +static const struct ieee80211_channel p54_channels[] = { + { .chan = 1, + .freq = 2412}, + { .chan = 2, + .freq = 2417}, + { .chan = 3, + .freq = 2422}, + { .chan = 4, + .freq = 2427}, + { .chan = 5, + .freq = 2432}, + { .chan = 6, + .freq = 2437}, + { .chan = 7, + .freq = 2442}, + { .chan = 8, + .freq = 2447}, + { .chan = 9, + .freq = 2452}, + { .chan = 10, + .freq = 2457}, + { .chan = 11, + .freq = 2462}, + { .chan = 12, + .freq = 2467}, + { .chan = 13, + .freq = 2472}, + { .chan = 14, + .freq = 2484} +}; + +#endif /* PRISM54COMMON_H */ diff --git a/drivers/net/wireless/p54pci.c b/drivers/net/wireless/p54pci.c new file mode 100644 index 000000000000..410b54387f23 --- /dev/null +++ b/drivers/net/wireless/p54pci.c @@ -0,0 +1,692 @@ + +/* + * Linux device driver for PCI based Prism54 + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/completion.h> +#include <net/mac80211.h> + +#include "p54.h" +#include "p54pci.h" + +MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); +MODULE_DESCRIPTION("Prism54 PCI wireless driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54pci"); + +static struct pci_device_id p54p_table[] __devinitdata = { + /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3890) }, + /* 3COM 3CRWE154G72 Wireless LAN adapter */ + { PCI_DEVICE(0x10b7, 0x6001) }, + /* Intersil PRISM Indigo Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3877) }, + /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3886) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, p54p_table); + +static int p54p_upload_firmware(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + const struct firmware *fw_entry = NULL; + __le32 reg; + int err; + u32 *data; + u32 remains, left, device_addr; + + P54P_WRITE(int_enable, 0); + P54P_READ(int_enable); + udelay(10); + + reg = P54P_READ(ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); + P54P_WRITE(ctrl_stat, reg); + P54P_READ(ctrl_stat); + udelay(10); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + + mdelay(50); + + err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev); + if (err) { + printk(KERN_ERR "%s (prism54pci): cannot find firmware " + "(isl3886)\n", pci_name(priv->pdev)); + return err; + } + + p54_parse_firmware(dev, fw_entry); + + data = (u32 *) fw_entry->data; + remains = fw_entry->size; + device_addr = ISL38XX_DEV_FIRMWARE_ADDR; + while (remains) { + u32 i = 0; + left = min((u32)0x1000, remains); + P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr)); + P54P_READ(int_enable); + + device_addr += 0x1000; + while (i < left) { + P54P_WRITE(direct_mem_win[i], *data++); + i += sizeof(u32); + } + + remains -= left; + P54P_READ(int_enable); + } + + release_firmware(fw_entry); + + reg = P54P_READ(ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); + P54P_WRITE(ctrl_stat, reg); + P54P_READ(ctrl_stat); + udelay(10); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + return 0; +} + +static irqreturn_t p54p_simple_interrupt(int irq, void *dev_id) +{ + struct p54p_priv *priv = (struct p54p_priv *) dev_id; + __le32 reg; + + reg = P54P_READ(int_ident); + P54P_WRITE(int_ack, reg); + + if (reg & P54P_READ(int_enable)) + complete(&priv->boot_comp); + + return IRQ_HANDLED; +} + +static int p54p_read_eeprom(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + int err; + struct p54_control_hdr *hdr; + void *eeprom; + dma_addr_t rx_mapping, tx_mapping; + u16 alen; + + init_completion(&priv->boot_comp); + err = request_irq(priv->pdev->irq, &p54p_simple_interrupt, + IRQF_SHARED, "prism54pci", priv); + if (err) { + printk(KERN_ERR "%s (prism54pci): failed to register IRQ handler\n", + pci_name(priv->pdev)); + return err; + } + + eeprom = kmalloc(0x2010 + EEPROM_READBACK_LEN, GFP_KERNEL); + if 
(!eeprom) { + printk(KERN_ERR "%s (prism54pci): no memory for eeprom!\n", + pci_name(priv->pdev)); + err = -ENOMEM; + goto out; + } + + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + P54P_WRITE(ring_control_base, priv->ring_control_dma); + P54P_READ(ring_control_base); + udelay(10); + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + P54P_READ(int_enable); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { + printk(KERN_ERR "%s (prism54pci): Cannot boot firmware!\n", + pci_name(priv->pdev)); + err = -EINVAL; + goto out; + } + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); + P54P_READ(int_enable); + + hdr = eeprom + 0x2010; + p54_fill_eeprom_readback(hdr); + hdr->req_id = cpu_to_le32(priv->common.rx_start); + + rx_mapping = pci_map_single(priv->pdev, eeprom, + 0x2010, PCI_DMA_FROMDEVICE); + tx_mapping = pci_map_single(priv->pdev, (void *)hdr, + EEPROM_READBACK_LEN, PCI_DMA_TODEVICE); + + priv->ring_control->rx_mgmt[0].host_addr = cpu_to_le32(rx_mapping); + priv->ring_control->rx_mgmt[0].len = cpu_to_le16(0x2010); + priv->ring_control->tx_data[0].host_addr = cpu_to_le32(tx_mapping); + priv->ring_control->tx_data[0].device_addr = hdr->req_id; + priv->ring_control->tx_data[0].len = cpu_to_le16(EEPROM_READBACK_LEN); + + priv->ring_control->host_idx[2] = cpu_to_le32(1); + priv->ring_control->host_idx[1] = cpu_to_le32(1); + + wmb(); + mdelay(100); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + + wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); + wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); + + pci_unmap_single(priv->pdev, tx_mapping, + EEPROM_READBACK_LEN, PCI_DMA_TODEVICE); + pci_unmap_single(priv->pdev, rx_mapping, + 0x2010, PCI_DMA_FROMDEVICE); + + alen = le16_to_cpu(priv->ring_control->rx_mgmt[0].len); + if (le32_to_cpu(priv->ring_control->device_idx[2]) != 1 || + alen < 0x10) { + printk(KERN_ERR "%s (prism54pci): Cannot read eeprom!\n", + pci_name(priv->pdev)); + err = -EINVAL; + goto out; + } + + p54_parse_eeprom(dev, (u8 *)eeprom + 0x10, alen - 0x10); + + out: + kfree(eeprom); + P54P_WRITE(int_enable, 0); + P54P_READ(int_enable); + udelay(10); + free_irq(priv->pdev->irq, priv); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + return err; +} + +static void p54p_refill_rx_ring(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + u32 limit, host_idx, idx; + + host_idx = le32_to_cpu(priv->ring_control->host_idx[0]); + limit = host_idx; + limit -= le32_to_cpu(priv->ring_control->device_idx[0]); + limit = ARRAY_SIZE(priv->ring_control->rx_data) - limit; + + idx = host_idx % ARRAY_SIZE(priv->ring_control->rx_data); + while (limit-- > 1) { + struct p54p_desc *desc = &priv->ring_control->rx_data[idx]; + + if (!desc->host_addr) { + struct sk_buff *skb; + dma_addr_t mapping; + skb = dev_alloc_skb(MAX_RX_SIZE); + if (!skb) + break; + + mapping = pci_map_single(priv->pdev, + skb_tail_pointer(skb), + MAX_RX_SIZE, + PCI_DMA_FROMDEVICE); + desc->host_addr = cpu_to_le32(mapping); + desc->device_addr = 0; // FIXME: necessary? 
+ desc->len = cpu_to_le16(MAX_RX_SIZE); + desc->flags = 0; + priv->rx_buf[idx] = skb; + } + + idx++; + host_idx++; + idx %= ARRAY_SIZE(priv->ring_control->rx_data); + } + + wmb(); + priv->ring_control->host_idx[0] = cpu_to_le32(host_idx); +} + +static irqreturn_t p54p_interrupt(int irq, void *dev_id) +{ + struct ieee80211_hw *dev = dev_id; + struct p54p_priv *priv = dev->priv; + __le32 reg; + + spin_lock(&priv->lock); + reg = P54P_READ(int_ident); + if (unlikely(reg == 0xFFFFFFFF)) { + spin_unlock(&priv->lock); + return IRQ_HANDLED; + } + + P54P_WRITE(int_ack, reg); + + reg &= P54P_READ(int_enable); + + if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) { + struct p54p_desc *desc; + u32 idx, i; + i = priv->tx_idx; + i %= ARRAY_SIZE(priv->ring_control->tx_data); + priv->tx_idx = idx = le32_to_cpu(priv->ring_control->device_idx[1]); + idx %= ARRAY_SIZE(priv->ring_control->tx_data); + + while (i != idx) { + desc = &priv->ring_control->tx_data[i]; + if (priv->tx_buf[i]) { + kfree(priv->tx_buf[i]); + priv->tx_buf[i] = NULL; + } + + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), PCI_DMA_TODEVICE); + + desc->host_addr = 0; + desc->device_addr = 0; + desc->len = 0; + desc->flags = 0; + + i++; + i %= ARRAY_SIZE(priv->ring_control->tx_data); + } + + i = priv->rx_idx; + i %= ARRAY_SIZE(priv->ring_control->rx_data); + priv->rx_idx = idx = le32_to_cpu(priv->ring_control->device_idx[0]); + idx %= ARRAY_SIZE(priv->ring_control->rx_data); + while (i != idx) { + u16 len; + struct sk_buff *skb; + desc = &priv->ring_control->rx_data[i]; + len = le16_to_cpu(desc->len); + skb = priv->rx_buf[i]; + + skb_put(skb, len); + + if (p54_rx(dev, skb)) { + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + + priv->rx_buf[i] = NULL; + desc->host_addr = 0; + } else { + skb_trim(skb, 0); + desc->len = cpu_to_le16(MAX_RX_SIZE); + } + + i++; + i %= ARRAY_SIZE(priv->ring_control->rx_data); + } + + p54p_refill_rx_ring(dev); + + wmb(); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) + complete(&priv->boot_comp); + + spin_unlock(&priv->lock); + + return reg ? 
IRQ_HANDLED : IRQ_NONE; +} + +static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx) +{ + struct p54p_priv *priv = dev->priv; + unsigned long flags; + struct p54p_desc *desc; + dma_addr_t mapping; + u32 device_idx, idx, i; + + spin_lock_irqsave(&priv->lock, flags); + + device_idx = le32_to_cpu(priv->ring_control->device_idx[1]); + idx = le32_to_cpu(priv->ring_control->host_idx[1]); + i = idx % ARRAY_SIZE(priv->ring_control->tx_data); + + mapping = pci_map_single(priv->pdev, data, len, PCI_DMA_TODEVICE); + desc = &priv->ring_control->tx_data[i]; + desc->host_addr = cpu_to_le32(mapping); + desc->device_addr = data->req_id; + desc->len = cpu_to_le16(len); + desc->flags = 0; + + wmb(); + priv->ring_control->host_idx[1] = cpu_to_le32(idx + 1); + + if (free_on_tx) + priv->tx_buf[i] = data; + + spin_unlock_irqrestore(&priv->lock, flags); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + P54P_READ(dev_int); + + /* FIXME: unlikely to happen because the device usually runs out of + memory before we fill the ring up, but we can make it impossible */ + if (idx - device_idx > ARRAY_SIZE(priv->ring_control->tx_data) - 2) + printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy)); +} + +static int p54p_open(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + int err; + + init_completion(&priv->boot_comp); + err = request_irq(priv->pdev->irq, &p54p_interrupt, + IRQF_SHARED, "prism54pci", dev); + if (err) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(dev->wiphy)); + return err; + } + + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + priv->rx_idx = priv->tx_idx = 0; + p54p_refill_rx_ring(dev); + + p54p_upload_firmware(dev); + + P54P_WRITE(ring_control_base, priv->ring_control_dma); + P54P_READ(ring_control_base); + wmb(); + udelay(10); + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + P54P_READ(int_enable); + wmb(); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + P54P_READ(dev_int); + + if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { + printk(KERN_ERR "%s: Cannot boot firmware!\n", + wiphy_name(dev->wiphy)); + free_irq(priv->pdev->irq, dev); + return -ETIMEDOUT; + } + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); + P54P_READ(int_enable); + wmb(); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + P54P_READ(dev_int); + wmb(); + udelay(10); + + return 0; +} + +static void p54p_stop(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + unsigned int i; + struct p54p_desc *desc; + + P54P_WRITE(int_enable, 0); + P54P_READ(int_enable); + udelay(10); + + free_irq(priv->pdev->irq, dev); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + for (i = 0; i < ARRAY_SIZE(priv->rx_buf); i++) { + desc = &priv->ring_control->rx_data[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + kfree_skb(priv->rx_buf[i]); + priv->rx_buf[i] = NULL; + } + + for (i = 0; i < ARRAY_SIZE(priv->tx_buf); i++) { + desc = &priv->ring_control->tx_data[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), PCI_DMA_TODEVICE); + + kfree(priv->tx_buf[i]); + priv->tx_buf[i] = NULL; + } + + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); +} + +static int __devinit p54p_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct 
p54p_priv *priv; + struct ieee80211_hw *dev; + unsigned long mem_addr, mem_len; + int err; + DECLARE_MAC_BUF(mac); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s (prism54pci): Cannot enable new PCI device\n", + pci_name(pdev)); + return err; + } + + mem_addr = pci_resource_start(pdev, 0); + mem_len = pci_resource_len(pdev, 0); + if (mem_len < sizeof(struct p54p_csr)) { + printk(KERN_ERR "%s (prism54pci): Too short PCI resources\n", + pci_name(pdev)); + pci_disable_device(pdev); + return err; + } + + err = pci_request_regions(pdev, "prism54pci"); + if (err) { + printk(KERN_ERR "%s (prism54pci): Cannot obtain PCI resources\n", + pci_name(pdev)); + return err; + } + + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || + pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + printk(KERN_ERR "%s (prism54pci): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + pci_write_config_byte(pdev, 0x40, 0); + pci_write_config_byte(pdev, 0x41, 0); + + dev = p54_init_common(sizeof(*priv)); + if (!dev) { + printk(KERN_ERR "%s (prism54pci): ieee80211 alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + + priv = dev->priv; + priv->pdev = pdev; + + SET_IEEE80211_DEV(dev, &pdev->dev); + pci_set_drvdata(pdev, dev); + + priv->map = ioremap(mem_addr, mem_len); + if (!priv->map) { + printk(KERN_ERR "%s (prism54pci): Cannot map device memory\n", + pci_name(pdev)); + err = -EINVAL; // TODO: use a better error code? + goto err_free_dev; + } + + priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), + &priv->ring_control_dma); + if (!priv->ring_control) { + printk(KERN_ERR "%s (prism54pci): Cannot allocate rings\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_iounmap; + } + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + + err = p54p_upload_firmware(dev); + if (err) + goto err_free_desc; + + err = p54p_read_eeprom(dev); + if (err) + goto err_free_desc; + + priv->common.open = p54p_open; + priv->common.stop = p54p_stop; + priv->common.tx = p54p_tx; + + spin_lock_init(&priv->lock); + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "%s (prism54pci): Cannot register netdevice\n", + pci_name(pdev)); + goto err_free_common; + } + + printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n", + wiphy_name(dev->wiphy), + print_mac(mac, dev->wiphy->perm_addr), + priv->common.version); + + return 0; + + err_free_common: + p54_free_common(dev); + + err_free_desc: + pci_free_consistent(pdev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); + + err_iounmap: + iounmap(priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(dev); + + err_free_reg: + pci_release_regions(pdev); + pci_disable_device(pdev); + return err; +} + +static void __devexit p54p_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + priv = dev->priv; + pci_free_consistent(pdev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); + p54_free_common(dev); + iounmap(priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + ieee80211_free_hw(dev); +} + +#ifdef CONFIG_PM +static int p54p_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv = dev->priv; + + if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { + 
ieee80211_stop_queues(dev); + p54p_stop(dev); + } + + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int p54p_resume(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv = dev->priv; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { + p54p_open(dev); + ieee80211_start_queues(dev); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver p54p_driver = { + .name = "prism54pci", + .id_table = p54p_table, + .probe = p54p_probe, + .remove = __devexit_p(p54p_remove), +#ifdef CONFIG_PM + .suspend = p54p_suspend, + .resume = p54p_resume, +#endif /* CONFIG_PM */ +}; + +static int __init p54p_init(void) +{ + return pci_register_driver(&p54p_driver); +} + +static void __exit p54p_exit(void) +{ + pci_unregister_driver(&p54p_driver); +} + +module_init(p54p_init); +module_exit(p54p_exit); diff --git a/drivers/net/wireless/p54pci.h b/drivers/net/wireless/p54pci.h new file mode 100644 index 000000000000..52feb597dc4a --- /dev/null +++ b/drivers/net/wireless/p54pci.h @@ -0,0 +1,106 @@ +#ifndef PRISM54PCI_H +#define PRISM54PCI_H + +/* + * Defines for PCI based mac80211 Prism54 driver + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Device Interrupt register bits */ +#define ISL38XX_DEV_INT_RESET 0x0001 +#define ISL38XX_DEV_INT_UPDATE 0x0002 +#define ISL38XX_DEV_INT_WAKEUP 0x0008 +#define ISL38XX_DEV_INT_SLEEP 0x0010 +#define ISL38XX_DEV_INT_ABORT 0x0020 +/* these two only used in USB */ +#define ISL38XX_DEV_INT_DATA 0x0040 +#define ISL38XX_DEV_INT_MGMT 0x0080 + +#define ISL38XX_DEV_INT_PCIUART_CTS 0x4000 +#define ISL38XX_DEV_INT_PCIUART_DR 0x8000 + +/* Interrupt Identification/Acknowledge/Enable register bits */ +#define ISL38XX_INT_IDENT_UPDATE 0x0002 +#define ISL38XX_INT_IDENT_INIT 0x0004 +#define ISL38XX_INT_IDENT_WAKEUP 0x0008 +#define ISL38XX_INT_IDENT_SLEEP 0x0010 +#define ISL38XX_INT_IDENT_PCIUART_CTS 0x4000 +#define ISL38XX_INT_IDENT_PCIUART_DR 0x8000 + +/* Control/Status register bits */ +#define ISL38XX_CTRL_STAT_SLEEPMODE 0x00000200 +#define ISL38XX_CTRL_STAT_CLKRUN 0x00800000 +#define ISL38XX_CTRL_STAT_RESET 0x10000000 +#define ISL38XX_CTRL_STAT_RAMBOOT 0x20000000 +#define ISL38XX_CTRL_STAT_STARTHALTED 0x40000000 +#define ISL38XX_CTRL_STAT_HOST_OVERRIDE 0x80000000 + +struct p54p_csr { + __le32 dev_int; + u8 unused_1[12]; + __le32 int_ident; + __le32 int_ack; + __le32 int_enable; + u8 unused_2[4]; + union { + __le32 ring_control_base; + __le32 gen_purp_com[2]; + }; + u8 unused_3[8]; + __le32 direct_mem_base; + u8 unused_4[44]; + __le32 dma_addr; + __le32 dma_len; + __le32 dma_ctrl; + u8 unused_5[12]; + __le32 ctrl_stat; + u8 unused_6[1924]; + u8 cardbus_cis[0x800]; + u8 direct_mem_win[0x1000]; +} __attribute__ ((packed)); + +/* usb backend only needs the register defines above */ +#ifndef PRISM54USB_H +struct p54p_desc { + __le32 host_addr; + __le32 device_addr; + __le16 len; + __le16 flags; +} __attribute__ ((packed)); + +struct p54p_ring_control { + __le32 host_idx[4]; + __le32 device_idx[4]; + struct p54p_desc rx_data[8]; + struct p54p_desc 
tx_data[32]; + struct p54p_desc rx_mgmt[4]; + struct p54p_desc tx_mgmt[4]; +} __attribute__ ((packed)); + +#define P54P_READ(r) __raw_readl(&priv->map->r) +#define P54P_WRITE(r, val) __raw_writel((__force u32)(val), &priv->map->r) + +struct p54p_priv { + struct p54_common common; + struct pci_dev *pdev; + struct p54p_csr __iomem *map; + + spinlock_t lock; + struct p54p_ring_control *ring_control; + dma_addr_t ring_control_dma; + u32 rx_idx, tx_idx; + struct sk_buff *rx_buf[8]; + void *tx_buf[32]; + struct completion boot_comp; +}; + +#endif /* PRISM54USB_H */ +#endif /* PRISM54PCI_H */ diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54usb.c new file mode 100644 index 000000000000..755482a5a938 --- /dev/null +++ b/drivers/net/wireless/p54usb.c @@ -0,0 +1,907 @@ + +/* + * Linux device driver for USB based Prism54 + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/usb.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/crc32.h> +#include <net/mac80211.h> + +#include "p54.h" +#include "p54usb.h" + +MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); +MODULE_DESCRIPTION("Prism54 USB wireless driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54usb"); + +static struct usb_device_id p54u_table[] __devinitdata = { + /* Version 1 devices (pci chip + net2280) */ + {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ + {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ + {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ + {USB_DEVICE(0x0846, 0x4200)}, /* Netgear WG121 */ + {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ + {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ + {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ + {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ + {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ + {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ + {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ + {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ + {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ + + /* Version 2 devices (3887) */ + {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ + {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ + {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. 
Robotics U5 802.11g Adapter*/ + {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */ + {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ + {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ + {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ + {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ + {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ + {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ + {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ + {} +}; + +MODULE_DEVICE_TABLE(usb, p54u_table); + +static void p54u_rx_cb(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb; + struct ieee80211_hw *dev = info->dev; + struct p54u_priv *priv = dev->priv; + + if (unlikely(urb->status)) { + info->urb = NULL; + usb_free_urb(urb); + return; + } + + skb_unlink(skb, &priv->rx_queue); + skb_put(skb, urb->actual_length); + if (!priv->hw_type) + skb_pull(skb, sizeof(struct net2280_tx_hdr)); + + if (p54_rx(dev, skb)) { + skb = dev_alloc_skb(MAX_RX_SIZE); + if (unlikely(!skb)) { + usb_free_urb(urb); + /* TODO check rx queue length and refill *somewhere* */ + return; + } + + info = (struct p54u_rx_info *) skb->cb; + info->urb = urb; + info->dev = dev; + urb->transfer_buffer = skb_tail_pointer(skb); + urb->context = skb; + skb_queue_tail(&priv->rx_queue, skb); + } else { + skb_trim(skb, 0); + skb_queue_tail(&priv->rx_queue, skb); + } + + usb_submit_urb(urb, GFP_ATOMIC); +} + +static void p54u_tx_cb(struct urb *urb) +{ + usb_free_urb(urb); +} + +static void p54u_tx_free_cb(struct urb *urb) +{ + kfree(urb->transfer_buffer); + usb_free_urb(urb); +} + +static int p54u_init_urbs(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + struct urb *entry; + struct sk_buff *skb; + struct p54u_rx_info *info; + + while (skb_queue_len(&priv->rx_queue) < 32) { + skb = __dev_alloc_skb(MAX_RX_SIZE, GFP_KERNEL); + if (!skb) + break; + entry = usb_alloc_urb(0, GFP_KERNEL); + if (!entry) { + kfree_skb(skb); + break; + } + usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), MAX_RX_SIZE, p54u_rx_cb, skb); + info = (struct p54u_rx_info *) skb->cb; + info->urb = entry; + info->dev = dev; + skb_queue_tail(&priv->rx_queue, skb); + usb_submit_urb(entry, GFP_KERNEL); + } + + return 0; +} + +static void p54u_free_urbs(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + struct p54u_rx_info *info; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&priv->rx_queue))) { + info = (struct p54u_rx_info *) skb->cb; + if (!info->urb) + continue; + + usb_kill_urb(info->urb); + kfree_skb(skb); + } +} + +static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx) +{ + struct p54u_priv *priv = dev->priv; + struct urb *addr_urb, *data_urb; + + addr_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!addr_urb) + return; + + data_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!data_urb) { + usb_free_urb(addr_urb); + return; + } + + usb_fill_bulk_urb(addr_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), &data->req_id, + sizeof(data->req_id), p54u_tx_cb, dev); + usb_fill_bulk_urb(data_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), data, len, + free_on_tx ? 
p54u_tx_free_cb : p54u_tx_cb, dev); + + usb_submit_urb(addr_urb, GFP_ATOMIC); + usb_submit_urb(data_urb, GFP_ATOMIC); +} + +static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx) +{ + struct p54u_priv *priv = dev->priv; + struct urb *int_urb, *data_urb; + struct net2280_tx_hdr *hdr; + struct net2280_reg_write *reg; + + reg = kmalloc(sizeof(*reg), GFP_ATOMIC); + if (!reg) + return; + + int_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!int_urb) { + kfree(reg); + return; + } + + data_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!data_urb) { + kfree(reg); + usb_free_urb(int_urb); + return; + } + + reg->port = cpu_to_le16(NET2280_DEV_U32); + reg->addr = cpu_to_le32(P54U_DEV_BASE); + reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); + + len += sizeof(*data); + hdr = (void *)data - sizeof(*hdr); + memset(hdr, 0, sizeof(*hdr)); + hdr->device_addr = data->req_id; + hdr->len = cpu_to_le16(len); + + usb_fill_bulk_urb(int_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), + p54u_tx_free_cb, dev); + usb_submit_urb(int_urb, GFP_ATOMIC); + + usb_fill_bulk_urb(data_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, len + sizeof(*hdr), + free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, dev); + usb_submit_urb(data_urb, GFP_ATOMIC); +} + +static int p54u_write(struct p54u_priv *priv, + struct net2280_reg_write *buf, + enum net2280_op_type type, + __le32 addr, __le32 val) +{ + unsigned int ep; + int alen; + + if (type & 0x0800) + ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV); + else + ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_BRG); + + buf->port = cpu_to_le16(type); + buf->addr = addr; + buf->val = val; + + return usb_bulk_msg(priv->udev, ep, buf, sizeof(*buf), &alen, 1000); +} + +static int p54u_read(struct p54u_priv *priv, void *buf, + enum net2280_op_type type, + __le32 addr, __le32 *val) +{ + struct net2280_reg_read *read = buf; + __le32 *reg = buf; + unsigned int ep; + int alen, err; + + if (type & 0x0800) + ep = P54U_PIPE_DEV; + else + ep = P54U_PIPE_BRG; + + read->port = cpu_to_le16(type); + read->addr = addr; + + err = usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), + read, sizeof(*read), &alen, 1000); + if (err) + return err; + + err = usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, ep), + reg, sizeof(*reg), &alen, 1000); + if (err) + return err; + + *val = *reg; + return 0; +} + +static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep, + void *data, size_t len) +{ + int alen; + return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), + data, len, &alen, 2000); +} + +static int p54u_read_eeprom(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + void *buf; + struct p54_control_hdr *hdr; + int err, alen; + size_t offset = priv->hw_type ? 
0x10 : 0x20; + + buf = kmalloc(0x2020, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "prism54usb: cannot allocate memory for" + "eeprom readback!\n"); + return -ENOMEM; + } + + if (priv->hw_type) { + *((u32 *) buf) = priv->common.rx_start; + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); + if (err) { + printk(KERN_ERR "prism54usb: addr send failed\n"); + goto fail; + } + } else { + struct net2280_reg_write *reg = buf; + reg->port = cpu_to_le16(NET2280_DEV_U32); + reg->addr = cpu_to_le32(P54U_DEV_BASE); + reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); + err = p54u_bulk_msg(priv, P54U_PIPE_DEV, buf, sizeof(*reg)); + if (err) { + printk(KERN_ERR "prism54usb: dev_int send failed\n"); + goto fail; + } + } + + hdr = buf + priv->common.tx_hdr_len; + p54_fill_eeprom_readback(hdr); + hdr->req_id = cpu_to_le32(priv->common.rx_start); + if (priv->common.tx_hdr_len) { + struct net2280_tx_hdr *tx_hdr = buf; + tx_hdr->device_addr = hdr->req_id; + tx_hdr->len = cpu_to_le16(EEPROM_READBACK_LEN); + } + + /* we can just pretend to send 0x2000 bytes of nothing in the headers */ + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, + EEPROM_READBACK_LEN + priv->common.tx_hdr_len); + if (err) { + printk(KERN_ERR "prism54usb: eeprom req send failed\n"); + goto fail; + } + + err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), + buf, 0x2020, &alen, 1000); + if (!err && alen > offset) { + p54_parse_eeprom(dev, (u8 *)buf + offset, alen - offset); + } else { + printk(KERN_ERR "prism54usb: eeprom read failed!\n"); + err = -EINVAL; + goto fail; + } + + fail: + kfree(buf); + return err; +} + +static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) +{ + static char start_string[] = "~~~~<\r"; + struct p54u_priv *priv = dev->priv; + const struct firmware *fw_entry = NULL; + int err, alen; + u8 carry = 0; + u8 *buf, *tmp, *data; + unsigned int left, remains, block_size; + struct x2_header *hdr; + unsigned long timeout; + + tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "p54usb: cannot allocate firmware upload buffer!\n"); + err = -ENOMEM; + goto err_bufalloc; + } + + memcpy(buf, start_string, 4); + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4); + if (err) { + printk(KERN_ERR "p54usb: reset failed! 
(%d)\n", err); + goto err_reset; + } + + err = request_firmware(&fw_entry, "isl3887usb_bare", &priv->udev->dev); + if (err) { + printk(KERN_ERR "p54usb: cannot find firmware (isl3887usb_bare)!\n"); + goto err_req_fw_failed; + } + + p54_parse_firmware(dev, fw_entry); + + left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size); + strcpy(buf, start_string); + left -= strlen(start_string); + tmp += strlen(start_string); + + data = fw_entry->data; + remains = fw_entry->size; + + hdr = (struct x2_header *)(buf + strlen(start_string)); + memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE); + hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR); + hdr->fw_length = cpu_to_le32(fw_entry->size); + hdr->crc = cpu_to_le32(~crc32_le(~0, (void *)&hdr->fw_load_addr, + sizeof(u32)*2)); + left -= sizeof(*hdr); + tmp += sizeof(*hdr); + + while (remains) { + while (left--) { + if (carry) { + *tmp++ = carry; + carry = 0; + remains--; + continue; + } + switch (*data) { + case '~': + *tmp++ = '}'; + carry = '^'; + break; + case '}': + *tmp++ = '}'; + carry = ']'; + break; + default: + *tmp++ = *data; + remains--; + break; + } + data++; + } + + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); + if (err) { + printk(KERN_ERR "prism54usb: firmware upload failed!\n"); + goto err_upload_failed; + } + + tmp = buf; + left = block_size = min((unsigned int)P54U_FW_BLOCK, remains); + } + + *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size)); + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); + if (err) { + printk(KERN_ERR "prism54usb: firmware upload failed!\n"); + goto err_upload_failed; + } + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { + if (alen > 2 && !memcmp(buf, "OK", 2)) + break; + + if (alen > 5 && !memcmp(buf, "ERROR", 5)) { + printk(KERN_INFO "prism54usb: firmware upload failed!\n"); + err = -EINVAL; + break; + } + + if (time_after(jiffies, timeout)) { + printk(KERN_ERR "prism54usb: firmware boot timed out!\n"); + err = -ETIMEDOUT; + break; + } + } + if (err) + goto err_upload_failed; + + buf[0] = 'g'; + buf[1] = '\r'; + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); + if (err) { + printk(KERN_ERR "prism54usb: firmware boot failed!\n"); + goto err_upload_failed; + } + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { + if (alen > 0 && buf[0] == 'g') + break; + + if (time_after(jiffies, timeout)) { + err = -ETIMEDOUT; + break; + } + } + if (err) + goto err_upload_failed; + + err_upload_failed: + release_firmware(fw_entry); + err_req_fw_failed: + err_reset: + kfree(buf); + err_bufalloc: + return err; +} + +static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + const struct firmware *fw_entry = NULL; + const struct p54p_csr *devreg = (const struct p54p_csr *) P54U_DEV_BASE; + int err, alen; + void *buf; + __le32 reg; + unsigned int remains, offset; + u8 *data; + + buf = kmalloc(512, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "p54usb: firmware buffer alloc failed!\n"); + return -ENOMEM; + } + + err = request_firmware(&fw_entry, "isl3890usb", &priv->udev->dev); + if (err) { + printk(KERN_ERR "p54usb: cannot find firmware (isl3890usb)!\n"); + kfree(buf); + return err; + } + + p54_parse_firmware(dev, fw_entry); + +#define P54U_WRITE(type, addr, data) \ + do {\ + err = 
p54u_write(priv, buf, type,\ + cpu_to_le32((u32)(unsigned long)addr), data);\ + if (err) \ + goto fail;\ + } while (0) + +#define P54U_READ(type, addr) \ + do {\ + err = p54u_read(priv, buf, type,\ + cpu_to_le32((u32)(unsigned long)addr), ®);\ + if (err)\ + goto fail;\ + } while (0) + + /* power down net2280 bridge */ + P54U_READ(NET2280_BRG_U32, NET2280_GPIOCTL); + reg |= cpu_to_le32(P54U_BRG_POWER_DOWN); + reg &= cpu_to_le32(~P54U_BRG_POWER_UP); + P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); + + mdelay(100); + + /* power up bridge */ + reg |= cpu_to_le32(P54U_BRG_POWER_UP); + reg &= cpu_to_le32(~P54U_BRG_POWER_DOWN); + P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); + + mdelay(100); + + P54U_WRITE(NET2280_BRG_U32, NET2280_DEVINIT, + cpu_to_le32(NET2280_CLK_30Mhz | + NET2280_PCI_ENABLE | + NET2280_PCI_SOFT_RESET)); + + mdelay(20); + + P54U_WRITE(NET2280_BRG_CFG_U16, PCI_COMMAND, + cpu_to_le32(PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER)); + + P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_0, + cpu_to_le32(NET2280_BASE)); + + P54U_READ(NET2280_BRG_CFG_U16, PCI_STATUS); + reg |= cpu_to_le32(PCI_STATUS_REC_MASTER_ABORT); + P54U_WRITE(NET2280_BRG_CFG_U16, PCI_STATUS, reg); + + // TODO: we really need this? + P54U_READ(NET2280_BRG_U32, NET2280_RELNUM); + + P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_RSP, + cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); + P54U_WRITE(NET2280_BRG_U32, NET2280_EPC_RSP, + cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); + + P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_2, + cpu_to_le32(NET2280_BASE2)); + + /* finally done setting up the bridge */ + + P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | PCI_COMMAND, + cpu_to_le32(PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER)); + + P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | 0x40 /* TRDY timeout */, 0); + P54U_WRITE(NET2280_DEV_CFG_U32, 0x10000 | PCI_BASE_ADDRESS_0, + cpu_to_le32(P54U_DEV_BASE)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + + /* do romboot */ + P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, 0); + + P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(100); + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + /* finally, we can upload firmware now! 
*/ + remains = fw_entry->size; + data = fw_entry->data; + offset = ISL38XX_DEV_FIRMWARE_ADDR; + + while (remains) { + unsigned int block_len = min(remains, (unsigned int)512); + memcpy(buf, data, block_len); + + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); + if (err) { + printk(KERN_ERR "prism54usb: firmware block upload " + "failed\n"); + goto fail; + } + + P54U_WRITE(NET2280_DEV_U32, &devreg->direct_mem_base, + cpu_to_le32(0xc0000f00)); + + P54U_WRITE(NET2280_DEV_U32, + 0x0020 | (unsigned long)&devreg->direct_mem_win, 0); + P54U_WRITE(NET2280_DEV_U32, + 0x0020 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(1)); + + P54U_WRITE(NET2280_DEV_U32, + 0x0024 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(block_len)); + P54U_WRITE(NET2280_DEV_U32, + 0x0028 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(offset)); + + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_addr, + cpu_to_le32(NET2280_EPA_FIFO_PCI_ADDR)); + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_len, + cpu_to_le32(block_len >> 2)); + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_ctrl, + cpu_to_le32(ISL38XX_DMA_MASTER_CONTROL_TRIGGER)); + + mdelay(10); + + P54U_READ(NET2280_DEV_U32, + 0x002C | (unsigned long)&devreg->direct_mem_win); + if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || + !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { + printk(KERN_ERR "prism54usb: firmware DMA transfer " + "failed\n"); + goto fail; + } + + P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_STAT, + cpu_to_le32(NET2280_FIFO_FLUSH)); + + remains -= block_len; + data += block_len; + offset += block_len; + } + + /* do ramboot */ + P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(100); + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + /* start up the firmware */ + P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, + cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT_ENABLE | + NET2280_USB_INTERRUPT_ENABLE)); + + P54U_WRITE(NET2280_DEV_U32, &devreg->dev_int, + cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + err = usb_interrupt_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_INT), + buf, sizeof(__le32), &alen, 1000); + if (err || alen != sizeof(__le32)) + goto fail; + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + if (!(reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))) + err = -EINVAL; + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + +#undef P54U_WRITE +#undef P54U_READ + + fail: + release_firmware(fw_entry); + kfree(buf); + return err; +} + +static int p54u_open(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + int err; + + err = p54u_init_urbs(dev); + if (err) { + return err; + } + + priv->common.open = p54u_init_urbs; + + return 0; +} + +static void p54u_stop(struct ieee80211_hw *dev) +{ + /* TODO: figure 
out how to reliably stop the 3887 and net2280 so + the hardware is still usable next time we want to start it. + until then, we just stop listening to the hardware.. */ + p54u_free_urbs(dev); + return; +} + +static int __devinit p54u_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct ieee80211_hw *dev; + struct p54u_priv *priv; + int err; + unsigned int i, recognized_pipes; + DECLARE_MAC_BUF(mac); + + dev = p54_init_common(sizeof(*priv)); + if (!dev) { + printk(KERN_ERR "prism54usb: ieee80211 alloc failed\n"); + return -ENOMEM; + } + + priv = dev->priv; + + SET_IEEE80211_DEV(dev, &intf->dev); + usb_set_intfdata(intf, dev); + priv->udev = udev; + + usb_get_dev(udev); + + /* really lazy and simple way of figuring out if we're a 3887 */ + /* TODO: should just stick the identification in the device table */ + i = intf->altsetting->desc.bNumEndpoints; + recognized_pipes = 0; + while (i--) { + switch (intf->altsetting->endpoint[i].desc.bEndpointAddress) { + case P54U_PIPE_DATA: + case P54U_PIPE_MGMT: + case P54U_PIPE_BRG: + case P54U_PIPE_DEV: + case P54U_PIPE_DATA | USB_DIR_IN: + case P54U_PIPE_MGMT | USB_DIR_IN: + case P54U_PIPE_BRG | USB_DIR_IN: + case P54U_PIPE_DEV | USB_DIR_IN: + case P54U_PIPE_INT | USB_DIR_IN: + recognized_pipes++; + } + } + priv->common.open = p54u_open; + + if (recognized_pipes < P54U_PIPE_NUMBER) { + priv->hw_type = P54U_3887; + priv->common.tx = p54u_tx_3887; + } else { + dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr); + priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr); + priv->common.tx = p54u_tx_net2280; + } + priv->common.stop = p54u_stop; + + if (priv->hw_type) + err = p54u_upload_firmware_3887(dev); + else + err = p54u_upload_firmware_net2280(dev); + if (err) + goto err_free_dev; + + err = p54u_read_eeprom(dev); + if (err) + goto err_free_dev; + + if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { + u8 perm_addr[ETH_ALEN]; + + printk(KERN_WARNING "prism54usb: Invalid hwaddr! 
Using randomly generated MAC addr\n"); + random_ether_addr(perm_addr); + SET_IEEE80211_PERM_ADDR(dev, perm_addr); + } + + skb_queue_head_init(&priv->rx_queue); + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "prism54usb: Cannot register netdevice\n"); + goto err_free_dev; + } + + printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n", + wiphy_name(dev->wiphy), + print_mac(mac, dev->wiphy->perm_addr), + priv->common.version); + + return 0; + + err_free_dev: + ieee80211_free_hw(dev); + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); + return err; +} + +static void __devexit p54u_disconnect(struct usb_interface *intf) +{ + struct ieee80211_hw *dev = usb_get_intfdata(intf); + struct p54u_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + + priv = dev->priv; + usb_put_dev(interface_to_usbdev(intf)); + p54_free_common(dev); + ieee80211_free_hw(dev); +} + +static struct usb_driver p54u_driver = { + .name = "prism54usb", + .id_table = p54u_table, + .probe = p54u_probe, + .disconnect = p54u_disconnect, +}; + +static int __init p54u_init(void) +{ + return usb_register(&p54u_driver); +} + +static void __exit p54u_exit(void) +{ + usb_deregister(&p54u_driver); +} + +module_init(p54u_init); +module_exit(p54u_exit); diff --git a/drivers/net/wireless/p54usb.h b/drivers/net/wireless/p54usb.h new file mode 100644 index 000000000000..d1896b396c1c --- /dev/null +++ b/drivers/net/wireless/p54usb.h @@ -0,0 +1,133 @@ +#ifndef PRISM54USB_H +#define PRISM54USB_H + +/* + * Defines for USB based mac80211 Prism54 driver + * + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* for isl3886 register definitions used on ver 1 devices */ +#include "p54pci.h" +#include "net2280.h" + +/* pci */ +#define NET2280_BASE 0x10000000 +#define NET2280_BASE2 0x20000000 + +/* gpio */ +#define P54U_BRG_POWER_UP (1 << GPIO0_DATA) +#define P54U_BRG_POWER_DOWN (1 << GPIO1_DATA) + +/* devinit */ +#define NET2280_CLK_4Mhz (15 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_30Mhz (2 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_60Mhz (1 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_STOP (0 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_PCI_ENABLE (1 << PCI_ENABLE) +#define NET2280_PCI_SOFT_RESET (1 << PCI_SOFT_RESET) + +/* endpoints */ +#define NET2280_CLEAR_NAK_OUT_PACKETS_MODE (1 << CLEAR_NAK_OUT_PACKETS_MODE) +#define NET2280_FIFO_FLUSH (1 << FIFO_FLUSH) + +/* irq */ +#define NET2280_USB_INTERRUPT_ENABLE (1 << USB_INTERRUPT_ENABLE) +#define NET2280_PCI_INTA_INTERRUPT (1 << PCI_INTA_INTERRUPT) +#define NET2280_PCI_INTA_INTERRUPT_ENABLE (1 << PCI_INTA_INTERRUPT_ENABLE) + +/* registers */ +#define NET2280_DEVINIT 0x00 +#define NET2280_USBIRQENB1 0x24 +#define NET2280_IRQSTAT1 0x2c +#define NET2280_FIFOCTL 0x38 +#define NET2280_GPIOCTL 0x50 +#define NET2280_RELNUM 0x88 +#define NET2280_EPA_RSP 0x324 +#define NET2280_EPA_STAT 0x32c +#define NET2280_EPB_STAT 0x34c +#define NET2280_EPC_RSP 0x364 +#define NET2280_EPC_STAT 0x36c +#define NET2280_EPD_STAT 0x38c + +#define NET2280_EPA_CFG 0x320 +#define NET2280_EPB_CFG 0x340 +#define NET2280_EPC_CFG 0x360 +#define NET2280_EPD_CFG 0x380 +#define NET2280_EPE_CFG 0x3A0 +#define NET2280_EPF_CFG 0x3C0 +#define P54U_DEV_BASE 0x40000000 + +struct net2280_tx_hdr { + __le32 device_addr; + __le16 len; + __le16 follower; /* ? */ + u8 padding[8]; +} __attribute__((packed)); + +/* Some flags for the isl hardware registers controlling DMA inside the + * chip */ +#define ISL38XX_DMA_STATUS_DONE 0x00000001 +#define ISL38XX_DMA_STATUS_READY 0x00000002 +#define NET2280_EPA_FIFO_PCI_ADDR 0x20000000 +#define ISL38XX_DMA_MASTER_CONTROL_TRIGGER 0x00000004 + +enum net2280_op_type { + NET2280_BRG_U32 = 0x001F, + NET2280_BRG_CFG_U32 = 0x000F, + NET2280_BRG_CFG_U16 = 0x0003, + NET2280_DEV_U32 = 0x080F, + NET2280_DEV_CFG_U32 = 0x088F, + NET2280_DEV_CFG_U16 = 0x0883 +}; + +#define P54U_FW_BLOCK 2048 + +#define X2_SIGNATURE "x2 " +#define X2_SIGNATURE_SIZE 4 + +struct x2_header { + u8 signature[X2_SIGNATURE_SIZE]; + __le32 fw_load_addr; + __le32 fw_length; + __le32 crc; +} __attribute__((packed)); + +/* pipes 3 and 4 are not used by the driver */ +#define P54U_PIPE_NUMBER 9 + +enum p54u_pipe_addr { + P54U_PIPE_DATA = 0x01, + P54U_PIPE_MGMT = 0x02, + P54U_PIPE_3 = 0x03, + P54U_PIPE_4 = 0x04, + P54U_PIPE_BRG = 0x0d, + P54U_PIPE_DEV = 0x0e, + P54U_PIPE_INT = 0x0f +}; + +struct p54u_rx_info { + struct urb *urb; + struct ieee80211_hw *dev; +}; + +struct p54u_priv { + struct p54_common common; + struct usb_device *udev; + enum { + P54U_NET2280 = 0, + P54U_3887 + } hw_type; + + spinlock_t lock; + struct sk_buff_head rx_queue; +}; + +#endif /* PRISM54USB_H */ diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 585f5996d292..6d80ca421cf0 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -1753,7 +1753,7 @@ prism54_get_oid(struct net_device *ndev, struct iw_request_info *info, int rvalue; enum oid_num_t n = dwrq->flags; - rvalue = mgt_get_request((islpci_private *) ndev->priv, n, 0, NULL, &r); + rvalue = mgt_get_request(netdev_priv(ndev), n, 0, NULL, &r); dwrq->length = 
mgt_response_to_str(n, &r, extra); if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32) kfree(r.ptr); @@ -1766,7 +1766,7 @@ prism54_set_u32(struct net_device *ndev, struct iw_request_info *info, { u32 oid = uwrq[0], u = uwrq[1]; - return mgt_set_request((islpci_private *) ndev->priv, oid, 0, &u); + return mgt_set_request(netdev_priv(ndev), oid, 0, &u); } static int @@ -1775,7 +1775,7 @@ prism54_set_raw(struct net_device *ndev, struct iw_request_info *info, { u32 oid = dwrq->flags; - return mgt_set_request((islpci_private *) ndev->priv, oid, 0, extra); + return mgt_set_request(netdev_priv(ndev), oid, 0, extra); } void @@ -2029,12 +2029,12 @@ static void format_event(islpci_private *priv, char *dest, const char *str, const struct obj_mlme *mlme, u16 *length, int error) { - const u8 *a = mlme->address; + DECLARE_MAC_BUF(mac); int n = snprintf(dest, IW_CUSTOM_MAX, - "%s %s %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X %s (%2.2X)", + "%s %s %s %s (%2.2X)", str, ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"), - a[0], a[1], a[2], a[3], a[4], a[5], + print_mac(mac, mlme->address), (error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ") : ""), mlme->code); BUG_ON(n > IW_CUSTOM_MAX); @@ -2105,15 +2105,13 @@ struct ieee80211_beacon_phdr { #define WLAN_EID_GENERIC 0xdd static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; -#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] -#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" - static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len) { struct list_head *ptr; struct islpci_bss_wpa_ie *bss = NULL; + DECLARE_MAC_BUF(mac); if (wpa_ie_len > MAX_WPA_IE_LEN) wpa_ie_len = MAX_WPA_IE_LEN; @@ -2154,8 +2152,8 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, bss->wpa_ie_len = wpa_ie_len; bss->last_update = jiffies; } else { - printk(KERN_DEBUG "Failed to add BSS WPA entry for " MACSTR - "\n", MAC2STR(bssid)); + printk(KERN_DEBUG "Failed to add BSS WPA entry for " + "%s\n", print_mac(mac, bssid)); } /* expire old entries from WPA list */ @@ -2221,6 +2219,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, { struct ieee80211_beacon_phdr *hdr; u8 *pos, *end; + DECLARE_MAC_BUF(mac); if (!priv->wpa) return; @@ -2231,7 +2230,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, while (pos < end) { if (pos + 2 + pos[1] > end) { printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed " - "for " MACSTR "\n", MAC2STR(addr)); + "for %s\n", print_mac(mac, addr)); return; } if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && @@ -2270,6 +2269,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, size_t len = 0; /* u16, better? */ u8 *payload = NULL, *pos = NULL; int ret; + DECLARE_MAC_BUF(mac); /* I think all trapable objects are listed here. * Some oids have a EX version. The difference is that they are emitted @@ -2358,14 +2358,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, break; memcpy(&confirm->address, mlmeex->address, ETH_ALEN); - printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", - mlmeex->address[0], - mlmeex->address[1], - mlmeex->address[2], - mlmeex->address[3], - mlmeex->address[4], - mlmeex->address[5] - ); + printk(KERN_DEBUG "Authenticate from: address:\t%s\n", + print_mac(mac, mlmeex->address)); confirm->id = -1; /* or mlmeex->id ? 
*/ confirm->state = 0; /* not used */ confirm->code = 0; @@ -2410,15 +2404,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { - printk(KERN_DEBUG "No WPA IE found from " - "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", - mlmeex->address[0], - mlmeex->address[1], - mlmeex->address[2], - mlmeex->address[3], - mlmeex->address[4], - mlmeex->address[5] - ); + printk(KERN_DEBUG "No WPA IE found from address:\t%s\n", + print_mac(mac, mlmeex->address)); kfree(confirm); break; } @@ -2454,15 +2441,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { - printk(KERN_DEBUG "No WPA IE found from " - "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", - mlmeex->address[0], - mlmeex->address[1], - mlmeex->address[2], - mlmeex->address[3], - mlmeex->address[4], - mlmeex->address[5] - ); + printk(KERN_DEBUG "No WPA IE found from address:\t%s\n", + print_mac(mac, mlmeex->address)); kfree(confirm); break; } @@ -3239,10 +3219,9 @@ static const iw_handler prism54_private_handler[] = { }; const struct iw_handler_def prism54_handler_def = { - .num_standard = sizeof (prism54_handler) / sizeof (iw_handler), - .num_private = sizeof (prism54_private_handler) / sizeof (iw_handler), - .num_private_args = - sizeof (prism54_private_args) / sizeof (struct iw_priv_args), + .num_standard = ARRAY_SIZE(prism54_handler), + .num_private = ARRAY_SIZE(prism54_private_handler), + .num_private_args = ARRAY_SIZE(prism54_private_args), .standard = (iw_handler *) prism54_handler, .private = (iw_handler *) prism54_private_handler, .private_args = (struct iw_priv_args *) prism54_private_args, diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 084795355b74..219dd651dc41 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -808,7 +808,6 @@ islpci_setup(struct pci_dev *pdev) if (!ndev) return ndev; - SET_MODULE_OWNER(ndev); pci_set_drvdata(pdev, ndev); #if defined(SET_NETDEV_DEV) SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c index 42780320cd5c..57a4ac34bed6 100644 --- a/drivers/net/wireless/prism54/oid_mgt.c +++ b/drivers/net/wireless/prism54/oid_mgt.c @@ -244,13 +244,11 @@ mgt_init(islpci_private *priv) /* Alloc the cache */ for (i = 0; i < OID_NUM_LAST; i++) { if (isl_oid[i].flags & OID_FLAG_CACHED) { - priv->mib[i] = kmalloc(isl_oid[i].size * + priv->mib[i] = kzalloc(isl_oid[i].size * (isl_oid[i].range + 1), GFP_KERNEL); if (!priv->mib[i]) return -ENOMEM; - memset(priv->mib[i], 0, - isl_oid[i].size * (isl_oid[i].range + 1)); } else priv->mib[i] = NULL; } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 3be624295a1f..f87fe10059ae 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -314,7 +314,7 @@ static int ray_probe(struct pcmcia_device *p_dev) if (!dev) goto fail_alloc_dev; - local = dev->priv; + local = netdev_priv(dev); local->finder = p_dev; /* The io structure describes IO port mapping. 
None used here */ @@ -356,7 +356,6 @@ static int ray_probe(struct pcmcia_device *p_dev) dev->set_multicast_list = &set_multicast_list; DEBUG(2,"ray_cs ray_attach calling ether_setup.)\n"); - SET_MODULE_OWNER(dev); dev->init = &ray_dev_init; dev->open = &ray_open; dev->stop = &ray_dev_close; @@ -388,7 +387,7 @@ static void ray_detach(struct pcmcia_device *link) ray_release(link); - local = (ray_dev_t *)dev->priv; + local = netdev_priv(dev); del_timer(&local->timer); if (link->priv) { @@ -412,7 +411,8 @@ static int ray_config(struct pcmcia_device *link) win_req_t req; memreq_t mem; struct net_device *dev = (struct net_device *)link->priv; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); + DECLARE_MAC_BUF(mac); DEBUG(1, "ray_config(0x%p)\n", link); @@ -483,10 +483,8 @@ static int ray_config(struct pcmcia_device *link) strcpy(local->node.dev_name, dev->name); link->dev_node = &local->node; - printk(KERN_INFO "%s: RayLink, irq %d, hw_addr ", - dev->name, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %s\n", + dev->name, dev->irq, print_mac(mac, dev->dev_addr)); return 0; @@ -520,7 +518,7 @@ static int ray_init(struct net_device *dev) int i; UCHAR *p; struct ccs __iomem *pccs; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; DEBUG(1, "ray_init(0x%p)\n", dev); if (!(pcmcia_dev_present(link))) { @@ -581,7 +579,7 @@ static int ray_init(struct net_device *dev) static int dl_startup_params(struct net_device *dev) { int ccsindex; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct ccs __iomem *pccs; struct pcmcia_device *link = local->finder; @@ -786,7 +784,7 @@ static void join_net(u_long data) static void ray_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); int i; DEBUG(1, "ray_release(0x%p)\n", link); @@ -834,7 +832,7 @@ int ray_dev_init(struct net_device *dev) #ifdef RAY_IMMEDIATE_INIT int i; #endif /* RAY_IMMEDIATE_INIT */ - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; DEBUG(1,"ray_dev_init(dev=%p)\n",dev); @@ -868,7 +866,7 @@ int ray_dev_init(struct net_device *dev) /*===========================================================================*/ static int ray_dev_config(struct net_device *dev, struct ifmap *map) { - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; /* Dummy routine to satisfy device structure */ DEBUG(1,"ray_dev_config(dev=%p,ifmap=%p)\n",dev,map); @@ -882,7 +880,7 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map) /*===========================================================================*/ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev) { - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; short length = skb->len; @@ -925,7 +923,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ray_hw_xmit(unsigned char* data, int len, struct net_device* dev, UCHAR msg_type) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct ccs __iomem *pccs; int ccsindex; int offset; @@ -1099,7 +1097,7 @@ static int ray_set_freq(struct 
net_device *dev, struct iw_freq *fwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ /* Reject if card is already initialised */ @@ -1124,7 +1122,7 @@ static int ray_get_freq(struct net_device *dev, struct iw_freq *fwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); fwrq->m = local->sparm.b5.a_hop_pattern; fwrq->e = 0; @@ -1140,7 +1138,7 @@ static int ray_set_essid(struct net_device *dev, struct iw_point *dwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); /* Reject if card is already initialised */ if(local->card_status != CARD_AWAITING_PARAM) @@ -1173,7 +1171,7 @@ static int ray_get_essid(struct net_device *dev, struct iw_point *dwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); /* Get the essid that was set */ memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE); @@ -1194,7 +1192,7 @@ static int ray_get_wap(struct net_device *dev, struct sockaddr *awrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); memcpy(awrq->sa_data, local->bss_id, ETH_ALEN); awrq->sa_family = ARPHRD_ETHER; @@ -1211,7 +1209,7 @@ static int ray_set_rate(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); /* Reject if card is already initialised */ if(local->card_status != CARD_AWAITING_PARAM) @@ -1240,7 +1238,7 @@ static int ray_get_rate(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); if(local->net_default_tx_rate == 3) vwrq->value = 2000000; /* Hum... 
*/ @@ -1260,7 +1258,7 @@ static int ray_set_rts(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int rthr = vwrq->value; /* Reject if card is already initialised */ @@ -1290,7 +1288,7 @@ static int ray_get_rts(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8) + local->sparm.b5.a_rts_threshold[1]; @@ -1309,7 +1307,7 @@ static int ray_set_frag(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int fthr = vwrq->value; /* Reject if card is already initialised */ @@ -1338,7 +1336,7 @@ static int ray_get_frag(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8) + local->sparm.b5.a_frag_threshold[1]; @@ -1357,7 +1355,7 @@ static int ray_set_mode(struct net_device *dev, __u32 *uwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ char card_mode = 1; @@ -1389,7 +1387,7 @@ static int ray_get_mode(struct net_device *dev, __u32 *uwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); if(local->sparm.b5.a_network_type) *uwrq = IW_MODE_INFRA; @@ -1492,7 +1490,7 @@ static int ray_commit(struct net_device *dev, */ static iw_stats * ray_get_wireless_stats(struct net_device * dev) { - ray_dev_t * local = (ray_dev_t *) dev->priv; + ray_dev_t * local = netdev_priv(dev); struct pcmcia_device *link = local->finder; struct status __iomem *p = local->sram + STATUS_BASE; @@ -1568,9 +1566,9 @@ static const struct iw_priv_args ray_private_args[] = { static const struct iw_handler_def ray_handler_def = { - .num_standard = sizeof(ray_handler)/sizeof(iw_handler), - .num_private = sizeof(ray_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(ray_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(ray_handler), + .num_private = ARRAY_SIZE(ray_private_handler), + .num_private_args = ARRAY_SIZE(ray_private_args), .standard = ray_handler, .private = ray_private_handler, .private_args = ray_private_args, @@ -1580,7 +1578,7 @@ static const struct iw_handler_def ray_handler_def = /*===========================================================================*/ static int ray_open(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link; link = local->finder; @@ -1614,7 +1612,7 @@ static int ray_open(struct net_device *dev) /*===========================================================================*/ static int ray_dev_close(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link; link = local->finder; @@ -1773,7 +1771,7 @@ static int parse_addr(char *in_str, UCHAR *out) /*===========================================================================*/ static struct net_device_stats *ray_get_stats(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; struct status __iomem *p = local->sram + STATUS_BASE; if 
(!(pcmcia_dev_present(link))) { @@ -1803,7 +1801,7 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev) /*===========================================================================*/ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; int ccsindex; int i; @@ -1840,7 +1838,7 @@ static void ray_update_multi_list(struct net_device *dev, int all) int ccsindex; struct ccs __iomem *pccs; int i = 0; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; void __iomem *p = local->sram + HOST_TO_ECF_BASE; @@ -1884,7 +1882,7 @@ static void ray_update_multi_list(struct net_device *dev, int all) /*===========================================================================*/ static void set_multicast_list(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); UCHAR promisc; DEBUG(2,"ray_cs set_multicast_list(%p)\n",dev); @@ -1935,7 +1933,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) DEBUG(4,"ray_cs: interrupt for *dev=%p\n",dev); - local = (ray_dev_t *)dev->priv; + local = netdev_priv(dev); link = (struct pcmcia_device *)local->finder; if (!pcmcia_dev_present(link)) { DEBUG(2,"ray_cs interrupt from device not present or suspended.\n"); @@ -2165,7 +2163,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i { struct sk_buff *skb = NULL; struct rcs __iomem *prcslink = prcs; - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); UCHAR *rx_ptr; int total_len; int tmp; @@ -2611,6 +2609,7 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) UCHAR *p; struct freq_hop_element *pfh; UCHAR c[33]; + DECLARE_MAC_BUF(mac); link = this_device; if (!link) @@ -2618,7 +2617,7 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) dev = (struct net_device *)link->priv; if (!dev) return 0; - local = (ray_dev_t *)dev->priv; + local = netdev_priv(dev); if (!local) return 0; @@ -2640,9 +2639,8 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) nettype[local->sparm.b5.a_network_type], c); p = local->bss_id; - len += sprintf(buf + len, - "BSSID = %02x:%02x:%02x:%02x:%02x:%02x\n", - p[0],p[1],p[2],p[3],p[4],p[5]); + len += sprintf(buf + len, "BSSID = %s\n", + print_mac(mac, p)); len += sprintf(buf + len, "Country code = %d\n", local->sparm.b5.a_curr_country_code); diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig new file mode 100644 index 000000000000..da05b1faf60d --- /dev/null +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -0,0 +1,130 @@ +config RT2X00 + tristate "Ralink driver support" + depends on MAC80211 && WLAN_80211 && EXPERIMENTAL + ---help--- + This will enable the experimental support for the Ralink drivers, + developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. + + These drivers will make use of the Devicescape ieee80211 stack. + + When building one of the individual drivers, the rt2x00 library + will also be created. That library (when the driver is built as + a module) will be called "rt2x00lib.ko". 
+ +config RT2X00_LIB + tristate + depends on RT2X00 + +config RT2X00_LIB_PCI + tristate + depends on RT2X00 + select RT2X00_LIB + +config RT2X00_LIB_USB + tristate + depends on RT2X00 + select RT2X00_LIB + +config RT2X00_LIB_FIRMWARE + boolean + depends on RT2X00_LIB + select CRC_ITU_T + select FW_LOADER + +config RT2X00_LIB_RFKILL + boolean + depends on RT2X00_LIB + select RFKILL + select INPUT_POLLDEV + +config RT2400PCI + tristate "Ralink rt2400 pci/pcmcia support" + depends on RT2X00 && PCI + select RT2X00_LIB_PCI + select EEPROM_93CX6 + ---help--- + This is an experimental driver for the Ralink rt2400 wireless chip. + + When compiled as a module, this driver will be called "rt2400pci.ko". + +config RT2400PCI_RFKILL + bool "RT2400 rfkill support" + depends on RT2400PCI + select RT2X00_LIB_RFKILL + ---help--- + This adds support for integrated rt2400 devices that feature a + hardware button to control the radio state. + This feature depends on the RF switch subsystem rfkill. + +config RT2500PCI + tristate "Ralink rt2500 pci/pcmcia support" + depends on RT2X00 && PCI + select RT2X00_LIB_PCI + select EEPROM_93CX6 + ---help--- + This is an experimental driver for the Ralink rt2500 wireless chip. + + When compiled as a module, this driver will be called "rt2500pci.ko". + +config RT2500PCI_RFKILL + bool "RT2500 rfkill support" + depends on RT2500PCI + select RT2X00_LIB_RFKILL + ---help--- + This adds support for integrated rt2500 devices that feature a + hardware button to control the radio state. + This feature depends on the RF switch subsystem rfkill. + +config RT61PCI + tristate "Ralink rt61 pci/pcmcia support" + depends on RT2X00 && PCI + select RT2X00_LIB_PCI + select RT2X00_LIB_FIRMWARE + select EEPROM_93CX6 + ---help--- + This is an experimental driver for the Ralink rt61 wireless chip. + + When compiled as a module, this driver will be called "rt61pci.ko". + +config RT61PCI_RFKILL + bool "RT61 rfkill support" + depends on RT61PCI + select RT2X00_LIB_RFKILL + ---help--- + This adds support for integrated rt61 devices that feature a + hardware button to control the radio state. + This feature depends on the RF switch subsystem rfkill. + +config RT2500USB + tristate "Ralink rt2500 usb support" + depends on RT2X00 && USB + select RT2X00_LIB_USB + ---help--- + This is an experimental driver for the Ralink rt2500 wireless chip. + + When compiled as a module, this driver will be called "rt2500usb.ko". + +config RT73USB + tristate "Ralink rt73 usb support" + depends on RT2X00 && USB + select RT2X00_LIB_USB + select RT2X00_LIB_FIRMWARE + ---help--- + This is an experimental driver for the Ralink rt73 wireless chip. + + When compiled as a module, this driver will be called "rt73usb.ko". + +config RT2X00_LIB_DEBUGFS + bool "Ralink debugfs support" + depends on RT2X00_LIB && MAC80211_DEBUGFS + ---help--- + Enable creation of debugfs files for the rt2x00 drivers. + These debugfs files support both reading and writing of the + most important register types of the rt2x00 devices. 
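+# The library symbols above (RT2X00_LIB*) have no prompt and are not meant to be selected by hand: a typical modular build would, for example, set CONFIG_RT2400PCI=m and let its select statements pull in RT2X00_LIB_PCI, RT2X00_LIB and EEPROM_93CX6, while the *_RFKILL options and the firmware-loading drivers (rt61pci, rt73usb) enable the rfkill and firmware helpers in the same way.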
+ +config RT2X00_DEBUG + bool "Ralink debug output" + depends on RT2X00_LIB + ---help--- + Enable debugging output for all rt2x00 modules + diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile new file mode 100644 index 000000000000..30d654a42eea --- /dev/null +++ b/drivers/net/wireless/rt2x00/Makefile @@ -0,0 +1,22 @@ +rt2x00lib-objs := rt2x00dev.o rt2x00mac.o rt2x00config.o + +ifeq ($(CONFIG_RT2X00_LIB_DEBUGFS),y) + rt2x00lib-objs += rt2x00debug.o +endif + +ifeq ($(CONFIG_RT2X00_LIB_RFKILL),y) + rt2x00lib-objs += rt2x00rfkill.o +endif + +ifeq ($(CONFIG_RT2X00_LIB_FIRMWARE),y) + rt2x00lib-objs += rt2x00firmware.o +endif + +obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o +obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o +obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o +obj-$(CONFIG_RT2400PCI) += rt2400pci.o +obj-$(CONFIG_RT2500PCI) += rt2500pci.o +obj-$(CONFIG_RT61PCI) += rt61pci.o +obj-$(CONFIG_RT2500USB) += rt2500usb.o +obj-$(CONFIG_RT73USB) += rt73usb.o diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c new file mode 100644 index 000000000000..31c1dd271627 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -0,0 +1,1664 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2400pci + Abstract: rt2400pci device specific routines. + Supported chipsets: RT2460. + */ + +/* + * Set environment defines for rt2x00.h + */ +#define DRV_NAME "rt2400pci" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/eeprom_93cx6.h> + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2400pci.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00pci_register_read and rt2x00pci_register_write. + * BBP and RF registers require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try at most REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attempt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error.
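+ * For example, a single indirect access will therefore busy-wait for at most roughly REGISTER_BUSY_COUNT * REGISTER_BUSY_DELAY microseconds before the error path is taken.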
+ */ +static u32 rt2400pci_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, BBPCSR, &reg); + if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt2400pci_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2400pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field32(&reg, BBPCSR_VALUE, value); + rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); + rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); + rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); +} + +static void rt2400pci_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2400pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); + rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); + rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2400pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, BBPCSR_VALUE); +} + +static void rt2400pci_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, RFCSR, &reg); + if (!rt2x00_get_field32(reg, RFCSR_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "RFCSR register busy.
Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(&reg, RFCSR_VALUE, value); + rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20); + rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); + rt2x00_set_field32(&reg, RFCSR_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, RFCSR, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR21, &reg); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); +} + +static void rt2400pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, CSR21, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt2400pci_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt2400pci_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt2400pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2400pci_read_csr, + .write = rt2400pci_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2400pci_bbp_read, + .write = rt2400pci_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2400pci_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +#ifdef CONFIG_RT2400PCI_RFKILL +static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); + return rt2x00_get_field32(reg, GPIOCSR_BIT0); +} +#else +#define rt2400pci_rfkill_poll NULL +#endif /* CONFIG_RT2400PCI_RFKILL */ + +/* + * Configuration handlers.
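+ * These handlers are called through the rt2x00lib layer when mac80211 reconfigures the interface (MAC address, BSSID, interface type, channel, TX power, antenna); each one only translates the request into the corresponding CSR, BBP or RF register writes.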
+ */ +static void rt2400pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, + __le32 *mac) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR3, mac, + (2 * sizeof(__le32))); +} + +static void rt2400pci_config_bssid(struct rt2x00_dev *rt2x00dev, + __le32 *bssid) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR5, bssid, + (2 * sizeof(__le32))); +} + +static void rt2400pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Enable beacon config + */ + rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); + rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, + PREAMBLE + get_duration(IEEE80211_HEADER, 20)); + rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); + + /* + * Enable synchronisation. + */ + rt2x00pci_register_read(rt2x00dev, CSR14, &reg); + rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(&reg, CSR14_TBCN, 1); + rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(&reg, CSR14_TSF_SYNC, tsf_sync); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); +} + +static void rt2400pci_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + int preamble_mask; + u32 reg; + + /* + * When short preamble is enabled, we should set bit 0x08 + */ + preamble_mask = short_preamble << 3; + + rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); + rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, ack_timeout); + rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, ack_consume_time); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); + rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00 | preamble_mask); + rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); + rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); + rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); + rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); + rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); + rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 20)); + rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); + rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); + rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); + rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 55)); + rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); + rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); + rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); + rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 110)); + rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); +} + +static void rt2400pci_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt2x00pci_register_write(rt2x00dev, ARCSR1, basic_rate_mask); +} + +static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf) +{ + /* + * Switch on tuning bits. + */ + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * The RF2420 chipset doesn't need any additional actions. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2420)) + return; + + /* + * For the RF2421 chipset we need to write an invalid + * reference clock rate to activate auto_tune. + * After that we set the value back to the correct channel.
+ */ + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, 0x000c2a32); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + msleep(1); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + msleep(1); + + /* + * Switch off tuning bits. + */ + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * Clear false CRC during channel switch. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); +} + +static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower) +{ + rt2400pci_bbp_write(rt2x00dev, 3, TXPOWER_TO_DEV(txpower)); +} + +static void rt2400pci_config_antenna(struct rt2x00_dev *rt2x00dev, + int antenna_tx, int antenna_rx) +{ + u8 r1; + u8 r4; + + rt2400pci_bbp_read(rt2x00dev, 4, &r4); + rt2400pci_bbp_read(rt2x00dev, 1, &r1); + + /* + * Configure the TX antenna. + */ + switch (antenna_tx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + } + + rt2400pci_bbp_write(rt2x00dev, 4, r4); + rt2400pci_bbp_write(rt2x00dev, 1, r1); +} + +static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, &reg); + rt2x00_set_field32(&reg, CSR11_SLOT_TIME, libconf->slot_time); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR18, &reg); + rt2x00_set_field32(&reg, CSR18_SIFS, libconf->sifs); + rt2x00_set_field32(&reg, CSR18_PIFS, libconf->pifs); + rt2x00pci_register_write(rt2x00dev, CSR18, reg); + + rt2x00pci_register_read(rt2x00dev, CSR19, &reg); + rt2x00_set_field32(&reg, CSR19_DIFS, libconf->difs); + rt2x00_set_field32(&reg, CSR19_EIFS, libconf->eifs); + rt2x00pci_register_write(rt2x00dev, CSR19, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); + rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR12, &reg); + rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, + libconf->conf->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, CSR12, reg); +} + +static void rt2400pci_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt2400pci_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt2400pci_config_channel(rt2x00dev, &libconf->rf); + if (flags & CONFIG_UPDATE_TXPOWER) + rt2400pci_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt2400pci_config_antenna(rt2x00dev, + libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME |
CONFIG_UPDATE_BEACON_INT)) + rt2400pci_config_duration(rt2x00dev, libconf); +} + +static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev, + struct ieee80211_tx_queue_params *params) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, &reg); + rt2x00_set_field32(&reg, CSR11_CWMIN, params->cw_min); + rt2x00_set_field32(&reg, CSR11_CWMAX, params->cw_max); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); +} + +/* + * LED functions. + */ +static void rt2400pci_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg); + + rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 70); + rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 30); + + if (rt2x00dev->led_mode == LED_MODE_TXRX_ACTIVITY) { + rt2x00_set_field32(&reg, LEDCSR_LINK, 1); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0); + } else if (rt2x00dev->led_mode == LED_MODE_ASUS) { + rt2x00_set_field32(&reg, LEDCSR_LINK, 0); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 1); + } else { + rt2x00_set_field32(&reg, LEDCSR_LINK, 1); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 1); + } + + rt2x00pci_register_write(rt2x00dev, LEDCSR, reg); +} + +static void rt2400pci_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg); + rt2x00_set_field32(&reg, LEDCSR_LINK, 0); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0); + rt2x00pci_register_write(rt2x00dev, LEDCSR, reg); +} + +/* + * Link tuning + */ +static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u8 bbp; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &reg); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2400pci_bbp_read(rt2x00dev, 39, &bbp); + rt2x00dev->link.false_cca = bbp; +} + +static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt2400pci_bbp_write(rt2x00dev, 13, 0x08); + rt2x00dev->link.vgc_level = 0x08; +} + +static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + u8 reg; + + /* + * The link tuner should not run longer than 60 seconds, + * and should run once every 2 seconds. + */ + if (rt2x00dev->link.count > 60 || !(rt2x00dev->link.count & 1)) + return; + + /* + * Base r13 link tuning on the false cca count. + */ + rt2400pci_bbp_read(rt2x00dev, 13, &reg); + + if (rt2x00dev->link.false_cca > 512 && reg < 0x20) { + rt2400pci_bbp_write(rt2x00dev, 13, ++reg); + rt2x00dev->link.vgc_level = reg; + } else if (rt2x00dev->link.false_cca < 100 && reg > 0x08) { + rt2400pci_bbp_write(rt2x00dev, 13, --reg); + rt2x00dev->link.vgc_level = reg; + } +} + +/* + * Initialization functions.
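+ * Each RX descriptor is pre-loaded below with its DMA buffer address and length and handed to the NIC by setting RXD_W0_OWNER_NIC; TX descriptors start out owned by the host with the VALID bit cleared until a frame is queued.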
+ */ +static void rt2400pci_init_rxring(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_desc *rxd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + rxd = ring->entry[i].priv; + + rt2x00_desc_read(rxd, 2, &word); + rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, + ring->data_size); + rt2x00_desc_write(rxd, 2, word); + + rt2x00_desc_read(rxd, 1, &word); + rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(rxd, 1, word); + + rt2x00_desc_read(rxd, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_clear(rt2x00dev->rx); +} + +static void rt2400pci_init_txring(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_desc *txd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + txd = ring->entry[i].priv; + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, + ring->data_size); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(txd, 0, word); + } + + rt2x00_ring_index_clear(ring); +} + +static int rt2400pci_init_rings(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize rings. + */ + rt2400pci_init_rxring(rt2x00dev); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + /* + * Initialize registers. 
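+ * TXCSR2 to TXCSR6 and RXCSR1/RXCSR2 describe each ring to the NIC: descriptor size, number of descriptors and the DMA base address of the ring.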
+ */ + rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); + rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size); + rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); + rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, + rt2x00dev->bcn[1].stats.limit); + rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); + rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); + rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); + rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); + rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, + rt2x00dev->bcn[1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); + rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, + rt2x00dev->bcn[0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); + rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); + rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit); + rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); + rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, + rt2x00dev->rx->data_dma); + rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); + + return 0; +} + +static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); + rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00023f20); + rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); + + rt2x00pci_register_read(rt2x00dev, TIMECSR, &reg); + rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0); + rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR9, &reg); + rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT, + (rt2x00dev->rx->data_size / 128)); + rt2x00pci_register_write(rt2x00dev, CSR9, reg); + + rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000); + + rt2x00pci_register_read(rt2x00dev, ARCSR0, &reg); + rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA0, 133); + rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID0, 134); + rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA1, 136); + rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID1, 135); + rt2x00pci_register_write(rt2x00dev, ARCSR0, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR3, &reg); + rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 3); /* Tx power.*/ + rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 32); /* Signal */ + rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 36); /* Rssi */ + rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00217223); + rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); + + rt2x00pci_register_read(rt2x00dev, MACCSR2,
&reg); + rt2x00_set_field32(&reg, MACCSR2_DELAY, 64); + rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RALINKCSR, &reg); + rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17); + rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 154); + rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0); + rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 154); + rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, &reg); + rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1); + rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0); + rt2x00_set_field32(&reg, CSR1_HOST_READY, 0); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, &reg); + rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0); + rt2x00_set_field32(&reg, CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + /* + * We must clear the FCS and FIFO error count. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &reg); + rt2x00pci_register_read(rt2x00dev, CNT4, &reg); + + return 0; +} + +static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2400pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2400pci_bbp_write(rt2x00dev, 1, 0x00); + rt2400pci_bbp_write(rt2x00dev, 3, 0x27); + rt2400pci_bbp_write(rt2x00dev, 4, 0x08); + rt2400pci_bbp_write(rt2x00dev, 10, 0x0f); + rt2400pci_bbp_write(rt2x00dev, 15, 0x72); + rt2400pci_bbp_write(rt2x00dev, 16, 0x74); + rt2400pci_bbp_write(rt2x00dev, 17, 0x20); + rt2400pci_bbp_write(rt2x00dev, 18, 0x72); + rt2400pci_bbp_write(rt2x00dev, 19, 0x0b); + rt2400pci_bbp_write(rt2x00dev, 20, 0x00); + rt2400pci_bbp_write(rt2x00dev, 28, 0x11); + rt2400pci_bbp_write(rt2x00dev, 29, 0x04); + rt2400pci_bbp_write(rt2x00dev, 30, 0x21); + rt2400pci_bbp_write(rt2x00dev, 31, 0x00); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt2400pci_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg); + rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt status register + * should first be cleared to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, CSR7, &reg); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + } + + /* + * Only toggle the interrupt bits we are going to use.
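+ * The CSR8 fields act as mask bits: writing 1 masks (disables) an interrupt, which is why mask is 1 when the requested state is STATE_RADIO_IRQ_OFF and 0 when the interrupts are being enabled.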
* Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, CSR8, &reg); + rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); + rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); + rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask); + rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); + rt2x00_set_field32(&reg, CSR8_RXDONE, mask); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); +} + +static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (rt2400pci_init_rings(rt2x00dev) || + rt2400pci_init_registers(rt2x00dev) || + rt2400pci_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + /* + * Enable interrupts. + */ + rt2400pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); + + /* + * Enable LED + */ + rt2400pci_enable_led(rt2x00dev); + + return 0; +} + +static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Disable LED + */ + rt2400pci_disable_led(rt2x00dev); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); + + /* + * Disable synchronisation. + */ + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Cancel RX and TX. + */ + rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); + rt2x00_set_field32(&reg, TXCSR0_ABORT, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); + + /* + * Disable interrupts. + */ + rt2400pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_OFF); +} + +static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); + rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1); + rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state); + rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state); + rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state.
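+ * (The loop below retries up to REGISTER_BUSY_COUNT times with an msleep(10) between reads, so the device gets roughly REGISTER_BUSY_COUNT * 10 ms to reach the requested state before -EBUSY is returned.)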
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); + bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state: bbp %d and rf %d.\n", + state, bbp_state, rf_state); + + return -EBUSY; +} + +static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2400pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2400pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt2400pci_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2400pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + u32 signal = 0; + u32 service = 0; + u32 length_high = 0; + u32 length_low = 0; + + /* + * The PLCP values should be treated as if they + * were BBP values. + */ + rt2x00_set_field32(&signal, BBPCSR_VALUE, desc->signal); + rt2x00_set_field32(&signal, BBPCSR_REGNUM, 5); + rt2x00_set_field32(&signal, BBPCSR_BUSY, 1); + + rt2x00_set_field32(&service, BBPCSR_VALUE, desc->service); + rt2x00_set_field32(&service, BBPCSR_REGNUM, 6); + rt2x00_set_field32(&service, BBPCSR_BUSY, 1); + + rt2x00_set_field32(&length_high, BBPCSR_VALUE, desc->length_high); + rt2x00_set_field32(&length_high, BBPCSR_REGNUM, 7); + rt2x00_set_field32(&length_high, BBPCSR_BUSY, 1); + + rt2x00_set_field32(&length_low, BBPCSR_VALUE, desc->length_low); + rt2x00_set_field32(&length_low, BBPCSR_REGNUM, 8); + rt2x00_set_field32(&length_low, BBPCSR_BUSY, 1); + + /* + * Start writing the descriptor words.
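+ * Word 1 (the DMA buffer address) was already programmed by rt2400pci_init_txring(), so only words 2, 3, 4 and finally word 0 are updated here; word 0 is written last because it carries the OWNER_NIC bit that hands the descriptor over to the hardware.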
+ */ + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, length); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, service); + rt2x00_desc_write(txd, 3, word); + + rt2x00_desc_read(txd, 4, &word); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, length_low); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, length_high); + rt2x00_desc_write(txd, 4, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_RTS, + test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue == IEEE80211_TX_QUEUE_BEACON) { + rt2x00pci_register_read(rt2x00dev, CSR14, &reg); + if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { + rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); + if (queue == IEEE80211_TX_QUEUE_DATA0) + rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA1) + rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1); + else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); +} + +/* + * RX control handlers + */ +static void rt2400pci_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = entry->priv; + u32 word0; + u32 word2; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 2, &word2); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + desc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); + desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - + entry->ring->rt2x00dev->rssi_offset; + desc->ofdm = 0; + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); +} + +/* + * Interrupt functions. + */ +static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_entry *entry; + struct data_desc *txd; + u32 word; + int tx_status; + int retry; + + while (!rt2x00_ring_empty(ring)) { + entry = rt2x00_get_data_entry_done(ring); + txd = entry->priv; + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + break; + + /* + * Obtain the status about this packet.
+ */ + tx_status = rt2x00_get_field32(word, TXD_W0_RESULT); + retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); + + rt2x00lib_txdone(entry, tx_status, retry); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_desc_write(txd, 0, word); + rt2x00_ring_index_done_inc(ring); + } + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + entry = ring->entry; + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); +} + +static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & save them to a local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, &reg); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Beacon timer expired interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 2 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 3 - Atim ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) + rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + + /* + * 4 - Priority ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) + rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + + /* + * 5 - Tx ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) + rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + + rt2x00pci_register_read(rt2x00dev, CSR21, &reg); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2400pci_eepromregister_read; + eeprom.register_write = rt2400pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + ERROR(rt2x00dev, "Invalid EEPROM data detected.\n"); + return -EINVAL; + } + + return 0; +} + +static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset.
+ */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2460, value, reg); + + if (!rt2x00_rf(&rt2x00dev->chip, RF2420) && + !rt2x00_rf(&rt2x00dev->chip, RF2421)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ + rt2x00dev->led_mode = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + /* + * Detect if this device has an hardware controlled radio. + */ +#ifdef CONFIG_RT2400PCI_RFKILL + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); +#endif /* CONFIG_RT2400PCI_RFKILL */ + + /* + * Check if the BBP tuning should be enabled. + */ + if (!rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + return 0; +} + +/* + * RF value list for RF2420 & RF2421 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg[] = { + { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00022058, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00022058, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00022058, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00022058, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00022058, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00022058, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00022058, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00022058, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00022058, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00022058, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00022058, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 }, +}; + +static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = 0; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 2; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 1; + spec->num_rates = 4; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + spec->num_channels = ARRAY_SIZE(rf_vals_bg); + spec->channels = rf_vals_bg; +} + +static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2400pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2400pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt2400pci_probe_hw_mode(rt2x00dev); + + /* + * This device requires the beacon ring + */ + __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Set the rssi offset. 
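rt2400pci_init_eeprom() above pulls all of its configuration out of the single EEPROM_ANTENNA word. Using the FIELD16 masks from rt2400pci.h later in this patch and a hypothetical word value, the decode looks like this (illustration only; rt2x00_get_field16() belongs to the rt2x00 core and is assumed here to be a plain mask-and-shift):

#include <stdio.h>
#include <stdint.h>

/* Masks copied from the EEPROM_ANTENNA definitions in rt2400pci.h. */
#define EEPROM_ANTENNA_NUM		0x0003
#define EEPROM_ANTENNA_TX_DEFAULT	0x000c
#define EEPROM_ANTENNA_RX_DEFAULT	0x0030
#define EEPROM_ANTENNA_RF_TYPE		0x0040
#define EEPROM_ANTENNA_LED_MODE		0x0180
#define EEPROM_ANTENNA_HARDWARE_RADIO	0x0400

static unsigned int field16(uint16_t word, uint16_t mask)
{
	return (word & mask) >> __builtin_ctz(mask);	/* assumed get_field16 behaviour */
}

int main(void)
{
	uint16_t word = 0x0446;		/* hypothetical EEPROM contents */

	printf("antennas=%u tx_default=%u rx_default=%u\n",
	       field16(word, EEPROM_ANTENNA_NUM),
	       field16(word, EEPROM_ANTENNA_TX_DEFAULT),
	       field16(word, EEPROM_ANTENNA_RX_DEFAULT));
	printf("rf_type=%u (RF2421) led_mode=%u hw_radio=%u\n",
	       field16(word, EEPROM_ANTENNA_RF_TYPE),
	       field16(word, EEPROM_ANTENNA_LED_MODE),
	       field16(word, EEPROM_ANTENNA_HARDWARE_RADIO));
	return 0;
}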
+ */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt2400pci_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * since there is no filter for it at this time. + */ + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_LONG_RETRY, long_retry); + rt2x00_set_field32(®, CSR11_SHORT_RETRY, short_retry); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + return 0; +} + +static int rt2400pci_conf_tx(struct ieee80211_hw *hw, + int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * We don't support variating cw_min and cw_max variables + * per queue. So by default we only configure the TX queue, + * and ignore all other configurations. + */ + if (queue != IEEE80211_TX_QUEUE_DATA0) + return -EINVAL; + + if (rt2x00mac_conf_tx(hw, queue, params)) + return -EINVAL; + + /* + * Write configuration to register. 
+ */ + rt2400pci_config_cw(rt2x00dev, &rt2x00dev->tx->tx_params); + + return 0; +} + +static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR17, ®); + tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, CSR16, ®); + tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); + + return tsf; +} + +static void rt2400pci_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00pci_register_write(rt2x00dev, CSR16, 0); + rt2x00pci_register_write(rt2x00dev, CSR17, 0); +} + +static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR15, ®); + return rt2x00_get_field32(reg, CSR15_BEACON_SENT); +} + +static const struct ieee80211_ops rt2400pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt2400pci_configure_filter, + .get_stats = rt2x00mac_get_stats, + .set_retry_limit = rt2400pci_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2400pci_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt2400pci_get_tsf, + .reset_tsf = rt2400pci_reset_tsf, + .beacon_update = rt2x00pci_beacon_update, + .tx_last_beacon = rt2400pci_tx_last_beacon, +}; + +static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { + .irq_handler = rt2400pci_interrupt, + .probe_hw = rt2400pci_probe_hw, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .set_device_state = rt2400pci_set_device_state, + .rfkill_poll = rt2400pci_rfkill_poll, + .link_stats = rt2400pci_link_stats, + .reset_tuner = rt2400pci_reset_tuner, + .link_tuner = rt2400pci_link_tuner, + .write_tx_desc = rt2400pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .kick_tx_queue = rt2400pci_kick_tx_queue, + .fill_rxdone = rt2400pci_fill_rxdone, + .config_mac_addr = rt2400pci_config_mac_addr, + .config_bssid = rt2400pci_config_bssid, + .config_type = rt2400pci_config_type, + .config_preamble = rt2400pci_config_preamble, + .config = rt2400pci_config, +}; + +static const struct rt2x00_ops rt2400pci_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt2400pci_rt2x00_ops, + .hw = &rt2400pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2400pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT2400pci module information. 
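rt2400pci_get_tsf() above reconstructs the 64-bit TSF from two 32-bit registers, reading the high word (CSR17) before the low word (CSR16). The composition itself is just a shift and an OR; a standalone check with hypothetical register contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t csr17_high = 0x00000002;	/* hypothetical CSR17_HIGH_TSFTIMER */
	uint32_t csr16_low  = 0x80000010;	/* hypothetical CSR16_LOW_TSFTIMER */

	/* Same arithmetic as rt2400pci_get_tsf(). */
	uint64_t tsf = ((uint64_t)csr17_high << 32) | csr16_low;

	printf("tsf = 0x%016llx\n", (unsigned long long)tsf);	/* 0x0000000280000010 */
	return 0;
}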
+ */ +static struct pci_device_id rt2400pci_device_table[] = { + { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt2400pci_driver = { + .name = DRV_NAME, + .id_table = rt2400pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt2400pci_init(void) +{ + return pci_register_driver(&rt2400pci_driver); +} + +static void __exit rt2400pci_exit(void) +{ + pci_unregister_driver(&rt2400pci_driver); +} + +module_init(rt2400pci_init); +module_exit(rt2400pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h new file mode 100644 index 000000000000..ae22501f085d --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2400pci.h @@ -0,0 +1,943 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2400pci + Abstract: Data structures and registers for the rt2400pci module. + Supported chipsets: RT2460. + */ + +#ifndef RT2400PCI_H +#define RT2400PCI_H + +/* + * RF chip defines. + */ +#define RF2420 0x0000 +#define RF2421 0x0001 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 100 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0000 +#define CSR_REG_SIZE 0x014c +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_SIZE 0x0020 +#define RF_SIZE 0x0010 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR0: ASIC revision number. + */ +#define CSR0 0x0000 + +/* + * CSR1: System control register. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define CSR1 0x0004 +#define CSR1_SOFT_RESET FIELD32(0x00000001) +#define CSR1_BBP_RESET FIELD32(0x00000002) +#define CSR1_HOST_READY FIELD32(0x00000004) + +/* + * CSR2: System admin status register (invalid). + */ +#define CSR2 0x0008 + +/* + * CSR3: STA MAC address register 0. + */ +#define CSR3 0x000c +#define CSR3_BYTE0 FIELD32(0x000000ff) +#define CSR3_BYTE1 FIELD32(0x0000ff00) +#define CSR3_BYTE2 FIELD32(0x00ff0000) +#define CSR3_BYTE3 FIELD32(0xff000000) + +/* + * CSR4: STA MAC address register 1. 
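This header describes every register field as a FIELD32(mask) value which the driver then manipulates with rt2x00_get_field32()/rt2x00_set_field32(). Those helpers live in the rt2x00 core headers, not in this hunk; the sketch below assumes they are simple mask-and-shift operations and uses the CSR3 MAC-address byte fields above as an example (illustration only):

#include <stdio.h>
#include <stdint.h>

/* Assumed behaviour of the rt2x00 field helpers: each FIELD32() define is a
 * plain bit mask, and set/get shift by the mask's lowest set bit. */
#define CSR3_BYTE0 0x000000ff
#define CSR3_BYTE1 0x0000ff00
#define CSR3_BYTE2 0x00ff0000
#define CSR3_BYTE3 0xff000000

static void field_set(uint32_t *reg, uint32_t mask, uint32_t value)
{
	*reg = (*reg & ~mask) | ((value << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	const uint8_t mac[4] = { 0x00, 0x0c, 0x43, 0x12 };	/* hypothetical MAC bytes */
	uint32_t csr3 = 0;

	field_set(&csr3, CSR3_BYTE0, mac[0]);
	field_set(&csr3, CSR3_BYTE1, mac[1]);
	field_set(&csr3, CSR3_BYTE2, mac[2]);
	field_set(&csr3, CSR3_BYTE3, mac[3]);

	printf("CSR3 = 0x%08x\n", csr3);	/* 0x12430c00 */
	return 0;
}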
+ */ +#define CSR4 0x0010 +#define CSR4_BYTE4 FIELD32(0x000000ff) +#define CSR4_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR5: BSSID register 0. + */ +#define CSR5 0x0014 +#define CSR5_BYTE0 FIELD32(0x000000ff) +#define CSR5_BYTE1 FIELD32(0x0000ff00) +#define CSR5_BYTE2 FIELD32(0x00ff0000) +#define CSR5_BYTE3 FIELD32(0xff000000) + +/* + * CSR6: BSSID register 1. + */ +#define CSR6 0x0018 +#define CSR6_BYTE4 FIELD32(0x000000ff) +#define CSR6_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR7: Interrupt source register. + * Write 1 to clear interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + */ +#define CSR7 0x001c +#define CSR7_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR7_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR7_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR7_TXDONE_TXRING FIELD32(0x00000008) +#define CSR7_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR7_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR7_RXDONE FIELD32(0x00000040) + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + */ +#define CSR8 0x0020 +#define CSR8_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR8_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR8_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR8_TXDONE_TXRING FIELD32(0x00000008) +#define CSR8_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR8_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR8_RXDONE FIELD32(0x00000040) + +/* + * CSR9: Maximum frame length register. + * MAX_FRAME_UNIT: Maximum frame length in 128b unit, default: 12. + */ +#define CSR9 0x0024 +#define CSR9_MAX_FRAME_UNIT FIELD32(0x00000f80) + +/* + * CSR11: Back-off control register. + * CWMIN: CWmin. Default cwmin is 31 (2^5 - 1). + * CWMAX: CWmax. Default cwmax is 1023 (2^10 - 1). + * SLOT_TIME: Slot time, default is 20us for 802.11b. + * LONG_RETRY: Long retry count. + * SHORT_RETRY: Short retry count. + */ +#define CSR11 0x002c +#define CSR11_CWMIN FIELD32(0x0000000f) +#define CSR11_CWMAX FIELD32(0x000000f0) +#define CSR11_SLOT_TIME FIELD32(0x00001f00) +#define CSR11_LONG_RETRY FIELD32(0x00ff0000) +#define CSR11_SHORT_RETRY FIELD32(0xff000000) + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + * BEACON_INTERVAL: Beacon interval, default is 100 TU. + * CFPMAX_DURATION: Cfp maximum duration, default is 100 TU. + */ +#define CSR12 0x0030 +#define CSR12_BEACON_INTERVAL FIELD32(0x0000ffff) +#define CSR12_CFP_MAX_DURATION FIELD32(0xffff0000) + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + * ATIMW_DURATION: Atim window duration. + * CFP_PERIOD: Cfp period, default is 0 TU. + */ +#define CSR13 0x0034 +#define CSR13_ATIMW_DURATION FIELD32(0x0000ffff) +#define CSR13_CFP_PERIOD FIELD32(0x00ff0000) + +/* + * CSR14: Synchronization control register. + * TSF_COUNT: Enable tsf auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. 
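Two unit conventions recur in these definitions: times expressed in TU (1 TU == 1024 us, per the note above) or fractions of a TU, and lengths expressed in fixed-size units. A couple of worked numbers, using the CSR9 and CSR12 defaults quoted above together with the conversions the rt2500pci code applies later in this patch (beacon_int * 16 and data_size / 128):

#include <stdio.h>

int main(void)
{
	unsigned int beacon_int_tu = 100;	/* CSR12 default: 100 TU */

	/* CSR12_BEACON_INTERVAL is programmed in 1/16 TU, hence the
	 * beacon_int * 16 write in rt2500pci_config_duration(). */
	printf("CSR12 field: %u (1/16 TU), interval: %u us\n",
	       beacon_int_tu * 16, beacon_int_tu * 1024);	/* 1600, 102400 */

	/* CSR9_MAX_FRAME_UNIT counts 128-byte units (rt2500pci programs
	 * data_size / 128), so the default of 12 allows 1536-byte frames. */
	printf("max frame: %u bytes\n", 12 * 128);
	return 0;
}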
+ * TBCN: Enable tbcn with reload value. + * TCFP: Enable tcfp & cfp / cp switching. + * TATIMW: Enable tatimw & atim window switching. + * BEACON_GEN: Enable beacon generator. + * CFP_COUNT_PRELOAD: Cfp count preload value. + * TBCM_PRELOAD: Tbcn preload value in units of 64us. + */ +#define CSR14 0x0038 +#define CSR14_TSF_COUNT FIELD32(0x00000001) +#define CSR14_TSF_SYNC FIELD32(0x00000006) +#define CSR14_TBCN FIELD32(0x00000008) +#define CSR14_TCFP FIELD32(0x00000010) +#define CSR14_TATIMW FIELD32(0x00000020) +#define CSR14_BEACON_GEN FIELD32(0x00000040) +#define CSR14_CFP_COUNT_PRELOAD FIELD32(0x0000ff00) +#define CSR14_TBCM_PRELOAD FIELD32(0xffff0000) + +/* + * CSR15: Synchronization status register. + * CFP: ASIC is in contention-free period. + * ATIMW: ASIC is in ATIM window. + * BEACON_SENT: Beacon is send. + */ +#define CSR15 0x003c +#define CSR15_CFP FIELD32(0x00000001) +#define CSR15_ATIMW FIELD32(0x00000002) +#define CSR15_BEACON_SENT FIELD32(0x00000004) + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16 0x0040 +#define CSR16_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17 0x0044 +#define CSR17_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR18: IFS timer register 0. + * SIFS: Sifs, default is 10 us. + * PIFS: Pifs, default is 30 us. + */ +#define CSR18 0x0048 +#define CSR18_SIFS FIELD32(0x0000ffff) +#define CSR18_PIFS FIELD32(0xffff0000) + +/* + * CSR19: IFS timer register 1. + * DIFS: Difs, default is 50 us. + * EIFS: Eifs, default is 364 us. + */ +#define CSR19 0x004c +#define CSR19_DIFS FIELD32(0x0000ffff) +#define CSR19_EIFS FIELD32(0xffff0000) + +/* + * CSR20: Wakeup timer register. + * DELAY_AFTER_TBCN: Delay after tbcn expired in units of 1/16 TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTOWAKE: Enable auto wakeup / sleep mechanism. + */ +#define CSR20 0x0050 +#define CSR20_DELAY_AFTER_TBCN FIELD32(0x0000ffff) +#define CSR20_TBCN_BEFORE_WAKEUP FIELD32(0x00ff0000) +#define CSR20_AUTOWAKE FIELD32(0x01000000) + +/* + * CSR21: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + */ +#define CSR21 0x0054 +#define CSR21_RELOAD FIELD32(0x00000001) +#define CSR21_EEPROM_DATA_CLOCK FIELD32(0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(0x00000010) +#define CSR21_TYPE_93C46 FIELD32(0x00000020) + +/* + * CSR22: CFP control register. + * CFP_DURATION_REMAIN: Cfp duration remain, in units of TU. + * RELOAD_CFP_DURATION: Write 1 to reload cfp duration remain. + */ +#define CSR22 0x0058 +#define CSR22_CFP_DURATION_REMAIN FIELD32(0x0000ffff) +#define CSR22_RELOAD_CFP_DURATION FIELD32(0x00010000) + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + * KICK_TX: Kick tx ring. + * KICK_ATIM: Kick atim ring. + * KICK_PRIO: Kick priority ring. + * ABORT: Abort all transmit related ring operation. + */ +#define TXCSR0 0x0060 +#define TXCSR0_KICK_TX FIELD32(0x00000001) +#define TXCSR0_KICK_ATIM FIELD32(0x00000002) +#define TXCSR0_KICK_PRIO FIELD32(0x00000004) +#define TXCSR0_ABORT FIELD32(0x00000008) + +/* + * TXCSR1: TX Configuration Register. + * ACK_TIMEOUT: Ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. + * ACK_CONSUME_TIME: Ack consume time, default = sifs + acktime @ 1mbps. + * TSF_OFFSET: Insert tsf offset. 
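The TXCSR1 comment just below gives the ACK timing defaults as formulas. Plugging in the other defaults quoted in this header (SIFS 10 us, 802.11b slot time 20 us) and the 112 us ACK-at-1-Mbps figure that rt2500pci later programs into ARTCSR0 gives a feel for the magnitudes involved (illustrative arithmetic only, not values taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int sifs = 10;		/* CSR18 default */
	unsigned int slot = 20;		/* CSR11 default for 802.11b */
	unsigned int ack_1mbps = 112;	/* ACK duration at 1 Mbps (ARTCSR0 value) */

	printf("ACK_TIMEOUT      = %u us\n", sifs + 2 * slot + ack_1mbps);	/* 162 */
	printf("ACK_CONSUME_TIME = %u us\n", sifs + ack_1mbps);			/* 122 */
	return 0;
}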
+ * AUTORESPONDER: Enable auto responder which include ack & cts. + */ +#define TXCSR1 0x0064 +#define TXCSR1_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXCSR1_ACK_CONSUME_TIME FIELD32(0x0003fe00) +#define TXCSR1_TSF_OFFSET FIELD32(0x00fc0000) +#define TXCSR1_AUTORESPONDER FIELD32(0x01000000) + +/* + * TXCSR2: Tx descriptor configuration register. + * TXD_SIZE: Tx descriptor size, default is 48. + * NUM_TXD: Number of tx entries in ring. + * NUM_ATIM: Number of atim entries in ring. + * NUM_PRIO: Number of priority entries in ring. + */ +#define TXCSR2 0x0068 +#define TXCSR2_TXD_SIZE FIELD32(0x000000ff) +#define TXCSR2_NUM_TXD FIELD32(0x0000ff00) +#define TXCSR2_NUM_ATIM FIELD32(0x00ff0000) +#define TXCSR2_NUM_PRIO FIELD32(0xff000000) + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3 0x006c +#define TXCSR3_TX_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4 0x0070 +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5 0x0074 +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6 0x0078 +#define TXCSR6_BEACON_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR7: Auto responder control register. + * AR_POWERMANAGEMENT: Auto responder power management bit. + */ +#define TXCSR7 0x007c +#define TXCSR7_AR_POWERMANAGEMENT FIELD32(0x00000001) + +/* + * Receive related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * RXCSR0: RX Control Register. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * PASS_CRC: Pass all packets with crc attached. + */ +#define RXCSR0 0x0080 +#define RXCSR0_DISABLE_RX FIELD32(0x00000001) +#define RXCSR0_DROP_CRC FIELD32(0x00000002) +#define RXCSR0_DROP_PHYSICAL FIELD32(0x00000004) +#define RXCSR0_DROP_CONTROL FIELD32(0x00000008) +#define RXCSR0_DROP_NOT_TO_ME FIELD32(0x00000010) +#define RXCSR0_DROP_TODS FIELD32(0x00000020) +#define RXCSR0_DROP_VERSION_ERROR FIELD32(0x00000040) +#define RXCSR0_PASS_CRC FIELD32(0x00000080) + +/* + * RXCSR1: RX descriptor configuration register. + * RXD_SIZE: Rx descriptor size, default is 32b. + * NUM_RXD: Number of rx entries in ring. + */ +#define RXCSR1 0x0084 +#define RXCSR1_RXD_SIZE FIELD32(0x000000ff) +#define RXCSR1_NUM_RXD FIELD32(0x0000ff00) + +/* + * RXCSR2: RX Ring base address register. + */ +#define RXCSR2 0x0088 +#define RXCSR2_RX_RING_REGISTER FIELD32(0xffffffff) + +/* + * RXCSR3: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. + */ +#define RXCSR3 0x0090 +#define RXCSR3_BBP_ID0 FIELD32(0x0000007f) +#define RXCSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define RXCSR3_BBP_ID1 FIELD32(0x00007f00) +#define RXCSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define RXCSR3_BBP_ID2 FIELD32(0x007f0000) +#define RXCSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define RXCSR3_BBP_ID3 FIELD32(0x7f000000) +#define RXCSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * RXCSR4: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. 
+ */ +#define RXCSR4 0x0094 +#define RXCSR4_BBP_ID4 FIELD32(0x0000007f) +#define RXCSR4_BBP_ID4_VALID FIELD32(0x00000080) +#define RXCSR4_BBP_ID5 FIELD32(0x00007f00) +#define RXCSR4_BBP_ID5_VALID FIELD32(0x00008000) + +/* + * ARCSR0: Auto Responder PLCP config register 0. + * ARCSR0_AR_BBP_DATA#: Auto responder BBP register # data. + * ARCSR0_AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR0 0x0098 +#define ARCSR0_AR_BBP_DATA0 FIELD32(0x000000ff) +#define ARCSR0_AR_BBP_ID0 FIELD32(0x0000ff00) +#define ARCSR0_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define ARCSR0_AR_BBP_ID1 FIELD32(0xff000000) + +/* + * ARCSR1: Auto Responder PLCP config register 1. + * ARCSR0_AR_BBP_DATA#: Auto responder BBP register # data. + * ARCSR0_AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR1 0x009c +#define ARCSR1_AR_BBP_DATA2 FIELD32(0x000000ff) +#define ARCSR1_AR_BBP_ID2 FIELD32(0x0000ff00) +#define ARCSR1_AR_BBP_DATA3 FIELD32(0x00ff0000) +#define ARCSR1_AR_BBP_ID3 FIELD32(0xff000000) + +/* + * Miscellaneous Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PCICSR: PCI control register. + * BIG_ENDIAN: 1: big endian, 0: little endian. + * RX_TRESHOLD: Rx threshold in dw to start pci access + * 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. + * TX_TRESHOLD: Tx threshold in dw to start pci access + * 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. + * BURST_LENTH: Pci burst length 0: 4dw (default, 1: 8dw, 2: 16dw, 3:32dw. + * ENABLE_CLK: Enable clk_run, pci clock can't going down to non-operational. + */ +#define PCICSR 0x008c +#define PCICSR_BIG_ENDIAN FIELD32(0x00000001) +#define PCICSR_RX_TRESHOLD FIELD32(0x00000006) +#define PCICSR_TX_TRESHOLD FIELD32(0x00000018) +#define PCICSR_BURST_LENTH FIELD32(0x00000060) +#define PCICSR_ENABLE_CLK FIELD32(0x00000080) + +/* + * CNT0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define CNT0 0x00a0 +#define CNT0_FCS_ERROR FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT1: PLCP error count. + * CNT2: Long error count. + * CNT3: CCA false alarm count. + * CNT4: Rx FIFO overflow count. + * CNT5: Tx FIFO underrun count. + */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac +#define CNT2 0x00b0 +#define TIMECSR3 0x00b4 +#define CNT3 0x00b8 +#define CNT4 0x00bc +#define CNT5 0x00c0 + +/* + * Baseband Control Register. + */ + +/* + * PWRCSR0: Power mode configuration register. + */ +#define PWRCSR0 0x00c4 + +/* + * Power state transition time registers. + */ +#define PSCSR0 0x00c8 +#define PSCSR1 0x00cc +#define PSCSR2 0x00d0 +#define PSCSR3 0x00d4 + +/* + * PWRCSR1: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURR_STATE: BBP current state. + * RF_CURR_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define PWRCSR1 0x00d8 +#define PWRCSR1_SET_STATE FIELD32(0x00000001) +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(0x00000006) +#define PWRCSR1_RF_DESIRE_STATE FIELD32(0x00000018) +#define PWRCSR1_BBP_CURR_STATE FIELD32(0x00000060) +#define PWRCSR1_RF_CURR_STATE FIELD32(0x00000180) +#define PWRCSR1_PUT_TO_SLEEP FIELD32(0x00000200) + +/* + * TIMECSR: Timer control register. + * US_COUNT: 1 us timer count in units of clock cycles. + * US_64_COUNT: 64 us timer count in units of 1 us timer. + * BEACON_EXPECT: Beacon expect window. 
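The PCICSR threshold and burst-length fields below are two-bit encodings rather than raw counts. Decoding the values that rt2500pci_init_registers() programs later in this patch (RX_TRESHOLD=0, TX_TRESHOLD=3, BURST_LENTH=1) against the tables in the comment (illustration only):

#include <stdio.h>

int main(void)
{
	/* Encodings quoted from the PCICSR comment. */
	static const char *rx_thresh[] = { "16 dwords", "8 dwords", "4 dwords", "32 dwords" };
	static const char *tx_thresh[] = { "0 dwords", "1 dword", "4 dwords", "forward" };
	static const char *burst_len[] = { "4 dwords", "8 dwords", "16 dwords", "32 dwords" };

	printf("RX_TRESHOLD=0 -> start PCI access at %s\n", rx_thresh[0]);
	printf("TX_TRESHOLD=3 -> %s\n", tx_thresh[3]);
	printf("BURST_LENTH=1 -> burst of %s\n", burst_len[1]);
	return 0;
}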
+ */ +#define TIMECSR 0x00dc +#define TIMECSR_US_COUNT FIELD32(0x000000ff) +#define TIMECSR_US_64_COUNT FIELD32(0x0000ff00) +#define TIMECSR_BEACON_EXPECT FIELD32(0x00070000) + +/* + * MACCSR0: MAC configuration register 0. + */ +#define MACCSR0 0x00e0 + +/* + * MACCSR1: MAC configuration register 1. + * KICK_RX: Kick one-shot rx in one-shot rx mode. + * ONESHOT_RXMODE: Enable one-shot rx mode for debugging. + * BBPRX_RESET_MODE: Ralink bbp rx reset mode. + * AUTO_TXBBP: Auto tx logic access bbp control register. + * AUTO_RXBBP: Auto rx logic access bbp control register. + * LOOPBACK: Loopback mode. 0: normal, 1: internal, 2: external, 3:rsvd. + * INTERSIL_IF: Intersil if calibration pin. + */ +#define MACCSR1 0x00e4 +#define MACCSR1_KICK_RX FIELD32(0x00000001) +#define MACCSR1_ONESHOT_RXMODE FIELD32(0x00000002) +#define MACCSR1_BBPRX_RESET_MODE FIELD32(0x00000004) +#define MACCSR1_AUTO_TXBBP FIELD32(0x00000008) +#define MACCSR1_AUTO_RXBBP FIELD32(0x00000010) +#define MACCSR1_LOOPBACK FIELD32(0x00000060) +#define MACCSR1_INTERSIL_IF FIELD32(0x00000080) + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + * AR_BBP_DATA#: Auto reset BBP register # data. + * AR_BBP_ID#: Auto reset BBP register # id. + */ +#define RALINKCSR 0x00e8 +#define RALINKCSR_AR_BBP_DATA0 FIELD32(0x000000ff) +#define RALINKCSR_AR_BBP_ID0 FIELD32(0x0000ff00) +#define RALINKCSR_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define RALINKCSR_AR_BBP_ID1 FIELD32(0xff000000) + +/* + * BCNCSR: Beacon interval control register. + * CHANGE: Write one to change beacon interval. + * DELTATIME: The delta time value. + * NUM_BEACON: Number of beacon according to mode. + * MODE: Please refer to asic specs. + * PLUS: Plus or minus delta time value. + */ +#define BCNCSR 0x00ec +#define BCNCSR_CHANGE FIELD32(0x00000001) +#define BCNCSR_DELTATIME FIELD32(0x0000001e) +#define BCNCSR_NUM_BEACON FIELD32(0x00001fe0) +#define BCNCSR_MODE FIELD32(0x00006000) +#define BCNCSR_PLUS FIELD32(0x00008000) + +/* + * BBP / RF / IF Control Register. + */ + +/* + * BBPCSR: BBP serial control register. + * VALUE: Register value to program into BBP. + * REGNUM: Selected BBP register. + * BUSY: 1: asic is busy execute BBP programming. + * WRITE_CONTROL: 1: write BBP, 0: read BBP. + */ +#define BBPCSR 0x00f0 +#define BBPCSR_VALUE FIELD32(0x000000ff) +#define BBPCSR_REGNUM FIELD32(0x00007f00) +#define BBPCSR_BUSY FIELD32(0x00008000) +#define BBPCSR_WRITE_CONTROL FIELD32(0x00010000) + +/* + * RFCSR: RF serial control register. + * VALUE: Register value + id to program into rf/if. + * NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * IF_SELECT: Chip to program: 0: rf, 1: if. + * PLL_LD: Rf pll_ld status. + * BUSY: 1: asic is busy execute rf programming. + */ +#define RFCSR 0x00f4 +#define RFCSR_VALUE FIELD32(0x00ffffff) +#define RFCSR_NUMBER_OF_BITS FIELD32(0x1f000000) +#define RFCSR_IF_SELECT FIELD32(0x20000000) +#define RFCSR_PLL_LD FIELD32(0x40000000) +#define RFCSR_BUSY FIELD32(0x80000000) + +/* + * LEDCSR: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY: 0: idle, 1: active. + */ +#define LEDCSR 0x00f8 +#define LEDCSR_ON_PERIOD FIELD32(0x000000ff) +#define LEDCSR_OFF_PERIOD FIELD32(0x0000ff00) +#define LEDCSR_LINK FIELD32(0x00010000) +#define LEDCSR_ACTIVITY FIELD32(0x00020000) + +/* + * ASIC pointer information. + * RXPTR: Current RX ring address. + * TXPTR: Current Tx ring address. + * PRIPTR: Current Priority ring address. 
+ * ATIMPTR: Current ATIM ring address. + */ +#define RXPTR 0x0100 +#define TXPTR 0x0104 +#define PRIPTR 0x0108 +#define ATIMPTR 0x010c + +/* + * GPIO and others. + */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR 0x0120 +#define GPIOCSR_BIT0 FIELD32(0x00000001) +#define GPIOCSR_BIT1 FIELD32(0x00000002) +#define GPIOCSR_BIT2 FIELD32(0x00000004) +#define GPIOCSR_BIT3 FIELD32(0x00000008) +#define GPIOCSR_BIT4 FIELD32(0x00000010) +#define GPIOCSR_BIT5 FIELD32(0x00000020) +#define GPIOCSR_BIT6 FIELD32(0x00000040) +#define GPIOCSR_BIT7 FIELD32(0x00000080) + +/* + * BBPPCSR: BBP Pin control register. + */ +#define BBPPCSR 0x0124 + +/* + * BCNCSR1: Tx BEACON offset time control register. + * PRELOAD: Beacon timer offset in units of usec. + */ +#define BCNCSR1 0x0130 +#define BCNCSR1_PRELOAD FIELD32(0x0000ffff) + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + * DELAY: RX_PE low width, in units of pci clock cycle. + */ +#define MACCSR2 0x0134 +#define MACCSR2_DELAY FIELD32(0x000000ff) + +/* + * ARCSR2: 1 Mbps ACK/CTS PLCP. + */ +#define ARCSR2 0x013c +#define ARCSR2_SIGNAL FIELD32(0x000000ff) +#define ARCSR2_SERVICE FIELD32(0x0000ff00) +#define ARCSR2_LENGTH_LOW FIELD32(0x00ff0000) +#define ARCSR2_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR3: 2 Mbps ACK/CTS PLCP. + */ +#define ARCSR3 0x0140 +#define ARCSR3_SIGNAL FIELD32(0x000000ff) +#define ARCSR3_SERVICE FIELD32(0x0000ff00) +#define ARCSR3_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR4: 5.5 Mbps ACK/CTS PLCP. + */ +#define ARCSR4 0x0144 +#define ARCSR4_SIGNAL FIELD32(0x000000ff) +#define ARCSR4_SERVICE FIELD32(0x0000ff00) +#define ARCSR4_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR5: 11 Mbps ACK/CTS PLCP. + */ +#define ARCSR5 0x0148 +#define ARCSR5_SIGNAL FIELD32(0x000000ff) +#define ARCSR5_SERVICE FIELD32(0x0000ff00) +#define ARCSR5_LENGTH FIELD32(0xffff0000) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R1: TX antenna control + */ +#define BBP_R1_TX_ANTENNA FIELD8(0x03) + +/* + * R4: RX antenna control + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x06) + +/* + * RF registers + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RF_TYPE: Rf_type of this adapter. + * LED_MODE: 0: default, 1: TX/RX activity,2: Single (ignore link), 3: rsvd. + * RX_AGCVGC: 0: disable, 1:enable BBP R13 tuning. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. 
+ */ +#define EEPROM_ANTENNA 0x0b +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x0180) +#define EEPROM_ANTENNA_RX_AGCVGC_TUNING FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0c +#define EEPROM_BBP_SIZE 7 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x13 +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 8 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 8 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_RESULT FIELD32(0x0000001c) +#define TXD_W0_RETRY_COUNT FIELD32(0x000000e0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_RTS FIELD32(0x00000800) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(0x00008000) +#define TXD_W0_AGC FIELD32(0x00ff0000) +#define TXD_W0_R2 FIELD32(0xff000000) + +/* + * Word1 + */ +#define TXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define TXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) +#define TXD_W2_DATABYTE_COUNT FIELD32(0xffff0000) + +/* + * Word3 & 4: PLCP information + */ +#define TXD_W3_PLCP_SIGNAL FIELD32(0x0000ffff) +#define TXD_W3_PLCP_SERVICE FIELD32(0xffff0000) +#define TXD_W4_PLCP_LENGTH_LOW FIELD32(0x0000ffff) +#define TXD_W4_PLCP_LENGTH_HIGH FIELD32(0xffff0000) + +/* + * Word5 + */ +#define TXD_W5_BBCR4 FIELD32(0x0000ffff) +#define TXD_W5_AGC_REG FIELD32(0x007f0000) +#define TXD_W5_AGC_REG_VALID FIELD32(0x00800000) +#define TXD_W5_XXX_REG FIELD32(0x7f000000) +#define TXD_W5_XXX_REG_VALID FIELD32(0x80000000) + +/* + * Word6 + */ +#define TXD_W6_SK_BUFF FIELD32(0xffffffff) + +/* + * Word7 + */ +#define TXD_W7_RESERVED FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_DATABYTE_COUNT FIELD32(0xffff0000) + +/* + * Word1 + */ +#define RXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define RXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) +#define RXD_W2_SIGNAL FIELD32(0x00ff0000) +#define RXD_W2_RSSI FIELD32(0xff000000) + +/* + * Word3 + */ +#define RXD_W3_BBR2 FIELD32(0x000000ff) +#define RXD_W3_BBR3 FIELD32(0x0000ff00) +#define RXD_W3_BBR4 FIELD32(0x00ff0000) +#define RXD_W3_BBR5 FIELD32(0xff000000) + +/* + * Word4 + */ +#define RXD_W4_RX_END_TIME FIELD32(0xffffffff) + +/* + * Word5 & 6 & 7: Reserved + */ +#define RXD_W5_RESERVED FIELD32(0xffffffff) +#define RXD_W6_RESERVED FIELD32(0xffffffff) +#define RXD_W7_RESERVED FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. 
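The TXPOWER_TO_DEV()/TXPOWER_FROM_DEV() macros that follow implement the reversed power scale described in the comment below (rt2400pci_probe_hw_mode() earlier in the patch uses TXPOWER_FROM_DEV on the EEPROM defaults). A standalone run of TXPOWER_TO_DEV, copied verbatim from the header, shows the reversal: the lowest stack request maps to the highest register value.

#include <stdio.h>

/* Constants and macro copied from rt2400pci.h (GCC statement expression). */
#define MIN_TXPOWER	31
#define MAX_TXPOWER	62

#define TXPOWER_TO_DEV(__txpower) \
({ \
	(__txpower) += MIN_TXPOWER; \
	((__txpower) <= MIN_TXPOWER) ? MAX_TXPOWER : \
	(((__txpower) >= MAX_TXPOWER) ? MIN_TXPOWER : \
	(MAX_TXPOWER - ((__txpower) - MIN_TXPOWER))); \
})

int main(void)
{
	int requested[] = { 0, 10, 20, 31 };
	unsigned int i;

	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		int p = requested[i];	/* the macro modifies its argument */
		printf("%2d -> %2d\n", requested[i], TXPOWER_TO_DEV(p));
	}
	return 0;	/* prints 0 -> 62, 10 -> 52, 20 -> 42, 31 -> 31 */
}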
+ * NOTE: Logics in rt2400pci for txpower are reversed + * compared to the other rt2x00 drivers. A higher txpower + * value means that the txpower must be lowered. This is + * important when converting the value coming from the + * dscape stack to the rt2400 acceptable value. + */ +#define MIN_TXPOWER 31 +#define MAX_TXPOWER 62 +#define DEFAULT_TXPOWER 39 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ + ((__txpower) < MIN_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ + (((__txpower) - MAX_TXPOWER) + MIN_TXPOWER); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + (__txpower) += MIN_TXPOWER; \ + ((__txpower) <= MIN_TXPOWER) ? MAX_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MIN_TXPOWER : \ + (MAX_TXPOWER - ((__txpower) - MIN_TXPOWER))); \ +}) + +#endif /* RT2400PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c new file mode 100644 index 000000000000..ff2d63267b19 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -0,0 +1,1971 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500pci + Abstract: rt2500pci device specific routines. + Supported chipsets: RT2560. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2500pci" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/eeprom_93cx6.h> + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2500pci.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00pci_register_read and rt2x00pci_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +static u32 rt2500pci_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, BBPCSR, ®); + if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt2500pci_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. 
Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, BBPCSR_VALUE, value); + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); +} + +static void rt2500pci_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, BBPCSR_VALUE); +} + +static void rt2500pci_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, RFCSR, ®); + if (!rt2x00_get_field32(reg, RFCSR_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "RFCSR register busy. Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, RFCSR_VALUE, value); + rt2x00_set_field32(®, RFCSR_NUMBER_OF_BITS, 20); + rt2x00_set_field32(®, RFCSR_IF_SELECT, 0); + rt2x00_set_field32(®, RFCSR_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, RFCSR, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); +} + +static void rt2500pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, CSR21_EEPROM_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, CSR21, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt2500pci_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt2500pci_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt2500pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2500pci_read_csr, + .write = rt2500pci_write_csr, + 
.word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2500pci_bbp_read, + .write = rt2500pci_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2500pci_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +#ifdef CONFIG_RT2500PCI_RFKILL +static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + return rt2x00_get_field32(reg, GPIOCSR_BIT0); +} +#else +#define rt2500pci_rfkill_poll NULL +#endif /* CONFIG_RT2500PCI_RFKILL */ + +/* + * Configuration handlers. + */ +static void rt2500pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, + __le32 *mac) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR3, mac, + (2 * sizeof(__le32))); +} + +static void rt2500pci_config_bssid(struct rt2x00_dev *rt2x00dev, + __le32 *bssid) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR5, bssid, + (2 * sizeof(__le32))); +} + +static void rt2500pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Enable beacon config + */ + rt2x00pci_register_read(rt2x00dev, BCNCSR1, ®); + rt2x00_set_field32(®, BCNCSR1_PRELOAD, + PREAMBLE + get_duration(IEEE80211_HEADER, 20)); + rt2x00_set_field32(®, BCNCSR1_BEACON_CWMIN, + rt2x00lib_get_ring(rt2x00dev, + IEEE80211_TX_QUEUE_BEACON) + ->tx_params.cw_min); + rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); + + /* + * Enable synchronisation. 
+ */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(®, CSR14_TSF_SYNC, tsf_sync); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); +} + +static void rt2500pci_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + int preamble_mask; + u32 reg; + + /* + * When short preamble is enabled, we should set bit 0x08 + */ + preamble_mask = short_preamble << 3; + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_ACK_TIMEOUT, ack_timeout); + rt2x00_set_field32(®, TXCSR1_ACK_CONSUME_TIME, ack_consume_time); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR2, ®); + rt2x00_set_field32(®, ARCSR2_SIGNAL, 0x00 | preamble_mask); + rt2x00_set_field32(®, ARCSR2_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); + rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR3, ®); + rt2x00_set_field32(®, ARCSR3_SIGNAL, 0x01 | preamble_mask); + rt2x00_set_field32(®, ARCSR3_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 20)); + rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR4, ®); + rt2x00_set_field32(®, ARCSR4_SIGNAL, 0x02 | preamble_mask); + rt2x00_set_field32(®, ARCSR4_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 55)); + rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR5, ®); + rt2x00_set_field32(®, ARCSR5_SIGNAL, 0x03 | preamble_mask); + rt2x00_set_field32(®, ARCSR5_SERVICE, 0x84); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 110)); + rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); +} + +static void rt2500pci_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt2x00pci_register_write(rt2x00dev, ARCSR1, basic_rate_mask); +} + +static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r70; + + /* + * Set TXpower. + */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + + /* + * Switch on tuning bits. + * For RT2523 devices we do not need to update the R1 register. + */ + if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); + + /* + * For RT2525 we should first set the channel to half band higher. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { + static const u32 vals[] = { + 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, + 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, + 0x00080d1e, 0x00080d22, 0x00080d26, 0x00080d2a, + 0x00080d2e, 0x00080d3a + }; + + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2500pci_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); + } + + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2500pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); + + /* + * Channel 14 requires the Japan filter bit to be set. 
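rt2500pci_config_preamble() above fills the ARCSR2..ARCSR5 LENGTH fields with get_duration(ACK_SIZE, rate). get_duration() and ACK_SIZE belong to the rt2x00 core and are not part of this hunk; the sketch below assumes the obvious reading (on-air time in us of a 14-byte ACK, with the rate in 100 kbit/s units), which reproduces the 112/56/20/10 us ACK/CTS values that rt2500pci_init_registers() writes into ARTCSR0 further below.

#include <stdio.h>

#define ACK_SIZE 14	/* assumed: 802.11 ACK frame length in bytes */

/* Assumed behaviour of the rt2x00 core helper of the same name. */
static unsigned int get_duration(unsigned int size, unsigned int rate)
{
	return (size * 8 * 10) / rate;	/* bytes -> bits, rate in 100 kbit/s */
}

int main(void)
{
	static const unsigned int rates[] = { 10, 20, 55, 110 };	/* 1, 2, 5.5, 11 Mbps */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("ACK at %3u x 100 kbit/s: %u us\n",
		       rates[i], get_duration(ACK_SIZE, rates[i]));
	return 0;	/* 112, 56, 20, 10 us */
}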
+ */ + r70 = 0x46; + rt2x00_set_field8(&r70, BBP_R70_JAPAN_FILTER, rf->channel == 14); + rt2500pci_bbp_write(rt2x00dev, 70, r70); + + msleep(1); + + /* + * Switch off tuning bits. + * For RT2523 devices we do not need to update the R1 register. + */ + if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) { + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + } + + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * Clear false CRC during channel switch. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); +} + +static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 rf3; + + rt2x00_rf_read(rt2x00dev, 3, &rf3); + rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2500pci_rf_write(rt2x00dev, 3, rf3); +} + +static void rt2500pci_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + u32 reg; + u8 r14; + u8 r2; + + rt2x00pci_register_read(rt2x00dev, BBPCSR1, ®); + rt2500pci_bbp_read(rt2x00dev, 14, &r14); + rt2500pci_bbp_read(rt2x00dev, 2, &r2); + + /* + * Configure the TX antenna. + */ + switch (antenna_tx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field32(®, BBPCSR1_CCK, 2); + rt2x00_set_field32(®, BBPCSR1_OFDM, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); + rt2x00_set_field32(®, BBPCSR1_CCK, 0); + rt2x00_set_field32(®, BBPCSR1_OFDM, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field32(®, BBPCSR1_CCK, 2); + rt2x00_set_field32(®, BBPCSR1_OFDM, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + } + + /* + * RT2525E and RT5222 need to flip TX I/Q + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || + rt2x00_rf(&rt2x00dev->chip, RF5222)) { + rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); + rt2x00_set_field32(®, BBPCSR1_CCK_FLIP, 1); + rt2x00_set_field32(®, BBPCSR1_OFDM_FLIP, 1); + + /* + * RT2525E does not need RX I/Q Flip. 
+ */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) + rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); + } else { + rt2x00_set_field32(®, BBPCSR1_CCK_FLIP, 0); + rt2x00_set_field32(®, BBPCSR1_OFDM_FLIP, 0); + } + + rt2x00pci_register_write(rt2x00dev, BBPCSR1, reg); + rt2500pci_bbp_write(rt2x00dev, 14, r14); + rt2500pci_bbp_write(rt2x00dev, 2, r2); +} + +static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_SLOT_TIME, libconf->slot_time); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR18, ®); + rt2x00_set_field32(®, CSR18_SIFS, libconf->sifs); + rt2x00_set_field32(®, CSR18_PIFS, libconf->pifs); + rt2x00pci_register_write(rt2x00dev, CSR18, reg); + + rt2x00pci_register_read(rt2x00dev, CSR19, ®); + rt2x00_set_field32(®, CSR19_DIFS, libconf->difs); + rt2x00_set_field32(®, CSR19_EIFS, libconf->eifs); + rt2x00pci_register_write(rt2x00dev, CSR19, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR12, ®); + rt2x00_set_field32(®, CSR12_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt2x00_set_field32(®, CSR12_CFP_MAX_DURATION, + libconf->conf->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, CSR12, reg); +} + +static void rt2500pci_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt2500pci_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt2500pci_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt2500pci_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt2500pci_config_antenna(rt2x00dev, + libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt2500pci_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. + */ +static void rt2500pci_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, LEDCSR, ®); + + rt2x00_set_field32(®, LEDCSR_ON_PERIOD, 70); + rt2x00_set_field32(®, LEDCSR_OFF_PERIOD, 30); + + if (rt2x00dev->led_mode == LED_MODE_TXRX_ACTIVITY) { + rt2x00_set_field32(®, LEDCSR_LINK, 1); + rt2x00_set_field32(®, LEDCSR_ACTIVITY, 0); + } else if (rt2x00dev->led_mode == LED_MODE_ASUS) { + rt2x00_set_field32(®, LEDCSR_LINK, 0); + rt2x00_set_field32(®, LEDCSR_ACTIVITY, 1); + } else { + rt2x00_set_field32(®, LEDCSR_LINK, 1); + rt2x00_set_field32(®, LEDCSR_ACTIVITY, 1); + } + + rt2x00pci_register_write(rt2x00dev, LEDCSR, reg); +} + +static void rt2500pci_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, LEDCSR, ®); + rt2x00_set_field32(®, LEDCSR_LINK, 0); + rt2x00_set_field32(®, LEDCSR_ACTIVITY, 0); + rt2x00pci_register_write(rt2x00dev, LEDCSR, reg); +} + +/* + * Link tuning + */ +static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Update FCS error count from register. 
+ */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2x00pci_register_read(rt2x00dev, CNT3, ®); + rt2x00dev->link.false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA); +} + +static void rt2500pci_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt2500pci_bbp_write(rt2x00dev, 17, 0x48); + rt2x00dev->link.vgc_level = 0x48; +} + +static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u8 r17; + + /* + * To prevent collisions with MAC ASIC on chipsets + * up to version C the link tuning should halt after 20 + * seconds. + */ + if (rt2x00_get_rev(&rt2x00dev->chip) < RT2560_VERSION_D && + rt2x00dev->link.count > 20) + return; + + rt2500pci_bbp_read(rt2x00dev, 17, &r17); + + /* + * Chipset versions C and lower should directly continue + * to the dynamic CCA tuning. + */ + if (rt2x00_get_rev(&rt2x00dev->chip) < RT2560_VERSION_D) + goto dynamic_cca_tune; + + /* + * A too low RSSI will cause too much false CCA which will + * then corrupt the R17 tuning. To remidy this the tuning should + * be stopped (While making sure the R17 value will not exceed limits) + */ + if (rssi < -80 && rt2x00dev->link.count > 20) { + if (r17 >= 0x41) { + r17 = rt2x00dev->link.vgc_level; + rt2500pci_bbp_write(rt2x00dev, 17, r17); + } + return; + } + + /* + * Special big-R17 for short distance + */ + if (rssi >= -58) { + if (r17 != 0x50) + rt2500pci_bbp_write(rt2x00dev, 17, 0x50); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (rssi >= -74) { + if (r17 != 0x41) + rt2500pci_bbp_write(rt2x00dev, 17, 0x41); + return; + } + + /* + * Leave short or middle distance condition, restore r17 + * to the dynamic tuning range. + */ + if (r17 >= 0x41) { + rt2500pci_bbp_write(rt2x00dev, 17, rt2x00dev->link.vgc_level); + return; + } + +dynamic_cca_tune: + + /* + * R17 is inside the dynamic tuning range, + * start tuning the link based on the false cca counter. + */ + if (rt2x00dev->link.false_cca > 512 && r17 < 0x40) { + rt2500pci_bbp_write(rt2x00dev, 17, ++r17); + rt2x00dev->link.vgc_level = r17; + } else if (rt2x00dev->link.false_cca < 100 && r17 > 0x32) { + rt2500pci_bbp_write(rt2x00dev, 17, --r17); + rt2x00dev->link.vgc_level = r17; + } +} + +/* + * Initialization functions. 
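rt2500pci_link_tuner() above mixes several concerns (chipset revision, uptime, RSSI bands, false-CCA hysteresis). As a reading aid only, the RSSI/false-CCA decision ladder can be restated as a pure function; the revision and 20-second special cases are deliberately left out, so this is not a drop-in replacement for the function in the patch.

#include <stdio.h>

/* Condensed restatement of the R17 decision ladder (illustration only). */
static unsigned char r17_target(int rssi, unsigned int false_cca,
				unsigned char r17, unsigned char vgc_level)
{
	if (rssi < -80)				/* weak signal: back off to stored level */
		return r17 >= 0x41 ? vgc_level : r17;
	if (rssi >= -58)			/* short distance */
		return 0x50;
	if (rssi >= -74)			/* middle distance */
		return 0x41;
	if (r17 >= 0x41)			/* leaving the fixed settings */
		return vgc_level;
	if (false_cca > 512 && r17 < 0x40)	/* dynamic CCA tuning, step up */
		return r17 + 1;
	if (false_cca < 100 && r17 > 0x32)	/* ...or step down */
		return r17 - 1;
	return r17;
}

int main(void)
{
	printf("rssi -60, few false CCAs:  0x%02x\n", r17_target(-60, 50, 0x3a, 0x48));
	printf("rssi -90, r17 pinned high: 0x%02x\n", r17_target(-90, 50, 0x50, 0x48));
	return 0;
}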
+ */ +static void rt2500pci_init_rxring(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_desc *rxd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + rxd = ring->entry[i].priv; + + rt2x00_desc_read(rxd, 1, &word); + rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(rxd, 1, word); + + rt2x00_desc_read(rxd, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_clear(rt2x00dev->rx); +} + +static void rt2500pci_init_txring(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_desc *txd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + txd = ring->entry[i].priv; + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(txd, 0, word); + } + + rt2x00_ring_index_clear(ring); +} + +static int rt2500pci_init_rings(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize rings. + */ + rt2500pci_init_rxring(rt2x00dev); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + /* + * Initialize registers. + */ + rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); + rt2x00_set_field32(®, TXCSR2_TXD_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size); + rt2x00_set_field32(®, TXCSR2_NUM_TXD, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, + rt2x00dev->bcn[1].stats.limit); + rt2x00_set_field32(®, TXCSR2_NUM_PRIO, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); + rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR3, ®); + rt2x00_set_field32(®, TXCSR3_TX_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR5, ®); + rt2x00_set_field32(®, TXCSR5_PRIO_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); + rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, + rt2x00dev->bcn[1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); + rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, + rt2x00dev->bcn[0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR1, ®); + rt2x00_set_field32(®, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); + rt2x00_set_field32(®, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit); + rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR2, ®); + rt2x00_set_field32(®, RXCSR2_RX_RING_REGISTER, + rt2x00dev->rx->data_dma); + rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); + + return 0; +} + +static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + 
rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); + rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); + + rt2x00pci_register_read(rt2x00dev, TIMECSR, ®); + rt2x00_set_field32(®, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(®, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(®, TIMECSR_BEACON_EXPECT, 0); + rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR9, ®); + rt2x00_set_field32(®, CSR9_MAX_FRAME_UNIT, + rt2x00dev->rx->data_size / 128); + rt2x00pci_register_write(rt2x00dev, CSR9, reg); + + /* + * Always use CWmin and CWmax set in descriptor. + */ + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_CW_SELECT, 0); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_write(rt2x00dev, CNT3, 0); + + rt2x00pci_register_read(rt2x00dev, TXCSR8, ®); + rt2x00_set_field32(®, TXCSR8_BBP_ID0, 10); + rt2x00_set_field32(®, TXCSR8_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID1, 11); + rt2x00_set_field32(®, TXCSR8_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID2, 13); + rt2x00_set_field32(®, TXCSR8_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID3, 12); + rt2x00_set_field32(®, TXCSR8_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR8, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR0, ®); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_1MBS, 112); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_2MBS, 56); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_5_5MBS, 20); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_11MBS, 10); + rt2x00pci_register_write(rt2x00dev, ARTCSR0, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR1, ®); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_6MBS, 45); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_9MBS, 37); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_12MBS, 33); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_18MBS, 29); + rt2x00pci_register_write(rt2x00dev, ARTCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR2, ®); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_24MBS, 29); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_36MBS, 25); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_48MBS, 25); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_54MBS, 25); + rt2x00pci_register_write(rt2x00dev, ARTCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR3, ®); + rt2x00_set_field32(®, RXCSR3_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, RXCSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID1, 51); /* Rssi */ + rt2x00_set_field32(®, RXCSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, RXCSR3_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID3, 51); /* RSSI */ + rt2x00_set_field32(®, RXCSR3_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, PCICSR, ®); + rt2x00_set_field32(®, PCICSR_BIG_ENDIAN, 0); + rt2x00_set_field32(®, PCICSR_RX_TRESHOLD, 0); + rt2x00_set_field32(®, PCICSR_TX_TRESHOLD, 3); + rt2x00_set_field32(®, PCICSR_BURST_LENTH, 1); + rt2x00_set_field32(®, PCICSR_ENABLE_CLK, 1); + rt2x00_set_field32(®, PCICSR_READ_MULTIPLE, 1); + rt2x00_set_field32(®, PCICSR_WRITE_INVALID, 1); + rt2x00pci_register_write(rt2x00dev, PCICSR, reg); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); + + rt2x00pci_register_write(rt2x00dev, GPIOCSR, 0x0000ff00); + rt2x00pci_register_write(rt2x00dev, TESTCSR, 0x000000f0); + + if 
(rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00213223); + rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); + + rt2x00pci_register_read(rt2x00dev, MACCSR2, ®); + rt2x00_set_field32(®, MACCSR2_DELAY, 64); + rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RALINKCSR, ®); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA0, 17); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID0, 26); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_VALID0, 1); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA1, 0); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID1, 26); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_VALID1, 1); + rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); + + rt2x00pci_register_write(rt2x00dev, BBPCSR1, 0x82188200); + + rt2x00pci_register_write(rt2x00dev, TXACKCSR0, 0x00000020); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, CSR1_BBP_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 0); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + /* + * We must clear the FCS and FIFO error count. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + rt2x00pci_register_read(rt2x00dev, CNT4, ®); + + return 0; +} + +static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2500pci_bbp_write(rt2x00dev, 3, 0x02); + rt2500pci_bbp_write(rt2x00dev, 4, 0x19); + rt2500pci_bbp_write(rt2x00dev, 14, 0x1c); + rt2500pci_bbp_write(rt2x00dev, 15, 0x30); + rt2500pci_bbp_write(rt2x00dev, 16, 0xac); + rt2500pci_bbp_write(rt2x00dev, 18, 0x18); + rt2500pci_bbp_write(rt2x00dev, 19, 0xff); + rt2500pci_bbp_write(rt2x00dev, 20, 0x1e); + rt2500pci_bbp_write(rt2x00dev, 21, 0x08); + rt2500pci_bbp_write(rt2x00dev, 22, 0x08); + rt2500pci_bbp_write(rt2x00dev, 23, 0x08); + rt2500pci_bbp_write(rt2x00dev, 24, 0x70); + rt2500pci_bbp_write(rt2x00dev, 25, 0x40); + rt2500pci_bbp_write(rt2x00dev, 26, 0x08); + rt2500pci_bbp_write(rt2x00dev, 27, 0x23); + rt2500pci_bbp_write(rt2x00dev, 30, 0x10); + rt2500pci_bbp_write(rt2x00dev, 31, 0x2b); + rt2500pci_bbp_write(rt2x00dev, 32, 0xb9); + rt2500pci_bbp_write(rt2x00dev, 34, 0x12); + rt2500pci_bbp_write(rt2x00dev, 35, 0x50); + rt2500pci_bbp_write(rt2x00dev, 39, 0xc4); + rt2500pci_bbp_write(rt2x00dev, 40, 0x02); + rt2500pci_bbp_write(rt2x00dev, 41, 0x60); + rt2500pci_bbp_write(rt2x00dev, 53, 0x10); + rt2500pci_bbp_write(rt2x00dev, 54, 0x18); + rt2500pci_bbp_write(rt2x00dev, 56, 0x08); + rt2500pci_bbp_write(rt2x00dev, 57, 0x10); + rt2500pci_bbp_write(rt2x00dev, 58, 0x08); + rt2500pci_bbp_write(rt2x00dev, 61, 0x6d); + rt2500pci_bbp_write(rt2x00dev, 62, 0x10); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 
+ + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt2500pci_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); + rt2x00_set_field32(®, CSR8_RXDONE, mask); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); +} + +static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (rt2500pci_init_rings(rt2x00dev) || + rt2500pci_init_registers(rt2x00dev) || + rt2500pci_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + /* + * Enable interrupts. + */ + rt2500pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); + + /* + * Enable LED + */ + rt2500pci_enable_led(rt2x00dev); + + return 0; +} + +static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Disable LED + */ + rt2500pci_disable_led(rt2x00dev); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); + + /* + * Disable synchronisation. + */ + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Cancel RX and TX. + */ + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_ABORT, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); + + /* + * Disable interrupts. + */ + rt2500pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_OFF); +} + +static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + rt2x00_set_field32(®, PWRCSR1_SET_STATE, 1); + rt2x00_set_field32(®, PWRCSR1_BBP_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_RF_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. 
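+ * The loop below polls PWRCSR1 up to REGISTER_BUSY_COUNT times, + * sleeping 10ms between reads, and gives up with -EBUSY otherwise.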
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state: bbp %d and rf %d.\n", + state, bbp_state, rf_state); + + return -EBUSY; +} + +static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2500pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2500pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt2500pci_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2500pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. + */ + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W2_AIFS, desc->aifs); + rt2x00_set_field32(&word, TXD_W2_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W2_CWMAX, desc->cw_max); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 3, word); + + rt2x00_desc_read(txd, 10, &word); + rt2x00_set_field32(&word, TXD_W10_RTS, + test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags)); + rt2x00_desc_write(txd, 10, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue == IEEE80211_TX_QUEUE_BEACON) { + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { + rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + if (queue == 
IEEE80211_TX_QUEUE_DATA0) + rt2x00_set_field32(®, TXCSR0_KICK_PRIO, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA1) + rt2x00_set_field32(®, TXCSR0_KICK_TX, 1); + else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + rt2x00_set_field32(®, TXCSR0_KICK_ATIM, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); +} + +/* + * RX control handlers + */ +static void rt2500pci_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = entry->priv; + u32 word0; + u32 word2; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 2, &word2); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + desc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); + desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - + entry->ring->rt2x00dev->rssi_offset; + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); +} + +/* + * Interrupt functions. + */ +static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_entry *entry; + struct data_desc *txd; + u32 word; + int tx_status; + int retry; + + while (!rt2x00_ring_empty(ring)) { + entry = rt2x00_get_data_entry_done(ring); + txd = entry->priv; + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + break; + + /* + * Obtain the status about this packet. + */ + tx_status = rt2x00_get_field32(word, TXD_W0_RESULT); + retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); + + rt2x00lib_txdone(entry, tx_status, retry); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_desc_write(txd, 0, word); + rt2x00_ring_index_done_inc(ring); + } + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + entry = ring->entry; + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); +} + +static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Beacon timer expired interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 2 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 3 - Atim ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) + rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + + /* + * 4 - Priority ring transmit done interrupt. 
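+ * (The priority ring is backed by the DATA0 queue, see rt2500pci_init_rings.)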
+ */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) + rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + + /* + * 5 - Tx ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) + rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2500pci_eepromregister_read; + eeprom.register_write = rt2500pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", + print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, + DEFAULT_RSSI_OFFSET); + rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); + EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); + } + + return 0; +} + +static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2560, value, reg); + + if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && + !rt2x00_rf(&rt2x00dev->chip, RF2523) && + !rt2x00_rf(&rt2x00dev->chip, RF2524) && + !rt2x00_rf(&rt2x00dev->chip, RF2525) && + !rt2x00_rf(&rt2x00dev->chip, RF2525E) && + !rt2x00_rf(&rt2x00dev->chip, RF5222)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. 
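+ * The TX and RX defaults are taken from the EEPROM_ANTENNA word read above.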
+ */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ + rt2x00dev->led_mode = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + /* + * Detect if this device has an hardware controlled radio. + */ +#ifdef CONFIG_RT2500PCI_RFKILL + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); +#endif /* CONFIG_RT2500PCI_RFKILL */ + + /* + * Check if the BBP tuning should be enabled. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Read the RSSI <-> dBm offset information. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); + rt2x00dev->rssi_offset = + rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); + + return 0; +} + +/* + * RF value list for RF2522 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2522[] = { + { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, +}; + +/* + * RF value list for RF2523 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2523[] = { + { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, + { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, + { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, + { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, + { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, + { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, + { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, + { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, + { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, + { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, + { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, + { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, + { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, + { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, +}; + +/* + * RF value list for RF2524 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2524[] = { + { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, + { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, + { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, + { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, + { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, + { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, + { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, + { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, + { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, + { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, + { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, + { 12, 0x00032020, 
0x00000d0a, 0x00000101, 0x00000a1b }, + { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, + { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, +}; + +/* + * RF value list for RF2525 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525[] = { + { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, + { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, + { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, + { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, + { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, + { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, + { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, + { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, + { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, + { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, + { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, + { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, + { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, + { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, +}; + +/* + * RF value list for RF2525e + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525e[] = { + { 1, 0x00022020, 0x00081136, 0x00060111, 0x00000a0b }, + { 2, 0x00022020, 0x0008113a, 0x00060111, 0x00000a0b }, + { 3, 0x00022020, 0x0008113e, 0x00060111, 0x00000a0b }, + { 4, 0x00022020, 0x00081182, 0x00060111, 0x00000a0b }, + { 5, 0x00022020, 0x00081186, 0x00060111, 0x00000a0b }, + { 6, 0x00022020, 0x0008118a, 0x00060111, 0x00000a0b }, + { 7, 0x00022020, 0x0008118e, 0x00060111, 0x00000a0b }, + { 8, 0x00022020, 0x00081192, 0x00060111, 0x00000a0b }, + { 9, 0x00022020, 0x00081196, 0x00060111, 0x00000a0b }, + { 10, 0x00022020, 0x0008119a, 0x00060111, 0x00000a0b }, + { 11, 0x00022020, 0x0008119e, 0x00060111, 0x00000a0b }, + { 12, 0x00022020, 0x000811a2, 0x00060111, 0x00000a0b }, + { 13, 0x00022020, 0x000811a6, 0x00060111, 0x00000a0b }, + { 14, 0x00022020, 0x000811ae, 0x00060111, 0x00000a1b }, +}; + +/* + * RF value list for RF5222 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5222[] = { + { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, + { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, + { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, + { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, + { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, + { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, + { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, + { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, + { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, + { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, + { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, + { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, + { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, + { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, + { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, + { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, + { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, + { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, + { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, + { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, + { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, + { 104, 0x00022010, 0x00008806, 
0x00000101, 0x00000a0f }, + { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, + { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, + { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, + { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, + { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, + { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, + { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, + { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, + + /* 802.11 UNII */ + { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, + { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, + { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, + { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, + { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, +}; + +static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = 0; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 2; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); + spec->channels = rf_vals_bg_2522; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); + spec->channels = rf_vals_bg_2523; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); + spec->channels = rf_vals_bg_2524; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); + spec->channels = rf_vals_bg_2525; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); + spec->channels = rf_vals_bg_2525e; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5222); + spec->channels = rf_vals_5222; + spec->num_modes = 3; + } +} + +static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2500pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2500pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt2500pci_probe_hw_mode(rt2x00dev); + + /* + * This device requires the beacon ring + */ + __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. 
+ */ +static void rt2500pci_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, RXCSR0_DROP_MCAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, RXCSR0_DROP_BCAST, 0); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static int rt2500pci_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_LONG_RETRY, long_retry); + rt2x00_set_field32(®, CSR11_SHORT_RETRY, short_retry); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + return 0; +} + +static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR17, ®); + tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, CSR16, ®); + tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); + + return tsf; +} + +static void rt2500pci_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00pci_register_write(rt2x00dev, CSR16, 0); + rt2x00pci_register_write(rt2x00dev, CSR17, 0); +} + +static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR15, ®); + return rt2x00_get_field32(reg, CSR15_BEACON_SENT); +} + +static const struct ieee80211_ops rt2500pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt2500pci_configure_filter, + .get_stats = 
rt2x00mac_get_stats, + .set_retry_limit = rt2500pci_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt2500pci_get_tsf, + .reset_tsf = rt2500pci_reset_tsf, + .beacon_update = rt2x00pci_beacon_update, + .tx_last_beacon = rt2500pci_tx_last_beacon, +}; + +static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { + .irq_handler = rt2500pci_interrupt, + .probe_hw = rt2500pci_probe_hw, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .set_device_state = rt2500pci_set_device_state, + .rfkill_poll = rt2500pci_rfkill_poll, + .link_stats = rt2500pci_link_stats, + .reset_tuner = rt2500pci_reset_tuner, + .link_tuner = rt2500pci_link_tuner, + .write_tx_desc = rt2500pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .kick_tx_queue = rt2500pci_kick_tx_queue, + .fill_rxdone = rt2500pci_fill_rxdone, + .config_mac_addr = rt2500pci_config_mac_addr, + .config_bssid = rt2500pci_config_bssid, + .config_type = rt2500pci_config_type, + .config_preamble = rt2500pci_config_preamble, + .config = rt2500pci_config, +}; + +static const struct rt2x00_ops rt2500pci_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt2500pci_rt2x00_ops, + .hw = &rt2500pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2500pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT2500pci module information. + */ +static struct pci_device_id rt2500pci_device_table[] = { + { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt2500pci_driver = { + .name = DRV_NAME, + .id_table = rt2500pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt2500pci_init(void) +{ + return pci_register_driver(&rt2500pci_driver); +} + +static void __exit rt2500pci_exit(void) +{ + pci_unregister_driver(&rt2500pci_driver); +} + +module_init(rt2500pci_init); +module_exit(rt2500pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h new file mode 100644 index 000000000000..d92aa56b2f4b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500pci.h @@ -0,0 +1,1236 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +/* + Module: rt2500pci + Abstract: Data structures and registers for the rt2500pci module. + Supported chipsets: RT2560. + */ + +#ifndef RT2500PCI_H +#define RT2500PCI_H + +/* + * RF chip defines. + */ +#define RF2522 0x0000 +#define RF2523 0x0001 +#define RF2524 0x0002 +#define RF2525 0x0003 +#define RF2525E 0x0004 +#define RF5222 0x0010 + +/* + * RT2560 version + */ +#define RT2560_VERSION_B 2 +#define RT2560_VERSION_C 3 +#define RT2560_VERSION_D 4 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 121 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0000 +#define CSR_REG_SIZE 0x0174 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0200 +#define BBP_SIZE 0x0040 +#define RF_SIZE 0x0014 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR0: ASIC revision number. + */ +#define CSR0 0x0000 + +/* + * CSR1: System control register. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define CSR1 0x0004 +#define CSR1_SOFT_RESET FIELD32(0x00000001) +#define CSR1_BBP_RESET FIELD32(0x00000002) +#define CSR1_HOST_READY FIELD32(0x00000004) + +/* + * CSR2: System admin status register (invalid). + */ +#define CSR2 0x0008 + +/* + * CSR3: STA MAC address register 0. + */ +#define CSR3 0x000c +#define CSR3_BYTE0 FIELD32(0x000000ff) +#define CSR3_BYTE1 FIELD32(0x0000ff00) +#define CSR3_BYTE2 FIELD32(0x00ff0000) +#define CSR3_BYTE3 FIELD32(0xff000000) + +/* + * CSR4: STA MAC address register 1. + */ +#define CSR4 0x0010 +#define CSR4_BYTE4 FIELD32(0x000000ff) +#define CSR4_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR5: BSSID register 0. + */ +#define CSR5 0x0014 +#define CSR5_BYTE0 FIELD32(0x000000ff) +#define CSR5_BYTE1 FIELD32(0x0000ff00) +#define CSR5_BYTE2 FIELD32(0x00ff0000) +#define CSR5_BYTE3 FIELD32(0xff000000) + +/* + * CSR6: BSSID register 1. + */ +#define CSR6 0x0018 +#define CSR6_BYTE4 FIELD32(0x000000ff) +#define CSR6_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR7: Interrupt source register. + * Write 1 to clear. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + * DECRYPTION_DONE: Decryption done interrupt. + * ENCRYPTION_DONE: Encryption done interrupt. + * UART1_TX_TRESHOLD: UART1 TX reaches threshold. + * UART1_RX_TRESHOLD: UART1 RX reaches threshold. + * UART1_IDLE_TRESHOLD: UART1 IDLE over threshold. + * UART1_TX_BUFF_ERROR: UART1 TX buffer error. + * UART1_RX_BUFF_ERROR: UART1 RX buffer error. + * UART2_TX_TRESHOLD: UART2 TX reaches threshold. + * UART2_RX_TRESHOLD: UART2 RX reaches threshold. + * UART2_IDLE_TRESHOLD: UART2 IDLE over threshold. + * UART2_TX_BUFF_ERROR: UART2 TX buffer error. + * UART2_RX_BUFF_ERROR: UART2 RX buffer error. + * TIMER_CSR3_EXPIRE: TIMECSR3 timer expired (802.1H quiet period). 
+ + */ +#define CSR7 0x001c +#define CSR7_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR7_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR7_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR7_TXDONE_TXRING FIELD32(0x00000008) +#define CSR7_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR7_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR7_RXDONE FIELD32(0x00000040) +#define CSR7_DECRYPTION_DONE FIELD32(0x00000080) +#define CSR7_ENCRYPTION_DONE FIELD32(0x00000100) +#define CSR7_UART1_TX_TRESHOLD FIELD32(0x00000200) +#define CSR7_UART1_RX_TRESHOLD FIELD32(0x00000400) +#define CSR7_UART1_IDLE_TRESHOLD FIELD32(0x00000800) +#define CSR7_UART1_TX_BUFF_ERROR FIELD32(0x00001000) +#define CSR7_UART1_RX_BUFF_ERROR FIELD32(0x00002000) +#define CSR7_UART2_TX_TRESHOLD FIELD32(0x00004000) +#define CSR7_UART2_RX_TRESHOLD FIELD32(0x00008000) +#define CSR7_UART2_IDLE_TRESHOLD FIELD32(0x00010000) +#define CSR7_UART2_TX_BUFF_ERROR FIELD32(0x00020000) +#define CSR7_UART2_RX_BUFF_ERROR FIELD32(0x00040000) +#define CSR7_TIMER_CSR3_EXPIRE FIELD32(0x00080000) + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + * DECRYPTION_DONE: Decryption done interrupt. + * ENCRYPTION_DONE: Encryption done interrupt. + * UART1_TX_TRESHOLD: UART1 TX reaches threshold. + * UART1_RX_TRESHOLD: UART1 RX reaches threshold. + * UART1_IDLE_TRESHOLD: UART1 IDLE over threshold. + * UART1_TX_BUFF_ERROR: UART1 TX buffer error. + * UART1_RX_BUFF_ERROR: UART1 RX buffer error. + * UART2_TX_TRESHOLD: UART2 TX reaches threshold. + * UART2_RX_TRESHOLD: UART2 RX reaches threshold. + * UART2_IDLE_TRESHOLD: UART2 IDLE over threshold. + * UART2_TX_BUFF_ERROR: UART2 TX buffer error. + * UART2_RX_BUFF_ERROR: UART2 RX buffer error. + * TIMER_CSR3_EXPIRE: TIMECSR3 timer expired (802.1H quiet period). + */ +#define CSR8 0x0020 +#define CSR8_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR8_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR8_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR8_TXDONE_TXRING FIELD32(0x00000008) +#define CSR8_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR8_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR8_RXDONE FIELD32(0x00000040) +#define CSR8_DECRYPTION_DONE FIELD32(0x00000080) +#define CSR8_ENCRYPTION_DONE FIELD32(0x00000100) +#define CSR8_UART1_TX_TRESHOLD FIELD32(0x00000200) +#define CSR8_UART1_RX_TRESHOLD FIELD32(0x00000400) +#define CSR8_UART1_IDLE_TRESHOLD FIELD32(0x00000800) +#define CSR8_UART1_TX_BUFF_ERROR FIELD32(0x00001000) +#define CSR8_UART1_RX_BUFF_ERROR FIELD32(0x00002000) +#define CSR8_UART2_TX_TRESHOLD FIELD32(0x00004000) +#define CSR8_UART2_RX_TRESHOLD FIELD32(0x00008000) +#define CSR8_UART2_IDLE_TRESHOLD FIELD32(0x00010000) +#define CSR8_UART2_TX_BUFF_ERROR FIELD32(0x00020000) +#define CSR8_UART2_RX_BUFF_ERROR FIELD32(0x00040000) +#define CSR8_TIMER_CSR3_EXPIRE FIELD32(0x00080000) + +/* + * CSR9: Maximum frame length register. + * MAX_FRAME_UNIT: Maximum frame length in 128b unit, default: 12. + */ +#define CSR9 0x0024 +#define CSR9_MAX_FRAME_UNIT FIELD32(0x00000f80) + +/* + * SECCSR0: WEP control register. + * KICK_DECRYPT: Kick decryption engine, self-clear. + * ONE_SHOT: 0: ring mode, 1: One shot only mode. 
+ * DESC_ADDRESS: Descriptor physical address of frame. + */ +#define SECCSR0 0x0028 +#define SECCSR0_KICK_DECRYPT FIELD32(0x00000001) +#define SECCSR0_ONE_SHOT FIELD32(0x00000002) +#define SECCSR0_DESC_ADDRESS FIELD32(0xfffffffc) + +/* + * CSR11: Back-off control register. + * CWMIN: CWmin. Default cwmin is 31 (2^5 - 1). + * CWMAX: CWmax. Default cwmax is 1023 (2^10 - 1). + * SLOT_TIME: Slot time, default is 20us for 802.11b + * CW_SELECT: CWmin/CWmax selection, 1: Register, 0: TXD. + * LONG_RETRY: Long retry count. + * SHORT_RETRY: Short retry count. + */ +#define CSR11 0x002c +#define CSR11_CWMIN FIELD32(0x0000000f) +#define CSR11_CWMAX FIELD32(0x000000f0) +#define CSR11_SLOT_TIME FIELD32(0x00001f00) +#define CSR11_CW_SELECT FIELD32(0x00002000) +#define CSR11_LONG_RETRY FIELD32(0x00ff0000) +#define CSR11_SHORT_RETRY FIELD32(0xff000000) + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + * BEACON_INTERVAL: Beacon interval, default is 100 TU. + * CFP_MAX_DURATION: Cfp maximum duration, default is 100 TU. + */ +#define CSR12 0x0030 +#define CSR12_BEACON_INTERVAL FIELD32(0x0000ffff) +#define CSR12_CFP_MAX_DURATION FIELD32(0xffff0000) + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + * ATIMW_DURATION: Atim window duration. + * CFP_PERIOD: Cfp period, default is 0 TU. + */ +#define CSR13 0x0034 +#define CSR13_ATIMW_DURATION FIELD32(0x0000ffff) +#define CSR13_CFP_PERIOD FIELD32(0x00ff0000) + +/* + * CSR14: Synchronization control register. + * TSF_COUNT: Enable tsf auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable tbcn with reload value. + * TCFP: Enable tcfp & cfp / cp switching. + * TATIMW: Enable tatimw & atim window switching. + * BEACON_GEN: Enable beacon generator. + * CFP_COUNT_PRELOAD: Cfp count preload value. + * TBCM_PRELOAD: Tbcn preload value in units of 64us. + */ +#define CSR14 0x0038 +#define CSR14_TSF_COUNT FIELD32(0x00000001) +#define CSR14_TSF_SYNC FIELD32(0x00000006) +#define CSR14_TBCN FIELD32(0x00000008) +#define CSR14_TCFP FIELD32(0x00000010) +#define CSR14_TATIMW FIELD32(0x00000020) +#define CSR14_BEACON_GEN FIELD32(0x00000040) +#define CSR14_CFP_COUNT_PRELOAD FIELD32(0x0000ff00) +#define CSR14_TBCM_PRELOAD FIELD32(0xffff0000) + +/* + * CSR15: Synchronization status register. + * CFP: ASIC is in contention-free period. + * ATIMW: ASIC is in ATIM window. + * BEACON_SENT: Beacon is send. + */ +#define CSR15 0x003c +#define CSR15_CFP FIELD32(0x00000001) +#define CSR15_ATIMW FIELD32(0x00000002) +#define CSR15_BEACON_SENT FIELD32(0x00000004) + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16 0x0040 +#define CSR16_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17 0x0044 +#define CSR17_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR18: IFS timer register 0. + * SIFS: Sifs, default is 10 us. + * PIFS: Pifs, default is 30 us. + */ +#define CSR18 0x0048 +#define CSR18_SIFS FIELD32(0x000001ff) +#define CSR18_PIFS FIELD32(0x001f0000) + +/* + * CSR19: IFS timer register 1. + * DIFS: Difs, default is 50 us. + * EIFS: Eifs, default is 364 us. + */ +#define CSR19 0x004c +#define CSR19_DIFS FIELD32(0x0000ffff) +#define CSR19_EIFS FIELD32(0xffff0000) + +/* + * CSR20: Wakeup timer register. + * DELAY_AFTER_TBCN: Delay after tbcn expired in units of 1/16 TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTOWAKE: Enable auto wakeup / sleep mechanism. 
+ */ +#define CSR20 0x0050 +#define CSR20_DELAY_AFTER_TBCN FIELD32(0x0000ffff) +#define CSR20_TBCN_BEFORE_WAKEUP FIELD32(0x00ff0000) +#define CSR20_AUTOWAKE FIELD32(0x01000000) + +/* + * CSR21: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + */ +#define CSR21 0x0054 +#define CSR21_RELOAD FIELD32(0x00000001) +#define CSR21_EEPROM_DATA_CLOCK FIELD32(0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(0x00000010) +#define CSR21_TYPE_93C46 FIELD32(0x00000020) + +/* + * CSR22: CFP control register. + * CFP_DURATION_REMAIN: Cfp duration remain, in units of TU. + * RELOAD_CFP_DURATION: Write 1 to reload cfp duration remain. + */ +#define CSR22 0x0058 +#define CSR22_CFP_DURATION_REMAIN FIELD32(0x0000ffff) +#define CSR22_RELOAD_CFP_DURATION FIELD32(0x00010000) + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + * KICK_TX: Kick tx ring. + * KICK_ATIM: Kick atim ring. + * KICK_PRIO: Kick priority ring. + * ABORT: Abort all transmit related ring operation. + */ +#define TXCSR0 0x0060 +#define TXCSR0_KICK_TX FIELD32(0x00000001) +#define TXCSR0_KICK_ATIM FIELD32(0x00000002) +#define TXCSR0_KICK_PRIO FIELD32(0x00000004) +#define TXCSR0_ABORT FIELD32(0x00000008) + +/* + * TXCSR1: TX Configuration Register. + * ACK_TIMEOUT: Ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. + * ACK_CONSUME_TIME: Ack consume time, default = sifs + acktime @ 1mbps. + * TSF_OFFSET: Insert tsf offset. + * AUTORESPONDER: Enable auto responder which include ack & cts. + */ +#define TXCSR1 0x0064 +#define TXCSR1_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXCSR1_ACK_CONSUME_TIME FIELD32(0x0003fe00) +#define TXCSR1_TSF_OFFSET FIELD32(0x00fc0000) +#define TXCSR1_AUTORESPONDER FIELD32(0x01000000) + +/* + * TXCSR2: Tx descriptor configuration register. + * TXD_SIZE: Tx descriptor size, default is 48. + * NUM_TXD: Number of tx entries in ring. + * NUM_ATIM: Number of atim entries in ring. + * NUM_PRIO: Number of priority entries in ring. + */ +#define TXCSR2 0x0068 +#define TXCSR2_TXD_SIZE FIELD32(0x000000ff) +#define TXCSR2_NUM_TXD FIELD32(0x0000ff00) +#define TXCSR2_NUM_ATIM FIELD32(0x00ff0000) +#define TXCSR2_NUM_PRIO FIELD32(0xff000000) + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3 0x006c +#define TXCSR3_TX_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4 0x0070 +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5 0x0074 +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6 0x0078 +#define TXCSR6_BEACON_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR7: Auto responder control register. + * AR_POWERMANAGEMENT: Auto responder power management bit. + */ +#define TXCSR7 0x007c +#define TXCSR7_AR_POWERMANAGEMENT FIELD32(0x00000001) + +/* + * TXCSR8: CCK Tx BBP register. 
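+ * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not.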
+ */ +#define TXCSR8 0x0098 +#define TXCSR8_BBP_ID0 FIELD32(0x0000007f) +#define TXCSR8_BBP_ID0_VALID FIELD32(0x00000080) +#define TXCSR8_BBP_ID1 FIELD32(0x00007f00) +#define TXCSR8_BBP_ID1_VALID FIELD32(0x00008000) +#define TXCSR8_BBP_ID2 FIELD32(0x007f0000) +#define TXCSR8_BBP_ID2_VALID FIELD32(0x00800000) +#define TXCSR8_BBP_ID3 FIELD32(0x7f000000) +#define TXCSR8_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXCSR9: OFDM TX BBP registers + * OFDM_SIGNAL: BBP rate field address for OFDM. + * OFDM_SERVICE: BBP service field address for OFDM. + * OFDM_LENGTH_LOW: BBP length low byte address for OFDM. + * OFDM_LENGTH_HIGH: BBP length high byte address for OFDM. + */ +#define TXCSR9 0x0094 +#define TXCSR9_OFDM_RATE FIELD32(0x000000ff) +#define TXCSR9_OFDM_SERVICE FIELD32(0x0000ff00) +#define TXCSR9_OFDM_LENGTH_LOW FIELD32(0x00ff0000) +#define TXCSR9_OFDM_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Receive related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * RXCSR0: RX Control Register. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * PASS_CRC: Pass all packets with crc attached. + * PASS_CRC: Pass all packets with crc attached. + * PASS_PLCP: Pass all packets with 4 bytes PLCP attached. + * DROP_MCAST: Drop multicast frames. + * DROP_BCAST: Drop broadcast frames. + * ENABLE_QOS: Accept QOS data frame and parse QOS field. + */ +#define RXCSR0 0x0080 +#define RXCSR0_DISABLE_RX FIELD32(0x00000001) +#define RXCSR0_DROP_CRC FIELD32(0x00000002) +#define RXCSR0_DROP_PHYSICAL FIELD32(0x00000004) +#define RXCSR0_DROP_CONTROL FIELD32(0x00000008) +#define RXCSR0_DROP_NOT_TO_ME FIELD32(0x00000010) +#define RXCSR0_DROP_TODS FIELD32(0x00000020) +#define RXCSR0_DROP_VERSION_ERROR FIELD32(0x00000040) +#define RXCSR0_PASS_CRC FIELD32(0x00000080) +#define RXCSR0_PASS_PLCP FIELD32(0x00000100) +#define RXCSR0_DROP_MCAST FIELD32(0x00000200) +#define RXCSR0_DROP_BCAST FIELD32(0x00000400) +#define RXCSR0_ENABLE_QOS FIELD32(0x00000800) + +/* + * RXCSR1: RX descriptor configuration register. + * RXD_SIZE: Rx descriptor size, default is 32b. + * NUM_RXD: Number of rx entries in ring. + */ +#define RXCSR1 0x0084 +#define RXCSR1_RXD_SIZE FIELD32(0x000000ff) +#define RXCSR1_NUM_RXD FIELD32(0x0000ff00) + +/* + * RXCSR2: RX Ring base address register. + */ +#define RXCSR2 0x0088 +#define RXCSR2_RX_RING_REGISTER FIELD32(0xffffffff) + +/* + * RXCSR3: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. + */ +#define RXCSR3 0x0090 +#define RXCSR3_BBP_ID0 FIELD32(0x0000007f) +#define RXCSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define RXCSR3_BBP_ID1 FIELD32(0x00007f00) +#define RXCSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define RXCSR3_BBP_ID2 FIELD32(0x007f0000) +#define RXCSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define RXCSR3_BBP_ID3 FIELD32(0x7f000000) +#define RXCSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * ARCSR1: Auto Responder PLCP config register 1. + * AR_BBP_DATA#: Auto responder BBP register # data. + * AR_BBP_ID#: Auto responder BBP register # Id. 
+ */ +#define ARCSR1 0x009c +#define ARCSR1_AR_BBP_DATA2 FIELD32(0x000000ff) +#define ARCSR1_AR_BBP_ID2 FIELD32(0x0000ff00) +#define ARCSR1_AR_BBP_DATA3 FIELD32(0x00ff0000) +#define ARCSR1_AR_BBP_ID3 FIELD32(0xff000000) + +/* + * Miscellaneous Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PCICSR: PCI control register. + * BIG_ENDIAN: 1: big endian, 0: little endian. + * RX_TRESHOLD: Rx threshold in dw to start pci access + * 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. + * TX_TRESHOLD: Tx threshold in dw to start pci access + * 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. + * BURST_LENTH: Pci burst length 0: 4dw (default), 1: 8dw, 2: 16dw, 3: 32dw. + * ENABLE_CLK: Enable clk_run, pci clock can't go down to non-operational. + * READ_MULTIPLE: Enable memory read multiple. + * WRITE_INVALID: Enable memory write & invalidate. + */ +#define PCICSR 0x008c +#define PCICSR_BIG_ENDIAN FIELD32(0x00000001) +#define PCICSR_RX_TRESHOLD FIELD32(0x00000006) +#define PCICSR_TX_TRESHOLD FIELD32(0x00000018) +#define PCICSR_BURST_LENTH FIELD32(0x00000060) +#define PCICSR_ENABLE_CLK FIELD32(0x00000080) +#define PCICSR_READ_MULTIPLE FIELD32(0x00000100) +#define PCICSR_WRITE_INVALID FIELD32(0x00000200) + +/* + * CNT0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define CNT0 0x00a0 +#define CNT0_FCS_ERROR FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT1: PLCP error count. + * CNT2: Long error count. + */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac +#define CNT2 0x00b0 +#define TIMECSR3 0x00b4 + +/* + * CNT3: CCA false alarm count. + */ +#define CNT3 0x00b8 +#define CNT3_FALSE_CCA FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT4: Rx FIFO overflow count. + * CNT5: Tx FIFO underrun count. + */ +#define CNT4 0x00bc +#define CNT5 0x00c0 + +/* + * Baseband Control Register. + */ + +/* + * PWRCSR0: Power mode configuration register. + */ +#define PWRCSR0 0x00c4 + +/* + * Power state transition time registers. + */ +#define PSCSR0 0x00c8 +#define PSCSR1 0x00cc +#define PSCSR2 0x00d0 +#define PSCSR3 0x00d4 + +/* + * PWRCSR1: Manual power control / status register. + * Allowed state: 0: deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURR_STATE: BBP current state. + * RF_CURR_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define PWRCSR1 0x00d8 +#define PWRCSR1_SET_STATE FIELD32(0x00000001) +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(0x00000006) +#define PWRCSR1_RF_DESIRE_STATE FIELD32(0x00000018) +#define PWRCSR1_BBP_CURR_STATE FIELD32(0x00000060) +#define PWRCSR1_RF_CURR_STATE FIELD32(0x00000180) +#define PWRCSR1_PUT_TO_SLEEP FIELD32(0x00000200) + +/* + * TIMECSR: Timer control register. + * US_COUNT: 1 us timer count in units of clock cycles. + * US_64_COUNT: 64 us timer count in units of 1 us timer. + * BEACON_EXPECT: Beacon expect window. + */ +#define TIMECSR 0x00dc +#define TIMECSR_US_COUNT FIELD32(0x000000ff) +#define TIMECSR_US_64_COUNT FIELD32(0x0000ff00) +#define TIMECSR_BEACON_EXPECT FIELD32(0x00070000) + +/* + * MACCSR0: MAC configuration register 0. + */ +#define MACCSR0 0x00e0 + +/* + * MACCSR1: MAC configuration register 1. + * KICK_RX: Kick one-shot rx in one-shot rx mode. + * ONESHOT_RXMODE: Enable one-shot rx mode for debugging. + * BBPRX_RESET_MODE: Ralink bbp rx reset mode. 
+ * AUTO_TXBBP: Auto tx logic access bbp control register. + * AUTO_RXBBP: Auto rx logic access bbp control register. + * LOOPBACK: Loopback mode. 0: normal, 1: internal, 2: external, 3:rsvd. + * INTERSIL_IF: Intersil if calibration pin. + */ +#define MACCSR1 0x00e4 +#define MACCSR1_KICK_RX FIELD32(0x00000001) +#define MACCSR1_ONESHOT_RXMODE FIELD32(0x00000002) +#define MACCSR1_BBPRX_RESET_MODE FIELD32(0x00000004) +#define MACCSR1_AUTO_TXBBP FIELD32(0x00000008) +#define MACCSR1_AUTO_RXBBP FIELD32(0x00000010) +#define MACCSR1_LOOPBACK FIELD32(0x00000060) +#define MACCSR1_INTERSIL_IF FIELD32(0x00000080) + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + * AR_BBP_DATA#: Auto reset BBP register # data. + * AR_BBP_ID#: Auto reset BBP register # id. + */ +#define RALINKCSR 0x00e8 +#define RALINKCSR_AR_BBP_DATA0 FIELD32(0x000000ff) +#define RALINKCSR_AR_BBP_ID0 FIELD32(0x00007f00) +#define RALINKCSR_AR_BBP_VALID0 FIELD32(0x00008000) +#define RALINKCSR_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define RALINKCSR_AR_BBP_ID1 FIELD32(0x7f000000) +#define RALINKCSR_AR_BBP_VALID1 FIELD32(0x80000000) + +/* + * BCNCSR: Beacon interval control register. + * CHANGE: Write one to change beacon interval. + * DELTATIME: The delta time value. + * NUM_BEACON: Number of beacon according to mode. + * MODE: Please refer to asic specs. + * PLUS: Plus or minus delta time value. + */ +#define BCNCSR 0x00ec +#define BCNCSR_CHANGE FIELD32(0x00000001) +#define BCNCSR_DELTATIME FIELD32(0x0000001e) +#define BCNCSR_NUM_BEACON FIELD32(0x00001fe0) +#define BCNCSR_MODE FIELD32(0x00006000) +#define BCNCSR_PLUS FIELD32(0x00008000) + +/* + * BBP / RF / IF Control Register. + */ + +/* + * BBPCSR: BBP serial control register. + * VALUE: Register value to program into BBP. + * REGNUM: Selected BBP register. + * BUSY: 1: asic is busy execute BBP programming. + * WRITE_CONTROL: 1: write BBP, 0: read BBP. + */ +#define BBPCSR 0x00f0 +#define BBPCSR_VALUE FIELD32(0x000000ff) +#define BBPCSR_REGNUM FIELD32(0x00007f00) +#define BBPCSR_BUSY FIELD32(0x00008000) +#define BBPCSR_WRITE_CONTROL FIELD32(0x00010000) + +/* + * RFCSR: RF serial control register. + * VALUE: Register value + id to program into rf/if. + * NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * IF_SELECT: Chip to program: 0: rf, 1: if. + * PLL_LD: Rf pll_ld status. + * BUSY: 1: asic is busy execute rf programming. + */ +#define RFCSR 0x00f4 +#define RFCSR_VALUE FIELD32(0x00ffffff) +#define RFCSR_NUMBER_OF_BITS FIELD32(0x1f000000) +#define RFCSR_IF_SELECT FIELD32(0x20000000) +#define RFCSR_PLL_LD FIELD32(0x40000000) +#define RFCSR_BUSY FIELD32(0x80000000) + +/* + * LEDCSR: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY: 0: idle, 1: active. + * LINK_POLARITY: 0: active low, 1: active high. + * ACTIVITY_POLARITY: 0: active low, 1: active high. + * LED_DEFAULT: LED state for "enable" 0: ON, 1: OFF. + */ +#define LEDCSR 0x00f8 +#define LEDCSR_ON_PERIOD FIELD32(0x000000ff) +#define LEDCSR_OFF_PERIOD FIELD32(0x0000ff00) +#define LEDCSR_LINK FIELD32(0x00010000) +#define LEDCSR_ACTIVITY FIELD32(0x00020000) +#define LEDCSR_LINK_POLARITY FIELD32(0x00040000) +#define LEDCSR_ACTIVITY_POLARITY FIELD32(0x00080000) +#define LEDCSR_LED_DEFAULT FIELD32(0x00100000) + +/* + * AES control register. + */ +#define SECCSR3 0x00fc + +/* + * ASIC pointer information. + * RXPTR: Current RX ring address. + * TXPTR: Current Tx ring address. + * PRIPTR: Current Priority ring address. 
+ * ATIMPTR: Current ATIM ring address. + */ +#define RXPTR 0x0100 +#define TXPTR 0x0104 +#define PRIPTR 0x0108 +#define ATIMPTR 0x010c + +/* + * TXACKCSR0: TX ACK timeout. + */ +#define TXACKCSR0 0x0110 + +/* + * ACK timeout count registers. + * ACKCNT0: TX ACK timeout count. + * ACKCNT1: RX ACK timeout count. + */ +#define ACKCNT0 0x0114 +#define ACKCNT1 0x0118 + +/* + * GPIO and others. + */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR 0x0120 +#define GPIOCSR_BIT0 FIELD32(0x00000001) +#define GPIOCSR_BIT1 FIELD32(0x00000002) +#define GPIOCSR_BIT2 FIELD32(0x00000004) +#define GPIOCSR_BIT3 FIELD32(0x00000008) +#define GPIOCSR_BIT4 FIELD32(0x00000010) +#define GPIOCSR_BIT5 FIELD32(0x00000020) +#define GPIOCSR_BIT6 FIELD32(0x00000040) +#define GPIOCSR_BIT7 FIELD32(0x00000080) +#define GPIOCSR_DIR0 FIELD32(0x00000100) +#define GPIOCSR_DIR1 FIELD32(0x00000200) +#define GPIOCSR_DIR2 FIELD32(0x00000400) +#define GPIOCSR_DIR3 FIELD32(0x00000800) +#define GPIOCSR_DIR4 FIELD32(0x00001000) +#define GPIOCSR_DIR5 FIELD32(0x00002000) +#define GPIOCSR_DIR6 FIELD32(0x00004000) +#define GPIOCSR_DIR7 FIELD32(0x00008000) + +/* + * FIFO pointer registers. + * FIFOCSR0: TX FIFO pointer. + * FIFOCSR1: RX FIFO pointer. + */ +#define FIFOCSR0 0x0128 +#define FIFOCSR1 0x012c + +/* + * BCNCSR1: Tx BEACON offset time control register. + * PRELOAD: Beacon timer offset in units of usec. + * BEACON_CWMIN: 2^CwMin. + */ +#define BCNCSR1 0x0130 +#define BCNCSR1_PRELOAD FIELD32(0x0000ffff) +#define BCNCSR1_BEACON_CWMIN FIELD32(0x000f0000) + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + * DELAY: RX_PE low width, in units of pci clock cycle. + */ +#define MACCSR2 0x0134 +#define MACCSR2_DELAY FIELD32(0x000000ff) + +/* + * TESTCSR: TEST mode selection register. + */ +#define TESTCSR 0x0138 + +/* + * ARCSR2: 1 Mbps ACK/CTS PLCP. + */ +#define ARCSR2 0x013c +#define ARCSR2_SIGNAL FIELD32(0x000000ff) +#define ARCSR2_SERVICE FIELD32(0x0000ff00) +#define ARCSR2_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR3: 2 Mbps ACK/CTS PLCP. + */ +#define ARCSR3 0x0140 +#define ARCSR3_SIGNAL FIELD32(0x000000ff) +#define ARCSR3_SERVICE FIELD32(0x0000ff00) +#define ARCSR3_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR4: 5.5 Mbps ACK/CTS PLCP. + */ +#define ARCSR4 0x0144 +#define ARCSR4_SIGNAL FIELD32(0x000000ff) +#define ARCSR4_SERVICE FIELD32(0x0000ff00) +#define ARCSR4_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR5: 11 Mbps ACK/CTS PLCP. + */ +#define ARCSR5 0x0148 +#define ARCSR5_SIGNAL FIELD32(0x000000ff) +#define ARCSR5_SERVICE FIELD32(0x0000ff00) +#define ARCSR5_LENGTH FIELD32(0xffff0000) + +/* + * ARTCSR0: CCK ACK/CTS payload consumed time for 1/2/5.5/11 mbps. + */ +#define ARTCSR0 0x014c +#define ARTCSR0_ACK_CTS_11MBS FIELD32(0x000000ff) +#define ARTCSR0_ACK_CTS_5_5MBS FIELD32(0x0000ff00) +#define ARTCSR0_ACK_CTS_2MBS FIELD32(0x00ff0000) +#define ARTCSR0_ACK_CTS_1MBS FIELD32(0xff000000) + + +/* + * ARTCSR1: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define ARTCSR1 0x0150 +#define ARTCSR1_ACK_CTS_6MBS FIELD32(0x000000ff) +#define ARTCSR1_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define ARTCSR1_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define ARTCSR1_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * ARTCSR2: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. 
+ */ +#define ARTCSR2 0x0154 +#define ARTCSR2_ACK_CTS_24MBS FIELD32(0x000000ff) +#define ARTCSR2_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define ARTCSR2_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define ARTCSR2_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * SECCSR1_RT2509: WEP control register. + * KICK_ENCRYPT: Kick encryption engine, self-clear. + * ONE_SHOT: 0: ring mode, 1: One shot only mode. + * DESC_ADDRESS: Descriptor physical address of frame. + */ +#define SECCSR1 0x0158 +#define SECCSR1_KICK_ENCRYPT FIELD32(0x00000001) +#define SECCSR1_ONE_SHOT FIELD32(0x00000002) +#define SECCSR1_DESC_ADDRESS FIELD32(0xfffffffc) + +/* + * BBPCSR1: BBP TX configuration. + */ +#define BBPCSR1 0x015c +#define BBPCSR1_CCK FIELD32(0x00000003) +#define BBPCSR1_CCK_FLIP FIELD32(0x00000004) +#define BBPCSR1_OFDM FIELD32(0x00030000) +#define BBPCSR1_OFDM_FLIP FIELD32(0x00040000) + +/* + * Dual band configuration registers. + * DBANDCSR0: Dual band configuration register 0. + * DBANDCSR1: Dual band configuration register 1. + */ +#define DBANDCSR0 0x0160 +#define DBANDCSR1 0x0164 + +/* + * BBPPCSR: BBP Pin control register. + */ +#define BBPPCSR 0x0168 + +/* + * MAC special debug mode selection registers. + * DBGSEL0: MAC special debug mode selection register 0. + * DBGSEL1: MAC special debug mode selection register 1. + */ +#define DBGSEL0 0x016c +#define DBGSEL1 0x0170 + +/* + * BISTCSR: BBP BIST register. + */ +#define BISTCSR 0x0174 + +/* + * Multicast filter registers. + * MCAST0: Multicast filter register 0. + * MCAST1: Multicast filter register 1. + */ +#define MCAST0 0x0178 +#define MCAST1 0x017c + +/* + * UART registers. + * UARTCSR0: UART1 TX register. + * UARTCSR1: UART1 RX register. + * UARTCSR3: UART1 frame control register. + * UARTCSR4: UART1 buffer control register. + * UART2CSR0: UART2 TX register. + * UART2CSR1: UART2 RX register. + * UART2CSR3: UART2 frame control register. + * UART2CSR4: UART2 buffer control register. + */ +#define UARTCSR0 0x0180 +#define UARTCSR1 0x0184 +#define UARTCSR3 0x0188 +#define UARTCSR4 0x018c +#define UART2CSR0 0x0190 +#define UART2CSR1 0x0194 +#define UART2CSR3 0x0198 +#define UART2CSR4 0x019c + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2: TX antenna control + */ +#define BBP_R2_TX_ANTENNA FIELD8(0x03) +#define BBP_R2_TX_IQ_FLIP FIELD8(0x04) + +/* + * R14: RX antenna control + */ +#define BBP_R14_RX_ANTENNA FIELD8(0x03) +#define BBP_R14_RX_IQ_FLIP FIELD8(0x04) + +/* + * BBP_R70 + */ +#define BBP_R70_JAPAN_FILTER FIELD8(0x08) + +/* + * RF registers + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * LED_MODE: 0: default, 1: TX/RX activity,2: Single (ignore link), 3: rsvd. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. 
Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x10 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x01c0) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * CARDBUS_ACCEL: 0: enable, 1: disable. + * DYN_BBP_TUNE: 0: enable, 1: disable. + * CCK_TX_POWER: CCK TX power compensation. + */ +#define EEPROM_NIC 0x11 +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0001) +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(0x0002) +#define EEPROM_NIC_CCK_TX_POWER FIELD16(0x000c) + +/* + * EEPROM geography. + * GEO: Default geography setting for device. + */ +#define EEPROM_GEOGRAPHY 0x12 +#define EEPROM_GEOGRAPHY_GEO FIELD16(0x0f00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x13 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x23 +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * RSSI <-> dBm offset calibration + */ +#define EEPROM_CALIBRATE_OFFSET 0x3e +#define EEPROM_CALIBRATE_OFFSET_RSSI FIELD16(0x00ff) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 11 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 11 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_RESULT FIELD32(0x0000001c) +#define TXD_W0_RETRY_COUNT FIELD32(0x000000e0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_OFDM FIELD32(0x00000800) +#define TXD_W0_CIPHER_OWNER FIELD32(0x00001000) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(0x00008000) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + */ +#define TXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define TXD_W2_IV_OFFSET FIELD32(0x0000003f) +#define TXD_W2_AIFS FIELD32(0x000000c0) +#define TXD_W2_CWMIN FIELD32(0x00000f00) +#define TXD_W2_CWMAX FIELD32(0x0000f000) + +/* + * Word3: PLCP information + */ +#define TXD_W3_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W3_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W3_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W3_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word4 + */ +#define TXD_W4_IV FIELD32(0xffffffff) + +/* + * Word5 + */ +#define TXD_W5_EIV FIELD32(0xffffffff) + +/* + * Word6-9: Key + */ +#define TXD_W6_KEY FIELD32(0xffffffff) +#define TXD_W7_KEY FIELD32(0xffffffff) +#define TXD_W8_KEY FIELD32(0xffffffff) +#define TXD_W9_KEY FIELD32(0xffffffff) + +/* + * Word10 + */ +#define TXD_W10_RTS FIELD32(0x00000001) +#define TXD_W10_TX_RATE FIELD32(0x000000fe) + +/* + * RX descriptor format for RX Ring. 
+ */ + +/* + * Word0 + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_OFDM FIELD32(0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_CIPHER_OWNER FIELD32(0x00000100) +#define RXD_W0_ICV_ERROR FIELD32(0x00000200) +#define RXD_W0_IV_OFFSET FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + */ +#define RXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define RXD_W2_SIGNAL FIELD32(0x000000ff) +#define RXD_W2_RSSI FIELD32(0x0000ff00) +#define RXD_W2_TA FIELD32(0xffff0000) + +/* + * Word3 + */ +#define RXD_W3_TA FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_IV FIELD32(0xffffffff) + +/* + * Word5 + */ +#define RXD_W5_EIV FIELD32(0xffffffff) + +/* + * Word6-9: Key + */ +#define RXD_W6_KEY FIELD32(0xffffffff) +#define RXD_W7_KEY FIELD32(0xffffffff) +#define RXD_W8_KEY FIELD32(0xffffffff) +#define RXD_W9_KEY FIELD32(0xffffffff) + +/* + * Word10 + */ +#define RXD_W10_DROP FIELD32(0x00000001) + +/* + * Macros for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT2500PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c new file mode 100644 index 000000000000..7cdc80a122bb --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -0,0 +1,1832 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500usb + Abstract: rt2500usb device specific routines. + Supported chipsets: RT2570. + */ + +/* + * Set environment defines for rt2x00.h + */ +#define DRV_NAME "rt2500usb" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "rt2x00.h" +#include "rt2x00usb.h" +#include "rt2500usb.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2500usb_register_read and rt2500usb_register_write. + * BBP and RF registers require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. 
+ * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +static inline void rt2500usb_register_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u16 *value) +{ + __le16 reg; + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + ®, sizeof(u16), REGISTER_TIMEOUT); + *value = le16_to_cpu(reg); +} + +static inline void rt2500usb_register_multiread(const struct rt2x00_dev + *rt2x00dev, + const unsigned int offset, + void *value, const u16 length) +{ + int timeout = REGISTER_TIMEOUT * (length / sizeof(u16)); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + value, length, timeout); +} + +static inline void rt2500usb_register_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u16 value) +{ + __le16 reg = cpu_to_le16(value); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + ®, sizeof(u16), REGISTER_TIMEOUT); +} + +static inline void rt2500usb_register_multiwrite(const struct rt2x00_dev + *rt2x00dev, + const unsigned int offset, + void *value, const u16 length) +{ + int timeout = REGISTER_TIMEOUT * (length / sizeof(u16)); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + value, length, timeout); +} + +static u16 rt2500usb_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_register_read(rt2x00dev, PHY_CSR8, ®); + if (!rt2x00_get_field16(reg, PHY_CSR8_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt2500usb_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u16 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500usb_bbp_check(rt2x00dev); + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field16(®, PHY_CSR7_DATA, value); + rt2x00_set_field16(®, PHY_CSR7_REG_ID, word); + rt2x00_set_field16(®, PHY_CSR7_READ_CONTROL, 0); + + rt2500usb_register_write(rt2x00dev, PHY_CSR7, reg); +} + +static void rt2500usb_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u16 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500usb_bbp_check(rt2x00dev); + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field16(®, PHY_CSR7_REG_ID, word); + rt2x00_set_field16(®, PHY_CSR7_READ_CONTROL, 1); + + rt2500usb_register_write(rt2x00dev, PHY_CSR7, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500usb_bbp_check(rt2x00dev); + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR8 register busy. 
Read failed.\n"); + *value = 0xff; + return; + } + + rt2500usb_register_read(rt2x00dev, PHY_CSR7, ®); + *value = rt2x00_get_field16(reg, PHY_CSR7_DATA); +} + +static void rt2500usb_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u16 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_register_read(rt2x00dev, PHY_CSR10, ®); + if (!rt2x00_get_field16(reg, PHY_CSR10_RF_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "PHY_CSR10 register busy. Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field16(®, PHY_CSR9_RF_VALUE, value); + rt2500usb_register_write(rt2x00dev, PHY_CSR9, reg); + + reg = 0; + rt2x00_set_field16(®, PHY_CSR10_RF_VALUE, value >> 16); + rt2x00_set_field16(®, PHY_CSR10_RF_NUMBER_OF_BITS, 20); + rt2x00_set_field16(®, PHY_CSR10_RF_IF_SELECT, 0); + rt2x00_set_field16(®, PHY_CSR10_RF_BUSY, 1); + + rt2500usb_register_write(rt2x00dev, PHY_CSR10, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u16)) ) + +static void rt2500usb_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2500usb_register_read(rt2x00dev, CSR_OFFSET(word), (u16 *) data); +} + +static void rt2500usb_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2500usb_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt2500usb_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2500usb_read_csr, + .write = rt2500usb_write_csr, + .word_size = sizeof(u16), + .word_count = CSR_REG_SIZE / sizeof(u16), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2500usb_bbp_read, + .write = rt2500usb_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2500usb_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +/* + * Configuration handlers. + */ +static void rt2500usb_config_mac_addr(struct rt2x00_dev *rt2x00dev, + __le32 *mac) +{ + rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, &mac, + (3 * sizeof(__le16))); +} + +static void rt2500usb_config_bssid(struct rt2x00_dev *rt2x00dev, + __le32 *bssid) +{ + rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, bssid, + (3 * sizeof(__le16))); +} + +static void rt2500usb_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u16 reg; + + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + + /* + * Enable beacon config + */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR20, ®); + rt2x00_set_field16(®, TXRX_CSR20_OFFSET, + (PREAMBLE + get_duration(IEEE80211_HEADER, 20)) >> 6); + if (type == IEEE80211_IF_TYPE_STA) + rt2x00_set_field16(®, TXRX_CSR20_BCN_EXPECT_WINDOW, 0); + else + rt2x00_set_field16(®, TXRX_CSR20_BCN_EXPECT_WINDOW, 2); + rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); + + /* + * Enable synchronisation. 
+ */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR18, ®); + rt2x00_set_field16(®, TXRX_CSR18_OFFSET, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + rt2x00_set_field16(®, TXRX_CSR19_TSF_COUNT, 1); + rt2x00_set_field16(®, TXRX_CSR19_TBCN, 1); + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 0); + rt2x00_set_field16(®, TXRX_CSR19_TSF_SYNC, tsf_sync); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); +} + +static void rt2500usb_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + u16 reg; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->config_work); + return; + } + + rt2500usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field16(®, TXRX_CSR1_ACK_TIMEOUT, ack_timeout); + rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR10, ®); + rt2x00_set_field16(®, TXRX_CSR10_AUTORESPOND_PREAMBLE, + !!short_preamble); + rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); +} + +static void rt2500usb_config_phymode(struct rt2x00_dev *rt2x00dev, + const int phymode, + const int basic_rate_mask) +{ + rt2500usb_register_write(rt2x00dev, TXRX_CSR11, basic_rate_mask); + + if (phymode == HWMODE_B) { + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x000b); + rt2500usb_register_write(rt2x00dev, MAC_CSR12, 0x0040); + } else { + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0005); + rt2500usb_register_write(rt2x00dev, MAC_CSR12, 0x016c); + } +} + +static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + /* + * Set TXpower. + */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + + /* + * For RT2525E we should first set the channel to half band higher. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { + static const u32 vals[] = { + 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, + 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, + 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, + 0x00000902, 0x00000906 + }; + + rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); + if (rf->rf4) + rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); + } + + rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); + rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); + rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); +} + +static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 rf3; + + rt2x00_rf_read(rt2x00dev, 3, &rf3); + rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2500usb_rf_write(rt2x00dev, 3, rf3); +} + +static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + u8 r2; + u8 r14; + u16 csr5; + u16 csr6; + + rt2500usb_bbp_read(rt2x00dev, 2, &r2); + rt2500usb_bbp_read(rt2x00dev, 14, &r14); + rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5); + rt2500usb_register_read(rt2x00dev, PHY_CSR6, &csr6); + + /* + * Configure the TX antenna. 
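+ * BBP R2 selects the TX antenna; PHY_CSR5 (CCK) and PHY_CSR6 (OFDM) are + * programmed with the same selection: 0: antenna A, 1: diversity, 2: antenna B.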
+ */ + switch (antenna_tx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + } + + /* + * RT2525E and RT5222 need to flip TX I/Q + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || + rt2x00_rf(&rt2x00dev->chip, RF5222)) { + rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); + + /* + * RT2525E does not need RX I/Q Flip. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) + rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); + } else { + rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); + } + + rt2500usb_bbp_write(rt2x00dev, 2, r2); + rt2500usb_bbp_write(rt2x00dev, 14, r14); + rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); + rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); +} + +static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u16 reg; + + rt2500usb_register_write(rt2x00dev, MAC_CSR10, libconf->slot_time); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR18, ®); + rt2x00_set_field16(®, TXRX_CSR18_INTERVAL, + libconf->conf->beacon_int * 4); + rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); +} + +static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt2500usb_config_phymode(rt2x00dev, libconf->phymode, + libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt2500usb_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt2500usb_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt2500usb_config_antenna(rt2x00dev, + libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt2500usb_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. 
+ */ +static void rt2500usb_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, MAC_CSR21, ®); + rt2x00_set_field16(®, MAC_CSR21_ON_PERIOD, 70); + rt2x00_set_field16(®, MAC_CSR21_OFF_PERIOD, 30); + rt2500usb_register_write(rt2x00dev, MAC_CSR21, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR20, ®); + + if (rt2x00dev->led_mode == LED_MODE_TXRX_ACTIVITY) { + rt2x00_set_field16(®, MAC_CSR20_LINK, 1); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 0); + } else if (rt2x00dev->led_mode == LED_MODE_ASUS) { + rt2x00_set_field16(®, MAC_CSR20_LINK, 0); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 1); + } else { + rt2x00_set_field16(®, MAC_CSR20_LINK, 1); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 1); + } + + rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg); +} + +static void rt2500usb_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, MAC_CSR20, ®); + rt2x00_set_field16(®, MAC_CSR20_LINK, 0); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg); +} + +/* + * Link tuning + */ +static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + /* + * Update FCS error count from register. + */ + rt2500usb_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2500usb_register_read(rt2x00dev, STA_CSR3, ®); + rt2x00dev->link.false_cca = + rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); +} + +static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + u16 eeprom; + u16 value; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW); + rt2500usb_bbp_write(rt2x00dev, 24, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW); + rt2500usb_bbp_write(rt2x00dev, 25, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW); + rt2500usb_bbp_write(rt2x00dev, 61, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); + rt2500usb_bbp_write(rt2x00dev, 17, value); + + rt2x00dev->link.vgc_level = value; +} + +static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u16 bbp_thresh; + u16 vgc_bound; + u16 sens; + u16 r24; + u16 r25; + u16 r61; + u16 r17_sens; + u8 r17; + u8 up_bound; + u8 low_bound; + + /* + * Determine the BBP tuning threshold and correctly + * set BBP 24, 25 and 61. 
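+ * The tuning threshold read from EEPROM word EEPROM_BBPTUNE is added to the + * average RSSI; when the sum is positive the *_HIGH EEPROM values are used, + * otherwise the *_LOW values are programmed.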
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &bbp_thresh); + bbp_thresh = rt2x00_get_field16(bbp_thresh, EEPROM_BBPTUNE_THRESHOLD); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &r24); + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &r25); + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &r61); + + if ((rssi + bbp_thresh) > 0) { + r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_HIGH); + r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_HIGH); + r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_HIGH); + } else { + r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_LOW); + r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_LOW); + r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_LOW); + } + + rt2500usb_bbp_write(rt2x00dev, 24, r24); + rt2500usb_bbp_write(rt2x00dev, 25, r25); + rt2500usb_bbp_write(rt2x00dev, 61, r61); + + /* + * Read current r17 value, as well as the sensitivity values + * for the r17 register. + */ + rt2500usb_bbp_read(rt2x00dev, 17, &r17); + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &r17_sens); + + /* + * A too low RSSI will cause too much false CCA which will + * then corrupt the R17 tuning. To remidy this the tuning should + * be stopped (While making sure the R17 value will not exceed limits) + */ + if (rssi >= -40) { + if (r17 != 0x60) + rt2500usb_bbp_write(rt2x00dev, 17, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (rssi >= -58) { + sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_LOW); + if (r17 != sens) + rt2500usb_bbp_write(rt2x00dev, 17, sens); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (rssi >= -74) { + sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_HIGH); + if (r17 != sens) + rt2500usb_bbp_write(rt2x00dev, 17, sens); + return; + } + + /* + * Leave short or middle distance condition, restore r17 + * to the dynamic tuning range. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &vgc_bound); + vgc_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCUPPER); + + low_bound = 0x32; + if (rssi >= -77) + up_bound = vgc_bound; + else + up_bound = vgc_bound - (-77 - rssi); + + if (up_bound < low_bound) + up_bound = low_bound; + + if (r17 > up_bound) { + rt2500usb_bbp_write(rt2x00dev, 17, up_bound); + rt2x00dev->link.vgc_level = up_bound; + } else if (rt2x00dev->link.false_cca > 512 && r17 < up_bound) { + rt2500usb_bbp_write(rt2x00dev, 17, ++r17); + rt2x00dev->link.vgc_level = r17; + } else if (rt2x00dev->link.false_cca < 100 && r17 > low_bound) { + rt2500usb_bbp_write(rt2x00dev, 17, --r17); + rt2x00dev->link.vgc_level = r17; + } +} + +/* + * Initialization functions. 
+ */ +static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001, + USB_MODE_TEST, REGISTER_TIMEOUT); + rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308, + 0x00f0, REGISTER_TIMEOUT); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DISABLE_RX, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); + + rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111); + rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11); + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 1); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR5, ®); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID0, 13); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID1, 12); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR6, ®); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID0, 10); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID1, 11); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID0, 7); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID1, 6); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID0, 5); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID1, 0); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID1_VALID, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); + rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + if (rt2x00_get_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) { + rt2500usb_register_read(rt2x00dev, PHY_CSR2, ®); + reg &= ~0x0002; + } else { + reg = 0x3002; + } + rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg); + + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002); + rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053); + rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee); + rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000); + + rt2500usb_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field16(®, MAC_CSR8_MAX_FRAME_UNIT, + rt2x00dev->rx->data_size); + rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field16(®, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field16(®, 
TXRX_CSR0_KEY_ID, 0xff); + rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR18, ®); + rt2x00_set_field16(®, MAC_CSR18_DELAY_AFTER_BEACON, 90); + rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); + + rt2500usb_register_read(rt2x00dev, PHY_CSR4, ®); + rt2x00_set_field16(®, PHY_CSR4_LOW_RF_LE, 1); + rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field16(®, TXRX_CSR1_AUTO_SEQUENCE, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + return 0; +} + +static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 value; + u8 reg_id; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2500usb_bbp_write(rt2x00dev, 3, 0x02); + rt2500usb_bbp_write(rt2x00dev, 4, 0x19); + rt2500usb_bbp_write(rt2x00dev, 14, 0x1c); + rt2500usb_bbp_write(rt2x00dev, 15, 0x30); + rt2500usb_bbp_write(rt2x00dev, 16, 0xac); + rt2500usb_bbp_write(rt2x00dev, 18, 0x18); + rt2500usb_bbp_write(rt2x00dev, 19, 0xff); + rt2500usb_bbp_write(rt2x00dev, 20, 0x1e); + rt2500usb_bbp_write(rt2x00dev, 21, 0x08); + rt2500usb_bbp_write(rt2x00dev, 22, 0x08); + rt2500usb_bbp_write(rt2x00dev, 23, 0x08); + rt2500usb_bbp_write(rt2x00dev, 24, 0x80); + rt2500usb_bbp_write(rt2x00dev, 25, 0x50); + rt2500usb_bbp_write(rt2x00dev, 26, 0x08); + rt2500usb_bbp_write(rt2x00dev, 27, 0x23); + rt2500usb_bbp_write(rt2x00dev, 30, 0x10); + rt2500usb_bbp_write(rt2x00dev, 31, 0x2b); + rt2500usb_bbp_write(rt2x00dev, 32, 0xb9); + rt2500usb_bbp_write(rt2x00dev, 34, 0x12); + rt2500usb_bbp_write(rt2x00dev, 35, 0x50); + rt2500usb_bbp_write(rt2x00dev, 39, 0xc4); + rt2500usb_bbp_write(rt2x00dev, 40, 0x02); + rt2500usb_bbp_write(rt2x00dev, 41, 0x60); + rt2500usb_bbp_write(rt2x00dev, 53, 0x10); + rt2500usb_bbp_write(rt2x00dev, 54, 0x18); + rt2500usb_bbp_write(rt2x00dev, 56, 0x08); + rt2500usb_bbp_write(rt2x00dev, 57, 0x10); + rt2500usb_bbp_write(rt2x00dev, 58, 0x08); + rt2500usb_bbp_write(rt2x00dev, 61, 0x60); + rt2500usb_bbp_write(rt2x00dev, 62, 0x10); + rt2500usb_bbp_write(rt2x00dev, 75, 0xff); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt2500usb_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); +} + +static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. 
+ */ + if (rt2500usb_init_registers(rt2x00dev) || + rt2500usb_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + rt2x00usb_enable_radio(rt2x00dev); + + /* + * Enable LED + */ + rt2500usb_enable_led(rt2x00dev); + + return 0; +} + +static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable LED + */ + rt2500usb_disable_led(rt2x00dev); + + rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); + rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); + + /* + * Disable synchronisation. + */ + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u16 reg; + u16 reg2; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + reg = 0; + rt2x00_set_field16(®, MAC_CSR17_BBP_DESIRE_STATE, state); + rt2x00_set_field16(®, MAC_CSR17_RF_DESIRE_STATE, state); + rt2x00_set_field16(®, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep); + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + rt2x00_set_field16(®, MAC_CSR17_SET_STATE, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_register_read(rt2x00dev, MAC_CSR17, ®2); + bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); + rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + msleep(30); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state: bbp %d and rf %d.\n", + state, bbp_state, rf_state); + + return -EBUSY; +} + +static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2500usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2500usb_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt2500usb_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2500usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. 
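+ * Word 1 (IV offset and backoff parameters) and word 2 (PLCP fields) are + * written before word 0, which holds the control flags and the data byte count.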
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W1_AIFS, desc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, + !!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev, + int maxpacket, struct sk_buff *skb) +{ + int length; + + /* + * The length _must_ be a multiple of 2, + * but it must _not_ be a multiple of the USB packet size. + */ + length = roundup(skb->len, 2); + length += (2 * !(length % maxpacket)); + + return length; +} + +/* + * TX data initialization + */ +static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u16 reg; + + if (queue != IEEE80211_TX_QUEUE_BEACON) + return; + + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) { + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 1); + /* + * Beacon generation will fail initially. + * To prevent this we need to register the TXRX_CSR19 + * register several times. + */ + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + } +} + +/* + * RX control handlers + */ +static void rt2500usb_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct urb *urb = entry->priv; + struct data_desc *rxd = (struct data_desc *)(entry->skb->data + + (urb->actual_length - + entry->ring->desc_size)); + u32 word0; + u32 word1; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + desc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + desc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - + entry->ring->rt2x00dev->rssi_offset; + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + return; +} + +/* + * Interrupt functions. 
+ */ +static void rt2500usb_beacondone(struct urb *urb) +{ + struct data_entry *entry = (struct data_entry *)urb->context; + struct data_ring *ring = entry->ring; + + if (!test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) + return; + + /* + * Check if this was the guardian beacon, + * if that was the case we need to send the real beacon now. + * Otherwise we should free the sk_buffer, the device + * should be doing the rest of the work now. + */ + if (ring->index == 1) { + rt2x00_ring_index_done_inc(ring); + entry = rt2x00_get_data_entry(ring); + usb_submit_urb(entry->priv, GFP_ATOMIC); + rt2x00_ring_index_inc(ring); + } else if (ring->index_done == 1) { + entry = rt2x00_get_data_entry_done(ring); + if (entry->skb) { + dev_kfree_skb(entry->skb); + entry->skb = NULL; + } + rt2x00_ring_index_done_inc(ring); + } +} + +/* + * Device probe functions. + */ +static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, + DEFAULT_RSSI_OFFSET); + rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); + EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); + EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); + EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); + EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); + 
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); + EEPROM(rt2x00dev, "BBPtune r24: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); + EEPROM(rt2x00dev, "BBPtune r25: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); + EEPROM(rt2x00dev, "BBPtune r61: 0x%04x\n", word); + } + + return 0; +} + +static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2500usb_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2570, value, reg); + + if (rt2x00_rev(&rt2x00dev->chip, 0xffff0)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); + return -ENODEV; + } + + if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && + !rt2x00_rf(&rt2x00dev->chip, RF2523) && + !rt2x00_rf(&rt2x00dev->chip, RF2524) && + !rt2x00_rf(&rt2x00dev->chip, RF2525) && + !rt2x00_rf(&rt2x00dev->chip, RF2525E) && + !rt2x00_rf(&rt2x00dev->chip, RF5222)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ + rt2x00dev->led_mode = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + /* + * Check if the BBP tuning should be disabled. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Read the RSSI <-> dBm offset information. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); + rt2x00dev->rssi_offset = + rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); + + return 0; +} + +/* + * RF value list for RF2522 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2522[] = { + { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, +}; + +/* + * RF value list for RF2523 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2523[] = { + { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, + { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, + { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, + { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, + { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, + { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, + { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, + { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, + { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, + { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, + { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, + { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, + { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, + { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, +}; + +/* + * RF value list for RF2524 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2524[] = { + { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, + { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, + { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, + { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, + { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, + { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, + { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, + { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, + { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, + { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, + { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, + { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, + { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, + { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, +}; + +/* + * RF value list for RF2525 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525[] = { + { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, + { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, + { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, + { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, + { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, + { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, + { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, + { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, + { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, + { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, + { 11, 0x00022020, 0x00080d06, 0x00060111, 
0x00000a1b }, + { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, + { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, + { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, +}; + +/* + * RF value list for RF2525e + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525e[] = { + { 1, 0x00022010, 0x0000089a, 0x00060111, 0x00000e1b }, + { 2, 0x00022010, 0x0000089e, 0x00060111, 0x00000e07 }, + { 3, 0x00022010, 0x0000089e, 0x00060111, 0x00000e1b }, + { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, + { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, + { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, + { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, + { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, + { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, + { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, + { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, + { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, + { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, + { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, +}; + +/* + * RF value list for RF5222 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5222[] = { + { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, + { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, + { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, + { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, + { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, + { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, + { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, + { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, + { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, + { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, + { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, + { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, + { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, + { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, + { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, + { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, + { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, + { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, + { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, + { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, + { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, + { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, + { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, + { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, + { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, + { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, + { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, + { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, + { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, + { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, + + /* 802.11 UNII */ + { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, + { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, + { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, + { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, + { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, +}; + +static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + 
struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 2; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); + spec->channels = rf_vals_bg_2522; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); + spec->channels = rf_vals_bg_2523; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); + spec->channels = rf_vals_bg_2524; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); + spec->channels = rf_vals_bg_2525; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); + spec->channels = rf_vals_bg_2525e; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5222); + spec->channels = rf_vals_5222; + spec->num_modes = 3; + } +} + +static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2500usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2500usb_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt2500usb_probe_hw_mode(rt2x00dev); + + /* + * This device requires the beacon ring + */ + __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt2500usb_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u16 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. 
+ */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work); + return; + } + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_TODS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_VERSION_ERROR, 1); + rt2x00_set_field16(®, TXRX_CSR2_DROP_MULTICAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_BROADCAST, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); +} + +static int rt2500usb_beacon_update(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + struct data_ring *ring = + rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + struct data_entry *beacon; + struct data_entry *guardian; + int pipe = usb_sndbulkpipe(usb_dev, 1); + int max_packet = usb_maxpacket(usb_dev, pipe, 1); + int length; + + /* + * Just in case the ieee80211 doesn't set this, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * Obtain 2 entries, one for the guardian byte, + * the second for the actual beacon. + */ + guardian = rt2x00_get_data_entry(ring); + rt2x00_ring_index_inc(ring); + beacon = rt2x00_get_data_entry(ring); + + /* + * First we create the beacon. + */ + skb_push(skb, ring->desc_size); + memset(skb->data, 0, ring->desc_size); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + ring->desc_size), + skb->len - ring->desc_size, control); + + length = rt2500usb_get_tx_data_len(rt2x00dev, max_packet, skb); + + usb_fill_bulk_urb(beacon->priv, usb_dev, pipe, + skb->data, length, rt2500usb_beacondone, beacon); + + beacon->skb = skb; + + /* + * Second we need to create the guardian byte. + * We only need a single byte, so lets recycle + * the 'flags' field we are not using for beacons. + */ + guardian->flags = 0; + usb_fill_bulk_urb(guardian->priv, usb_dev, pipe, + &guardian->flags, 1, rt2500usb_beacondone, guardian); + + /* + * Send out the guardian byte. + */ + usb_submit_urb(guardian->priv, GFP_ATOMIC); + + /* + * Enable beacon generation. 
+ */ + rt2500usb_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + return 0; +} + +static const struct ieee80211_ops rt2500usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt2500usb_configure_filter, + .get_stats = rt2x00mac_get_stats, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .beacon_update = rt2500usb_beacon_update, +}; + +static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { + .probe_hw = rt2500usb_probe_hw, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .set_device_state = rt2500usb_set_device_state, + .link_stats = rt2500usb_link_stats, + .reset_tuner = rt2500usb_reset_tuner, + .link_tuner = rt2500usb_link_tuner, + .write_tx_desc = rt2500usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .get_tx_data_len = rt2500usb_get_tx_data_len, + .kick_tx_queue = rt2500usb_kick_tx_queue, + .fill_rxdone = rt2500usb_fill_rxdone, + .config_mac_addr = rt2500usb_config_mac_addr, + .config_bssid = rt2500usb_config_bssid, + .config_type = rt2500usb_config_type, + .config_preamble = rt2500usb_config_preamble, + .config = rt2500usb_config, +}; + +static const struct rt2x00_ops rt2500usb_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt2500usb_rt2x00_ops, + .hw = &rt2500usb_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2500usb_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2500usb module information. 
+ */ +static struct usb_device_id rt2500usb_device_table[] = { + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0b05, 0x1707), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x050d, 0x7051), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Cisco Systems */ + { USB_DEVICE(0x13b1, 0x000d), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x13b1, 0x0011), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x13b1, 0x001a), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c02), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* D-LINK */ + { USB_DEVICE(0x2001, 0x3c00), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x8001), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x1044, 0x8007), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Melco */ + { USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) }, + + /* MSI */ + { USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0db0, 0x6869), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x2570), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Siemens */ + { USB_DEVICE(0x0681, 0x3c06), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* SMC */ + { USB_DEVICE(0x0707, 0xee13), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Spairon */ + { USB_DEVICE(0x114b, 0x0110), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Trust */ + { USB_DEVICE(0x0eb0, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Zinwell */ + { USB_DEVICE(0x5a57, 0x0260), USB_DEVICE_DATA(&rt2500usb_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt2500usb_driver = { + .name = DRV_NAME, + .id_table = rt2500usb_device_table, + .probe = rt2x00usb_probe, + .disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt2500usb_init(void) +{ + return usb_register(&rt2500usb_driver); +} + +static void __exit rt2500usb_exit(void) +{ + usb_deregister(&rt2500usb_driver); +} + +module_init(rt2500usb_init); +module_exit(rt2500usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h new file mode 100644 index 000000000000..b18d56e73cf1 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -0,0 +1,798 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any 
later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500usb + Abstract: Data structures and registers for the rt2500usb module. + Supported chipsets: RT2570. + */ + +#ifndef RT2500USB_H +#define RT2500USB_H + +/* + * RF chip defines. + */ +#define RF2522 0x0000 +#define RF2523 0x0001 +#define RF2524 0x0002 +#define RF2525 0x0003 +#define RF2525E 0x0005 +#define RF5222 0x0010 + +/* + * RT2570 version + */ +#define RT2570_VERSION_B 2 +#define RT2570_VERSION_C 3 +#define RT2570_VERSION_D 4 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0400 +#define CSR_REG_SIZE 0x0100 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x006a +#define BBP_SIZE 0x0060 +#define RF_SIZE 0x0014 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x0400 + +/* + * MAC_CSR1: System control. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define MAC_CSR1 0x0402 +#define MAC_CSR1_SOFT_RESET FIELD16(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD16(0x00000002) +#define MAC_CSR1_HOST_READY FIELD16(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x0404 +#define MAC_CSR2_BYTE0 FIELD16(0x00ff) +#define MAC_CSR2_BYTE1 FIELD16(0xff00) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x0406 +#define MAC_CSR3_BYTE2 FIELD16(0x00ff) +#define MAC_CSR3_BYTE3 FIELD16(0xff00) + +/* + * MAC_CSR4: STA MAC register 2. + */ +#define MAC_CSR4 0X0408 +#define MAC_CSR4_BYTE4 FIELD16(0x00ff) +#define MAC_CSR4_BYTE5 FIELD16(0xff00) + +/* + * MAC_CSR5: BSSID register 0. + */ +#define MAC_CSR5 0x040a +#define MAC_CSR5_BYTE0 FIELD16(0x00ff) +#define MAC_CSR5_BYTE1 FIELD16(0xff00) + +/* + * MAC_CSR6: BSSID register 1. + */ +#define MAC_CSR6 0x040c +#define MAC_CSR6_BYTE2 FIELD16(0x00ff) +#define MAC_CSR6_BYTE3 FIELD16(0xff00) + +/* + * MAC_CSR7: BSSID register 2. + */ +#define MAC_CSR7 0x040e +#define MAC_CSR7_BYTE4 FIELD16(0x00ff) +#define MAC_CSR7_BYTE5 FIELD16(0xff00) + +/* + * MAC_CSR8: Max frame length. + */ +#define MAC_CSR8 0x0410 +#define MAC_CSR8_MAX_FRAME_UNIT FIELD16(0x0fff) + +/* + * Misc MAC_CSR registers. + * MAC_CSR9: Timer control. + * MAC_CSR10: Slot time. + * MAC_CSR11: IFS. + * MAC_CSR12: EIFS. + * MAC_CSR13: Power mode0. + * MAC_CSR14: Power mode1. + * MAC_CSR15: Power saving transition0 + * MAC_CSR16: Power saving transition1 + */ +#define MAC_CSR9 0x0412 +#define MAC_CSR10 0x0414 +#define MAC_CSR11 0x0416 +#define MAC_CSR12 0x0418 +#define MAC_CSR13 0x041a +#define MAC_CSR14 0x041c +#define MAC_CSR15 0x041e +#define MAC_CSR16 0x0420 + +/* + * MAC_CSR17: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. 
+ * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURRENT_STATE: BBP current state. + * RF_CURRENT_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define MAC_CSR17 0x0422 +#define MAC_CSR17_SET_STATE FIELD16(0x0001) +#define MAC_CSR17_BBP_DESIRE_STATE FIELD16(0x0006) +#define MAC_CSR17_RF_DESIRE_STATE FIELD16(0x0018) +#define MAC_CSR17_BBP_CURR_STATE FIELD16(0x0060) +#define MAC_CSR17_RF_CURR_STATE FIELD16(0x0180) +#define MAC_CSR17_PUT_TO_SLEEP FIELD16(0x0200) + +/* + * MAC_CSR18: Wakeup timer register. + * DELAY_AFTER_BEACON: Delay after Tbcn expired in units of 1/16 TU. + * BEACONS_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTO_WAKE: Enable auto wakeup / sleep mechanism. + */ +#define MAC_CSR18 0x0424 +#define MAC_CSR18_DELAY_AFTER_BEACON FIELD16(0x00ff) +#define MAC_CSR18_BEACONS_BEFORE_WAKEUP FIELD16(0x7f00) +#define MAC_CSR18_AUTO_WAKE FIELD16(0x8000) + +/* + * MAC_CSR19: GPIO control register. + */ +#define MAC_CSR19 0x0426 + +/* + * MAC_CSR20: LED control register. + * ACTIVITY: 0: idle, 1: active. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR20 0x0428 +#define MAC_CSR20_ACTIVITY FIELD16(0x0001) +#define MAC_CSR20_LINK FIELD16(0x0002) +#define MAC_CSR20_ACTIVITY_POLARITY FIELD16(0x0004) + +/* + * MAC_CSR21: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + */ +#define MAC_CSR21 0x042a +#define MAC_CSR21_ON_PERIOD FIELD16(0x00ff) +#define MAC_CSR21_OFF_PERIOD FIELD16(0xff00) + +/* + * Collision window control register. + */ +#define MAC_CSR22 0x042c + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: Security control register. + */ +#define TXRX_CSR0 0x0440 +#define TXRX_CSR0_ALGORITHM FIELD16(0x0007) +#define TXRX_CSR0_IV_OFFSET FIELD16(0x01f8) +#define TXRX_CSR0_KEY_ID FIELD16(0x1e00) + +/* + * TXRX_CSR1: TX configuration. + * ACK_TIMEOUT: ACK Timeout in unit of 1-us. + * TSF_OFFSET: TSF offset in MAC header. + * AUTO_SEQUENCE: Let ASIC control frame sequence number. + */ +#define TXRX_CSR1 0x0442 +#define TXRX_CSR1_ACK_TIMEOUT FIELD16(0x00ff) +#define TXRX_CSR1_TSF_OFFSET FIELD16(0x7f00) +#define TXRX_CSR1_AUTO_SEQUENCE FIELD16(0x8000) + +/* + * TXRX_CSR2: RX control. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MCAST: Drop multicast frames. + * DROP_BCAST: Drop broadcast frames. + */ +#define TXRX_CSR2 0x0444 +#define TXRX_CSR2_DISABLE_RX FIELD16(0x0001) +#define TXRX_CSR2_DROP_CRC FIELD16(0x0002) +#define TXRX_CSR2_DROP_PHYSICAL FIELD16(0x0004) +#define TXRX_CSR2_DROP_CONTROL FIELD16(0x0008) +#define TXRX_CSR2_DROP_NOT_TO_ME FIELD16(0x0010) +#define TXRX_CSR2_DROP_TODS FIELD16(0x0020) +#define TXRX_CSR2_DROP_VERSION_ERROR FIELD16(0x0040) +#define TXRX_CSR2_DROP_MULTICAST FIELD16(0x0200) +#define TXRX_CSR2_DROP_BROADCAST FIELD16(0x0400) + +/* + * RX BBP ID registers + * TXRX_CSR3: CCK RX BBP ID. + * TXRX_CSR4: OFDM RX BBP ID. + */ +#define TXRX_CSR3 0x0446 +#define TXRX_CSR4 0x0448 + +/* + * TXRX_CSR5: CCK TX BBP ID0. 
+ */ +#define TXRX_CSR5 0x044a +#define TXRX_CSR5_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR5_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR5_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR5_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR6: CCK TX BBP ID1. + */ +#define TXRX_CSR6 0x044c +#define TXRX_CSR6_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR6_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR6_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR6_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR7: OFDM TX BBP ID0. + */ +#define TXRX_CSR7 0x044e +#define TXRX_CSR7_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR7_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR7_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR7_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR5: OFDM TX BBP ID1. + */ +#define TXRX_CSR8 0x0450 +#define TXRX_CSR8_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR8_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR8_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR8_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR9: TX ACK time-out. + */ +#define TXRX_CSR9 0x0452 + +/* + * TXRX_CSR10: Auto responder control. + */ +#define TXRX_CSR10 0x0454 +#define TXRX_CSR10_AUTORESPOND_PREAMBLE FIELD16(0x0004) + +/* + * TXRX_CSR11: Auto responder basic rate. + */ +#define TXRX_CSR11 0x0456 + +/* + * ACK/CTS time registers. + */ +#define TXRX_CSR12 0x0458 +#define TXRX_CSR13 0x045a +#define TXRX_CSR14 0x045c +#define TXRX_CSR15 0x045e +#define TXRX_CSR16 0x0460 +#define TXRX_CSR17 0x0462 + +/* + * TXRX_CSR18: Synchronization control register. + */ +#define TXRX_CSR18 0x0464 +#define TXRX_CSR18_OFFSET FIELD16(0x000f) +#define TXRX_CSR18_INTERVAL FIELD16(0xfff0) + +/* + * TXRX_CSR19: Synchronization control register. + * TSF_COUNT: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable Tbcn with reload value. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR19 0x0466 +#define TXRX_CSR19_TSF_COUNT FIELD16(0x0001) +#define TXRX_CSR19_TSF_SYNC FIELD16(0x0006) +#define TXRX_CSR19_TBCN FIELD16(0x0008) +#define TXRX_CSR19_BEACON_GEN FIELD16(0x0010) + +/* + * TXRX_CSR20: Tx BEACON offset time control register. + * OFFSET: In units of usec. + * BCN_EXPECT_WINDOW: Default: 2^CWmin + */ +#define TXRX_CSR20 0x0468 +#define TXRX_CSR20_OFFSET FIELD16(0x1fff) +#define TXRX_CSR20_BCN_EXPECT_WINDOW FIELD16(0xe000) + +/* + * TXRX_CSR21 + */ +#define TXRX_CSR21 0x046a + +/* + * Encryption related CSRs. + * + */ + +/* + * SEC_CSR0-SEC_CSR7: Shared key 0, word 0-7 + */ +#define SEC_CSR0 0x0480 +#define SEC_CSR1 0x0482 +#define SEC_CSR2 0x0484 +#define SEC_CSR3 0x0486 +#define SEC_CSR4 0x0488 +#define SEC_CSR5 0x048a +#define SEC_CSR6 0x048c +#define SEC_CSR7 0x048e + +/* + * SEC_CSR8-SEC_CSR15: Shared key 1, word 0-7 + */ +#define SEC_CSR8 0x0490 +#define SEC_CSR9 0x0492 +#define SEC_CSR10 0x0494 +#define SEC_CSR11 0x0496 +#define SEC_CSR12 0x0498 +#define SEC_CSR13 0x049a +#define SEC_CSR14 0x049c +#define SEC_CSR15 0x049e + +/* + * SEC_CSR16-SEC_CSR23: Shared key 2, word 0-7 + */ +#define SEC_CSR16 0x04a0 +#define SEC_CSR17 0x04a2 +#define SEC_CSR18 0X04A4 +#define SEC_CSR19 0x04a6 +#define SEC_CSR20 0x04a8 +#define SEC_CSR21 0x04aa +#define SEC_CSR22 0x04ac +#define SEC_CSR23 0x04ae + +/* + * SEC_CSR24-SEC_CSR31: Shared key 3, word 0-7 + */ +#define SEC_CSR24 0x04b0 +#define SEC_CSR25 0x04b2 +#define SEC_CSR26 0x04b4 +#define SEC_CSR27 0x04b6 +#define SEC_CSR28 0x04b8 +#define SEC_CSR29 0x04ba +#define SEC_CSR30 0x04bc +#define SEC_CSR31 0x04be + +/* + * PHY control registers. 
+ */ + +/* + * PHY_CSR0: RF switching timing control. + */ +#define PHY_CSR0 0x04c0 + +/* + * PHY_CSR1: TX PA configuration. + */ +#define PHY_CSR1 0x04c2 + +/* + * MAC configuration registers. + * PHY_CSR2: TX MAC configuration. + * PHY_CSR3: RX MAC configuration. + */ +#define PHY_CSR2 0x04c4 +#define PHY_CSR3 0x04c6 + +/* + * PHY_CSR4: Interface configuration. + */ +#define PHY_CSR4 0x04c8 +#define PHY_CSR4_LOW_RF_LE FIELD16(0x0001) + +/* + * BBP pre-TX registers. + * PHY_CSR5: BBP pre-TX CCK. + */ +#define PHY_CSR5 0x04ca +#define PHY_CSR5_CCK FIELD16(0x0003) +#define PHY_CSR5_CCK_FLIP FIELD16(0x0004) + +/* + * BBP pre-TX registers. + * PHY_CSR6: BBP pre-TX OFDM. + */ +#define PHY_CSR6 0x04cc +#define PHY_CSR6_OFDM FIELD16(0x0003) +#define PHY_CSR6_OFDM_FLIP FIELD16(0x0004) + +/* + * PHY_CSR7: BBP access register 0. + * BBP_DATA: BBP data. + * BBP_REG_ID: BBP register ID. + * BBP_READ_CONTROL: 0: write, 1: read. + */ +#define PHY_CSR7 0x04ce +#define PHY_CSR7_DATA FIELD16(0x00ff) +#define PHY_CSR7_REG_ID FIELD16(0x7f00) +#define PHY_CSR7_READ_CONTROL FIELD16(0x8000) + +/* + * PHY_CSR8: BBP access register 1. + * BBP_BUSY: ASIC is busy execute BBP programming. + */ +#define PHY_CSR8 0x04d0 +#define PHY_CSR8_BUSY FIELD16(0x0001) + +/* + * PHY_CSR9: RF access register. + * RF_VALUE: Register value + id to program into rf/if. + */ +#define PHY_CSR9 0x04d2 +#define PHY_CSR9_RF_VALUE FIELD16(0xffff) + +/* + * PHY_CSR10: RF access register. + * RF_VALUE: Register value + id to program into rf/if. + * RF_NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * RF_IF_SELECT: Chip to program: 0: rf, 1: if. + * RF_PLL_LD: Rf pll_ld status. + * RF_BUSY: 1: asic is busy execute rf programming. + */ +#define PHY_CSR10 0x04d4 +#define PHY_CSR10_RF_VALUE FIELD16(0x00ff) +#define PHY_CSR10_RF_NUMBER_OF_BITS FIELD16(0x1f00) +#define PHY_CSR10_RF_IF_SELECT FIELD16(0x2000) +#define PHY_CSR10_RF_PLL_LD FIELD16(0x4000) +#define PHY_CSR10_RF_BUSY FIELD16(0x8000) + +/* + * STA_CSR0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define STA_CSR0 0x04e0 +#define STA_CSR0_FCS_ERROR FIELD16(0xffff) + +/* + * STA_CSR1: PLCP error count. + */ +#define STA_CSR1 0x04e2 + +/* + * STA_CSR2: LONG error count. + */ +#define STA_CSR2 0x04e4 + +/* + * STA_CSR3: CCA false alarm. + * FALSE_CCA_ERROR: False CCA error count, cleared when read. + */ +#define STA_CSR3 0x04e6 +#define STA_CSR3_FALSE_CCA_ERROR FIELD16(0xffff) + +/* + * STA_CSR4: RX FIFO overflow. + */ +#define STA_CSR4 0x04e8 + +/* + * STA_CSR5: Beacon sent counter. + */ +#define STA_CSR5 0x04ea + +/* + * Statistics registers + */ +#define STA_CSR6 0x04ec +#define STA_CSR7 0x04ee +#define STA_CSR8 0x04f0 +#define STA_CSR9 0x04f2 +#define STA_CSR10 0x04f4 + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2: TX antenna control + */ +#define BBP_R2_TX_ANTENNA FIELD8(0x03) +#define BBP_R2_TX_IQ_FLIP FIELD8(0x04) + +/* + * R14: RX antenna control + */ +#define BBP_R14_RX_ANTENNA FIELD8(0x03) +#define BBP_R14_RX_IQ_FLIP FIELD8(0x04) + +/* + * RF registers. + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM contents. + */ + +/* + * HW MAC address. 
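[Editorial illustration, not part of the patch.] The PHY_CSR7/PHY_CSR8 pair described above implements indirect BBP access: the host writes the BBP register id plus a read/write flag into PHY_CSR7 and then polls PHY_CSR8_BUSY until the ASIC has executed the request. A hedged sketch of how a BBP read built on these definitions could look inside rt2500usb.c; the driver's real rt2500usb_bbp_read() lives earlier in the patch and may differ (the retry count and udelay() interval here are arbitrary):

static int example_bbp_read(struct rt2x00_dev *rt2x00dev, u8 reg_id, u8 *value)
{
	u16 reg;
	int i;

	/* Request a read of BBP register 'reg_id'. */
	reg = 0;
	rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, reg_id);
	rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1);
	rt2500usb_register_write(rt2x00dev, PHY_CSR7, reg);

	/* Wait for the ASIC to finish the BBP transaction. */
	for (i = 0; i < 100; i++) {
		rt2500usb_register_read(rt2x00dev, PHY_CSR8, &reg);
		if (!rt2x00_get_field16(reg, PHY_CSR8_BUSY))
			break;
		udelay(50);
	}
	if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
		return -EBUSY;

	/* The result is returned through PHY_CSR7_DATA. */
	rt2500usb_register_read(rt2x00dev, PHY_CSR7, &reg);
	*value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
	return 0;
}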
+ */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * LED_MODE: 0: default, 1: TX/RX activity, 2: Single (ignore link), 3: rsvd. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x000b +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x01c0) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * CARDBUS_ACCEL: 0: enable, 1: disable. + * DYN_BBP_TUNE: 0: enable, 1: disable. + * CCK_TX_POWER: CCK TX power compensation. + */ +#define EEPROM_NIC 0x000c +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0001) +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(0x0002) +#define EEPROM_NIC_CCK_TX_POWER FIELD16(0x000c) + +/* + * EEPROM geography. + * GEO: Default geography setting for device. + */ +#define EEPROM_GEOGRAPHY 0x000d +#define EEPROM_GEOGRAPHY_GEO FIELD16(0x0f00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x000e +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x001e +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * EEPROM Tuning threshold + */ +#define EEPROM_BBPTUNE 0x0030 +#define EEPROM_BBPTUNE_THRESHOLD FIELD16(0x00ff) + +/* + * EEPROM BBP R24 Tuning. + */ +#define EEPROM_BBPTUNE_R24 0x0031 +#define EEPROM_BBPTUNE_R24_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R24_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP R25 Tuning. + */ +#define EEPROM_BBPTUNE_R25 0x0032 +#define EEPROM_BBPTUNE_R25_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R25_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP R24 Tuning. + */ +#define EEPROM_BBPTUNE_R61 0x0033 +#define EEPROM_BBPTUNE_R61_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R61_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP VGC Tuning. + */ +#define EEPROM_BBPTUNE_VGC 0x0034 +#define EEPROM_BBPTUNE_VGCUPPER FIELD16(0x00ff) + +/* + * EEPROM BBP R17 Tuning. + */ +#define EEPROM_BBPTUNE_R17 0x0035 +#define EEPROM_BBPTUNE_R17_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R17_HIGH FIELD16(0xff00) + +/* + * RSSI <-> dBm offset calibration + */ +#define EEPROM_CALIBRATE_OFFSET 0x0036 +#define EEPROM_CALIBRATE_OFFSET_RSSI FIELD16(0x00ff) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 5 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 4 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 
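[Editorial illustration, not part of the patch.] The FIELD16() definitions above (for example EEPROM_ANTENNA_TX_DEFAULT or EEPROM_ANTENNA_LED_MODE) name sub-fields of a 16-bit word by their contiguous bit mask; rt2x00_set_field16()/rt2x00_get_field16() then do the mask-and-shift work. The real macros live in rt2x00reg.h, which is not shown here, so the following is only the general idea as a standalone sketch, assuming the mask alone identifies the field:

#include <stdint.h>
#include <stdio.h>

static unsigned int field_shift(uint16_t mask)
{
	unsigned int shift = 0;

	/* Position of the lowest set bit; masks are never zero. */
	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

static uint16_t field_get(uint16_t word, uint16_t mask)
{
	return (word & mask) >> field_shift(mask);
}

static uint16_t field_set(uint16_t word, uint16_t mask, uint16_t value)
{
	return (word & ~mask) | ((value << field_shift(mask)) & mask);
}

int main(void)
{
	uint16_t antenna = 0;

	antenna = field_set(antenna, 0x000c, 1);  /* TX default: antenna A */
	antenna = field_set(antenna, 0x01c0, 2);  /* LED mode 2 */
	printf("word=0x%04x led=%u\n", antenna, field_get(antenna, 0x01c0));
	return 0;
}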
+ */ + +/* + * Word0 + */ +#define TXD_W0_PACKET_ID FIELD32(0x0000000f) +#define TXD_W0_RETRY_LIMIT FIELD32(0x000000f0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_OFDM FIELD32(0x00000800) +#define TXD_W0_NEW_SEQ FIELD32(0x00001000) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_CIPHER FIELD32(0x20000000) +#define TXD_W0_KEY_ID FIELD32(0xc0000000) + +/* + * Word1 + */ +#define TXD_W1_IV_OFFSET FIELD32(0x0000003f) +#define TXD_W1_AIFS FIELD32(0x000000c0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + */ +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_OFDM FIELD32(0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_CIPHER FIELD32(0x00000100) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000200) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) + +/* + * Word1 + */ +#define RXD_W1_RSSI FIELD32(0x000000ff) +#define RXD_W1_SIGNAL FIELD32(0x0000ff00) + +/* + * Word2 + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT2500USB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h new file mode 100644 index 000000000000..9845e584b731 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -0,0 +1,838 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 global information. 
+ */ + +#ifndef RT2X00_H +#define RT2X00_H + +#include <linux/bitops.h> +#include <linux/prefetch.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/firmware.h> + +#include <net/mac80211.h> + +#include "rt2x00debug.h" +#include "rt2x00reg.h" +#include "rt2x00ring.h" + +/* + * Module information. + * DRV_NAME should be set within the individual module source files. + */ +#define DRV_VERSION "2.0.10" +#define DRV_PROJECT "http://rt2x00.serialmonkey.com" + +/* + * Debug definitions. + * Debug output has to be enabled during compile time. + */ +#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \ + printk(__kernlvl "%s -> %s: %s - " __msg, \ + wiphy_name((__dev)->hw->wiphy), __FUNCTION__, __lvl, ##__args) + +#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \ + printk(__kernlvl "%s -> %s: %s - " __msg, \ + DRV_NAME, __FUNCTION__, __lvl, ##__args) + +#ifdef CONFIG_RT2X00_DEBUG +#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args); +#else +#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ + do { } while (0) +#endif /* CONFIG_RT2X00_DEBUG */ + +/* + * Various debug levels. + * The debug levels PANIC and ERROR both indicate serious problems, + * for this reason they should never be ignored. + * The special ERROR_PROBE message is for messages that are generated + * when the rt2x00_dev is not yet initialized. + */ +#define PANIC(__dev, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, KERN_CRIT, "Panic", __msg, ##__args) +#define ERROR(__dev, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, KERN_ERR, "Error", __msg, ##__args) +#define ERROR_PROBE(__msg, __args...) \ + DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args) +#define WARNING(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args) +#define NOTICE(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args) +#define INFO(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args) +#define DEBUG(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args) +#define EEPROM(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args) + +/* + * Ring sizes. + * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes. + * DATA_FRAME_SIZE is used for TX, RX, ATIM and PRIO rings. + * MGMT_FRAME_SIZE is used for the BEACON ring. + */ +#define DATA_FRAME_SIZE 2432 +#define MGMT_FRAME_SIZE 256 + +/* + * Number of entries in a packet ring. + * PCI devices only need 1 Beacon entry, + * but USB devices require a second because they + * have to send a Guardian byte first. + */ +#define RX_ENTRIES 12 +#define TX_ENTRIES 12 +#define ATIM_ENTRIES 1 +#define BEACON_ENTRIES 2 + +/* + * Standard timing and size defines. + * These values should follow the ieee80211 specifications. 
+ */ +#define ACK_SIZE 14 +#define IEEE80211_HEADER 24 +#define PLCP 48 +#define BEACON 100 +#define PREAMBLE 144 +#define SHORT_PREAMBLE 72 +#define SLOT_TIME 20 +#define SHORT_SLOT_TIME 9 +#define SIFS 10 +#define PIFS ( SIFS + SLOT_TIME ) +#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) +#define DIFS ( PIFS + SLOT_TIME ) +#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) +#define EIFS ( SIFS + (8 * (IEEE80211_HEADER + ACK_SIZE)) ) + +/* + * IEEE802.11 header defines + */ +static inline int is_rts_frame(u16 fc) +{ + return !!(((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_RTS)); +} + +static inline int is_cts_frame(u16 fc) +{ + return !!(((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_CTS)); +} + +static inline int is_probe_resp(u16 fc) +{ + return !!(((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)); +} + +/* + * Chipset identification + * The chipset on the device is composed of a RT and RF chip. + * The chipset combination is important for determining device capabilities. + */ +struct rt2x00_chip { + u16 rt; +#define RT2460 0x0101 +#define RT2560 0x0201 +#define RT2570 0x1201 +#define RT2561s 0x0301 /* Turbo */ +#define RT2561 0x0302 +#define RT2661 0x0401 +#define RT2571 0x1300 + + u16 rf; + u32 rev; +}; + +/* + * RF register values that belong to a particular channel. + */ +struct rf_channel { + int channel; + u32 rf1; + u32 rf2; + u32 rf3; + u32 rf4; +}; + +/* + * To optimize the quality of the link we need to store + * the quality of received frames and periodically + * optimize the link. + */ +struct link { + /* + * Link tuner counter + * The number of times the link has been tuned + * since the radio has been switched on. + */ + u32 count; + + /* + * Statistics required for Link tuning. + * For the average RSSI value we use the "Walking average" approach. + * When adding RSSI to the average value the following calculation + * is needed: + * + * avg_rssi = ((avg_rssi * 7) + rssi) / 8; + * + * The advantage of this approach is that we only need 1 variable + * to store the average in (No need for a count and a total). + * But more importantly, normal average values will over time + * move less and less towards newly added values this results + * that with link tuning, the device can have a very good RSSI + * for a few minutes but when the device is moved away from the AP + * the average will not decrease fast enough to compensate. + * The walking average compensates this and will move towards + * the new values correctly allowing a effective link tuning. + */ + int avg_rssi; + int vgc_level; + int false_cca; + + /* + * Statistics required for Signal quality calculation. + * For calculating the Signal quality we have to determine + * the total number of success and failed RX and TX frames. + * After that we also use the average RSSI value to help + * determining the signal quality. + * For the calculation we will use the following algorithm: + * + * rssi_percentage = (avg_rssi * 100) / rssi_offset + * rx_percentage = (rx_success * 100) / rx_total + * tx_percentage = (tx_success * 100) / tx_total + * avg_signal = ((WEIGHT_RSSI * avg_rssi) + + * (WEIGHT_TX * tx_percentage) + + * (WEIGHT_RX * rx_percentage)) / 100 + * + * This value should then be checked to not be greated then 100. 
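[Editorial illustration, not part of the patch.] The "walking average" described in the comment above needs no history buffer: each new sample simply pulls the stored average one eighth of the way toward itself. The authoritative version is rt2x00_update_link_rssi() further down; this standalone snippet only shows how quickly the average follows a sudden change in signal level:

#include <stdio.h>

/* Same weighting as the driver: new_avg = (old_avg * 7 + sample) / 8. */
static int walking_avg(int avg, int sample)
{
	return ((avg * 7) + sample) / 8;
}

int main(void)
{
	int avg = -50;	/* station starts close to the AP */
	int i;

	/* The link suddenly degrades to -80 dBm; watch the average follow. */
	for (i = 0; i < 10; i++) {
		avg = walking_avg(avg, -80);
		printf("sample %2d: avg_rssi = %d\n", i + 1, avg);
	}
	return 0;
}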
+ */ + int rx_percentage; + int rx_success; + int rx_failed; + int tx_percentage; + int tx_success; + int tx_failed; +#define WEIGHT_RSSI 20 +#define WEIGHT_RX 40 +#define WEIGHT_TX 40 + + /* + * Work structure for scheduling periodic link tuning. + */ + struct delayed_work work; +}; + +/* + * Clear all counters inside the link structure. + * This can be easiest achieved by memsetting everything + * except for the work structure at the end. + */ +static inline void rt2x00_clear_link(struct link *link) +{ + memset(link, 0x00, sizeof(*link) - sizeof(link->work)); + link->rx_percentage = 50; + link->tx_percentage = 50; +} + +/* + * Update the rssi using the walking average approach. + */ +static inline void rt2x00_update_link_rssi(struct link *link, int rssi) +{ + if (!link->avg_rssi) + link->avg_rssi = rssi; + else + link->avg_rssi = ((link->avg_rssi * 7) + rssi) / 8; +} + +/* + * When the avg_rssi is unset or no frames have been received), + * we need to return the default value which needs to be less + * than -80 so the device will select the maximum sensitivity. + */ +static inline int rt2x00_get_link_rssi(struct link *link) +{ + return (link->avg_rssi && link->rx_success) ? link->avg_rssi : -128; +} + +/* + * Interface structure + * Configuration details about the current interface. + */ +struct interface { + /* + * Interface identification. The value is assigned + * to us by the 80211 stack, and is used to request + * new beacons. + */ + int id; + + /* + * Current working type (IEEE80211_IF_TYPE_*). + * When set to INVALID_INTERFACE, no interface is configured. + */ + int type; +#define INVALID_INTERFACE IEEE80211_IF_TYPE_INVALID + + /* + * MAC of the device. + */ + u8 mac[ETH_ALEN]; + + /* + * BBSID of the AP to associate with. + */ + u8 bssid[ETH_ALEN]; + + /* + * Store the packet filter mode for the current interface. + */ + unsigned int filter; +}; + +static inline int is_interface_present(struct interface *intf) +{ + return !!intf->id; +} + +static inline int is_interface_type(struct interface *intf, int type) +{ + return intf->type == type; +} + +/* + * Details about the supported modes, rates and channels + * of a particular chipset. This is used by rt2x00lib + * to build the ieee80211_hw_mode array for mac80211. + */ +struct hw_mode_spec { + /* + * Number of modes, rates and channels. + */ + int num_modes; + int num_rates; + int num_channels; + + /* + * txpower values. + */ + const u8 *tx_power_a; + const u8 *tx_power_bg; + u8 tx_power_default; + + /* + * Device/chipset specific value. + */ + const struct rf_channel *channels; +}; + +/* + * Configuration structure wrapper around the + * mac80211 configuration structure. + * When mac80211 configures the driver, rt2x00lib + * can precalculate values which are equal for all + * rt2x00 drivers. Those values can be stored in here. + */ +struct rt2x00lib_conf { + struct ieee80211_conf *conf; + struct rf_channel rf; + + int phymode; + + int basic_rates; + int slot_time; + + short sifs; + short pifs; + short difs; + short eifs; +}; + +/* + * rt2x00lib callback functions. + */ +struct rt2x00lib_ops { + /* + * Interrupt handlers. + */ + irq_handler_t irq_handler; + + /* + * Device init handlers. + */ + int (*probe_hw) (struct rt2x00_dev *rt2x00dev); + char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev); + int (*load_firmware) (struct rt2x00_dev *rt2x00dev, void *data, + const size_t len); + + /* + * Device initialization/deinitialization handlers. 
+ */ + int (*initialize) (struct rt2x00_dev *rt2x00dev); + void (*uninitialize) (struct rt2x00_dev *rt2x00dev); + + /* + * Radio control handlers. + */ + int (*set_device_state) (struct rt2x00_dev *rt2x00dev, + enum dev_state state); + int (*rfkill_poll) (struct rt2x00_dev *rt2x00dev); + void (*link_stats) (struct rt2x00_dev *rt2x00dev); + void (*reset_tuner) (struct rt2x00_dev *rt2x00dev); + void (*link_tuner) (struct rt2x00_dev *rt2x00dev); + + /* + * TX control handlers + */ + void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control); + int (*write_tx_data) (struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control); + int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev, int maxpacket, + struct sk_buff *skb); + void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, + unsigned int queue); + + /* + * RX control handlers + */ + void (*fill_rxdone) (struct data_entry *entry, + struct rxdata_entry_desc *desc); + + /* + * Configuration handlers. + */ + void (*config_mac_addr) (struct rt2x00_dev *rt2x00dev, __le32 *mac); + void (*config_bssid) (struct rt2x00_dev *rt2x00dev, __le32 *bssid); + void (*config_type) (struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync); + void (*config_preamble) (struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time); + void (*config) (struct rt2x00_dev *rt2x00dev, const unsigned int flags, + struct rt2x00lib_conf *libconf); +#define CONFIG_UPDATE_PHYMODE ( 1 << 1 ) +#define CONFIG_UPDATE_CHANNEL ( 1 << 2 ) +#define CONFIG_UPDATE_TXPOWER ( 1 << 3 ) +#define CONFIG_UPDATE_ANTENNA ( 1 << 4 ) +#define CONFIG_UPDATE_SLOT_TIME ( 1 << 5 ) +#define CONFIG_UPDATE_BEACON_INT ( 1 << 6 ) +#define CONFIG_UPDATE_ALL 0xffff +}; + +/* + * rt2x00 driver callback operation structure. + */ +struct rt2x00_ops { + const char *name; + const unsigned int rxd_size; + const unsigned int txd_size; + const unsigned int eeprom_size; + const unsigned int rf_size; + const struct rt2x00lib_ops *lib; + const struct ieee80211_ops *hw; +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + const struct rt2x00debug *debugfs; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2x00 device flags + */ +enum rt2x00_flags { + /* + * Device state flags + */ + DEVICE_PRESENT, + DEVICE_REGISTERED_HW, + DEVICE_INITIALIZED, + DEVICE_STARTED, + DEVICE_STARTED_SUSPEND, + DEVICE_ENABLED_RADIO, + DEVICE_DISABLED_RADIO_HW, + + /* + * Driver features + */ + DRIVER_REQUIRE_FIRMWARE, + DRIVER_REQUIRE_BEACON_RING, + + /* + * Driver configuration + */ + CONFIG_SUPPORT_HW_BUTTON, + CONFIG_FRAME_TYPE, + CONFIG_RF_SEQUENCE, + CONFIG_EXTERNAL_LNA_A, + CONFIG_EXTERNAL_LNA_BG, + CONFIG_DOUBLE_ANTENNA, + CONFIG_DISABLE_LINK_TUNING, + CONFIG_SHORT_PREAMBLE, +}; + +/* + * rt2x00 device structure. + */ +struct rt2x00_dev { + /* + * Device structure. + * The structure stored in here depends on the + * system bus (PCI or USB). + * When accessing this variable, the rt2x00dev_{pci,usb} + * macro's should be used for correct typecasting. + */ + void *dev; +#define rt2x00dev_pci(__dev) ( (struct pci_dev*)(__dev)->dev ) +#define rt2x00dev_usb(__dev) ( (struct usb_interface*)(__dev)->dev ) + + /* + * Callback functions. + */ + const struct rt2x00_ops *ops; + + /* + * IEEE80211 control structure. 
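[Editorial illustration, not part of the patch.] The enum rt2x00_flags values defined above are bit numbers rather than masks: they are meant for the atomic bitops used elsewhere in this patch (test_bit() in rt2500usb_beacondone(), __set_bit() in rt2500usb_init_eeprom() and rt2500usb_probe_hw()). A minimal sketch of that usage pattern against the unsigned long flags member of struct rt2x00_dev:

/* Sketch only: how the flag bits are typically checked and set. */
static bool example_radio_enabled(struct rt2x00_dev *rt2x00dev)
{
	return test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags);
}

static void example_mark_firmware_required(struct rt2x00_dev *rt2x00dev)
{
	/* Non-atomic variant is fine during probe, before the device runs. */
	__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
}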
+ */ + struct ieee80211_hw *hw; + struct ieee80211_hw_mode *hwmodes; + unsigned int curr_hwmode; +#define HWMODE_B 0 +#define HWMODE_G 1 +#define HWMODE_A 2 + + /* + * rfkill structure for RF state switching support. + * This will only be compiled in when required. + */ +#ifdef CONFIG_RT2X00_LIB_RFKILL + struct rfkill *rfkill; + struct input_polled_dev *poll_dev; +#endif /* CONFIG_RT2X00_LIB_RFKILL */ + + /* + * If enabled, the debugfs interface structures + * required for deregistration of debugfs. + */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + const struct rt2x00debug_intf *debugfs_intf; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + + /* + * Device flags. + * In these flags the current status and some + * of the device capabilities are stored. + */ + unsigned long flags; + + /* + * Chipset identification. + */ + struct rt2x00_chip chip; + + /* + * hw capability specifications. + */ + struct hw_mode_spec spec; + + /* + * Register pointers + * csr_addr: Base register address. (PCI) + * csr_cache: CSR cache for usb_control_msg. (USB) + */ + void __iomem *csr_addr; + void *csr_cache; + + /* + * Interface configuration. + */ + struct interface interface; + + /* + * Link quality + */ + struct link link; + + /* + * EEPROM data. + */ + __le16 *eeprom; + + /* + * Active RF register values. + * These are stored here so we don't need + * to read the rf registers and can directly + * use this value instead. + * This field should be accessed by using + * rt2x00_rf_read() and rt2x00_rf_write(). + */ + u32 *rf; + + /* + * Current TX power value. + */ + u16 tx_power; + + /* + * LED register (for rt61pci & rt73usb). + */ + u16 led_reg; + + /* + * Led mode (LED_MODE_*) + */ + u8 led_mode; + + /* + * Rssi <-> Dbm offset + */ + u8 rssi_offset; + + /* + * Frequency offset (for rt61pci & rt73usb). + */ + u8 freq_offset; + + /* + * Low level statistics which will have + * to be kept up to date while device is running. + */ + struct ieee80211_low_level_stats low_level_stats; + + /* + * RX configuration information. + */ + struct ieee80211_rx_status rx_status; + + /* + * Scheduled work. + */ + struct work_struct beacon_work; + struct work_struct filter_work; + struct work_struct config_work; + + /* + * Data ring arrays for RX, TX and Beacon. + * The Beacon array also contains the Atim ring + * if that is supported by the device. + */ + int data_rings; + struct data_ring *rx; + struct data_ring *tx; + struct data_ring *bcn; + + /* + * Firmware image. + */ + const struct firmware *fw; +}; + +/* + * For-each loop for the ring array. + * All rings have been allocated as a single array, + * this means we can create a very simply loop macro + * that is capable of looping through all rings. + * ring_end(), txring_end() and ring_loop() are helper macro's which + * should not be used directly. 
Instead the following should be used: + * ring_for_each() - Loops through all rings (RX, TX, Beacon & Atim) + * txring_for_each() - Loops through TX data rings (TX only) + * txringall_for_each() - Loops through all TX rings (TX, Beacon & Atim) + */ +#define ring_end(__dev) \ + &(__dev)->rx[(__dev)->data_rings] + +#define txring_end(__dev) \ + &(__dev)->tx[(__dev)->hw->queues] + +#define ring_loop(__entry, __start, __end) \ + for ((__entry) = (__start); \ + prefetch(&(__entry)[1]), (__entry) != (__end); \ + (__entry) = &(__entry)[1]) + +#define ring_for_each(__dev, __entry) \ + ring_loop(__entry, (__dev)->rx, ring_end(__dev)) + +#define txring_for_each(__dev, __entry) \ + ring_loop(__entry, (__dev)->tx, txring_end(__dev)) + +#define txringall_for_each(__dev, __entry) \ + ring_loop(__entry, (__dev)->tx, ring_end(__dev)) + +/* + * Generic RF access. + * The RF is being accessed by word index. + */ +static inline void rt2x00_rf_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + *data = rt2x00dev->rf[word]; +} + +static inline void rt2x00_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00dev->rf[word] = data; +} + +/* + * Generic EEPROM access. + * The EEPROM is being accessed by word index. + */ +static inline void *rt2x00_eeprom_addr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word) +{ + return (void *)&rt2x00dev->eeprom[word]; +} + +static inline void rt2x00_eeprom_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u16 *data) +{ + *data = le16_to_cpu(rt2x00dev->eeprom[word]); +} + +static inline void rt2x00_eeprom_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u16 data) +{ + rt2x00dev->eeprom[word] = cpu_to_le16(data); +} + +/* + * Chipset handlers + */ +static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev, + const u16 rt, const u16 rf, const u32 rev) +{ + INFO(rt2x00dev, + "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n", + rt, rf, rev); + + rt2x00dev->chip.rt = rt; + rt2x00dev->chip.rf = rf; + rt2x00dev->chip.rev = rev; +} + +static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip) +{ + return (chipset->rt == chip); +} + +static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip) +{ + return (chipset->rf == chip); +} + +static inline u16 rt2x00_get_rev(const struct rt2x00_chip *chipset) +{ + return chipset->rev; +} + +static inline u16 rt2x00_rev(const struct rt2x00_chip *chipset, const u32 mask) +{ + return chipset->rev & mask; +} + +/* + * Duration calculations + * The rate variable passed is: 100kbs. + * To convert from bytes to bits we multiply size with 8, + * then the size is multiplied with 10 to make the + * real rate -> rate argument correction. + */ +static inline u16 get_duration(const unsigned int size, const u8 rate) +{ + return ((size * 8 * 10) / rate); +} + +static inline u16 get_duration_res(const unsigned int size, const u8 rate) +{ + return ((size * 8 * 10) % rate); +} + +/* + * Library functions. + */ +struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev, + const unsigned int queue); + +/* + * Interrupt context handlers. 
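[Editorial illustration, not part of the patch.] The get_duration()/get_duration_res() helpers above convert a frame size into air time: size * 8 gives bits, and because the rate argument is expressed in units of 100 kbit/s, the extra factor of 10 turns the division into whole microseconds, with the _res variant returning the remainder. Two worked values as a standalone check:

#include <stdio.h>

/* Mirrors get_duration()/get_duration_res(): rate is in 100 kbit/s units,
 * so (bytes * 8 * 10) / rate yields whole microseconds of air time. */
static unsigned int duration_us(unsigned int size, unsigned int rate)
{
	return (size * 8 * 10) / rate;
}

static unsigned int duration_rem(unsigned int size, unsigned int rate)
{
	return (size * 8 * 10) % rate;
}

int main(void)
{
	/* 14-byte ACK at 2 Mbit/s (rate = 20) -> 56 us exactly. */
	printf("ACK: %u us (rem %u)\n", duration_us(14, 20), duration_rem(14, 20));
	/* 24-byte 802.11 header at 5.5 Mbit/s (rate = 55) -> 34 us, remainder 50. */
	printf("HDR: %u us (rem %u)\n", duration_us(24, 55), duration_rem(24, 55));
	return 0;
}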
+ */ +void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_txdone(struct data_entry *entry, + const int status, const int retry); +void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb, + struct rxdata_entry_desc *desc); + +/* + * TX descriptor initializer + */ +void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control); + +/* + * mac80211 handlers. + */ +int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control); +int rt2x00mac_start(struct ieee80211_hw *hw); +void rt2x00mac_stop(struct ieee80211_hw *hw); +int rt2x00mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf); +void rt2x00mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf); +int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf); +int rt2x00mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf); +int rt2x00mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats); +int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats); +void rt2x00mac_erp_ie_changed(struct ieee80211_hw *hw, u8 changes, + int cts_protection, int preamble); +int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params); + +/* + * Driver allocation handlers. + */ +int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev); +#ifdef CONFIG_PM +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state); +int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev); +#endif /* CONFIG_PM */ + +#endif /* RT2X00_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c new file mode 100644 index 000000000000..12914cf7156c --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -0,0 +1,205 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic configuration routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include <linux/kernel.h> +#include <linux/module.h> + +#include "rt2x00.h" +#include "rt2x00lib.h" + + +/* + * The MAC and BSSID addressess are simple array of bytes, + * these arrays are little endian, so when sending the addressess + * to the drivers, copy the it into a endian-signed variable. + * + * Note that all devices (except rt2500usb) have 32 bits + * register word sizes. This means that whatever variable we + * pass _must_ be a multiple of 32 bits. 
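+ * (hence the 6 byte address is copied into an array of two __le32 words + * in the functions below).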
Otherwise the device + * might not accept what we are sending to it. + * This will also make it easier for the driver to write + * the data to the device. + * + * Also note that when NULL is passed as address the + * we will send 00:00:00:00:00 to the device to clear the address. + * This will prevent the device being confused when it wants + * to ACK frames or consideres itself associated. + */ +void rt2x00lib_config_mac_addr(struct rt2x00_dev *rt2x00dev, u8 *mac) +{ + __le32 reg[2]; + + memset(®, 0, sizeof(reg)); + if (mac) + memcpy(®, mac, ETH_ALEN); + + rt2x00dev->ops->lib->config_mac_addr(rt2x00dev, ®[0]); +} + +void rt2x00lib_config_bssid(struct rt2x00_dev *rt2x00dev, u8 *bssid) +{ + __le32 reg[2]; + + memset(®, 0, sizeof(reg)); + if (bssid) + memcpy(®, bssid, ETH_ALEN); + + rt2x00dev->ops->lib->config_bssid(rt2x00dev, ®[0]); +} + +void rt2x00lib_config_type(struct rt2x00_dev *rt2x00dev, const int type) +{ + int tsf_sync; + + switch (type) { + case IEEE80211_IF_TYPE_IBSS: + case IEEE80211_IF_TYPE_AP: + tsf_sync = TSF_SYNC_BEACON; + break; + case IEEE80211_IF_TYPE_STA: + tsf_sync = TSF_SYNC_INFRA; + break; + default: + tsf_sync = TSF_SYNC_NONE; + break; + } + + rt2x00dev->ops->lib->config_type(rt2x00dev, type, tsf_sync); +} + +void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, const int force_config) +{ + struct rt2x00lib_conf libconf; + struct ieee80211_hw_mode *mode; + struct ieee80211_rate *rate; + int flags = 0; + int short_slot_time; + + /* + * In some situations we want to force all configurations + * to be reloaded (When resuming for instance). + */ + if (force_config) { + flags = CONFIG_UPDATE_ALL; + goto config; + } + + /* + * Check which configuration options have been + * updated and should be send to the device. + */ + if (rt2x00dev->rx_status.phymode != conf->phymode) + flags |= CONFIG_UPDATE_PHYMODE; + if (rt2x00dev->rx_status.channel != conf->channel) + flags |= CONFIG_UPDATE_CHANNEL; + if (rt2x00dev->tx_power != conf->power_level) + flags |= CONFIG_UPDATE_TXPOWER; + if (rt2x00dev->rx_status.antenna == conf->antenna_sel_rx) + flags |= CONFIG_UPDATE_ANTENNA; + + /* + * The following configuration options are never + * stored anywhere and will always be updated. + */ + flags |= CONFIG_UPDATE_SLOT_TIME; + flags |= CONFIG_UPDATE_BEACON_INT; + + /* + * We have determined what options should be updated, + * now precalculate device configuration values depending + * on what configuration options need to be updated. + */ +config: + memset(&libconf, 0, sizeof(libconf)); + + if (flags & CONFIG_UPDATE_PHYMODE) { + switch (conf->phymode) { + case MODE_IEEE80211A: + libconf.phymode = HWMODE_A; + break; + case MODE_IEEE80211B: + libconf.phymode = HWMODE_B; + break; + case MODE_IEEE80211G: + libconf.phymode = HWMODE_G; + break; + default: + ERROR(rt2x00dev, + "Attempt to configure unsupported mode (%d)" + "Defaulting to 802.11b", conf->phymode); + libconf.phymode = HWMODE_B; + } + + mode = &rt2x00dev->hwmodes[libconf.phymode]; + rate = &mode->rates[mode->num_rates - 1]; + + libconf.basic_rates = + DEVICE_GET_RATE_FIELD(rate->val, RATEMASK) & DEV_BASIC_RATEMASK; + } + + if (flags & CONFIG_UPDATE_CHANNEL) { + memcpy(&libconf.rf, + &rt2x00dev->spec.channels[conf->channel_val], + sizeof(libconf.rf)); + } + + if (flags & CONFIG_UPDATE_SLOT_TIME) { + short_slot_time = conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME; + + libconf.slot_time = + short_slot_time ? SHORT_SLOT_TIME : SLOT_TIME; + libconf.sifs = SIFS; + libconf.pifs = short_slot_time ? 
SHORT_PIFS : PIFS; + libconf.difs = short_slot_time ? SHORT_DIFS : DIFS; + libconf.eifs = EIFS; + } + + libconf.conf = conf; + + /* + * Start configuration. + */ + rt2x00dev->ops->lib->config(rt2x00dev, flags, &libconf); + + /* + * Some configuration changes affect the link quality + * which means we need to reset the link tuner. + */ + if (flags & (CONFIG_UPDATE_CHANNEL | CONFIG_UPDATE_ANTENNA)) + rt2x00lib_reset_link_tuner(rt2x00dev); + + rt2x00dev->curr_hwmode = libconf.phymode; + rt2x00dev->rx_status.phymode = conf->phymode; + rt2x00dev->rx_status.freq = conf->freq; + rt2x00dev->rx_status.channel = conf->channel; + rt2x00dev->tx_power = conf->power_level; + rt2x00dev->rx_status.antenna = conf->antenna_sel_rx; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c new file mode 100644 index 000000000000..9275d6f9517e --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -0,0 +1,368 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 debugfs specific routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/uaccess.h> + +#include "rt2x00.h" +#include "rt2x00lib.h" + +#define PRINT_LINE_LEN_MAX 32 + +struct rt2x00debug_intf { + /* + * Pointer to driver structure where + * this debugfs entry belongs to. + */ + struct rt2x00_dev *rt2x00dev; + + /* + * Reference to the rt2x00debug structure + * which can be used to communicate with + * the registers. + */ + const struct rt2x00debug *debug; + + /* + * Debugfs entries for: + * - driver folder + * - driver file + * - chipset file + * - device flags file + * - register offset/value files + * - eeprom offset/value files + * - bbp offset/value files + * - rf offset/value files + */ + struct dentry *driver_folder; + struct dentry *driver_entry; + struct dentry *chipset_entry; + struct dentry *dev_flags; + struct dentry *csr_off_entry; + struct dentry *csr_val_entry; + struct dentry *eeprom_off_entry; + struct dentry *eeprom_val_entry; + struct dentry *bbp_off_entry; + struct dentry *bbp_val_entry; + struct dentry *rf_off_entry; + struct dentry *rf_val_entry; + + /* + * Driver and chipset files will use a data buffer + * that has been created in advance. This will simplify + * the code since we can use the debugfs functions. + */ + struct debugfs_blob_wrapper driver_blob; + struct debugfs_blob_wrapper chipset_blob; + + /* + * Requested offset for each register type. 
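+ * Each offset is exposed as a writable *_offset debugfs file and selects + * the word that the matching *_value file will read or write.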
+ */ + unsigned int offset_csr; + unsigned int offset_eeprom; + unsigned int offset_bbp; + unsigned int offset_rf; +}; + +static int rt2x00debug_file_open(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = inode->i_private; + + file->private_data = inode->i_private; + + if (!try_module_get(intf->debug->owner)) + return -EBUSY; + + return 0; +} + +static int rt2x00debug_file_release(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = file->private_data; + + module_put(intf->debug->owner); + + return 0; +} + +#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ +static ssize_t rt2x00debug_read_##__name(struct file *file, \ + char __user *buf, \ + size_t length, \ + loff_t *offset) \ +{ \ + struct rt2x00debug_intf *intf = file->private_data; \ + const struct rt2x00debug *debug = intf->debug; \ + char line[16]; \ + size_t size; \ + __type value; \ + \ + if (*offset) \ + return 0; \ + \ + if (intf->offset_##__name >= debug->__name.word_count) \ + return -EINVAL; \ + \ + debug->__name.read(intf->rt2x00dev, \ + intf->offset_##__name, &value); \ + \ + size = sprintf(line, __format, value); \ + \ + if (copy_to_user(buf, line, size)) \ + return -EFAULT; \ + \ + *offset += size; \ + return size; \ +} + +#define RT2X00DEBUGFS_OPS_WRITE(__name, __type) \ +static ssize_t rt2x00debug_write_##__name(struct file *file, \ + const char __user *buf,\ + size_t length, \ + loff_t *offset) \ +{ \ + struct rt2x00debug_intf *intf = file->private_data; \ + const struct rt2x00debug *debug = intf->debug; \ + char line[16]; \ + size_t size; \ + __type value; \ + \ + if (*offset) \ + return 0; \ + \ + if (!capable(CAP_NET_ADMIN)) \ + return -EPERM; \ + \ + if (intf->offset_##__name >= debug->__name.word_count) \ + return -EINVAL; \ + \ + if (copy_from_user(line, buf, length)) \ + return -EFAULT; \ + \ + size = strlen(line); \ + value = simple_strtoul(line, NULL, 0); \ + \ + debug->__name.write(intf->rt2x00dev, \ + intf->offset_##__name, value); \ + \ + *offset += size; \ + return size; \ +} + +#define RT2X00DEBUGFS_OPS(__name, __format, __type) \ +RT2X00DEBUGFS_OPS_READ(__name, __format, __type); \ +RT2X00DEBUGFS_OPS_WRITE(__name, __type); \ + \ +static const struct file_operations rt2x00debug_fop_##__name = {\ + .owner = THIS_MODULE, \ + .read = rt2x00debug_read_##__name, \ + .write = rt2x00debug_write_##__name, \ + .open = rt2x00debug_file_open, \ + .release = rt2x00debug_file_release, \ +}; + +RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32); +RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16); +RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8); +RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32); + +static ssize_t rt2x00debug_read_dev_flags(struct file *file, + char __user *buf, + size_t length, + loff_t *offset) +{ + struct rt2x00debug_intf *intf = file->private_data; + char line[16]; + size_t size; + + if (*offset) + return 0; + + size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags); + + if (copy_to_user(buf, line, size)) + return -EFAULT; + + *offset += size; + return size; +} + +static const struct file_operations rt2x00debug_fop_dev_flags = { + .owner = THIS_MODULE, + .read = rt2x00debug_read_dev_flags, + .open = rt2x00debug_file_open, + .release = rt2x00debug_file_release, +}; + +static struct dentry *rt2x00debug_create_file_driver(const char *name, + struct rt2x00debug_intf + *intf, + struct debugfs_blob_wrapper + *blob) +{ + char *data; + + data = kzalloc(3 * PRINT_LINE_LEN_MAX, GFP_KERNEL); + if (!data) + return NULL; + + blob->data = data; + data += sprintf(data, 
"driver: %s\n", intf->rt2x00dev->ops->name); + data += sprintf(data, "version: %s\n", DRV_VERSION); + data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__); + blob->size = strlen(blob->data); + + return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); +} + +static struct dentry *rt2x00debug_create_file_chipset(const char *name, + struct rt2x00debug_intf + *intf, + struct + debugfs_blob_wrapper + *blob) +{ + const struct rt2x00debug *debug = intf->debug; + char *data; + + data = kzalloc(4 * PRINT_LINE_LEN_MAX, GFP_KERNEL); + if (!data) + return NULL; + + blob->data = data; + data += sprintf(data, "csr length: %d\n", debug->csr.word_count); + data += sprintf(data, "eeprom length: %d\n", debug->eeprom.word_count); + data += sprintf(data, "bbp length: %d\n", debug->bbp.word_count); + data += sprintf(data, "rf length: %d\n", debug->rf.word_count); + blob->size = strlen(blob->data); + + return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); +} + +void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) +{ + const struct rt2x00debug *debug = rt2x00dev->ops->debugfs; + struct rt2x00debug_intf *intf; + + intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL); + if (!intf) { + ERROR(rt2x00dev, "Failed to allocate debug handler.\n"); + return; + } + + intf->debug = debug; + intf->rt2x00dev = rt2x00dev; + rt2x00dev->debugfs_intf = intf; + + intf->driver_folder = + debugfs_create_dir(intf->rt2x00dev->ops->name, + rt2x00dev->hw->wiphy->debugfsdir); + if (IS_ERR(intf->driver_folder)) + goto exit; + + intf->driver_entry = + rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); + if (IS_ERR(intf->driver_entry)) + goto exit; + + intf->chipset_entry = + rt2x00debug_create_file_chipset("chipset", + intf, &intf->chipset_blob); + if (IS_ERR(intf->chipset_entry)) + goto exit; + + intf->dev_flags = debugfs_create_file("dev_flags", S_IRUGO, + intf->driver_folder, intf, + &rt2x00debug_fop_dev_flags); + if (IS_ERR(intf->dev_flags)) + goto exit; + +#define RT2X00DEBUGFS_CREATE_ENTRY(__intf, __name) \ +({ \ + (__intf)->__name##_off_entry = \ + debugfs_create_u32(__stringify(__name) "_offset", \ + S_IRUGO | S_IWUSR, \ + (__intf)->driver_folder, \ + &(__intf)->offset_##__name); \ + if (IS_ERR((__intf)->__name##_off_entry)) \ + goto exit; \ + \ + (__intf)->__name##_val_entry = \ + debugfs_create_file(__stringify(__name) "_value", \ + S_IRUGO | S_IWUSR, \ + (__intf)->driver_folder, \ + (__intf), &rt2x00debug_fop_##__name);\ + if (IS_ERR((__intf)->__name##_val_entry)) \ + goto exit; \ +}) + + RT2X00DEBUGFS_CREATE_ENTRY(intf, csr); + RT2X00DEBUGFS_CREATE_ENTRY(intf, eeprom); + RT2X00DEBUGFS_CREATE_ENTRY(intf, bbp); + RT2X00DEBUGFS_CREATE_ENTRY(intf, rf); + +#undef RT2X00DEBUGFS_CREATE_ENTRY + + return; + +exit: + rt2x00debug_deregister(rt2x00dev); + ERROR(rt2x00dev, "Failed to register debug handler.\n"); + + return; +} + +void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) +{ + const struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; + + if (unlikely(!intf)) + return; + + debugfs_remove(intf->rf_val_entry); + debugfs_remove(intf->rf_off_entry); + debugfs_remove(intf->bbp_val_entry); + debugfs_remove(intf->bbp_off_entry); + debugfs_remove(intf->eeprom_val_entry); + debugfs_remove(intf->eeprom_off_entry); + debugfs_remove(intf->csr_val_entry); + debugfs_remove(intf->csr_off_entry); + debugfs_remove(intf->dev_flags); + debugfs_remove(intf->chipset_entry); + debugfs_remove(intf->driver_entry); + debugfs_remove(intf->driver_folder); + 
kfree(intf->chipset_blob.data); + kfree(intf->driver_blob.data); + kfree(intf); + + rt2x00dev->debugfs_intf = NULL; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h new file mode 100644 index 000000000000..860e8fa3a0da --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00debug.h @@ -0,0 +1,57 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00debug + Abstract: Data structures for the rt2x00debug. + */ + +#ifndef RT2X00DEBUG_H +#define RT2X00DEBUG_H + +struct rt2x00_dev; + +#define RT2X00DEBUGFS_REGISTER_ENTRY(__name, __type) \ +struct reg##__name { \ + void (*read)(const struct rt2x00_dev *rt2x00dev, \ + const unsigned int word, __type *data); \ + void (*write)(const struct rt2x00_dev *rt2x00dev, \ + const unsigned int word, __type data); \ + \ + unsigned int word_size; \ + unsigned int word_count; \ +} __name + +struct rt2x00debug { + /* + * Reference to the modules structure. + */ + struct module *owner; + + /* + * Register access entries. + */ + RT2X00DEBUGFS_REGISTER_ENTRY(csr, u32); + RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16); + RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8); + RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32); +}; + +#endif /* RT2X00DEBUG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c new file mode 100644 index 000000000000..bb6f46cfbb9f --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -0,0 +1,1202 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic device routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include <linux/kernel.h> +#include <linux/module.h> + +#include "rt2x00.h" +#include "rt2x00lib.h" + +/* + * Ring handler. 
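+ * rt2x00lib_get_ring() translates a mac80211 queue index into one of the + * allocated data rings (regular TX, Beacon or Atim).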
+ */ +struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev, + const unsigned int queue) +{ + int beacon = test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Check if we are requesting a reqular TX ring, + * or if we are requesting a Beacon or Atim ring. + * For Atim rings, we should check if it is supported. + */ + if (queue < rt2x00dev->hw->queues && rt2x00dev->tx) + return &rt2x00dev->tx[queue]; + + if (!rt2x00dev->bcn || !beacon) + return NULL; + + if (queue == IEEE80211_TX_QUEUE_BEACON) + return &rt2x00dev->bcn[0]; + else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + return &rt2x00dev->bcn[1]; + + return NULL; +} +EXPORT_SYMBOL_GPL(rt2x00lib_get_ring); + +/* + * Link tuning handlers + */ +static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt2x00_clear_link(&rt2x00dev->link); + + /* + * Reset the link tuner. + */ + rt2x00dev->ops->lib->reset_tuner(rt2x00dev); + + queue_delayed_work(rt2x00dev->hw->workqueue, + &rt2x00dev->link.work, LINK_TUNE_INTERVAL); +} + +static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + cancel_delayed_work_sync(&rt2x00dev->link.work); +} + +void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + rt2x00lib_stop_link_tuner(rt2x00dev); + rt2x00lib_start_link_tuner(rt2x00dev); +} + +/* + * Radio control handlers. + */ +int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + int status; + + /* + * Don't enable the radio twice. + * And check if the hardware button has been disabled. + */ + if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || + test_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags)) + return 0; + + /* + * Enable radio. + */ + status = rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_ON); + if (status) + return status; + + __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); + + /* + * Enable RX. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + + /* + * Start the TX queues. + */ + ieee80211_start_queues(rt2x00dev->hw); + + return 0; +} + +void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Stop all scheduled work. + */ + if (work_pending(&rt2x00dev->beacon_work)) + cancel_work_sync(&rt2x00dev->beacon_work); + if (work_pending(&rt2x00dev->filter_work)) + cancel_work_sync(&rt2x00dev->filter_work); + if (work_pending(&rt2x00dev->config_work)) + cancel_work_sync(&rt2x00dev->config_work); + + /* + * Stop the TX queues. + */ + ieee80211_stop_queues(rt2x00dev->hw); + + /* + * Disable RX. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + + /* + * Disable radio. + */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); +} + +void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + /* + * When we are disabling the RX, we should also stop the link tuner. + */ + if (state == STATE_RADIO_RX_OFF) + rt2x00lib_stop_link_tuner(rt2x00dev); + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); + + /* + * When we are enabling the RX, we should also start the link tuner. 
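+ * This is only done when an interface is actually present.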
+ */ + if (state == STATE_RADIO_RX_ON && + is_interface_present(&rt2x00dev->interface)) + rt2x00lib_start_link_tuner(rt2x00dev); +} + +static void rt2x00lib_precalculate_link_signal(struct link *link) +{ + if (link->rx_failed || link->rx_success) + link->rx_percentage = + (link->rx_success * 100) / + (link->rx_failed + link->rx_success); + else + link->rx_percentage = 50; + + if (link->tx_failed || link->tx_success) + link->tx_percentage = + (link->tx_success * 100) / + (link->tx_failed + link->tx_success); + else + link->tx_percentage = 50; + + link->rx_success = 0; + link->rx_failed = 0; + link->tx_success = 0; + link->tx_failed = 0; +} + +static int rt2x00lib_calculate_link_signal(struct rt2x00_dev *rt2x00dev, + int rssi) +{ + int rssi_percentage = 0; + int signal; + + /* + * We need a positive value for the RSSI. + */ + if (rssi < 0) + rssi += rt2x00dev->rssi_offset; + + /* + * Calculate the different percentages, + * which will be used for the signal. + */ + if (rt2x00dev->rssi_offset) + rssi_percentage = (rssi * 100) / rt2x00dev->rssi_offset; + + /* + * Add the individual percentages and use the WEIGHT + * defines to calculate the current link signal. + */ + signal = ((WEIGHT_RSSI * rssi_percentage) + + (WEIGHT_TX * rt2x00dev->link.tx_percentage) + + (WEIGHT_RX * rt2x00dev->link.rx_percentage)) / 100; + + return (signal > 100) ? 100 : signal; +} + +static void rt2x00lib_link_tuner(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, link.work.work); + + /* + * When the radio is shutting down we should + * immediately cease all link tuning. + */ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Update statistics. + */ + rt2x00dev->ops->lib->link_stats(rt2x00dev); + + rt2x00dev->low_level_stats.dot11FCSErrorCount += + rt2x00dev->link.rx_failed; + + /* + * Only perform the link tuning when Link tuning + * has been enabled (This could have been disabled from the EEPROM). + */ + if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags)) + rt2x00dev->ops->lib->link_tuner(rt2x00dev); + + /* + * Precalculate a portion of the link signal which is + * in based on the tx/rx success/failure counters. + */ + rt2x00lib_precalculate_link_signal(&rt2x00dev->link); + + /* + * Increase tuner counter, and reschedule the next link tuner run. + */ + rt2x00dev->link.count++; + queue_delayed_work(rt2x00dev->hw->workqueue, &rt2x00dev->link.work, + LINK_TUNE_INTERVAL); +} + +static void rt2x00lib_packetfilter_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, filter_work); + unsigned int filter = rt2x00dev->interface.filter; + + /* + * Since we had stored the filter inside interface.filter, + * we should now clear that field. Otherwise the driver will + * assume nothing has changed (*total_flags will be compared + * to interface.filter to determine if any action is required). + */ + rt2x00dev->interface.filter = 0; + + rt2x00dev->ops->hw->configure_filter(rt2x00dev->hw, + filter, &filter, 0, NULL); +} + +static void rt2x00lib_configuration_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, config_work); + int preamble = !test_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); + + rt2x00mac_erp_ie_changed(rt2x00dev->hw, + IEEE80211_ERP_CHANGE_PREAMBLE, 0, preamble); +} + +/* + * Interrupt context handlers. 
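+ * rt2x00lib_beacondone() does not build the new beacon itself, it only + * schedules beacon_work; rt2x00lib_beacondone_scheduled() below fetches + * the beacon from mac80211 and passes it to the driver.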
+ */ +static void rt2x00lib_beacondone_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, beacon_work); + struct data_ring *ring = + rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + struct data_entry *entry = rt2x00_get_data_entry(ring); + struct sk_buff *skb; + + skb = ieee80211_beacon_get(rt2x00dev->hw, + rt2x00dev->interface.id, + &entry->tx_status.control); + if (!skb) + return; + + rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb, + &entry->tx_status.control); + + dev_kfree_skb(skb); +} + +void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->beacon_work); +} +EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); + +void rt2x00lib_txdone(struct data_entry *entry, + const int status, const int retry) +{ + struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev; + struct ieee80211_tx_status *tx_status = &entry->tx_status; + struct ieee80211_low_level_stats *stats = &rt2x00dev->low_level_stats; + int success = !!(status == TX_SUCCESS || status == TX_SUCCESS_RETRY); + int fail = !!(status == TX_FAIL_RETRY || status == TX_FAIL_INVALID || + status == TX_FAIL_OTHER); + + /* + * Update TX statistics. + */ + tx_status->flags = 0; + tx_status->ack_signal = 0; + tx_status->excessive_retries = (status == TX_FAIL_RETRY); + tx_status->retry_count = retry; + rt2x00dev->link.tx_success += success; + rt2x00dev->link.tx_failed += retry + fail; + + if (!(tx_status->control.flags & IEEE80211_TXCTL_NO_ACK)) { + if (success) + tx_status->flags |= IEEE80211_TX_STATUS_ACK; + else + stats->dot11ACKFailureCount++; + } + + tx_status->queue_length = entry->ring->stats.limit; + tx_status->queue_number = tx_status->control.queue; + + if (tx_status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) { + if (success) + stats->dot11RTSSuccessCount++; + else + stats->dot11RTSFailureCount++; + } + + /* + * Send the tx_status to mac80211, + * that method also cleans up the skb structure. + */ + ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb, tx_status); + entry->skb = NULL; +} +EXPORT_SYMBOL_GPL(rt2x00lib_txdone); + +void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb, + struct rxdata_entry_desc *desc) +{ + struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev; + struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; + struct ieee80211_hw_mode *mode; + struct ieee80211_rate *rate; + unsigned int i; + int val = 0; + + /* + * Update RX statistics. + */ + mode = &rt2x00dev->hwmodes[rt2x00dev->curr_hwmode]; + for (i = 0; i < mode->num_rates; i++) { + rate = &mode->rates[i]; + + /* + * When frame was received with an OFDM bitrate, + * the signal is the PLCP value. If it was received with + * a CCK bitrate the signal is the rate in 0.5kbit/s. 
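+ * The loop below walks the rate table of the current hw mode and + * translates that signal value back into a mac80211 rate value.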
+ */ + if (!desc->ofdm) + val = DEVICE_GET_RATE_FIELD(rate->val, RATE); + else + val = DEVICE_GET_RATE_FIELD(rate->val, PLCP); + + if (val == desc->signal) { + val = rate->val; + break; + } + } + + rt2x00_update_link_rssi(&rt2x00dev->link, desc->rssi); + rt2x00dev->link.rx_success++; + rx_status->rate = val; + rx_status->signal = + rt2x00lib_calculate_link_signal(rt2x00dev, desc->rssi); + rx_status->ssi = desc->rssi; + rx_status->flag = desc->flags; + + /* + * Send frame to mac80211 + */ + ieee80211_rx_irqsafe(rt2x00dev->hw, skb, rx_status); +} +EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); + +/* + * TX descriptor initializer + */ +void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + struct txdata_entry_desc desc; + struct data_ring *ring; + int tx_rate; + int bitrate; + int duration; + int residual; + u16 frame_control; + u16 seq_ctrl; + + /* + * Make sure the descriptor is properly cleared. + */ + memset(&desc, 0x00, sizeof(desc)); + + /* + * Get ring pointer, if we fail to obtain the + * correct ring, then use the first TX ring. + */ + ring = rt2x00lib_get_ring(rt2x00dev, control->queue); + if (!ring) + ring = rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + + desc.cw_min = ring->tx_params.cw_min; + desc.cw_max = ring->tx_params.cw_max; + desc.aifs = ring->tx_params.aifs; + + /* + * Identify queue + */ + if (control->queue < rt2x00dev->hw->queues) + desc.queue = control->queue; + else if (control->queue == IEEE80211_TX_QUEUE_BEACON || + control->queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + desc.queue = QUEUE_MGMT; + else + desc.queue = QUEUE_OTHER; + + /* + * Read required fields from ieee80211 header. + */ + frame_control = le16_to_cpu(ieee80211hdr->frame_control); + seq_ctrl = le16_to_cpu(ieee80211hdr->seq_ctrl); + + tx_rate = control->tx_rate; + + /* + * Check if this is a RTS/CTS frame + */ + if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) { + __set_bit(ENTRY_TXD_BURST, &desc.flags); + if (is_rts_frame(frame_control)) + __set_bit(ENTRY_TXD_RTS_FRAME, &desc.flags); + if (control->rts_cts_rate) + tx_rate = control->rts_cts_rate; + } + + /* + * Check for OFDM + */ + if (DEVICE_GET_RATE_FIELD(tx_rate, RATEMASK) & DEV_OFDM_RATEMASK) + __set_bit(ENTRY_TXD_OFDM_RATE, &desc.flags); + + /* + * Check if more fragments are pending + */ + if (ieee80211_get_morefrag(ieee80211hdr)) { + __set_bit(ENTRY_TXD_BURST, &desc.flags); + __set_bit(ENTRY_TXD_MORE_FRAG, &desc.flags); + } + + /* + * Beacons and probe responses require the tsf timestamp + * to be inserted into the frame. + */ + if (control->queue == IEEE80211_TX_QUEUE_BEACON || + is_probe_resp(frame_control)) + __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc.flags); + + /* + * Determine with what IFS priority this frame should be send. + * Set ifs to IFS_SIFS when the this is not the first fragment, + * or this fragment came after RTS/CTS. + */ + if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 || + test_bit(ENTRY_TXD_RTS_FRAME, &desc.flags)) + desc.ifs = IFS_SIFS; + else + desc.ifs = IFS_BACKOFF; + + /* + * PLCP setup + * Length calculation depends on OFDM/CCK rate. + */ + desc.signal = DEVICE_GET_RATE_FIELD(tx_rate, PLCP); + desc.service = 0x04; + + if (test_bit(ENTRY_TXD_OFDM_RATE, &desc.flags)) { + desc.length_high = ((length + FCS_LEN) >> 6) & 0x3f; + desc.length_low = ((length + FCS_LEN) & 0x3f); + } else { + bitrate = DEVICE_GET_RATE_FIELD(tx_rate, RATE); + + /* + * Convert length to microseconds. 
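+ * For example a 260 byte frame (including FCS) at bitrate 110 (11Mbit/s) + * gives (260 * 8 * 10) / 110 = 189us with a remainder of 10, which is + * rounded up to 190us below.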
+ */ + residual = get_duration_res(length + FCS_LEN, bitrate); + duration = get_duration(length + FCS_LEN, bitrate); + + if (residual != 0) { + duration++; + + /* + * Check if we need to set the Length Extension + */ + if (bitrate == 110 && residual <= 3) + desc.service |= 0x80; + } + + desc.length_high = (duration >> 8) & 0xff; + desc.length_low = duration & 0xff; + + /* + * When preamble is enabled we should set the + * preamble bit for the signal. + */ + if (DEVICE_GET_RATE_FIELD(tx_rate, PREAMBLE)) + desc.signal |= 0x08; + } + + rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, txd, &desc, + ieee80211hdr, length, control); +} +EXPORT_SYMBOL_GPL(rt2x00lib_write_tx_desc); + +/* + * Driver initialization handlers. + */ +static void rt2x00lib_channel(struct ieee80211_channel *entry, + const int channel, const int tx_power, + const int value) +{ + entry->chan = channel; + if (channel <= 14) + entry->freq = 2407 + (5 * channel); + else + entry->freq = 5000 + (5 * channel); + entry->val = value; + entry->flag = + IEEE80211_CHAN_W_IBSS | + IEEE80211_CHAN_W_ACTIVE_SCAN | + IEEE80211_CHAN_W_SCAN; + entry->power_level = tx_power; + entry->antenna_max = 0xff; +} + +static void rt2x00lib_rate(struct ieee80211_rate *entry, + const int rate, const int mask, + const int plcp, const int flags) +{ + entry->rate = rate; + entry->val = + DEVICE_SET_RATE_FIELD(rate, RATE) | + DEVICE_SET_RATE_FIELD(mask, RATEMASK) | + DEVICE_SET_RATE_FIELD(plcp, PLCP); + entry->flags = flags; + entry->val2 = entry->val; + if (entry->flags & IEEE80211_RATE_PREAMBLE2) + entry->val2 |= DEVICE_SET_RATE_FIELD(1, PREAMBLE); + entry->min_rssi_ack = 0; + entry->min_rssi_ack_delta = 0; +} + +static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, + struct hw_mode_spec *spec) +{ + struct ieee80211_hw *hw = rt2x00dev->hw; + struct ieee80211_hw_mode *hwmodes; + struct ieee80211_channel *channels; + struct ieee80211_rate *rates; + unsigned int i; + unsigned char tx_power; + + hwmodes = kzalloc(sizeof(*hwmodes) * spec->num_modes, GFP_KERNEL); + if (!hwmodes) + goto exit; + + channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); + if (!channels) + goto exit_free_modes; + + rates = kzalloc(sizeof(*rates) * spec->num_rates, GFP_KERNEL); + if (!rates) + goto exit_free_channels; + + /* + * Initialize Rate list. + */ + rt2x00lib_rate(&rates[0], 10, DEV_RATEMASK_1MB, + 0x00, IEEE80211_RATE_CCK); + rt2x00lib_rate(&rates[1], 20, DEV_RATEMASK_2MB, + 0x01, IEEE80211_RATE_CCK_2); + rt2x00lib_rate(&rates[2], 55, DEV_RATEMASK_5_5MB, + 0x02, IEEE80211_RATE_CCK_2); + rt2x00lib_rate(&rates[3], 110, DEV_RATEMASK_11MB, + 0x03, IEEE80211_RATE_CCK_2); + + if (spec->num_rates > 4) { + rt2x00lib_rate(&rates[4], 60, DEV_RATEMASK_6MB, + 0x0b, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[5], 90, DEV_RATEMASK_9MB, + 0x0f, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[6], 120, DEV_RATEMASK_12MB, + 0x0a, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[7], 180, DEV_RATEMASK_18MB, + 0x0e, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[8], 240, DEV_RATEMASK_24MB, + 0x09, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[9], 360, DEV_RATEMASK_36MB, + 0x0d, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[10], 480, DEV_RATEMASK_48MB, + 0x08, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[11], 540, DEV_RATEMASK_54MB, + 0x0c, IEEE80211_RATE_OFDM); + } + + /* + * Initialize Channel list. 
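+ * Channels 1-14 (2.4GHz) take their tx power from tx_power_bg, 5GHz + * channels use tx_power_a when provided and tx_power_default otherwise.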
+ */ + for (i = 0; i < spec->num_channels; i++) { + if (spec->channels[i].channel <= 14) + tx_power = spec->tx_power_bg[i]; + else if (spec->tx_power_a) + tx_power = spec->tx_power_a[i]; + else + tx_power = spec->tx_power_default; + + rt2x00lib_channel(&channels[i], + spec->channels[i].channel, tx_power, i); + } + + /* + * Intitialize 802.11b + * Rates: CCK. + * Channels: OFDM. + */ + if (spec->num_modes > HWMODE_B) { + hwmodes[HWMODE_B].mode = MODE_IEEE80211B; + hwmodes[HWMODE_B].num_channels = 14; + hwmodes[HWMODE_B].num_rates = 4; + hwmodes[HWMODE_B].channels = channels; + hwmodes[HWMODE_B].rates = rates; + } + + /* + * Intitialize 802.11g + * Rates: CCK, OFDM. + * Channels: OFDM. + */ + if (spec->num_modes > HWMODE_G) { + hwmodes[HWMODE_G].mode = MODE_IEEE80211G; + hwmodes[HWMODE_G].num_channels = 14; + hwmodes[HWMODE_G].num_rates = spec->num_rates; + hwmodes[HWMODE_G].channels = channels; + hwmodes[HWMODE_G].rates = rates; + } + + /* + * Intitialize 802.11a + * Rates: OFDM. + * Channels: OFDM, UNII, HiperLAN2. + */ + if (spec->num_modes > HWMODE_A) { + hwmodes[HWMODE_A].mode = MODE_IEEE80211A; + hwmodes[HWMODE_A].num_channels = spec->num_channels - 14; + hwmodes[HWMODE_A].num_rates = spec->num_rates - 4; + hwmodes[HWMODE_A].channels = &channels[14]; + hwmodes[HWMODE_A].rates = &rates[4]; + } + + if (spec->num_modes > HWMODE_G && + ieee80211_register_hwmode(hw, &hwmodes[HWMODE_G])) + goto exit_free_rates; + + if (spec->num_modes > HWMODE_B && + ieee80211_register_hwmode(hw, &hwmodes[HWMODE_B])) + goto exit_free_rates; + + if (spec->num_modes > HWMODE_A && + ieee80211_register_hwmode(hw, &hwmodes[HWMODE_A])) + goto exit_free_rates; + + rt2x00dev->hwmodes = hwmodes; + + return 0; + +exit_free_rates: + kfree(rates); + +exit_free_channels: + kfree(channels); + +exit_free_modes: + kfree(hwmodes); + +exit: + ERROR(rt2x00dev, "Allocation ieee80211 modes failed.\n"); + return -ENOMEM; +} + +static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) +{ + if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags)) + ieee80211_unregister_hw(rt2x00dev->hw); + + if (likely(rt2x00dev->hwmodes)) { + kfree(rt2x00dev->hwmodes->channels); + kfree(rt2x00dev->hwmodes->rates); + kfree(rt2x00dev->hwmodes); + rt2x00dev->hwmodes = NULL; + } +} + +static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + int status; + + /* + * Initialize HW modes. + */ + status = rt2x00lib_probe_hw_modes(rt2x00dev, spec); + if (status) + return status; + + /* + * Register HW. + */ + status = ieee80211_register_hw(rt2x00dev->hw); + if (status) { + rt2x00lib_remove_hw(rt2x00dev); + return status; + } + + __set_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags); + + return 0; +} + +/* + * Initialization/uninitialization handlers. + */ +static int rt2x00lib_alloc_entries(struct data_ring *ring, + const u16 max_entries, const u16 data_size, + const u16 desc_size) +{ + struct data_entry *entry; + unsigned int i; + + ring->stats.limit = max_entries; + ring->data_size = data_size; + ring->desc_size = desc_size; + + /* + * Allocate all ring entries. + */ + entry = kzalloc(ring->stats.limit * sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < ring->stats.limit; i++) { + entry[i].flags = 0; + entry[i].ring = ring; + entry[i].skb = NULL; + } + + ring->entry = entry; + + return 0; +} + +static int rt2x00lib_alloc_ring_entries(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + /* + * Allocate the RX ring. 
+ */ + if (rt2x00lib_alloc_entries(rt2x00dev->rx, RX_ENTRIES, DATA_FRAME_SIZE, + rt2x00dev->ops->rxd_size)) + return -ENOMEM; + + /* + * First allocate the TX rings. + */ + txring_for_each(rt2x00dev, ring) { + if (rt2x00lib_alloc_entries(ring, TX_ENTRIES, DATA_FRAME_SIZE, + rt2x00dev->ops->txd_size)) + return -ENOMEM; + } + + if (!test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags)) + return 0; + + /* + * Allocate the BEACON ring. + */ + if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[0], BEACON_ENTRIES, + MGMT_FRAME_SIZE, rt2x00dev->ops->txd_size)) + return -ENOMEM; + + /* + * Allocate the Atim ring. + */ + if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[1], ATIM_ENTRIES, + DATA_FRAME_SIZE, rt2x00dev->ops->txd_size)) + return -ENOMEM; + + return 0; +} + +static void rt2x00lib_free_ring_entries(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + ring_for_each(rt2x00dev, ring) { + kfree(ring->entry); + ring->entry = NULL; + } +} + +void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) + return; + + /* + * Unregister rfkill. + */ + rt2x00rfkill_unregister(rt2x00dev); + + /* + * Allow the HW to uninitialize. + */ + rt2x00dev->ops->lib->uninitialize(rt2x00dev); + + /* + * Free allocated ring entries. + */ + rt2x00lib_free_ring_entries(rt2x00dev); +} + +int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) +{ + int status; + + if (test_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) + return 0; + + /* + * Allocate all ring entries. + */ + status = rt2x00lib_alloc_ring_entries(rt2x00dev); + if (status) { + ERROR(rt2x00dev, "Ring entries allocation failed.\n"); + return status; + } + + /* + * Initialize the device. + */ + status = rt2x00dev->ops->lib->initialize(rt2x00dev); + if (status) + goto exit; + + __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); + + /* + * Register the rfkill handler. + */ + status = rt2x00rfkill_register(rt2x00dev); + if (status) + goto exit_unitialize; + + return 0; + +exit_unitialize: + rt2x00lib_uninitialize(rt2x00dev); + +exit: + rt2x00lib_free_ring_entries(rt2x00dev); + + return status; +} + +/* + * driver allocation handlers. + */ +static int rt2x00lib_alloc_rings(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + /* + * We need the following rings: + * RX: 1 + * TX: hw->queues + * Beacon: 1 (if required) + * Atim: 1 (if required) + */ + rt2x00dev->data_rings = 1 + rt2x00dev->hw->queues + + (2 * test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags)); + + ring = kzalloc(rt2x00dev->data_rings * sizeof(*ring), GFP_KERNEL); + if (!ring) { + ERROR(rt2x00dev, "Ring allocation failed.\n"); + return -ENOMEM; + } + + /* + * Initialize pointers + */ + rt2x00dev->rx = ring; + rt2x00dev->tx = &rt2x00dev->rx[1]; + if (test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags)) + rt2x00dev->bcn = &rt2x00dev->tx[rt2x00dev->hw->queues]; + + /* + * Initialize ring parameters. + * cw_min: 2^5 = 32. + * cw_max: 2^10 = 1024. + */ + ring_for_each(rt2x00dev, ring) { + ring->rt2x00dev = rt2x00dev; + ring->tx_params.aifs = 2; + ring->tx_params.cw_min = 5; + ring->tx_params.cw_max = 10; + } + + return 0; +} + +static void rt2x00lib_free_rings(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rx); + rt2x00dev->rx = NULL; + rt2x00dev->tx = NULL; + rt2x00dev->bcn = NULL; +} + +int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) +{ + int retval = -ENOMEM; + + /* + * Let the driver probe the device to detect the capabilities. 
+ */ + retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev); + if (retval) { + ERROR(rt2x00dev, "Failed to allocate device.\n"); + goto exit; + } + + /* + * Initialize configuration work. + */ + INIT_WORK(&rt2x00dev->beacon_work, rt2x00lib_beacondone_scheduled); + INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled); + INIT_WORK(&rt2x00dev->config_work, rt2x00lib_configuration_scheduled); + INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner); + + /* + * Reset current working type. + */ + rt2x00dev->interface.type = INVALID_INTERFACE; + + /* + * Allocate ring array. + */ + retval = rt2x00lib_alloc_rings(rt2x00dev); + if (retval) + goto exit; + + /* + * Initialize ieee80211 structure. + */ + retval = rt2x00lib_probe_hw(rt2x00dev); + if (retval) { + ERROR(rt2x00dev, "Failed to initialize hw.\n"); + goto exit; + } + + /* + * Allocatie rfkill. + */ + retval = rt2x00rfkill_allocate(rt2x00dev); + if (retval) + goto exit; + + /* + * Open the debugfs entry. + */ + rt2x00debug_register(rt2x00dev); + + __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + return 0; + +exit: + rt2x00lib_remove_dev(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev); + +void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) +{ + __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + /* + * Disable radio. + */ + rt2x00lib_disable_radio(rt2x00dev); + + /* + * Uninitialize device. + */ + rt2x00lib_uninitialize(rt2x00dev); + + /* + * Close debugfs entry. + */ + rt2x00debug_deregister(rt2x00dev); + + /* + * Free rfkill + */ + rt2x00rfkill_free(rt2x00dev); + + /* + * Free ieee80211_hw memory. + */ + rt2x00lib_remove_hw(rt2x00dev); + + /* + * Free firmware image. + */ + rt2x00lib_free_firmware(rt2x00dev); + + /* + * Free ring structures. + */ + rt2x00lib_free_rings(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); + +/* + * Device state handlers + */ +#ifdef CONFIG_PM +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) +{ + int retval; + + NOTICE(rt2x00dev, "Going to sleep.\n"); + __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + /* + * Only continue if mac80211 has open interfaces. + */ + if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) + goto exit; + __set_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags); + + /* + * Disable radio and unitialize all items + * that must be recreated on resume. + */ + rt2x00mac_stop(rt2x00dev->hw); + rt2x00lib_uninitialize(rt2x00dev); + rt2x00debug_deregister(rt2x00dev); + +exit: + /* + * Set device mode to sleep for power management. + */ + retval = rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP); + if (retval) + return retval; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00lib_suspend); + +int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) +{ + struct interface *intf = &rt2x00dev->interface; + int retval; + + NOTICE(rt2x00dev, "Waking up.\n"); + __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + /* + * Open the debugfs entry. + */ + rt2x00debug_register(rt2x00dev); + + /* + * Only continue if mac80211 had open interfaces. + */ + if (!__test_and_clear_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags)) + return 0; + + /* + * Reinitialize device and all active interfaces. + */ + retval = rt2x00mac_start(rt2x00dev->hw); + if (retval) + goto exit; + + /* + * Reconfigure device. 
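+ * Force a full reload of the device configuration and restore the MAC + * address, BSSID and interface type that were active before the suspend.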
+ */ + rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 1); + if (!rt2x00dev->hw->conf.radio_enabled) + rt2x00lib_disable_radio(rt2x00dev); + + rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); + rt2x00lib_config_bssid(rt2x00dev, intf->bssid); + rt2x00lib_config_type(rt2x00dev, intf->type); + + /* + * It is possible that during that mac80211 has attempted + * to send frames while we were suspending or resuming. + * In that case we have disabled the TX queue and should + * now enable it again + */ + ieee80211_start_queues(rt2x00dev->hw); + + /* + * When in Master or Ad-hoc mode, + * restart Beacon transmitting by faking a beacondone event. + */ + if (intf->type == IEEE80211_IF_TYPE_AP || + intf->type == IEEE80211_IF_TYPE_IBSS) + rt2x00lib_beacondone(rt2x00dev); + + return 0; + +exit: + rt2x00lib_disable_radio(rt2x00dev); + rt2x00lib_uninitialize(rt2x00dev); + rt2x00debug_deregister(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00lib_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00lib module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c new file mode 100644 index 000000000000..236025f8b90f --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c @@ -0,0 +1,124 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 firmware loading routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include <linux/crc-itu-t.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev) +{ + struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); + const struct firmware *fw; + char *fw_name; + int retval; + u16 crc; + u16 tmp; + + /* + * Read correct firmware from harddisk. + */ + fw_name = rt2x00dev->ops->lib->get_firmware_name(rt2x00dev); + if (!fw_name) { + ERROR(rt2x00dev, + "Invalid firmware filename.\n" + "Please file bug report to %s.\n", DRV_PROJECT); + return -EINVAL; + } + + INFO(rt2x00dev, "Loading firmware file '%s'.\n", fw_name); + + retval = request_firmware(&fw, fw_name, device); + if (retval) { + ERROR(rt2x00dev, "Failed to request Firmware.\n"); + return retval; + } + + if (!fw || !fw->size || !fw->data) { + ERROR(rt2x00dev, "Failed to read Firmware.\n"); + return -ENOENT; + } + + /* + * Validate the firmware using 16 bit CRC. + * The last 2 bytes of the firmware are the CRC + * so substract those 2 bytes from the CRC checksum, + * and set those 2 bytes to 0 when calculating CRC. 
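+ * In other words: the checksum is calculated over the image with its last + * two bytes replaced by zeroes and compared against the big endian CRC + * stored in those two bytes.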
+ */ + tmp = 0; + crc = crc_itu_t(0, fw->data, fw->size - 2); + crc = crc_itu_t(crc, (u8 *)&tmp, 2); + + if (crc != (fw->data[fw->size - 2] << 8 | fw->data[fw->size - 1])) { + ERROR(rt2x00dev, "Firmware CRC error.\n"); + retval = -ENOENT; + goto exit; + } + + INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", + fw->data[fw->size - 4], fw->data[fw->size - 3]); + + rt2x00dev->fw = fw; + + return 0; + +exit: + release_firmware(fw); + + return retval; +} + +int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + if (!rt2x00dev->fw) { + retval = rt2x00lib_request_firmware(rt2x00dev); + if (retval) + return retval; + } + + /* + * Send firmware to the device. + */ + retval = rt2x00dev->ops->lib->load_firmware(rt2x00dev, + rt2x00dev->fw->data, + rt2x00dev->fw->size); + return retval; +} + +void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) +{ + release_firmware(rt2x00dev->fw); + rt2x00dev->fw = NULL; +} + diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h new file mode 100644 index 000000000000..298faa9d3f61 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -0,0 +1,119 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: Data structures and definitions for the rt2x00lib module. + */ + +#ifndef RT2X00LIB_H +#define RT2X00LIB_H + +/* + * Interval defines + * Both the link tuner as the rfkill will be called once per second. + */ +#define LINK_TUNE_INTERVAL ( round_jiffies(HZ) ) +#define RFKILL_POLL_INTERVAL ( 1000 ) + +/* + * Radio control handlers. + */ +int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state); +void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev); + +/* + * Initialization handlers. + */ +int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * Configuration handlers. + */ +void rt2x00lib_config_mac_addr(struct rt2x00_dev *rt2x00dev, u8 *mac); +void rt2x00lib_config_bssid(struct rt2x00_dev *rt2x00dev, u8 *bssid); +void rt2x00lib_config_type(struct rt2x00_dev *rt2x00dev, const int type); +void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, const int force_config); + +/* + * Firmware handlers. 
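+ * When CONFIG_RT2X00_LIB_FIRMWARE is not set these are replaced by the + * empty inline stubs below.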
+ */ +#ifdef CONFIG_RT2X00_LIB_FIRMWARE +int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev); +#else +static inline int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} +static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_FIRMWARE */ + +/* + * Debugfs handlers. + */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); +void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); +#else +static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +/* + * RFkill handlers. + */ +#ifdef CONFIG_RT2X00_LIB_RFKILL +int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev); +void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev); +int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev); +void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev); +#else +static inline int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} + +static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} + +static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_RFKILL */ + +#endif /* RT2X00LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c new file mode 100644 index 000000000000..4a6a0bd01ff1 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -0,0 +1,438 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mac + Abstract: rt2x00 generic mac80211 routines. 
+ */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include <linux/kernel.h> +#include <linux/module.h> + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, + struct sk_buff *frag_skb, + struct ieee80211_tx_control *control) +{ + struct sk_buff *skb; + int size; + + if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + size = sizeof(struct ieee80211_cts); + else + size = sizeof(struct ieee80211_rts); + + skb = dev_alloc_skb(size + rt2x00dev->hw->extra_tx_headroom); + if (!skb) { + WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n"); + return NETDEV_TX_BUSY; + } + + skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); + skb_put(skb, size); + + if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + ieee80211_ctstoself_get(rt2x00dev->hw, rt2x00dev->interface.id, + frag_skb->data, frag_skb->len, control, + (struct ieee80211_cts *)(skb->data)); + else + ieee80211_rts_get(rt2x00dev->hw, rt2x00dev->interface.id, + frag_skb->data, frag_skb->len, control, + (struct ieee80211_rts *)(skb->data)); + + if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) { + WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; + struct data_ring *ring; + u16 frame_control; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + * Note that we can only stop the TX queues inside the TX path + * due to possible race conditions in mac80211. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { + ieee80211_stop_queues(hw); + return 0; + } + + /* + * Determine which ring to put packet on. + */ + ring = rt2x00lib_get_ring(rt2x00dev, control->queue); + if (unlikely(!ring)) { + ERROR(rt2x00dev, + "Attempt to send packet over invalid queue %d.\n" + "Please file bug report to %s.\n", + control->queue, DRV_PROJECT); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * If CTS/RTS is required. and this frame is not CTS or RTS, + * create and queue that frame first. But make sure we have + * at least enough entries available to send this CTS/RTS + * frame as well as the data frame. + */ + frame_control = le16_to_cpu(ieee80211hdr->frame_control); + if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) && + (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS | + IEEE80211_TXCTL_USE_CTS_PROTECT))) { + if (rt2x00_ring_free(ring) <= 1) + return NETDEV_TX_BUSY; + + if (rt2x00mac_tx_rts_cts(rt2x00dev, ring, skb, control)) + return NETDEV_TX_BUSY; + } + + if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) + return NETDEV_TX_BUSY; + + if (rt2x00dev->ops->lib->kick_tx_queue) + rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); + + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(rt2x00mac_tx); + +int rt2x00mac_start(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + int status; + + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || + test_bit(DEVICE_STARTED, &rt2x00dev->flags)) + return 0; + + /* + * If this is the first interface which is added, + * we should load the firmware now. 
+ */ + if (test_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags)) { + status = rt2x00lib_load_firmware(rt2x00dev); + if (status) + return status; + } + + /* + * Initialize the device. + */ + status = rt2x00lib_initialize(rt2x00dev); + if (status) + return status; + + /* + * Enable radio. + */ + status = rt2x00lib_enable_radio(rt2x00dev); + if (status) { + rt2x00lib_uninitialize(rt2x00dev); + return status; + } + + __set_bit(DEVICE_STARTED, &rt2x00dev->flags); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_start); + +void rt2x00mac_stop(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) + return; + + /* + * Perhaps we can add something smarter here, + * but for now just disabling the radio should do. + */ + rt2x00lib_disable_radio(rt2x00dev); + + __clear_bit(DEVICE_STARTED, &rt2x00dev->flags); +} +EXPORT_SYMBOL_GPL(rt2x00mac_stop); + +int rt2x00mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + + /* + * Don't allow interfaces to be added while + * either the device has disappeared or when + * another interface is already present. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || + is_interface_present(intf)) + return -ENOBUFS; + + intf->id = conf->if_id; + intf->type = conf->type; + if (conf->type == IEEE80211_IF_TYPE_AP) + memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); + memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); + + /* + * The MAC address must be configured after the device + * has been initialized. Otherwise the device can reset + * the MAC registers. + */ + rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); + rt2x00lib_config_type(rt2x00dev, conf->type); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); + +void rt2x00mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + + /* + * Don't allow interfaces to be removed while + * either the device has disappeared or when + * no interface is present. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || + !is_interface_present(intf)) + return; + + intf->id = 0; + intf->type = INVALID_INTERFACE; + memset(&intf->bssid, 0x00, ETH_ALEN); + memset(&intf->mac, 0x00, ETH_ALEN); + + /* + * Make sure the bssid and mac address registers + * are cleared to prevent false ACKing of frames. + */ + rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); + rt2x00lib_config_bssid(rt2x00dev, intf->bssid); + rt2x00lib_config_type(rt2x00dev, intf->type); +} +EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); + +int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) + return 0; + + /* + * Check if we need to disable the radio, + * if this is not the case, at least the RX must be disabled. + */ + if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) { + if (!conf->radio_enabled) + rt2x00lib_disable_radio(rt2x00dev); + else + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + } + + rt2x00lib_config(rt2x00dev, conf, 0); + + /* + * Reenable RX only if the radio should be on.
+ */ + if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + else if (conf->radio_enabled) + return rt2x00lib_enable_radio(rt2x00dev); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_config); + +int rt2x00mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + int status; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) + return 0; + + /* + * If the given type does not match the configured type, + * there has been a problem. + */ + if (conf->type != intf->type) + return -EINVAL; + + /* + * If the interface does not work in master mode, + * then the bssid value in the interface structure + * should now be set. + */ + if (conf->type != IEEE80211_IF_TYPE_AP) + memcpy(&intf->bssid, conf->bssid, ETH_ALEN); + rt2x00lib_config_bssid(rt2x00dev, intf->bssid); + + /* + * We only need to initialize the beacon when master mode is enabled. + */ + if (conf->type != IEEE80211_IF_TYPE_AP || !conf->beacon) + return 0; + + status = rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, + conf->beacon, + conf->beacon_control); + if (status) + dev_kfree_skb(conf->beacon); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00mac_config_interface); + +int rt2x00mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * The dot11ACKFailureCount, dot11RTSFailureCount and + * dot11RTSSuccessCount are updated in interrupt time. + * dot11FCSErrorCount is updated in the link tuner. + */ + memcpy(stats, &rt2x00dev->low_level_stats, sizeof(*stats)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_get_stats); + +int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + unsigned int i; + + for (i = 0; i < hw->queues; i++) + memcpy(&stats->data[i], &rt2x00dev->tx[i].stats, + sizeof(rt2x00dev->tx[i].stats)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats); + +void rt2x00mac_erp_ie_changed(struct ieee80211_hw *hw, u8 changes, + int cts_protection, int preamble) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + int short_preamble; + int ack_timeout; + int ack_consume_time; + int difs; + + /* + * We only support changing preamble mode. + */ + if (!(changes & IEEE80211_ERP_CHANGE_PREAMBLE)) + return; + + short_preamble = !preamble; + preamble = !!(preamble) ? PREAMBLE : SHORT_PREAMBLE; + + difs = (hw->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) ? + SHORT_DIFS : DIFS; + ack_timeout = difs + PLCP + preamble + get_duration(ACK_SIZE, 10); + + ack_consume_time = SIFS + PLCP + preamble + get_duration(ACK_SIZE, 10); + + if (short_preamble) + __set_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); + else + __clear_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); + + rt2x00dev->ops->lib->config_preamble(rt2x00dev, short_preamble, + ack_timeout, ack_consume_time); +} +EXPORT_SYMBOL_GPL(rt2x00mac_erp_ie_changed); + +int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_ring *ring; + + ring = rt2x00lib_get_ring(rt2x00dev, queue); + if (unlikely(!ring)) + return -EINVAL; + + /* + * The passed variables are stored as real value ((2^n)-1). 
+ * Ralink registers need to know the bit number 'n'. + */ + if (params->cw_min) + ring->tx_params.cw_min = fls(params->cw_min); + else + ring->tx_params.cw_min = 5; /* cw_min: 2^5 = 32. */ + + if (params->cw_max) + ring->tx_params.cw_max = fls(params->cw_max); + else + ring->tx_params.cw_max = 10; /* cw_max: 2^10 = 1024. */ + + if (params->aifs) + ring->tx_params.aifs = params->aifs; + else + ring->tx_params.aifs = 2; + + INFO(rt2x00dev, + "Configured TX ring %d - CWmin: %d, CWmax: %d, Aifs: %d.\n", + queue, ring->tx_params.cw_min, ring->tx_params.cw_max, + ring->tx_params.aifs); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_conf_tx); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c new file mode 100644 index 000000000000..2780df00623c --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -0,0 +1,474 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00pci + Abstract: rt2x00 generic pci device routines. + */ + +/* + * Set environment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00pci" + +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rt2x00.h" +#include "rt2x00pci.h" + +/* + * Beacon handlers. + */ +int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_ring *ring = + rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + struct data_entry *entry = rt2x00_get_data_entry(ring); + + /* + * Just in case mac80211 doesn't set this correctly, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * Update the beacon entry. + */ + memcpy(entry->data_addr, skb->data, skb->len); + rt2x00lib_write_tx_desc(rt2x00dev, entry->priv, + (struct ieee80211_hdr *)skb->data, + skb->len, control); + + /* + * Enable beacon generation. + */ + rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update); + +/* + * TX data handlers.
+ */ +int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; + struct data_entry *entry = rt2x00_get_data_entry(ring); + struct data_desc *txd = entry->priv; + u32 word; + + if (rt2x00_ring_full(ring)) { + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) || + rt2x00_get_field32(word, TXD_ENTRY_VALID)) { + ERROR(rt2x00dev, + "Arrived at non-free entry in the non-full queue %d.\n" + "Please file bug report to %s.\n", + control->queue, DRV_PROJECT); + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + entry->skb = skb; + memcpy(&entry->tx_status.control, control, sizeof(*control)); + memcpy(entry->data_addr, skb->data, skb->len); + rt2x00lib_write_tx_desc(rt2x00dev, txd, ieee80211hdr, + skb->len, control); + + rt2x00_ring_index_inc(ring); + + if (rt2x00_ring_full(ring)) + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data); + +/* + * RX data handlers. + */ +void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_entry *entry; + struct data_desc *rxd; + struct sk_buff *skb; + struct rxdata_entry_desc desc; + u32 word; + + while (1) { + entry = rt2x00_get_data_entry(ring); + rxd = entry->priv; + rt2x00_desc_read(rxd, 0, &word); + + if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC)) + break; + + memset(&desc, 0x00, sizeof(desc)); + rt2x00dev->ops->lib->fill_rxdone(entry, &desc); + + /* + * Allocate the sk_buffer, initialize it and copy + * all data into it. + */ + skb = dev_alloc_skb(desc.size + NET_IP_ALIGN); + if (!skb) + return; + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, desc.size); + memcpy(skb->data, entry->data_addr, desc.size); + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(entry, skb, &desc); + + if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) { + rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_inc(ring); + } +} +EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); + +/* + * Device initialization handlers. + */ +#define priv_offset(__ring, __i) \ +({ \ + ring->data_addr + (i * ring->desc_size); \ +}) + +#define data_addr_offset(__ring, __i) \ +({ \ + (__ring)->data_addr + \ + ((__ring)->stats.limit * (__ring)->desc_size) + \ + ((__i) * (__ring)->data_size); \ +}) + +#define data_dma_offset(__ring, __i) \ +({ \ + (__ring)->data_dma + \ + ((__ring)->stats.limit * (__ring)->desc_size) + \ + ((__i) * (__ring)->data_size); \ +}) + +static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + unsigned int i; + + /* + * Allocate DMA memory for descriptor and buffer. + */ + ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev), + rt2x00_get_ring_size(ring), + &ring->data_dma); + if (!ring->data_addr) + return -ENOMEM; + + /* + * Initialize all ring entries to contain valid + * addresses. 
+ */ + for (i = 0; i < ring->stats.limit; i++) { + ring->entry[i].priv = priv_offset(ring, i); + ring->entry[i].data_addr = data_addr_offset(ring, i); + ring->entry[i].data_dma = data_dma_offset(ring, i); + } + + return 0; +} + +static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + if (ring->data_addr) + pci_free_consistent(rt2x00dev_pci(rt2x00dev), + rt2x00_get_ring_size(ring), + ring->data_addr, ring->data_dma); + ring->data_addr = NULL; +} + +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); + struct data_ring *ring; + int status; + + /* + * Allocate DMA + */ + ring_for_each(rt2x00dev, ring) { + status = rt2x00pci_alloc_dma(rt2x00dev, ring); + if (status) + goto exit; + } + + /* + * Register interrupt handler. + */ + status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler, + IRQF_SHARED, pci_name(pci_dev), rt2x00dev); + if (status) { + ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", + pci_dev->irq, status); + return status; + } + + return 0; + +exit: + rt2x00pci_uninitialize(rt2x00dev); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00pci_initialize); + +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + /* + * Free irq line. + */ + free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev); + + /* + * Free DMA + */ + ring_for_each(rt2x00dev, ring) + rt2x00pci_free_dma(rt2x00dev, ring); +} +EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); + +/* + * PCI driver handlers. + */ +static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; + + if (rt2x00dev->csr_addr) { + iounmap(rt2x00dev->csr_addr); + rt2x00dev->csr_addr = NULL; + } +} + +static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); + + rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0), + pci_resource_len(pci_dev, 0)); + if (!rt2x00dev->csr_addr) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + + rt2x00pci_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data; + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + retval = pci_request_regions(pci_dev, pci_name(pci_dev)); + if (retval) { + ERROR_PROBE("PCI request regions failed.\n"); + return retval; + } + + retval = pci_enable_device(pci_dev); + if (retval) { + ERROR_PROBE("Enable device failed.\n"); + goto exit_release_regions; + } + + pci_set_master(pci_dev); + + if (pci_set_mwi(pci_dev)) + ERROR_PROBE("MWI not available.\n"); + + if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) && + pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { + ERROR_PROBE("PCI DMA not supported.\n"); + retval = -EIO; + goto exit_disable_device; + } + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + retval = -ENOMEM; + goto exit_disable_device; + } + + pci_set_drvdata(pci_dev, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = pci_dev; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + + retval = 
rt2x00pci_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00pci_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + +exit_disable_device: + if (retval != -EBUSY) + pci_disable_device(pci_dev); + +exit_release_regions: + pci_release_regions(pci_dev); + + pci_set_drvdata(pci_dev, NULL); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00pci_probe); + +void rt2x00pci_remove(struct pci_dev *pci_dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00pci_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + /* + * Free the PCI device data. + */ + pci_set_drvdata(pci_dev, NULL); + pci_disable_device(pci_dev); + pci_release_regions(pci_dev); +} +EXPORT_SYMBOL_GPL(rt2x00pci_remove); + +#ifdef CONFIG_PM +int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + retval = rt2x00lib_suspend(rt2x00dev, state); + if (retval) + return retval; + + rt2x00pci_free_reg(rt2x00dev); + + pci_save_state(pci_dev); + pci_disable_device(pci_dev); + return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); +} +EXPORT_SYMBOL_GPL(rt2x00pci_suspend); + +int rt2x00pci_resume(struct pci_dev *pci_dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + if (pci_set_power_state(pci_dev, PCI_D0) || + pci_enable_device(pci_dev) || + pci_restore_state(pci_dev)) { + ERROR(rt2x00dev, "Failed to resume device.\n"); + return -EIO; + } + + retval = rt2x00pci_alloc_reg(rt2x00dev); + if (retval) + return retval; + + retval = rt2x00lib_resume(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00pci_free_reg(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00pci_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00pci module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h new file mode 100644 index 000000000000..82adeac061d0 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -0,0 +1,127 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00pci + Abstract: Data structures for the rt2x00pci module. + */ + +#ifndef RT2X00PCI_H +#define RT2X00PCI_H + +#include <linux/io.h> + +/* + * This variable should be used with the + * pci_driver structure initialization. 
+ */ +#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) + +/* + * Register defines. + * Some registers require multiple attempts before success, + * in those cases REGISTER_BUSY_COUNT attempts should be + * taken with a REGISTER_BUSY_DELAY interval. + */ +#define REGISTER_BUSY_COUNT 5 +#define REGISTER_BUSY_DELAY 100 + +/* + * Descriptor availability flags. + * All PCI device descriptors have these 2 flags + * with the exact same definition. + * By storing them here we can use them inside rt2x00pci + * for some simple entry availability checking. + */ +#define TXD_ENTRY_OWNER_NIC FIELD32(0x00000001) +#define TXD_ENTRY_VALID FIELD32(0x00000002) +#define RXD_ENTRY_OWNER_NIC FIELD32(0x00000001) + +/* + * Register access. + */ +static inline void rt2x00pci_register_read(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + u32 *value) +{ + *value = readl(rt2x00dev->csr_addr + offset); +} + +static inline void +rt2x00pci_register_multiread(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + void *value, const u16 length) +{ + memcpy_fromio(value, rt2x00dev->csr_addr + offset, length); +} + +static inline void rt2x00pci_register_write(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + u32 value) +{ + writel(value, rt2x00dev->csr_addr + offset); +} + +static inline void +rt2x00pci_register_multiwrite(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + void *value, const u16 length) +{ + memcpy_toio(rt2x00dev->csr_addr + offset, value, length); +} + +/* + * Beacon handlers. + */ +int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control); + +/* + * TX data handlers. + */ +int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control); + +/* + * RX data handlers. + */ +void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); + +/* + * Device initialization handlers. + */ +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * PCI driver handlers. + */ +int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id); +void rt2x00pci_remove(struct pci_dev *pci_dev); +#ifdef CONFIG_PM +int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state); +int rt2x00pci_resume(struct pci_dev *pci_dev); +#else +#define rt2x00pci_suspend NULL +#define rt2x00pci_resume NULL +#endif /* CONFIG_PM */ + +#endif /* RT2X00PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h new file mode 100644 index 000000000000..838421216da0 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00reg.h @@ -0,0 +1,292 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 generic register information. + */ + +#ifndef RT2X00REG_H +#define RT2X00REG_H + +/* + * TX result flags. + */ +enum TX_STATUS { + TX_SUCCESS = 0, + TX_SUCCESS_RETRY = 1, + TX_FAIL_RETRY = 2, + TX_FAIL_INVALID = 3, + TX_FAIL_OTHER = 4, +}; + +/* + * Antenna values + */ +enum antenna { + ANTENNA_SW_DIVERSITY = 0, + ANTENNA_A = 1, + ANTENNA_B = 2, + ANTENNA_HW_DIVERSITY = 3, +}; + +/* + * Led mode values. + */ +enum led_mode { + LED_MODE_DEFAULT = 0, + LED_MODE_TXRX_ACTIVITY = 1, + LED_MODE_SIGNAL_STRENGTH = 2, + LED_MODE_ASUS = 3, + LED_MODE_ALPHA = 4, +}; + +/* + * TSF sync values + */ +enum tsf_sync { + TSF_SYNC_NONE = 0, + TSF_SYNC_INFRA = 1, + TSF_SYNC_BEACON = 2, +}; + +/* + * Device states + */ +enum dev_state { + STATE_DEEP_SLEEP = 0, + STATE_SLEEP = 1, + STATE_STANDBY = 2, + STATE_AWAKE = 3, + +/* + * Additional device states, these values are + * not strict since they are not directly passed + * into the device. + */ + STATE_RADIO_ON, + STATE_RADIO_OFF, + STATE_RADIO_RX_ON, + STATE_RADIO_RX_OFF, + STATE_RADIO_IRQ_ON, + STATE_RADIO_IRQ_OFF, +}; + +/* + * IFS backoff values + */ +enum ifs { + IFS_BACKOFF = 0, + IFS_SIFS = 1, + IFS_NEW_BACKOFF = 2, + IFS_NONE = 3, +}; + +/* + * Cipher types for hardware encryption + */ +enum cipher { + CIPHER_NONE = 0, + CIPHER_WEP64 = 1, + CIPHER_WEP128 = 2, + CIPHER_TKIP = 3, + CIPHER_AES = 4, +/* + * The following fields were added by rt61pci and rt73usb. + */ + CIPHER_CKIP64 = 5, + CIPHER_CKIP128 = 6, + CIPHER_TKIP_NO_MIC = 7, +}; + +/* + * Register handlers. + * We store the position of a register field inside a field structure, + * This will simplify the process of setting and reading a certain field + * inside the register while making sure the process remains byte order safe. + */ +struct rt2x00_field8 { + u8 bit_offset; + u8 bit_mask; +}; + +struct rt2x00_field16 { + u16 bit_offset; + u16 bit_mask; +}; + +struct rt2x00_field32 { + u32 bit_offset; + u32 bit_mask; +}; + +/* + * Power of two check, this will check + * if the mask that has been given contains + * and contiguous set of bits. 
+ */ +#define is_power_of_two(x) ( !((x) & ((x)-1)) ) +#define low_bit_mask(x) ( ((x)-1) & ~(x) ) +#define is_valid_mask(x) is_power_of_two(1 + (x) + low_bit_mask(x)) + +#define FIELD8(__mask) \ +({ \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (u8)(__mask)); \ + (struct rt2x00_field8) { \ + __ffs(__mask), (__mask) \ + }; \ +}) + +#define FIELD16(__mask) \ +({ \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (u16)(__mask));\ + (struct rt2x00_field16) { \ + __ffs(__mask), (__mask) \ + }; \ +}) + +#define FIELD32(__mask) \ +({ \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (u32)(__mask));\ + (struct rt2x00_field32) { \ + __ffs(__mask), (__mask) \ + }; \ +}) + +static inline void rt2x00_set_field32(u32 *reg, + const struct rt2x00_field32 field, + const u32 value) +{ + *reg &= ~(field.bit_mask); + *reg |= (value << field.bit_offset) & field.bit_mask; +} + +static inline u32 rt2x00_get_field32(const u32 reg, + const struct rt2x00_field32 field) +{ + return (reg & field.bit_mask) >> field.bit_offset; +} + +static inline void rt2x00_set_field16(u16 *reg, + const struct rt2x00_field16 field, + const u16 value) +{ + *reg &= ~(field.bit_mask); + *reg |= (value << field.bit_offset) & field.bit_mask; +} + +static inline u16 rt2x00_get_field16(const u16 reg, + const struct rt2x00_field16 field) +{ + return (reg & field.bit_mask) >> field.bit_offset; +} + +static inline void rt2x00_set_field8(u8 *reg, + const struct rt2x00_field8 field, + const u8 value) +{ + *reg &= ~(field.bit_mask); + *reg |= (value << field.bit_offset) & field.bit_mask; +} + +static inline u8 rt2x00_get_field8(const u8 reg, + const struct rt2x00_field8 field) +{ + return (reg & field.bit_mask) >> field.bit_offset; +} + +/* + * Device specific rate value. + * We will have to create the device specific rate value + * passed to the ieee80211 kernel. We need to make it consist of + * multiple fields because we want to store more than one device specific + * value inside the value. + * 1 - rate, stored as 100 kbit/s. + * 2 - preamble, short_preamble enabled flag. + * 3 - MASK_RATE, which rates are enabled in this mode, this mask + * corresponds with the TX register format for the current device. + * 4 - plcp, 802.11b rates are device specific, + * 802.11g rates are set according to the ieee802.11a-1999 p.14. + * The bit to enable preamble is set in a separate define.
+ */ +#define DEV_RATE FIELD32(0x000007ff) +#define DEV_PREAMBLE FIELD32(0x00000800) +#define DEV_RATEMASK FIELD32(0x00fff000) +#define DEV_PLCP FIELD32(0xff000000) + +/* + * Bitfields + */ +#define DEV_RATEBIT_1MB ( 1 << 0 ) +#define DEV_RATEBIT_2MB ( 1 << 1 ) +#define DEV_RATEBIT_5_5MB ( 1 << 2 ) +#define DEV_RATEBIT_11MB ( 1 << 3 ) +#define DEV_RATEBIT_6MB ( 1 << 4 ) +#define DEV_RATEBIT_9MB ( 1 << 5 ) +#define DEV_RATEBIT_12MB ( 1 << 6 ) +#define DEV_RATEBIT_18MB ( 1 << 7 ) +#define DEV_RATEBIT_24MB ( 1 << 8 ) +#define DEV_RATEBIT_36MB ( 1 << 9 ) +#define DEV_RATEBIT_48MB ( 1 << 10 ) +#define DEV_RATEBIT_54MB ( 1 << 11 ) + +/* + * Bitmasks for DEV_RATEMASK + */ +#define DEV_RATEMASK_1MB ( (DEV_RATEBIT_1MB << 1) -1 ) +#define DEV_RATEMASK_2MB ( (DEV_RATEBIT_2MB << 1) -1 ) +#define DEV_RATEMASK_5_5MB ( (DEV_RATEBIT_5_5MB << 1) -1 ) +#define DEV_RATEMASK_11MB ( (DEV_RATEBIT_11MB << 1) -1 ) +#define DEV_RATEMASK_6MB ( (DEV_RATEBIT_6MB << 1) -1 ) +#define DEV_RATEMASK_9MB ( (DEV_RATEBIT_9MB << 1) -1 ) +#define DEV_RATEMASK_12MB ( (DEV_RATEBIT_12MB << 1) -1 ) +#define DEV_RATEMASK_18MB ( (DEV_RATEBIT_18MB << 1) -1 ) +#define DEV_RATEMASK_24MB ( (DEV_RATEBIT_24MB << 1) -1 ) +#define DEV_RATEMASK_36MB ( (DEV_RATEBIT_36MB << 1) -1 ) +#define DEV_RATEMASK_48MB ( (DEV_RATEBIT_48MB << 1) -1 ) +#define DEV_RATEMASK_54MB ( (DEV_RATEBIT_54MB << 1) -1 ) + +/* + * Bitmask groups of bitrates + */ +#define DEV_BASIC_RATEMASK \ + ( DEV_RATEMASK_11MB | \ + DEV_RATEBIT_6MB | DEV_RATEBIT_12MB | DEV_RATEBIT_24MB ) + +#define DEV_CCK_RATEMASK ( DEV_RATEMASK_11MB ) +#define DEV_OFDM_RATEMASK ( DEV_RATEMASK_54MB & ~DEV_CCK_RATEMASK ) + +/* + * Macro's to set and get specific fields from the device specific val and val2 + * fields inside the ieee80211_rate entry. + */ +#define DEVICE_SET_RATE_FIELD(__value, __mask) \ + (int)( ((__value) << DEV_##__mask.bit_offset) & DEV_##__mask.bit_mask ) + +#define DEVICE_GET_RATE_FIELD(__value, __mask) \ + (int)( ((__value) & DEV_##__mask.bit_mask) >> DEV_##__mask.bit_offset ) + +#endif /* RT2X00REG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c new file mode 100644 index 000000000000..a0f8b8e0a24b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c @@ -0,0 +1,146 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00rfkill + Abstract: rt2x00 rfkill routines. 
+ */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include <linux/input-polldev.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/rfkill.h> + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00rfkill_toggle_radio(void *data, enum rfkill_state state) +{ + struct rt2x00_dev *rt2x00dev = data; + int retval = 0; + + if (unlikely(!rt2x00dev)) + return 0; + + /* + * Only continue if there are enabled interfaces. + */ + if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) + return 0; + + if (state == RFKILL_STATE_ON) { + INFO(rt2x00dev, "Hardware button pressed, enabling radio.\n"); + __clear_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); + retval = rt2x00lib_enable_radio(rt2x00dev); + } else if (state == RFKILL_STATE_OFF) { + INFO(rt2x00dev, "Hardware button pressed, disabling radio.\n"); + __set_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); + rt2x00lib_disable_radio(rt2x00dev); + } + + return retval; +} + +static void rt2x00rfkill_poll(struct input_polled_dev *poll_dev) +{ + struct rt2x00_dev *rt2x00dev = poll_dev->private; + int state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev); + + if (rt2x00dev->rfkill->state != state) + input_report_key(poll_dev->input, KEY_WLAN, 1); +} + +int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return 0; + + retval = rfkill_register(rt2x00dev->rfkill); + if (retval) { + ERROR(rt2x00dev, "Failed to register rfkill handler.\n"); + return retval; + } + + retval = input_register_polled_device(rt2x00dev->poll_dev); + if (retval) { + ERROR(rt2x00dev, "Failed to register polled device.\n"); + rfkill_unregister(rt2x00dev->rfkill); + return retval; + } + + return 0; +} + +void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return; + + input_unregister_polled_device(rt2x00dev->poll_dev); + rfkill_unregister(rt2x00dev->rfkill); +} + +int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) +{ + struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); + + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return 0; + + rt2x00dev->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN); + if (!rt2x00dev->rfkill) { + ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); + return -ENOMEM; + } + + rt2x00dev->rfkill->name = rt2x00dev->ops->name; + rt2x00dev->rfkill->data = rt2x00dev; + rt2x00dev->rfkill->state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev); + rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; + + rt2x00dev->poll_dev = input_allocate_polled_device(); + if (!rt2x00dev->poll_dev) { + ERROR(rt2x00dev, "Failed to allocate polled device.\n"); + rfkill_free(rt2x00dev->rfkill); + return -ENOMEM; + } + + rt2x00dev->poll_dev->private = rt2x00dev; + rt2x00dev->poll_dev->poll = rt2x00rfkill_poll; + rt2x00dev->poll_dev->poll_interval = RFKILL_POLL_INTERVAL; + + return 0; +} + +void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return; + + input_free_polled_device(rt2x00dev->poll_dev); + rfkill_free(rt2x00dev->rfkill); +} diff --git a/drivers/net/wireless/rt2x00/rt2x00ring.h b/drivers/net/wireless/rt2x00/rt2x00ring.h new file mode 100644 index 000000000000..1a864d32cfbd --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00ring.h @@ -0,0 +1,268 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This 
program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 ring datastructures and routines + */ + +#ifndef RT2X00RING_H +#define RT2X00RING_H + +/* + * data_desc + * Each data entry also contains a descriptor which is used by the + * device to determine what should be done with the packet and + * what the current status is. + * This structure is greatly simplified, but the descriptors + * are basically a list of little endian 32 bit values. + * Make the array by default 1 word big, this will allow us + * to use sizeof() correctly. + */ +struct data_desc { + __le32 word[1]; +}; + +/* + * rxdata_entry_desc + * Summary of information that has been read from the + * RX frame descriptor. + */ +struct rxdata_entry_desc { + int signal; + int rssi; + int ofdm; + int size; + int flags; +}; + +/* + * txdata_entry_desc + * Summary of information that should be written into the + * descriptor for sending a TX frame. + */ +struct txdata_entry_desc { + unsigned long flags; +#define ENTRY_TXDONE 1 +#define ENTRY_TXD_RTS_FRAME 2 +#define ENTRY_TXD_OFDM_RATE 3 +#define ENTRY_TXD_MORE_FRAG 4 +#define ENTRY_TXD_REQ_TIMESTAMP 5 +#define ENTRY_TXD_BURST 6 + +/* + * Queue ID. ID's 0-4 are data TX rings + */ + int queue; +#define QUEUE_MGMT 13 +#define QUEUE_RX 14 +#define QUEUE_OTHER 15 + + /* + * PLCP values. + */ + u16 length_high; + u16 length_low; + u16 signal; + u16 service; + + /* + * Timing information + */ + int aifs; + int ifs; + int cw_min; + int cw_max; +}; + +/* + * data_entry + * The data ring is a list of data entries. + * Each entry holds a reference to the descriptor + * and the data buffer. For TX rings the reference to the + * sk_buff of the packet being transmitted is also stored here. + */ +struct data_entry { + /* + * Status flags + */ + unsigned long flags; +#define ENTRY_OWNER_NIC 1 + + /* + * Ring we belong to. + */ + struct data_ring *ring; + + /* + * sk_buff for the packet which is being transmitted + * in this entry (Only used with TX related rings). + */ + struct sk_buff *skb; + + /* + * Store a ieee80211_tx_status structure in each + * ring entry, this will optimize the txdone + * handler. + */ + struct ieee80211_tx_status tx_status; + + /* + * private pointer specific to driver. + */ + void *priv; + + /* + * Data address for this entry. + */ + void *data_addr; + dma_addr_t data_dma; +}; + +/* + * data_ring + * Data rings are used by the device to send and receive packets. + * The data_addr is the base address of the data memory. + * To determine at which point in the ring we are, + * have to use the rt2x00_ring_index_*() functions. + */ +struct data_ring { + /* + * Pointer to main rt2x00dev structure where this + * ring belongs to. + */ + struct rt2x00_dev *rt2x00dev; + + /* + * Base address for the device specific data entries. + */ + struct data_entry *entry; + + /* + * TX queue statistic info. 
+ */ + struct ieee80211_tx_queue_stats_data stats; + + /* + * TX Queue parameters. + */ + struct ieee80211_tx_queue_params tx_params; + + /* + * Base address for data ring. + */ + dma_addr_t data_dma; + void *data_addr; + + /* + * Index variables. + */ + u16 index; + u16 index_done; + + /* + * Size of packet and descriptor in bytes. + */ + u16 data_size; + u16 desc_size; +}; + +/* + * Handlers to determine the address of the current device specific + * data entry, where either index or index_done points to. + */ +static inline struct data_entry *rt2x00_get_data_entry(struct data_ring *ring) +{ + return &ring->entry[ring->index]; +} + +static inline struct data_entry *rt2x00_get_data_entry_done(struct data_ring + *ring) +{ + return &ring->entry[ring->index_done]; +} + +/* + * Total ring memory + */ +static inline int rt2x00_get_ring_size(struct data_ring *ring) +{ + return ring->stats.limit * (ring->desc_size + ring->data_size); +} + +/* + * Ring index manipulation functions. + */ +static inline void rt2x00_ring_index_inc(struct data_ring *ring) +{ + ring->index++; + if (ring->index >= ring->stats.limit) + ring->index = 0; + ring->stats.len++; +} + +static inline void rt2x00_ring_index_done_inc(struct data_ring *ring) +{ + ring->index_done++; + if (ring->index_done >= ring->stats.limit) + ring->index_done = 0; + ring->stats.len--; + ring->stats.count++; +} + +static inline void rt2x00_ring_index_clear(struct data_ring *ring) +{ + ring->index = 0; + ring->index_done = 0; + ring->stats.len = 0; + ring->stats.count = 0; +} + +static inline int rt2x00_ring_empty(struct data_ring *ring) +{ + return ring->stats.len == 0; +} + +static inline int rt2x00_ring_full(struct data_ring *ring) +{ + return ring->stats.len == ring->stats.limit; +} + +static inline int rt2x00_ring_free(struct data_ring *ring) +{ + return ring->stats.limit - ring->stats.len; +} + +/* + * TX/RX Descriptor access functions. + */ +static inline void rt2x00_desc_read(struct data_desc *desc, + const u8 word, u32 *value) +{ + *value = le32_to_cpu(desc->word[word]); +} + +static inline void rt2x00_desc_write(struct data_desc *desc, + const u8 word, const u32 value) +{ + desc->word[word] = cpu_to_le32(value); +} + +#endif /* RT2X00RING_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c new file mode 100644 index 000000000000..73cc726c4046 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -0,0 +1,592 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00usb + Abstract: rt2x00 generic usb device routines. 
+ */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00usb" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "rt2x00.h" +#include "rt2x00usb.h" + +/* + * Interfacing with the HW. + */ +int rt2x00usb_vendor_request(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const u16 value, + void *buffer, const u16 buffer_length, + const int timeout) +{ + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + int status; + unsigned int i; + unsigned int pipe = + (requesttype == USB_VENDOR_REQUEST_IN) ? + usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + status = usb_control_msg(usb_dev, pipe, request, requesttype, + value, offset, buffer, buffer_length, + timeout); + if (status >= 0) + return 0; + + /* + * Check for errors + * -ENODEV: Device has disappeared, no point continuing. + * All other errors: Try again. + */ + else if (status == -ENODEV) + break; + } + + ERROR(rt2x00dev, + "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n", + request, offset, status); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request); + +int rt2x00usb_vendor_request_buff(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout) +{ + int status; + + /* + * Check for Cache availability. + */ + if (unlikely(!rt2x00dev->csr_cache || buffer_length > CSR_CACHE_SIZE)) { + ERROR(rt2x00dev, "CSR cache not available.\n"); + return -ENOMEM; + } + + if (requesttype == USB_VENDOR_REQUEST_OUT) + memcpy(rt2x00dev->csr_cache, buffer, buffer_length); + + status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype, + offset, 0, rt2x00dev->csr_cache, + buffer_length, timeout); + + if (!status && requesttype == USB_VENDOR_REQUEST_IN) + memcpy(buffer, rt2x00dev->csr_cache, buffer_length); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); + +/* + * TX data handlers. + */ +static void rt2x00usb_interrupt_txdone(struct urb *urb) +{ + struct data_entry *entry = (struct data_entry *)urb->context; + struct data_ring *ring = entry->ring; + struct rt2x00_dev *rt2x00dev = ring->rt2x00dev; + struct data_desc *txd = (struct data_desc *)entry->skb->data; + u32 word; + int tx_status; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || + !__test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags)) + return; + + rt2x00_desc_read(txd, 0, &word); + + /* + * Remove the descriptor data from the buffer. + */ + skb_pull(entry->skb, ring->desc_size); + + /* + * Obtain the status about this packet. + */ + tx_status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY; + + rt2x00lib_txdone(entry, tx_status, 0); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_ring_index_done_inc(entry->ring); + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. 
+ */ + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); +} + +int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + struct data_entry *entry = rt2x00_get_data_entry(ring); + int pipe = usb_sndbulkpipe(usb_dev, 1); + int max_packet = usb_maxpacket(usb_dev, pipe, 1); + u32 length; + + if (rt2x00_ring_full(ring)) { + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + if (test_bit(ENTRY_OWNER_NIC, &entry->flags)) { + ERROR(rt2x00dev, + "Arrived at non-free entry in the non-full queue %d.\n" + "Please file bug report to %s.\n", + control->queue, DRV_PROJECT); + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + /* + * Add the descriptor in front of the skb. + */ + skb_push(skb, ring->desc_size); + memset(skb->data, 0, ring->desc_size); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + ring->desc_size), + skb->len - ring->desc_size, control); + memcpy(&entry->tx_status.control, control, sizeof(*control)); + entry->skb = skb; + + /* + * USB devices cannot blindly pass the skb->len as the + * length of the data to usb_fill_bulk_urb. Pass the skb + * to the driver to determine what the length should be. + */ + length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, + max_packet, skb); + + /* + * Initialize URB and send the frame to the device. + */ + __set_bit(ENTRY_OWNER_NIC, &entry->flags); + usb_fill_bulk_urb(entry->priv, usb_dev, pipe, + skb->data, length, rt2x00usb_interrupt_txdone, entry); + usb_submit_urb(entry->priv, GFP_ATOMIC); + + rt2x00_ring_index_inc(ring); + + if (rt2x00_ring_full(ring)) + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); + +/* + * RX data handlers. + */ +static void rt2x00usb_interrupt_rxdone(struct urb *urb) +{ + struct data_entry *entry = (struct data_entry *)urb->context; + struct data_ring *ring = entry->ring; + struct rt2x00_dev *rt2x00dev = ring->rt2x00dev; + struct sk_buff *skb; + struct rxdata_entry_desc desc; + int frame_size; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || + !test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags)) + return; + + /* + * Check if the received data is simply too small + * to be actually valid, or if the urb is signaling + * a problem. + */ + if (urb->actual_length < entry->ring->desc_size || urb->status) + goto skip_entry; + + memset(&desc, 0x00, sizeof(desc)); + rt2x00dev->ops->lib->fill_rxdone(entry, &desc); + + /* + * Allocate a new sk buffer to replace the current one. + * If allocation fails, we should drop the current frame + * so we can recycle the existing sk buffer for the new frame. + */ + frame_size = entry->ring->data_size + entry->ring->desc_size; + skb = dev_alloc_skb(frame_size + NET_IP_ALIGN); + if (!skb) + goto skip_entry; + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, frame_size); + + /* + * Trim the skb_buffer to only contain the valid + * frame data (so ignore the device's descriptor). + */ + skb_trim(entry->skb, desc.size); + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(entry, entry->skb, &desc); + + /* + * Replace current entry's skb with the newly allocated one, + * and reinitialize the urb. 
+ */ + entry->skb = skb; + urb->transfer_buffer = entry->skb->data; + urb->transfer_buffer_length = entry->skb->len; + +skip_entry: + if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) { + __set_bit(ENTRY_OWNER_NIC, &entry->flags); + usb_submit_urb(urb, GFP_ATOMIC); + } + + rt2x00_ring_index_inc(ring); +} + +/* + * Radio handlers + */ +void rt2x00usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + struct data_ring *ring; + struct data_entry *entry; + unsigned int i; + + /* + * Initialize the TX rings + */ + txringall_for_each(rt2x00dev, ring) { + for (i = 0; i < ring->stats.limit; i++) + ring->entry[i].flags = 0; + + rt2x00_ring_index_clear(ring); + } + + /* + * Initialize and start the RX ring. + */ + rt2x00_ring_index_clear(rt2x00dev->rx); + + for (i = 0; i < rt2x00dev->rx->stats.limit; i++) { + entry = &rt2x00dev->rx->entry[i]; + + usb_fill_bulk_urb(entry->priv, usb_dev, + usb_rcvbulkpipe(usb_dev, 1), + entry->skb->data, entry->skb->len, + rt2x00usb_interrupt_rxdone, entry); + + __set_bit(ENTRY_OWNER_NIC, &entry->flags); + usb_submit_urb(entry->priv, GFP_ATOMIC); + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_enable_radio); + +void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + unsigned int i; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000, + REGISTER_TIMEOUT); + + /* + * Cancel all rings. + */ + ring_for_each(rt2x00dev, ring) { + for (i = 0; i < ring->stats.limit; i++) + usb_kill_urb(ring->entry[i].priv); + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); + +/* + * Device initialization handlers. + */ +static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + unsigned int i; + + /* + * Allocate the URB's + */ + for (i = 0; i < ring->stats.limit; i++) { + ring->entry[i].priv = usb_alloc_urb(0, GFP_KERNEL); + if (!ring->entry[i].priv) + return -ENOMEM; + } + + return 0; +} + +static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + unsigned int i; + + if (!ring->entry) + return; + + for (i = 0; i < ring->stats.limit; i++) { + usb_kill_urb(ring->entry[i].priv); + usb_free_urb(ring->entry[i].priv); + if (ring->entry[i].skb) + kfree_skb(ring->entry[i].skb); + } +} + +int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + struct sk_buff *skb; + unsigned int entry_size; + unsigned int i; + int status; + + /* + * Allocate DMA + */ + ring_for_each(rt2x00dev, ring) { + status = rt2x00usb_alloc_urb(rt2x00dev, ring); + if (status) + goto exit; + } + + /* + * For the RX ring, skb's should be allocated. + */ + entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size; + for (i = 0; i < rt2x00dev->rx->stats.limit; i++) { + skb = dev_alloc_skb(NET_IP_ALIGN + entry_size); + if (!skb) + goto exit; + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, entry_size); + + rt2x00dev->rx->entry[i].skb = skb; + } + + return 0; + +exit: + rt2x00usb_uninitialize(rt2x00dev); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_initialize); + +void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + ring_for_each(rt2x00dev, ring) + rt2x00usb_free_urb(rt2x00dev, ring); +} +EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); + +/* + * USB driver handlers. 
+ */ +static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; + + kfree(rt2x00dev->csr_cache); + rt2x00dev->csr_cache = NULL; +} + +static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + rt2x00dev->csr_cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); + if (!rt2x00dev->csr_cache) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + + rt2x00usb_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00usb_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(usb_intf); + struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info; + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + usb_dev = usb_get_dev(usb_dev); + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + retval = -ENOMEM; + goto exit_put_device; + } + + usb_set_intfdata(usb_intf, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = usb_intf; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + + retval = rt2x00usb_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00usb_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + +exit_put_device: + usb_put_dev(usb_dev); + + usb_set_intfdata(usb_intf, NULL); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00usb_probe); + +void rt2x00usb_disconnect(struct usb_interface *usb_intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00usb_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + /* + * Free the USB device data. + */ + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); +} +EXPORT_SYMBOL_GPL(rt2x00usb_disconnect); + +#ifdef CONFIG_PM +int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + retval = rt2x00lib_suspend(rt2x00dev, state); + if (retval) + return retval; + + rt2x00usb_free_reg(rt2x00dev); + + /* + * Decrease usbdev refcount. + */ + usb_put_dev(interface_to_usbdev(usb_intf)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_suspend); + +int rt2x00usb_resume(struct usb_interface *usb_intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + usb_get_dev(interface_to_usbdev(usb_intf)); + + retval = rt2x00usb_alloc_reg(rt2x00dev); + if (retval) + return retval; + + retval = rt2x00lib_resume(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00usb_free_reg(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00usb_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00pci module information. 
+ */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h new file mode 100644 index 000000000000..2681abe4d49e --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00usb.h @@ -0,0 +1,180 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00usb + Abstract: Data structures for the rt2x00usb module. + */ + +#ifndef RT2X00USB_H +#define RT2X00USB_H + +/* + * This variable should be used with the + * usb_driver structure initialization. + */ +#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) + +/* + * Register defines. + * Some registers require multiple attempts before success, + * in those cases REGISTER_BUSY_COUNT attempts should be + * taken with a REGISTER_BUSY_DELAY interval. + * For USB vendor requests we need to pass a timeout + * time in ms, for this we use the REGISTER_TIMEOUT, + * however when loading firmware a higher value is + * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE. + */ +#define REGISTER_BUSY_COUNT 5 +#define REGISTER_BUSY_DELAY 100 +#define REGISTER_TIMEOUT 500 +#define REGISTER_TIMEOUT_FIRMWARE 1000 + +/* + * Cache size + */ +#define CSR_CACHE_SIZE 8 +#define CSR_CACHE_SIZE_FIRMWARE 64 + +/* + * USB request types. + */ +#define USB_VENDOR_REQUEST ( USB_TYPE_VENDOR | USB_RECIP_DEVICE ) +#define USB_VENDOR_REQUEST_IN ( USB_DIR_IN | USB_VENDOR_REQUEST ) +#define USB_VENDOR_REQUEST_OUT ( USB_DIR_OUT | USB_VENDOR_REQUEST ) + +/* + * USB vendor commands. + */ +#define USB_DEVICE_MODE 0x01 +#define USB_SINGLE_WRITE 0x02 +#define USB_SINGLE_READ 0x03 +#define USB_MULTI_WRITE 0x06 +#define USB_MULTI_READ 0x07 +#define USB_EEPROM_WRITE 0x08 +#define USB_EEPROM_READ 0x09 +#define USB_LED_CONTROL 0x0a /* RT73USB */ +#define USB_RX_CONTROL 0x0c + +/* + * Device modes offset + */ +#define USB_MODE_RESET 0x01 +#define USB_MODE_UNPLUG 0x02 +#define USB_MODE_FUNCTION 0x03 +#define USB_MODE_TEST 0x04 +#define USB_MODE_SLEEP 0x07 /* RT73USB */ +#define USB_MODE_FIRMWARE 0x08 /* RT73USB */ +#define USB_MODE_WAKEUP 0x09 /* RT73USB */ + +/* + * Used to read/write from/to the device. + * This is the main function to communicate with the device, + * the buffer argument _must_ either be NULL or point to + * a buffer allocated by kmalloc. Failure to do so can lead + * to unexpected behavior depending on the architecture. + */ +int rt2x00usb_vendor_request(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const u16 value, + void *buffer, const u16 buffer_length, + const int timeout); + +/* + * Used to read/write from/to the device. 
+ * This function will use a previously with kmalloc allocated cache + * to communicate with the device. The contents of the buffer pointer + * will be copied to this cache when writing, or read from the cache + * when reading. + * Buffers send to rt2x00usb_vendor_request _must_ be allocated with + * kmalloc. Hence the reason for using a previously allocated cache + * which has been allocated properly. + */ +int rt2x00usb_vendor_request_buff(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout); + +/* + * Simple wrapper around rt2x00usb_vendor_request to write a single + * command to the device. Since we don't use the buffer argument we + * don't have to worry about kmalloc here. + */ +static inline int rt2x00usb_vendor_request_sw(const struct rt2x00_dev + *rt2x00dev, + const u8 request, + const u16 offset, + const u16 value, + const int timeout) +{ + return rt2x00usb_vendor_request(rt2x00dev, request, + USB_VENDOR_REQUEST_OUT, offset, + value, NULL, 0, timeout); +} + +/* + * Simple wrapper around rt2x00usb_vendor_request to read the eeprom + * from the device. Note that the eeprom argument _must_ be allocated using + * kmalloc for correct handling inside the kernel USB layer. + */ +static inline int rt2x00usb_eeprom_read(const struct rt2x00_dev *rt2x00dev, + __le16 *eeprom, const u16 lenght) +{ + int timeout = REGISTER_TIMEOUT * (lenght / sizeof(u16)); + + return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ, + USB_VENDOR_REQUEST_IN, 0x0000, + 0x0000, eeprom, lenght, timeout); +} + +/* + * Radio handlers + */ +void rt2x00usb_enable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev); + +/* + * TX data handlers. + */ +int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control); + +/* + * Device initialization handlers. + */ +int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * USB driver handlers. + */ +int rt2x00usb_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id); +void rt2x00usb_disconnect(struct usb_interface *usb_intf); +#ifdef CONFIG_PM +int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state); +int rt2x00usb_resume(struct usb_interface *usb_intf); +#else +#define rt2x00usb_suspend NULL +#define rt2x00usb_resume NULL +#endif /* CONFIG_PM */ + +#endif /* RT2X00USB_H */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c new file mode 100644 index 000000000000..01dbef19d651 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -0,0 +1,2557 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
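On top of these helpers a chipset driver typically layers its own little-endian 32-bit CSR accessors, and pulls the whole EEPROM in one call with rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE), where EEPROM_SIZE comes from the chipset header. A sketch of such accessors; the names are illustrative, not part of this patch:

static inline void example_register_read(const struct rt2x00_dev *rt2x00dev,
					 const unsigned int offset, u32 *value)
{
	__le32 reg;

	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
				      USB_VENDOR_REQUEST_IN, offset,
				      &reg, sizeof(reg), REGISTER_TIMEOUT);
	*value = le32_to_cpu(reg);
}

static inline void example_register_write(const struct rt2x00_dev *rt2x00dev,
					   const unsigned int offset,
					   const u32 value)
{
	__le32 reg = cpu_to_le32(value);

	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
				      USB_VENDOR_REQUEST_OUT, offset,
				      &reg, sizeof(reg), REGISTER_TIMEOUT);
}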
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt61pci + Abstract: rt61pci device specific routines. + Supported chipsets: RT2561, RT2561s, RT2661. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt61pci" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/eeprom_93cx6.h> + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt61pci.h" + +/* + * Register access. + * BBP and RF register require indirect register access, + * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +static u32 rt61pci_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PHY_CSR3, ®); + if (!rt2x00_get_field32(reg, PHY_CSR3_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt61pci_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt61pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_VALUE, value); + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); +} + +static void rt61pci_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt61pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt61pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); +} + +static void rt61pci_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PHY_CSR4, ®); + if (!rt2x00_get_field32(reg, PHY_CSR4_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "PHY_CSR4 register busy. 
Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, PHY_CSR4_VALUE, value); + rt2x00_set_field32(®, PHY_CSR4_NUMBER_OF_BITS, 21); + rt2x00_set_field32(®, PHY_CSR4_IF_SELECT, 0); + rt2x00_set_field32(®, PHY_CSR4_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR4, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +static void rt61pci_mcu_request(const struct rt2x00_dev *rt2x00dev, + const u8 command, const u8 token, + const u8 arg0, const u8 arg1) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CSR, ®); + + if (rt2x00_get_field32(reg, H2M_MAILBOX_CSR_OWNER)) { + ERROR(rt2x00dev, "mcu request error. " + "Request 0x%02x failed for token 0x%02x.\n", + command, token); + return; + } + + rt2x00_set_field32(®, H2M_MAILBOX_CSR_OWNER, 1); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_CMD_TOKEN, token); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG0, arg0); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG1, arg1); + rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, HOST_CMD_CSR, ®); + rt2x00_set_field32(®, HOST_CMD_CSR_HOST_COMMAND, command); + rt2x00_set_field32(®, HOST_CMD_CSR_INTERRUPT_MCU, 1); + rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); +} + +static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); +} + +static void rt61pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, E2PROM_CSR_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, E2PROM_CSR_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt61pci_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt61pci_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt61pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt61pci_read_csr, + .write = rt61pci_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt61pci_bbp_read, + .write = rt61pci_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt61pci_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +#ifdef CONFIG_RT61PCI_RFKILL +static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, 
MAC_CSR13, ®); + return rt2x00_get_field32(reg, MAC_CSR13_BIT5);; +} +#else +#define rt61pci_rfkill_poll NULL +#endif /* CONFIG_RT61PCI_RFKILL */ + +/* + * Configuration handlers. + */ +static void rt61pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, __le32 *mac) +{ + u32 tmp; + + tmp = le32_to_cpu(mac[1]); + rt2x00_set_field32(&tmp, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); + mac[1] = cpu_to_le32(tmp); + + rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR2, mac, + (2 * sizeof(__le32))); +} + +static void rt61pci_config_bssid(struct rt2x00_dev *rt2x00dev, __le32 *bssid) +{ + u32 tmp; + + tmp = le32_to_cpu(bssid[1]); + rt2x00_set_field32(&tmp, MAC_CSR5_BSS_ID_MASK, 3); + bssid[1] = cpu_to_le32(tmp); + + rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR4, bssid, + (2 * sizeof(__le32))); +} + +static void rt61pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + /* + * Clear current synchronisation setup. + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0); + + /* + * Enable synchronisation. + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, tsf_sync); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt61pci_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_RX_ACK_TIMEOUT, ack_timeout); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_PREAMBLE, + !!short_preamble); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); +} + +static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, basic_rate_mask); +} + +static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r3; + u8 r94; + u8 smart; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527)); + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); + rt61pci_bbp_write(rt2x00dev, 3, r3); + + r94 = 6; + if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) + r94 += txpower - MAX_TXPOWER; + else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) + r94 += txpower; + rt61pci_bbp_write(rt2x00dev, 94, r94); + + rt61pci_rf_write(rt2x00dev, 1, rf->rf1); + rt61pci_rf_write(rt2x00dev, 2, rf->rf2); + rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt61pci_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(200); + + rt61pci_rf_write(rt2x00dev, 1, rf->rf1); + rt61pci_rf_write(rt2x00dev, 2, rf->rf2); + 
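	/*
	 * Bit 2 of rf3 is raised for this second programming pass and
	 * cleared again in the third pass below; the 200us delays in
	 * between presumably give the synthesizer time to settle on the
	 * new channel.
	 */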
rt61pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); + rt61pci_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(200); + + rt61pci_rf_write(rt2x00dev, 1, rf->rf1); + rt61pci_rf_write(rt2x00dev, 2, rf->rf2); + rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt61pci_rf_write(rt2x00dev, 4, rf->rf4); + + msleep(1); +} + +static void rt61pci_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + struct rf_channel rf; + + rt2x00_rf_read(rt2x00dev, 1, &rf.rf1); + rt2x00_rf_read(rt2x00dev, 2, &rf.rf2); + rt2x00_rf_read(rt2x00dev, 3, &rf.rf3); + rt2x00_rf_read(rt2x00dev, 4, &rf.rf4); + + rt61pci_config_channel(rt2x00dev, &rf, txpower); +} + +static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, + !rt2x00_rf(&rt2x00dev->chip, RF5225)); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !!(rt2x00dev->curr_hwmode != HWMODE_A)); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if (rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if (rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, + !rt2x00_rf(&rt2x00dev->chip, RF2527)); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev, + const int p1, const int p2) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®); + + if (p1 != 0xff) { + rt2x00_set_field32(®, MAC_CSR13_BIT4, !!p1); + rt2x00_set_field32(®, MAC_CSR13_BIT12, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + } + if (p2 != 0xff) { + rt2x00_set_field32(®, MAC_CSR13_BIT3, !p2); + rt2x00_set_field32(®, MAC_CSR13_BIT11, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + } +} + +static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u16 eeprom; + u8 r3; + u8 r4; + u8 r77; + + 
rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY) && + rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) { + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 1); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 1); + } else if (rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY)) { + if (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED) >= 2) { + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + rt61pci_bbp_write(rt2x00dev, 77, r77); + } + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + } else if (!rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY) && + rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) { + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + switch (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED)) { + case 0: + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 1); + break; + case 1: + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 0); + break; + case 2: + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); + break; + case 3: + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + break; + } + } else if (!rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY) && + !rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) { + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + switch (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED)) { + case 0: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 1); + break; + case 1: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 0); + break; + case 2: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); + break; + case 3: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + break; + } + } + + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +struct antenna_sel { + u8 word; + /* + * value[0] -> non-LNA + * value[1] -> LNA + */ + u8 value[2]; +}; + +static const struct antenna_sel antenna_sel_a[] = { + { 96, { 0x58, 0x78 } }, + { 104, { 0x38, 0x48 } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x60, 0x60 } }, + { 97, { 0x58, 0x58 } }, + { 98, { 0x58, 0x58 } }, +}; + +static const struct antenna_sel antenna_sel_bg[] = { + { 96, { 0x48, 0x68 } }, + { 104, { 0x2c, 0x3c } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x50, 0x50 } }, + { 97, { 0x48, 0x48 } }, + { 98, { 0x48, 0x48 } }, +}; + +static void rt61pci_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + const struct antenna_sel *sel; + unsigned int lna; + unsigned int i; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, PHY_CSR0, ®); + + if (rt2x00dev->curr_hwmode == HWMODE_A) { + sel = antenna_sel_a; + lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 0); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 1); + } else { + sel = 
antenna_sel_bg; + lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 1); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 0); + } + + for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) + rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); + + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF5325)) + rt61pci_config_antenna_5x(rt2x00dev, antenna_tx, antenna_rx); + else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) + rt61pci_config_antenna_2x(rt2x00dev, antenna_tx, antenna_rx); + else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) { + if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) + rt61pci_config_antenna_2x(rt2x00dev, antenna_tx, + antenna_rx); + else + rt61pci_config_antenna_2529(rt2x00dev, antenna_tx, + antenna_rx); + } +} + +static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_SLOT_TIME, libconf->slot_time); + rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field32(®, MAC_CSR8_SIFS, libconf->sifs); + rt2x00_set_field32(®, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); + rt2x00_set_field32(®, MAC_CSR8_EIFS, libconf->eifs); + rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_ENABLE, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt61pci_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt61pci_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt61pci_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt61pci_config_antenna(rt2x00dev, libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt61pci_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. 
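 * LED control goes through the on-board MCU: the 16-bit LEDCS word is split
 * into two byte arguments and handed over via the H2M mailbox
 * (rt61pci_mcu_request) with the MCU_LED and MCU_LED_STRENGTH commands; only
 * the blink period is programmed directly through MAC_CSR14.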
+ */ +static void rt61pci_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 led_reg; + u8 arg0; + u8 arg1; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR14, ®); + rt2x00_set_field32(®, MAC_CSR14_ON_PERIOD, 70); + rt2x00_set_field32(®, MAC_CSR14_OFF_PERIOD, 30); + rt2x00pci_register_write(rt2x00dev, MAC_CSR14, reg); + + led_reg = rt2x00dev->led_reg; + rt2x00_set_field16(&led_reg, MCU_LEDCS_RADIO_STATUS, 1); + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_A_STATUS, 1); + else + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_BG_STATUS, 1); + + arg0 = led_reg & 0xff; + arg1 = (led_reg >> 8) & 0xff; + + rt61pci_mcu_request(rt2x00dev, MCU_LED, 0xff, arg0, arg1); +} + +static void rt61pci_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u16 led_reg; + u8 arg0; + u8 arg1; + + led_reg = rt2x00dev->led_reg; + rt2x00_set_field16(&led_reg, MCU_LEDCS_RADIO_STATUS, 0); + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_BG_STATUS, 0); + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_A_STATUS, 0); + + arg0 = led_reg & 0xff; + arg1 = (led_reg >> 8) & 0xff; + + rt61pci_mcu_request(rt2x00dev, MCU_LED, 0xff, arg0, arg1); +} + +static void rt61pci_activity_led(struct rt2x00_dev *rt2x00dev, int rssi) +{ + u8 led; + + if (rt2x00dev->led_mode != LED_MODE_SIGNAL_STRENGTH) + return; + + /* + * Led handling requires a positive value for the rssi, + * to do that correctly we need to add the correction. + */ + rssi += rt2x00dev->rssi_offset; + + if (rssi <= 30) + led = 0; + else if (rssi <= 39) + led = 1; + else if (rssi <= 49) + led = 2; + else if (rssi <= 53) + led = 3; + else if (rssi <= 63) + led = 4; + else + led = 5; + + rt61pci_mcu_request(rt2x00dev, MCU_LED_STRENGTH, 0xff, led, 0); +} + +/* + * Link tuning + */ +static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR1, ®); + rt2x00dev->link.false_cca = + rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); +} + +static void rt61pci_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt61pci_bbp_write(rt2x00dev, 17, 0x20); + rt2x00dev->link.vgc_level = 0x20; +} + +static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u8 r17; + u8 up_bound; + u8 low_bound; + + /* + * Update Led strength + */ + rt61pci_activity_led(rt2x00dev, rssi); + + rt61pci_bbp_read(rt2x00dev, 17, &r17); + + /* + * Determine r17 bounds. 
+ */ + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + low_bound = 0x28; + up_bound = 0x48; + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } else { + low_bound = 0x20; + up_bound = 0x40; + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } + + /* + * Special big-R17 for very short distance + */ + if (rssi >= -35) { + if (r17 != 0x60) + rt61pci_bbp_write(rt2x00dev, 17, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (rssi >= -58) { + if (r17 != up_bound) + rt61pci_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * Special big-R17 for middle-short distance + */ + if (rssi >= -66) { + low_bound += 0x10; + if (r17 != low_bound) + rt61pci_bbp_write(rt2x00dev, 17, low_bound); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (rssi >= -74) { + low_bound += 0x08; + if (r17 != low_bound) + rt61pci_bbp_write(rt2x00dev, 17, low_bound); + return; + } + + /* + * Special case: Change up_bound based on the rssi. + * Lower up_bound when rssi is weaker then -74 dBm. + */ + up_bound -= 2 * (-74 - rssi); + if (low_bound > up_bound) + up_bound = low_bound; + + if (r17 > up_bound) { + rt61pci_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * r17 does not yet exceed upper limit, continue and base + * the r17 tuning on the false CCA count. + */ + if (rt2x00dev->link.false_cca > 512 && r17 < up_bound) { + if (++r17 > up_bound) + r17 = up_bound; + rt61pci_bbp_write(rt2x00dev, 17, r17); + } else if (rt2x00dev->link.false_cca < 100 && r17 > low_bound) { + if (--r17 < low_bound) + r17 = low_bound; + rt61pci_bbp_write(rt2x00dev, 17, r17); + } +} + +/* + * Firmware name function. + */ +static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + char *fw_name; + + switch (rt2x00dev->chip.rt) { + case RT2561: + fw_name = FIRMWARE_RT2561; + break; + case RT2561s: + fw_name = FIRMWARE_RT2561s; + break; + case RT2661: + fw_name = FIRMWARE_RT2661; + break; + default: + fw_name = NULL; + break; + } + + return fw_name; +} + +/* + * Initialization functions. + */ +static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data, + const size_t len) +{ + int i; + u32 reg; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < 100; i++) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg) + break; + msleep(1); + } + + if (!reg) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + /* + * Prepare MCU and mailbox for firmware loading. + */ + reg = 0; + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); + rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, 0); + + /* + * Write firmware to device. 
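 * The MCU is held in reset with its firmware bank selected while the image
 * is copied in; the bank is then deselected, the reset is released, and the
 * MCU_CNTL_CSR_READY bit is polled (up to ~100ms) before the MAC and BBP
 * are reset below.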
+ */ + reg = 0; + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 1); + rt2x00_set_field32(®, MCU_CNTL_CSR_SELECT_BANK, 1); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, + data, len); + + rt2x00_set_field32(®, MCU_CNTL_CSR_SELECT_BANK, 0); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + for (i = 0; i < 100; i++) { + rt2x00pci_register_read(rt2x00dev, MCU_CNTL_CSR, ®); + if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY)) + break; + msleep(1); + } + + if (i == 100) { + ERROR(rt2x00dev, "MCU Control register not ready.\n"); + return -EBUSY; + } + + /* + * Reset MAC and BBP registers. + */ + reg = 0; + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static void rt61pci_init_rxring(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_desc *rxd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + rxd = ring->entry[i].priv; + + rt2x00_desc_read(rxd, 5, &word); + rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(rxd, 5, word); + + rt2x00_desc_read(rxd, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_clear(rt2x00dev->rx); +} + +static void rt61pci_init_txring(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_desc *txd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + txd = ring->entry[i].priv; + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_PID_TYPE, queue); + rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, i); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 6, &word); + rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(txd, 6, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(txd, 0, word); + } + + rt2x00_ring_index_clear(ring); +} + +static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize rings. + */ + rt61pci_init_rxring(rt2x00dev); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA2); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA3); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA4); + + /* + * Initialize registers. 
+ */ + rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, ®); + rt2x00_set_field32(®, TX_RING_CSR0_AC0_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC1_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC2_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC3_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].stats.limit); + rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, ®); + rt2x00_set_field32(®, TX_RING_CSR1_MGMT_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR1_TXD_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size / + 4); + rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, ®); + rt2x00_set_field32(®, AC0_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); + rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, ®); + rt2x00_set_field32(®, AC1_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); + rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, ®); + rt2x00_set_field32(®, AC2_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].data_dma); + rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, ®); + rt2x00_set_field32(®, AC3_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].data_dma); + rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MGMT_BASE_CSR, ®); + rt2x00_set_field32(®, MGMT_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].data_dma); + rt2x00pci_register_write(rt2x00dev, MGMT_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, ®); + rt2x00_set_field32(®, RX_RING_CSR_RING_SIZE, + rt2x00dev->rx->stats.limit); + rt2x00_set_field32(®, RX_RING_CSR_RXD_SIZE, + rt2x00dev->rx->desc_size / 4); + rt2x00_set_field32(®, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); + rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, ®); + rt2x00_set_field32(®, RX_BASE_CSR_RING_REGISTER, + rt2x00dev->rx->data_dma); + rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, ®); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC0, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC1, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC2, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC3, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_MGMT, 0); + rt2x00pci_register_write(rt2x00dev, TX_DMA_DST_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, LOAD_TX_RING_CSR, ®); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_MGMT, 1); + rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®); + rt2x00_set_field32(®, RX_CNTL_CSR_LOAD_RXD, 1); + rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); + + return 0; +} + +static int rt61pci_init_registers(struct rt2x00_dev 
*rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_AUTO_TX_SEQ, 1); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, TXRX_CSR0_TX_WITHOUT_WAITING, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR1, reg); + + /* + * CCK TXD BBP registers + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0, 13); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1, 12); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2, 11); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3, 10); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR2, reg); + + /* + * OFDM TXD BBP registers + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR3, ®); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0, 7); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1, 6); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2, 5); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_6MBS, 59); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_9MBS, 53); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_12MBS, 49); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_18MBS, 46); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_24MBS, 44); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_36MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_48MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_54MBS, 42); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_CW_SELECT, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x0000071c); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, 0x0000e000); + + /* + * Invalidate all Shared Keys (SEC_CSR0), + * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) + */ + rt2x00pci_register_write(rt2x00dev, SEC_CSR0, 0x00000000); + rt2x00pci_register_write(rt2x00dev, SEC_CSR1, 0x00000000); + rt2x00pci_register_write(rt2x00dev, SEC_CSR5, 0x00000000); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR1, 0x000023b0); + rt2x00pci_register_write(rt2x00dev, PHY_CSR5, 0x060a100c); + rt2x00pci_register_write(rt2x00dev, PHY_CSR6, 0x00080606); + rt2x00pci_register_write(rt2x00dev, PHY_CSR7, 
0x00000a08); + + rt2x00pci_register_write(rt2x00dev, PCI_CFG_CSR, 0x28ca4404); + + rt2x00pci_register_write(rt2x00dev, TEST_MODE_CSR, 0x00000200); + + rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); + + rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, ®); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC0_TX_OP, 0); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC1_TX_OP, 0); + rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, ®); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC2_TX_OP, 192); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC3_TX_OP, 48); + rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg); + + /* + * We must clear the error counters. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00pci_register_read(rt2x00dev, STA_CSR1, ®); + rt2x00pci_register_read(rt2x00dev, STA_CSR2, ®); + + /* + * Reset MAC and BBP registers. + */ + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt61pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt61pci_bbp_write(rt2x00dev, 3, 0x00); + rt61pci_bbp_write(rt2x00dev, 15, 0x30); + rt61pci_bbp_write(rt2x00dev, 21, 0xc8); + rt61pci_bbp_write(rt2x00dev, 22, 0x38); + rt61pci_bbp_write(rt2x00dev, 23, 0x06); + rt61pci_bbp_write(rt2x00dev, 24, 0xfe); + rt61pci_bbp_write(rt2x00dev, 25, 0x0a); + rt61pci_bbp_write(rt2x00dev, 26, 0x0d); + rt61pci_bbp_write(rt2x00dev, 34, 0x12); + rt61pci_bbp_write(rt2x00dev, 37, 0x07); + rt61pci_bbp_write(rt2x00dev, 39, 0xf8); + rt61pci_bbp_write(rt2x00dev, 41, 0x60); + rt61pci_bbp_write(rt2x00dev, 53, 0x10); + rt61pci_bbp_write(rt2x00dev, 54, 0x18); + rt61pci_bbp_write(rt2x00dev, 60, 0x10); + rt61pci_bbp_write(rt2x00dev, 61, 0x04); + rt61pci_bbp_write(rt2x00dev, 62, 0x04); + rt61pci_bbp_write(rt2x00dev, 75, 0xfe); + rt61pci_bbp_write(rt2x00dev, 86, 0xfe); + rt61pci_bbp_write(rt2x00dev, 88, 0xfe); + rt61pci_bbp_write(rt2x00dev, 90, 0x0f); + rt61pci_bbp_write(rt2x00dev, 99, 0x00); + rt61pci_bbp_write(rt2x00dev, 102, 0x16); + rt61pci_bbp_write(rt2x00dev, 107, 0x04); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt61pci_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End 
initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); + rt2x00_set_field32(®, INT_MASK_CSR_TXDONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_RXDONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_ENABLE_MITIGATION, mask); + rt2x00_set_field32(®, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); + rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, ®); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_0, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_1, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_2, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_3, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_4, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_5, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_6, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_7, mask); + rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); +} + +static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize all registers. + */ + if (rt61pci_init_rings(rt2x00dev) || + rt61pci_init_registers(rt2x00dev) || + rt61pci_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + /* + * Enable interrupts. + */ + rt61pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); + + /* + * Enable RX. + */ + rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®); + rt2x00_set_field32(®, RX_CNTL_CSR_ENABLE_RX_DMA, 1); + rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); + + /* + * Enable LED + */ + rt61pci_enable_led(rt2x00dev); + + return 0; +} + +static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Disable LED + */ + rt61pci_disable_led(rt2x00dev); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818); + + /* + * Disable synchronisation. + */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); + + /* + * Cancel RX and TX. + */ + rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, ®); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC0, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC1, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC2, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC3, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_MGMT, 1); + rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); + + /* + * Disable interrupts. 
+ */ + rt61pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_OFF); +} + +static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char current_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); + rt2x00_set_field32(®, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); + rt2x00_set_field32(®, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); + current_state = + rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + if (current_state == !put_to_sleep) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state %d.\n", !put_to_sleep, current_state); + + return -EBUSY; +} + +static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt61pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt61pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt61pci_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt61pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. 
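 * Word 0 is written last: it holds the OWNER_NIC and VALID bits, so the
 * descriptor is only handed over to the hardware once all other words are
 * in place.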
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue); + rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_TX_POWER, + TXPOWER_TO_DEV(control->power_level)); + rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 11, &word); + rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, length); + rt2x00_desc_write(txd, 11, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_BURST, + test_bit(ENTRY_TXD_BURST, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue == IEEE80211_TX_QUEUE_BEACON) { + /* + * For Wi-Fi faily generated beacons between participating + * stations. Set TBTT phase adaptive adjustment step to 8us. 
+ */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, ®); + if (queue == IEEE80211_TX_QUEUE_DATA0) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC0, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA1) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC1, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA2) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC2, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA3) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC3, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA4) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_MGMT, 1); + rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); +} + +/* + * RX control handlers + */ +static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) +{ + u16 eeprom; + u8 offset; + u8 lna; + + lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); + switch (lna) { + case 3: + offset = 90; + break; + case 2: + offset = 74; + break; + case 1: + offset = 64; + break; + default: + return 0; + } + + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) + offset += 14; + + if (lna == 3 || lna == 2) + offset += 10; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); + } else { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + offset += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); + } + + return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; +} + +static void rt61pci_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = entry->priv; + u32 word0; + u32 word1; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + desc->rssi = rt61pci_agc_to_rssi(entry->ring->rt2x00dev, word1); + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + return; +} + +/* + * Interrupt functions. + */ +static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + struct data_entry *entry; + struct data_desc *txd; + u32 word; + u32 reg; + u32 old_reg; + int type; + int index; + int tx_status; + int retry; + + /* + * During each loop we will compare the freshly read + * STA_CSR4 register value with the value read from + * the previous loop. If the 2 values are equal then + * we should stop processing because the chance it + * quite big that the device has been unplugged and + * we risk going into an endless loop. + */ + old_reg = 0; + + while (1) { + rt2x00pci_register_read(rt2x00dev, STA_CSR4, ®); + if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) + break; + + if (old_reg == reg) + break; + old_reg = reg; + + /* + * Skip this entry when it contains an invalid + * ring identication number. 
+ */ + type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); + ring = rt2x00lib_get_ring(rt2x00dev, type); + if (unlikely(!ring)) + continue; + + /* + * Skip this entry when it contains an invalid + * index number. + */ + index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE); + if (unlikely(index >= ring->stats.limit)) + continue; + + entry = &ring->entry[index]; + txd = entry->priv; + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + return; + + /* + * Obtain the status about this packet. + */ + tx_status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT); + retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); + + rt2x00lib_txdone(entry, tx_status, retry); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_desc_write(txd, 0, word); + rt2x00_ring_index_done_inc(entry->ring); + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); + } +} + +static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg_mcu; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®_mcu); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); + + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + if (!reg && !reg_mcu) + return IRQ_NONE; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 2 - Tx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE)) + rt61pci_txdone(rt2x00dev); + + /* + * 3 - Handle MCU command done. + */ + if (reg_mcu) + rt2x00pci_register_write(rt2x00dev, + M2H_CMD_DONE_CSR, 0xffffffff); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + s8 value; + + rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt61pci_eepromregister_read; + eeprom.register_write = rt61pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, E2PROM_CSR_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. 
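 * An invalid MAC address is replaced with a random one, words that read
 * back as 0xffff (blank or corrupt EEPROM) are overwritten with
 * conservative defaults, and out-of-range RSSI offsets are reset to 0 so
 * the driver can still come up.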
+ */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5225); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_TX_RX_FIXED, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); + EEPROM(rt2x00dev, "Led: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + } + + return 0; +} + +static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + u16 device; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. 
+ * To determine the RT chip we have to read the + * PCI header of the device. + */ + pci_read_config_word(rt2x00dev_pci(rt2x00dev), + PCI_CONFIG_HEADER_DEVICE, &device); + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg); + rt2x00_set_chip(rt2x00dev, device, value, reg); + + if (!rt2x00_rf(&rt2x00dev->chip, RF5225) && + !rt2x00_rf(&rt2x00dev->chip, RF5325) && + !rt2x00_rf(&rt2x00dev->chip, RF2527) && + !rt2x00_rf(&rt2x00dev->chip, RF2529)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Read the Frame type. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) + __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); + + /* + * Determine the number of antennas. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_NUM) == 2) + __set_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags); + + /* + * Detect if this device has a hardware controlled radio. + */ +#ifdef CONFIG_RT61PCI_RFKILL + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); +#endif /* CONFIG_RT61PCI_RFKILL */ + + /* + * Read frequency offset and RF programming sequence. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ)) + __set_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags); + + rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); + + /* + * Read external LNA information. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) + __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) + __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + /* + * Store LED settings for correct LED behaviour. + * If the eeprom value is invalid, + * switch to default LED mode.
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); + + rt2x00dev->led_mode = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); + + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LED_MODE, + rt2x00dev->led_mode); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_0, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_0)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_1, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_1)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_2, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_2)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_3, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_3)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_4, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_4)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_ACT, + rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_BG, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_G)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_A, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_A)); + + return 0; +} + +/* + * RF value list for RF5225 & RF5325 + * Supports: 2.4 GHz & 5.2 GHz, rf_sequence disabled + */ +static const struct rf_channel rf_vals_noseq[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, + { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, + { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, + { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, + { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, + { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, + { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, + { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, + { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, + { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, + { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, + { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, + { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, + { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, + { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, + { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, + { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, + + /* 802.11 UNII */ + { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, + { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, + { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, + { 157, 
0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, + { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, + { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, +}; + +/* + * RF value list for RF5225 & RF5325 + * Supports: 2.4 GHz & 5.2 GHz, rf_sequence enabled + */ +static const struct rf_channel rf_vals_seq[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002cd4, 0x0004481a, 0x00098455, 0x000c0a03 }, + { 40, 0x00002cd0, 0x00044682, 0x00098455, 0x000c0a03 }, + { 44, 0x00002cd0, 0x00044686, 0x00098455, 0x000c0a1b }, + { 48, 0x00002cd0, 0x0004468e, 0x00098655, 0x000c0a0b }, + { 52, 0x00002cd0, 0x00044692, 0x00098855, 0x000c0a23 }, + { 56, 0x00002cd0, 0x0004469a, 0x00098c55, 0x000c0a13 }, + { 60, 0x00002cd0, 0x000446a2, 0x00098e55, 0x000c0a03 }, + { 64, 0x00002cd0, 0x000446a6, 0x00099255, 0x000c0a1b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002cd4, 0x0004489a, 0x000b9855, 0x000c0a03 }, + { 104, 0x00002cd4, 0x000448a2, 0x000b9855, 0x000c0a03 }, + { 108, 0x00002cd4, 0x000448aa, 0x000b9855, 0x000c0a03 }, + { 112, 0x00002cd4, 0x000448b2, 0x000b9a55, 0x000c0a03 }, + { 116, 0x00002cd4, 0x000448ba, 0x000b9a55, 0x000c0a03 }, + { 120, 0x00002cd0, 0x00044702, 0x000b9a55, 0x000c0a03 }, + { 124, 0x00002cd0, 0x00044706, 0x000b9a55, 0x000c0a1b }, + { 128, 0x00002cd0, 0x0004470e, 0x000b9c55, 0x000c0a0b }, + { 132, 0x00002cd0, 0x00044712, 0x000b9c55, 0x000c0a23 }, + { 136, 0x00002cd0, 0x0004471a, 0x000b9e55, 0x000c0a13 }, + + /* 802.11 UNII */ + { 140, 0x00002cd0, 0x00044722, 0x000b9e55, 0x000c0a03 }, + { 149, 0x00002cd0, 0x0004472e, 0x000ba255, 0x000c0a1b }, + { 153, 0x00002cd0, 0x00044736, 0x000ba255, 0x000c0a0b }, + { 157, 0x00002cd4, 0x0004490a, 0x000ba255, 0x000c0a17 }, + { 161, 0x00002cd4, 0x00044912, 0x000ba255, 0x000c0a17 }, + { 165, 0x00002cd4, 0x0004491a, 0x000ba255, 0x000c0a17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000c0a0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000c0a13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000c0a1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 }, +}; + +static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. 
+ */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = 0; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 5; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) { + spec->num_channels = 14; + spec->channels = rf_vals_noseq; + } else { + spec->num_channels = 14; + spec->channels = rf_vals_seq; + } + + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF5325)) { + spec->num_modes = 3; + spec->num_channels = ARRAY_SIZE(rf_vals_seq); + + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + spec->tx_power_a = txpower; + } +} + +static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt61pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt61pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt61pci_probe_hw_mode(rt2x00dev); + + /* + * This device requires firmware + */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt61pci_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. 
+ */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BORADCAST, 0); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt61pci_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); + rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, long_retry); + rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, short_retry); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); + + return 0; +} + +static u64 rt61pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR13, &reg); + tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, TXRX_CSR12, &reg); + tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); + + return tsf; +} + +static void rt61pci_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00pci_register_write(rt2x00dev, TXRX_CSR12, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR13, 0); +} + +static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Just in case the ieee80211 stack doesn't set this; + * we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * We need to place the descriptor in front of the + * beacon frame. + */ + if (skb_headroom(skb) < TXD_DESC_SIZE) { + if (pskb_expand_head(skb, TXD_DESC_SIZE, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + } + + /* + * First we create the beacon. + */ + skb_push(skb, TXD_DESC_SIZE); + memset(skb->data, 0, TXD_DESC_SIZE); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + TXD_DESC_SIZE), + skb->len - TXD_DESC_SIZE, control); + + /* + * Write entire beacon with descriptor to register, + * and kick the beacon generator.
+ */ + rt2x00pci_register_multiwrite(rt2x00dev, HW_BEACON_BASE0, + skb->data, skb->len); + rt61pci_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + return 0; +} + +static const struct ieee80211_ops rt61pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt61pci_configure_filter, + .get_stats = rt2x00mac_get_stats, + .set_retry_limit = rt61pci_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt61pci_get_tsf, + .reset_tsf = rt61pci_reset_tsf, + .beacon_update = rt61pci_beacon_update, +}; + +static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { + .irq_handler = rt61pci_interrupt, + .probe_hw = rt61pci_probe_hw, + .get_firmware_name = rt61pci_get_firmware_name, + .load_firmware = rt61pci_load_firmware, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .set_device_state = rt61pci_set_device_state, + .rfkill_poll = rt61pci_rfkill_poll, + .link_stats = rt61pci_link_stats, + .reset_tuner = rt61pci_reset_tuner, + .link_tuner = rt61pci_link_tuner, + .write_tx_desc = rt61pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .kick_tx_queue = rt61pci_kick_tx_queue, + .fill_rxdone = rt61pci_fill_rxdone, + .config_mac_addr = rt61pci_config_mac_addr, + .config_bssid = rt61pci_config_bssid, + .config_type = rt61pci_config_type, + .config_preamble = rt61pci_config_preamble, + .config = rt61pci_config, +}; + +static const struct rt2x00_ops rt61pci_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt61pci_rt2x00_ops, + .hw = &rt61pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt61pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT61pci module information. 
+ */ +static struct pci_device_id rt61pci_device_table[] = { + /* RT2561s */ + { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, + /* RT2561 v2 */ + { PCI_DEVICE(0x1814, 0x0302), PCI_DEVICE_DATA(&rt61pci_ops) }, + /* RT2661 */ + { PCI_DEVICE(0x1814, 0x0401), PCI_DEVICE_DATA(&rt61pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 " + "PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt61pci_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2561); +MODULE_FIRMWARE(FIRMWARE_RT2561s); +MODULE_FIRMWARE(FIRMWARE_RT2661); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt61pci_driver = { + .name = DRV_NAME, + .id_table = rt61pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt61pci_init(void) +{ + return pci_register_driver(&rt61pci_driver); +} + +static void __exit rt61pci_exit(void) +{ + pci_unregister_driver(&rt61pci_driver); +} + +module_init(rt61pci_init); +module_exit(rt61pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h new file mode 100644 index 000000000000..6721d7dd32bc --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -0,0 +1,1457 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt61pci + Abstract: Data structures and registers for the rt61pci module. + Supported chipsets: RT2561, RT2561s, RT2661. + */ + +#ifndef RT61PCI_H +#define RT61PCI_H + +/* + * RF chip defines. + */ +#define RF5225 0x0001 +#define RF5325 0x0002 +#define RF2527 0x0003 +#define RF2529 0x0004 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x3000 +#define CSR_REG_SIZE 0x04b0 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_SIZE 0x0080 +#define RF_SIZE 0x0014 + +/* + * PCI registers. + */ + +/* + * PCI Configuration Header + */ +#define PCI_CONFIG_HEADER_VENDOR 0x0000 +#define PCI_CONFIG_HEADER_DEVICE 0x0002 + +/* + * HOST_CMD_CSR: For HOST to interrupt embedded processor + */ +#define HOST_CMD_CSR 0x0008 +#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x0000007f) +#define HOST_CMD_CSR_INTERRUPT_MCU FIELD32(0x00000080) + +/* + * MCU_CNTL_CSR + * SELECT_BANK: Select 8051 program bank. + * RESET: Enable 8051 reset state. + * READY: Ready state for 8051. 
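An aside on the FIELD32()/FIELD16() values used throughout this header and throughout rt61pci.c above: each one is simply the bit mask of a register field, and the rt2x00_get_field32()/rt2x00_set_field32() calls in the driver amount to mask-and-shift accesses. The real helpers live in rt2x00reg.h and are not part of this hunk; the self-contained sketch below (with the field passed as a plain non-zero mask and hypothetical example_* names) only illustrates the semantics.

#include <stdint.h>

/* Extract a field: mask, then shift down by the mask's lowest set bit. */
static inline uint32_t example_get_field32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / (mask & -mask);
}

/* Insert a field: clear the masked bits, then place the value at the mask's offset. */
static inline void example_set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
	*reg = (*reg & ~mask) | ((value * (mask & -mask)) & mask);
}

For instance, with the STA_CSR4_RETRY_COUNT mask 0x000000f0, example_get_field32(reg, 0x000000f0) returns bits 4-7 of the TX result word, which is how rt61pci_txdone() above reads the retry count.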
+ */ +#define MCU_CNTL_CSR 0x000c +#define MCU_CNTL_CSR_SELECT_BANK FIELD32(0x00000001) +#define MCU_CNTL_CSR_RESET FIELD32(0x00000002) +#define MCU_CNTL_CSR_READY FIELD32(0x00000004) + +/* + * SOFT_RESET_CSR + */ +#define SOFT_RESET_CSR 0x0010 + +/* + * MCU_INT_SOURCE_CSR: MCU interrupt source/mask register. + */ +#define MCU_INT_SOURCE_CSR 0x0014 +#define MCU_INT_SOURCE_CSR_0 FIELD32(0x00000001) +#define MCU_INT_SOURCE_CSR_1 FIELD32(0x00000002) +#define MCU_INT_SOURCE_CSR_2 FIELD32(0x00000004) +#define MCU_INT_SOURCE_CSR_3 FIELD32(0x00000008) +#define MCU_INT_SOURCE_CSR_4 FIELD32(0x00000010) +#define MCU_INT_SOURCE_CSR_5 FIELD32(0x00000020) +#define MCU_INT_SOURCE_CSR_6 FIELD32(0x00000040) +#define MCU_INT_SOURCE_CSR_7 FIELD32(0x00000080) +#define MCU_INT_SOURCE_CSR_TWAKEUP FIELD32(0x00000100) +#define MCU_INT_SOURCE_CSR_TBTT_EXPIRE FIELD32(0x00000200) + +/* + * MCU_INT_MASK_CSR: MCU interrupt source/mask register. + */ +#define MCU_INT_MASK_CSR 0x0018 +#define MCU_INT_MASK_CSR_0 FIELD32(0x00000001) +#define MCU_INT_MASK_CSR_1 FIELD32(0x00000002) +#define MCU_INT_MASK_CSR_2 FIELD32(0x00000004) +#define MCU_INT_MASK_CSR_3 FIELD32(0x00000008) +#define MCU_INT_MASK_CSR_4 FIELD32(0x00000010) +#define MCU_INT_MASK_CSR_5 FIELD32(0x00000020) +#define MCU_INT_MASK_CSR_6 FIELD32(0x00000040) +#define MCU_INT_MASK_CSR_7 FIELD32(0x00000080) +#define MCU_INT_MASK_CSR_TWAKEUP FIELD32(0x00000100) +#define MCU_INT_MASK_CSR_TBTT_EXPIRE FIELD32(0x00000200) + +/* + * PCI_USEC_CSR + */ +#define PCI_USEC_CSR 0x001c + +/* + * Security key table memory. + * 16 entries 32-byte for shared key table + * 64 entries 32-byte for pairwise key table + * 64 entries 8-byte for pairwise ta key table + */ +#define SHARED_KEY_TABLE_BASE 0x1000 +#define PAIRWISE_KEY_TABLE_BASE 0x1200 +#define PAIRWISE_TA_TABLE_BASE 0x1a00 + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct hw_pairwise_ta_entry { + u8 address[6]; + u8 reserved[2]; +} __attribute__ ((packed)); + +/* + * Other on-chip shared memory space. + */ +#define HW_CIS_BASE 0x2000 +#define HW_NULL_BASE 0x2b00 + +/* + * Since NULL frame won't be that long (256 byte), + * We steal 16 tail bytes to save debugging settings. + */ +#define HW_DEBUG_SETTING_BASE 0x2bf0 + +/* + * On-chip BEACON frame space. + */ +#define HW_BEACON_BASE0 0x2c00 +#define HW_BEACON_BASE1 0x2d00 +#define HW_BEACON_BASE2 0x2e00 +#define HW_BEACON_BASE3 0x2f00 +#define HW_BEACON_OFFSET 0x0100 + +/* + * HOST-MCU shared memory. + */ + +/* + * H2M_MAILBOX_CSR: Host-to-MCU Mailbox. + */ +#define H2M_MAILBOX_CSR 0x2100 +#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff) +#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00) +#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000) +#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000) + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD16(0x001f) +#define MCU_LEDCS_RADIO_STATUS FIELD16(0x0020) +#define MCU_LEDCS_LINK_BG_STATUS FIELD16(0x0040) +#define MCU_LEDCS_LINK_A_STATUS FIELD16(0x0080) +#define MCU_LEDCS_POLARITY_GPIO_0 FIELD16(0x0100) +#define MCU_LEDCS_POLARITY_GPIO_1 FIELD16(0x0200) +#define MCU_LEDCS_POLARITY_GPIO_2 FIELD16(0x0400) +#define MCU_LEDCS_POLARITY_GPIO_3 FIELD16(0x0800) +#define MCU_LEDCS_POLARITY_GPIO_4 FIELD16(0x1000) +#define MCU_LEDCS_POLARITY_ACT FIELD16(0x2000) +#define MCU_LEDCS_POLARITY_READY_BG FIELD16(0x4000) +#define MCU_LEDCS_POLARITY_READY_A FIELD16(0x8000) + +/* + * M2H_CMD_DONE_CSR. 
+ */ +#define M2H_CMD_DONE_CSR 0x2104 + +/* + * MCU_TXOP_ARRAY_BASE. + */ +#define MCU_TXOP_ARRAY_BASE 0x2110 + +/* + * MAC Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x3000 + +/* + * MAC_CSR1: System control register. + * SOFT_RESET: Software reset bit, 1: reset, 0: normal. + * BBP_RESET: Hardware reset BBP. + * HOST_READY: Host is ready after initialization, 1: ready. + */ +#define MAC_CSR1 0x3004 +#define MAC_CSR1_SOFT_RESET FIELD32(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD32(0x00000002) +#define MAC_CSR1_HOST_READY FIELD32(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x3008 +#define MAC_CSR2_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR2_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR2_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR2_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x300c +#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR3_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR3_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR4: BSSID register 0. + */ +#define MAC_CSR4 0x3010 +#define MAC_CSR4_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR4_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR4_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR4_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR5: BSSID register 1. + * BSS_ID_MASK: 3: one BSSID, 0: 4 BSSID, 2 or 1: 2 BSSID. + */ +#define MAC_CSR5 0x3014 +#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR5_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR5_BSS_ID_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR6: Maximum frame length register. + */ +#define MAC_CSR6 0x3018 +#define MAC_CSR6_MAX_FRAME_UNIT FIELD32(0x00000fff) + +/* + * MAC_CSR7: Reserved + */ +#define MAC_CSR7 0x301c + +/* + * MAC_CSR8: SIFS/EIFS register. + * All units are in US. + */ +#define MAC_CSR8 0x3020 +#define MAC_CSR8_SIFS FIELD32(0x000000ff) +#define MAC_CSR8_SIFS_AFTER_RX_OFDM FIELD32(0x0000ff00) +#define MAC_CSR8_EIFS FIELD32(0xffff0000) + +/* + * MAC_CSR9: Back-Off control register. + * SLOT_TIME: Slot time, default is 20us for 802.11BG. + * CWMIN: Bit for Cwmin. default Cwmin is 31 (2^5 - 1). + * CWMAX: Bit for Cwmax, default Cwmax is 1023 (2^10 - 1). + * CW_SELECT: 1: CWmin/Cwmax select from register, 0:select from TxD. + */ +#define MAC_CSR9 0x3024 +#define MAC_CSR9_SLOT_TIME FIELD32(0x000000ff) +#define MAC_CSR9_CWMIN FIELD32(0x00000f00) +#define MAC_CSR9_CWMAX FIELD32(0x0000f000) +#define MAC_CSR9_CW_SELECT FIELD32(0x00010000) + +/* + * MAC_CSR10: Power state configuration. + */ +#define MAC_CSR10 0x3028 + +/* + * MAC_CSR11: Power saving transition time register. + * DELAY_AFTER_TBCN: Delay after Tbcn expired in units of TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * WAKEUP_LATENCY: In unit of TU. + */ +#define MAC_CSR11 0x302c +#define MAC_CSR11_DELAY_AFTER_TBCN FIELD32(0x000000ff) +#define MAC_CSR11_TBCN_BEFORE_WAKEUP FIELD32(0x00007f00) +#define MAC_CSR11_AUTOWAKE FIELD32(0x00008000) +#define MAC_CSR11_WAKEUP_LATENCY FIELD32(0x000f0000) + +/* + * MAC_CSR12: Manual power control / status register (merge CSR20 & PWRCSR1). + * CURRENT_STATE: 0:sleep, 1:awake. + * FORCE_WAKEUP: This has higher priority than PUT_TO_SLEEP. + * BBP_CURRENT_STATE: 0: BBP sleep, 1: BBP awake. 
+ */ +#define MAC_CSR12 0x3030 +#define MAC_CSR12_CURRENT_STATE FIELD32(0x00000001) +#define MAC_CSR12_PUT_TO_SLEEP FIELD32(0x00000002) +#define MAC_CSR12_FORCE_WAKEUP FIELD32(0x00000004) +#define MAC_CSR12_BBP_CURRENT_STATE FIELD32(0x00000008) + +/* + * MAC_CSR13: GPIO. + */ +#define MAC_CSR13 0x3034 +#define MAC_CSR13_BIT0 FIELD32(0x00000001) +#define MAC_CSR13_BIT1 FIELD32(0x00000002) +#define MAC_CSR13_BIT2 FIELD32(0x00000004) +#define MAC_CSR13_BIT3 FIELD32(0x00000008) +#define MAC_CSR13_BIT4 FIELD32(0x00000010) +#define MAC_CSR13_BIT5 FIELD32(0x00000020) +#define MAC_CSR13_BIT6 FIELD32(0x00000040) +#define MAC_CSR13_BIT7 FIELD32(0x00000080) +#define MAC_CSR13_BIT8 FIELD32(0x00000100) +#define MAC_CSR13_BIT9 FIELD32(0x00000200) +#define MAC_CSR13_BIT10 FIELD32(0x00000400) +#define MAC_CSR13_BIT11 FIELD32(0x00000800) +#define MAC_CSR13_BIT12 FIELD32(0x00001000) + +/* + * MAC_CSR14: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * HW_LED: HW TX activity, 1: normal OFF, 0: normal ON. + * SW_LED: s/w LED, 1: ON, 0: OFF. + * HW_LED_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR14 0x3038 +#define MAC_CSR14_ON_PERIOD FIELD32(0x000000ff) +#define MAC_CSR14_OFF_PERIOD FIELD32(0x0000ff00) +#define MAC_CSR14_HW_LED FIELD32(0x00010000) +#define MAC_CSR14_SW_LED FIELD32(0x00020000) +#define MAC_CSR14_HW_LED_POLARITY FIELD32(0x00040000) +#define MAC_CSR14_SW_LED2 FIELD32(0x00080000) + +/* + * MAC_CSR15: NAV control. + */ +#define MAC_CSR15 0x303c + +/* + * TXRX control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: TX/RX configuration register. + * TSF_OFFSET: Default is 24. + * AUTO_TX_SEQ: 1: ASIC auto replace sequence nr in outgoing frame. + * DISABLE_RX: Disable Rx engine. + * DROP_CRC: Drop CRC error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TO_DS: Drop fram ToDs bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MULTICAST: Drop multicast frames. + * DROP_BORADCAST: Drop broadcast frames. + * ROP_ACK_CTS: Drop received ACK and CTS. 
+ */ +#define TXRX_CSR0 0x3040 +#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXRX_CSR0_TSF_OFFSET FIELD32(0x00007e00) +#define TXRX_CSR0_AUTO_TX_SEQ FIELD32(0x00008000) +#define TXRX_CSR0_DISABLE_RX FIELD32(0x00010000) +#define TXRX_CSR0_DROP_CRC FIELD32(0x00020000) +#define TXRX_CSR0_DROP_PHYSICAL FIELD32(0x00040000) +#define TXRX_CSR0_DROP_CONTROL FIELD32(0x00080000) +#define TXRX_CSR0_DROP_NOT_TO_ME FIELD32(0x00100000) +#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) +#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) +#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) +#define TXRX_CSR0_DROP_BORADCAST FIELD32(0x01000000) +#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) +#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) + +/* + * TXRX_CSR1 + */ +#define TXRX_CSR1 0x3044 +#define TXRX_CSR1_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR1_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR1_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR1_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR1_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR1_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR1_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR1_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR2 + */ +#define TXRX_CSR2 0x3048 +#define TXRX_CSR2_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR2_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR2_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR2_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR2_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR2_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR2_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR2_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR3 + */ +#define TXRX_CSR3 0x304c +#define TXRX_CSR3_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR3_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR3_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR3_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR4: Auto-Responder/Tx-retry register. + * AUTORESPOND_PREAMBLE: 0:long, 1:short preamble. + * OFDM_TX_RATE_DOWN: 1:enable. + * OFDM_TX_RATE_STEP: 0:1-step, 1: 2-step, 2:3-step, 3:4-step. + * OFDM_TX_FALLBACK_CCK: 0: Fallback to OFDM 6M only, 1: Fallback to CCK 1M,2M. + */ +#define TXRX_CSR4 0x3050 +#define TXRX_CSR4_TX_ACK_TIMEOUT FIELD32(0x000000ff) +#define TXRX_CSR4_CNTL_ACK_POLICY FIELD32(0x00000700) +#define TXRX_CSR4_ACK_CTS_PSM FIELD32(0x00010000) +#define TXRX_CSR4_AUTORESPOND_ENABLE FIELD32(0x00020000) +#define TXRX_CSR4_AUTORESPOND_PREAMBLE FIELD32(0x00040000) +#define TXRX_CSR4_OFDM_TX_RATE_DOWN FIELD32(0x00080000) +#define TXRX_CSR4_OFDM_TX_RATE_STEP FIELD32(0x00300000) +#define TXRX_CSR4_OFDM_TX_FALLBACK_CCK FIELD32(0x00400000) +#define TXRX_CSR4_LONG_RETRY_LIMIT FIELD32(0x0f000000) +#define TXRX_CSR4_SHORT_RETRY_LIMIT FIELD32(0xf0000000) + +/* + * TXRX_CSR5 + */ +#define TXRX_CSR5 0x3054 + +/* + * TXRX_CSR6: ACK/CTS payload consumed time + */ +#define TXRX_CSR6 0x3058 + +/* + * TXRX_CSR7: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define TXRX_CSR7 0x305c +#define TXRX_CSR7_ACK_CTS_6MBS FIELD32(0x000000ff) +#define TXRX_CSR7_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define TXRX_CSR7_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define TXRX_CSR7_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * TXRX_CSR8: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. 
+ */ +#define TXRX_CSR8 0x3060 +#define TXRX_CSR8_ACK_CTS_24MBS FIELD32(0x000000ff) +#define TXRX_CSR8_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define TXRX_CSR8_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define TXRX_CSR8_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * TXRX_CSR9: Synchronization control register. + * BEACON_INTERVAL: In unit of 1/16 TU. + * TSF_TICKING: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR9 0x3064 +#define TXRX_CSR9_BEACON_INTERVAL FIELD32(0x0000ffff) +#define TXRX_CSR9_TSF_TICKING FIELD32(0x00010000) +#define TXRX_CSR9_TSF_SYNC FIELD32(0x00060000) +#define TXRX_CSR9_TBTT_ENABLE FIELD32(0x00080000) +#define TXRX_CSR9_BEACON_GEN FIELD32(0x00100000) +#define TXRX_CSR9_TIMESTAMP_COMPENSATE FIELD32(0xff000000) + +/* + * TXRX_CSR10: BEACON alignment. + */ +#define TXRX_CSR10 0x3068 + +/* + * TXRX_CSR11: AES mask. + */ +#define TXRX_CSR11 0x306c + +/* + * TXRX_CSR12: TSF low 32. + */ +#define TXRX_CSR12 0x3070 +#define TXRX_CSR12_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR13: TSF high 32. + */ +#define TXRX_CSR13 0x3074 +#define TXRX_CSR13_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR14: TBTT timer. + */ +#define TXRX_CSR14 0x3078 + +/* + * TXRX_CSR15: TKIP MIC priority byte "AND" mask. + */ +#define TXRX_CSR15 0x307c + +/* + * PHY control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PHY_CSR0: RF/PS control. + */ +#define PHY_CSR0 0x3080 +#define PHY_CSR0_PA_PE_BG FIELD32(0x00010000) +#define PHY_CSR0_PA_PE_A FIELD32(0x00020000) + +/* + * PHY_CSR1 + */ +#define PHY_CSR1 0x3084 + +/* + * PHY_CSR2: Pre-TX BBP control. + */ +#define PHY_CSR2 0x3088 + +/* + * PHY_CSR3: BBP serial control register. + * VALUE: Register value to program into BBP. + * REG_NUM: Selected BBP register. + * READ_CONTROL: 0: Write BBP, 1: Read BBP. + * BUSY: 1: ASIC is busy execute BBP programming. + */ +#define PHY_CSR3 0x308c +#define PHY_CSR3_VALUE FIELD32(0x000000ff) +#define PHY_CSR3_REGNUM FIELD32(0x00007f00) +#define PHY_CSR3_READ_CONTROL FIELD32(0x00008000) +#define PHY_CSR3_BUSY FIELD32(0x00010000) + +/* + * PHY_CSR4: RF serial control register + * VALUE: Register value (include register id) serial out to RF/IF chip. + * NUMBER_OF_BITS: Number of bits used in RFRegValue (I:20, RFMD:22). + * IF_SELECT: 1: select IF to program, 0: select RF to program. + * PLL_LD: RF PLL_LD status. + * BUSY: 1: ASIC is busy execute RF programming. + */ +#define PHY_CSR4 0x3090 +#define PHY_CSR4_VALUE FIELD32(0x00ffffff) +#define PHY_CSR4_NUMBER_OF_BITS FIELD32(0x1f000000) +#define PHY_CSR4_IF_SELECT FIELD32(0x20000000) +#define PHY_CSR4_PLL_LD FIELD32(0x40000000) +#define PHY_CSR4_BUSY FIELD32(0x80000000) + +/* + * PHY_CSR5: RX to TX signal switch timing control. + */ +#define PHY_CSR5 0x3094 +#define PHY_CSR5_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR6: TX to RX signal timing control. + */ +#define PHY_CSR6 0x3098 +#define PHY_CSR6_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR7: TX DAC switching timing control. + */ +#define PHY_CSR7 0x309c + +/* + * Security control register. + */ + +/* + * SEC_CSR0: Shared key table control. 
+ */ +#define SEC_CSR0 0x30a0 +#define SEC_CSR0_BSS0_KEY0_VALID FIELD32(0x00000001) +#define SEC_CSR0_BSS0_KEY1_VALID FIELD32(0x00000002) +#define SEC_CSR0_BSS0_KEY2_VALID FIELD32(0x00000004) +#define SEC_CSR0_BSS0_KEY3_VALID FIELD32(0x00000008) +#define SEC_CSR0_BSS1_KEY0_VALID FIELD32(0x00000010) +#define SEC_CSR0_BSS1_KEY1_VALID FIELD32(0x00000020) +#define SEC_CSR0_BSS1_KEY2_VALID FIELD32(0x00000040) +#define SEC_CSR0_BSS1_KEY3_VALID FIELD32(0x00000080) +#define SEC_CSR0_BSS2_KEY0_VALID FIELD32(0x00000100) +#define SEC_CSR0_BSS2_KEY1_VALID FIELD32(0x00000200) +#define SEC_CSR0_BSS2_KEY2_VALID FIELD32(0x00000400) +#define SEC_CSR0_BSS2_KEY3_VALID FIELD32(0x00000800) +#define SEC_CSR0_BSS3_KEY0_VALID FIELD32(0x00001000) +#define SEC_CSR0_BSS3_KEY1_VALID FIELD32(0x00002000) +#define SEC_CSR0_BSS3_KEY2_VALID FIELD32(0x00004000) +#define SEC_CSR0_BSS3_KEY3_VALID FIELD32(0x00008000) + +/* + * SEC_CSR1: Shared key table security mode register. + */ +#define SEC_CSR1 0x30a4 +#define SEC_CSR1_BSS0_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR1_BSS0_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR1_BSS0_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR1_BSS0_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR1_BSS1_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR1_BSS1_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR1_BSS1_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR1_BSS1_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * Pairwise key table valid bitmap registers. + * SEC_CSR2: pairwise key table valid bitmap 0. + * SEC_CSR3: pairwise key table valid bitmap 1. + */ +#define SEC_CSR2 0x30a8 +#define SEC_CSR3 0x30ac + +/* + * SEC_CSR4: Pairwise key table lookup control. + */ +#define SEC_CSR4 0x30b0 + +/* + * SEC_CSR5: shared key table security mode register. + */ +#define SEC_CSR5 0x30b4 +#define SEC_CSR5_BSS2_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR5_BSS2_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR5_BSS2_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR5_BSS2_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR5_BSS3_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR5_BSS3_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR5_BSS3_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR5_BSS3_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * STA control registers. + */ + +/* + * STA_CSR0: RX PLCP error count & RX FCS error count. + */ +#define STA_CSR0 0x30c0 +#define STA_CSR0_FCS_ERROR FIELD32(0x0000ffff) +#define STA_CSR0_PLCP_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR1: RX False CCA count & RX LONG frame count. + */ +#define STA_CSR1 0x30c4 +#define STA_CSR1_PHYSICAL_ERROR FIELD32(0x0000ffff) +#define STA_CSR1_FALSE_CCA_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR2: TX Beacon count and RX FIFO overflow count. + */ +#define STA_CSR2 0x30c8 +#define STA_CSR2_RX_FIFO_OVERFLOW_COUNT FIELD32(0x0000ffff) +#define STA_CSR2_RX_OVERFLOW_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR3: TX Beacon count. + */ +#define STA_CSR3 0x30cc +#define STA_CSR3_TX_BEACON_COUNT FIELD32(0x0000ffff) + +/* + * STA_CSR4: TX Result status register. + * VALID: 1:This register contains a valid TX result. + */ +#define STA_CSR4 0x30d0 +#define STA_CSR4_VALID FIELD32(0x00000001) +#define STA_CSR4_TX_RESULT FIELD32(0x0000000e) +#define STA_CSR4_RETRY_COUNT FIELD32(0x000000f0) +#define STA_CSR4_PID_SUBTYPE FIELD32(0x00001f00) +#define STA_CSR4_PID_TYPE FIELD32(0x0000e000) +#define STA_CSR4_TXRATE FIELD32(0x000f0000) + +/* + * QOS control registers. 
+ */ + +/* + * QOS_CSR0: TXOP holder MAC address register. + */ +#define QOS_CSR0 0x30e0 +#define QOS_CSR0_BYTE0 FIELD32(0x000000ff) +#define QOS_CSR0_BYTE1 FIELD32(0x0000ff00) +#define QOS_CSR0_BYTE2 FIELD32(0x00ff0000) +#define QOS_CSR0_BYTE3 FIELD32(0xff000000) + +/* + * QOS_CSR1: TXOP holder MAC address register. + */ +#define QOS_CSR1 0x30e4 +#define QOS_CSR1_BYTE4 FIELD32(0x000000ff) +#define QOS_CSR1_BYTE5 FIELD32(0x0000ff00) + +/* + * QOS_CSR2: TXOP holder timeout register. + */ +#define QOS_CSR2 0x30e8 + +/* + * RX QOS-CFPOLL MAC address register. + * QOS_CSR3: RX QOS-CFPOLL MAC address 0. + * QOS_CSR4: RX QOS-CFPOLL MAC address 1. + */ +#define QOS_CSR3 0x30ec +#define QOS_CSR4 0x30f0 + +/* + * QOS_CSR5: "QosControl" field of the RX QOS-CFPOLL. + */ +#define QOS_CSR5 0x30f4 + +/* + * Host DMA registers. + */ + +/* + * AC0_BASE_CSR: AC_BK base address. + */ +#define AC0_BASE_CSR 0x3400 +#define AC0_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC1_BASE_CSR: AC_BE base address. + */ +#define AC1_BASE_CSR 0x3404 +#define AC1_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC2_BASE_CSR: AC_VI base address. + */ +#define AC2_BASE_CSR 0x3408 +#define AC2_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC3_BASE_CSR: AC_VO base address. + */ +#define AC3_BASE_CSR 0x340c +#define AC3_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * MGMT_BASE_CSR: MGMT ring base address. + */ +#define MGMT_BASE_CSR 0x3410 +#define MGMT_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * TX_RING_CSR0: TX Ring size for AC_BK, AC_BE, AC_VI, AC_VO. + */ +#define TX_RING_CSR0 0x3418 +#define TX_RING_CSR0_AC0_RING_SIZE FIELD32(0x000000ff) +#define TX_RING_CSR0_AC1_RING_SIZE FIELD32(0x0000ff00) +#define TX_RING_CSR0_AC2_RING_SIZE FIELD32(0x00ff0000) +#define TX_RING_CSR0_AC3_RING_SIZE FIELD32(0xff000000) + +/* + * TX_RING_CSR1: TX Ring size for MGMT Ring, HCCA Ring + * TXD_SIZE: In unit of 32-bit. + */ +#define TX_RING_CSR1 0x341c +#define TX_RING_CSR1_MGMT_RING_SIZE FIELD32(0x000000ff) +#define TX_RING_CSR1_HCCA_RING_SIZE FIELD32(0x0000ff00) +#define TX_RING_CSR1_TXD_SIZE FIELD32(0x003f0000) + +/* + * AIFSN_CSR: AIFSN for each EDCA AC. + * AIFSN0: For AC_BK. + * AIFSN1: For AC_BE. + * AIFSN2: For AC_VI. + * AIFSN3: For AC_VO. + */ +#define AIFSN_CSR 0x3420 +#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) +#define AIFSN_CSR_AIFSN1 FIELD32(0x000000f0) +#define AIFSN_CSR_AIFSN2 FIELD32(0x00000f00) +#define AIFSN_CSR_AIFSN3 FIELD32(0x0000f000) + +/* + * CWMIN_CSR: CWmin for each EDCA AC. + * CWMIN0: For AC_BK. + * CWMIN1: For AC_BE. + * CWMIN2: For AC_VI. + * CWMIN3: For AC_VO. + */ +#define CWMIN_CSR 0x3424 +#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) +#define CWMIN_CSR_CWMIN1 FIELD32(0x000000f0) +#define CWMIN_CSR_CWMIN2 FIELD32(0x00000f00) +#define CWMIN_CSR_CWMIN3 FIELD32(0x0000f000) + +/* + * CWMAX_CSR: CWmax for each EDCA AC. + * CWMAX0: For AC_BK. + * CWMAX1: For AC_BE. + * CWMAX2: For AC_VI. + * CWMAX3: For AC_VO. 
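A note on the 4-bit CWMIN/CWMAX fields described above and defined just below: as the MAC_CSR9 description earlier puts it ("default Cwmin is 31 (2^5 - 1)"), these fields appear to hold exponents rather than the window size itself, so the effective contention window is 2^n - 1 slots. A tiny worked example of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int cwmin_exp = 5;                 /* value written into a 4-bit CWMIN field */
	unsigned int cwmin = (1U << cwmin_exp) - 1; /* 2^5 - 1 = 31 slots, the MAC_CSR9 default */

	printf("CWmin exponent %u -> contention window %u\n", cwmin_exp, cwmin);
	return 0;
}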
+ */ +#define CWMAX_CSR 0x3428 +#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) +#define CWMAX_CSR_CWMAX1 FIELD32(0x000000f0) +#define CWMAX_CSR_CWMAX2 FIELD32(0x00000f00) +#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) + +/* + * TX_DMA_DST_CSR: TX DMA destination + * 0: TX ring0, 1: TX ring1, 2: TX ring2 3: invalid + */ +#define TX_DMA_DST_CSR 0x342c +#define TX_DMA_DST_CSR_DEST_AC0 FIELD32(0x00000003) +#define TX_DMA_DST_CSR_DEST_AC1 FIELD32(0x0000000c) +#define TX_DMA_DST_CSR_DEST_AC2 FIELD32(0x00000030) +#define TX_DMA_DST_CSR_DEST_AC3 FIELD32(0x000000c0) +#define TX_DMA_DST_CSR_DEST_MGMT FIELD32(0x00000300) + +/* + * TX_CNTL_CSR: KICK/Abort TX. + * KICK_TX_AC0: For AC_BK. + * KICK_TX_AC1: For AC_BE. + * KICK_TX_AC2: For AC_VI. + * KICK_TX_AC3: For AC_VO. + * ABORT_TX_AC0: For AC_BK. + * ABORT_TX_AC1: For AC_BE. + * ABORT_TX_AC2: For AC_VI. + * ABORT_TX_AC3: For AC_VO. + */ +#define TX_CNTL_CSR 0x3430 +#define TX_CNTL_CSR_KICK_TX_AC0 FIELD32(0x00000001) +#define TX_CNTL_CSR_KICK_TX_AC1 FIELD32(0x00000002) +#define TX_CNTL_CSR_KICK_TX_AC2 FIELD32(0x00000004) +#define TX_CNTL_CSR_KICK_TX_AC3 FIELD32(0x00000008) +#define TX_CNTL_CSR_KICK_TX_MGMT FIELD32(0x00000010) +#define TX_CNTL_CSR_ABORT_TX_AC0 FIELD32(0x00010000) +#define TX_CNTL_CSR_ABORT_TX_AC1 FIELD32(0x00020000) +#define TX_CNTL_CSR_ABORT_TX_AC2 FIELD32(0x00040000) +#define TX_CNTL_CSR_ABORT_TX_AC3 FIELD32(0x00080000) +#define TX_CNTL_CSR_ABORT_TX_MGMT FIELD32(0x00100000) + +/* + * LOAD_TX_RING_CSR: Load RX de + */ +#define LOAD_TX_RING_CSR 0x3434 +#define LOAD_TX_RING_CSR_LOAD_TXD_AC0 FIELD32(0x00000001) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC1 FIELD32(0x00000002) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC2 FIELD32(0x00000004) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC3 FIELD32(0x00000008) +#define LOAD_TX_RING_CSR_LOAD_TXD_MGMT FIELD32(0x00000010) + +/* + * Several read-only registers, for debugging. + */ +#define AC0_TXPTR_CSR 0x3438 +#define AC1_TXPTR_CSR 0x343c +#define AC2_TXPTR_CSR 0x3440 +#define AC3_TXPTR_CSR 0x3444 +#define MGMT_TXPTR_CSR 0x3448 + +/* + * RX_BASE_CSR + */ +#define RX_BASE_CSR 0x3450 +#define RX_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * RX_RING_CSR. + * RXD_SIZE: In unit of 32-bit. + */ +#define RX_RING_CSR 0x3454 +#define RX_RING_CSR_RING_SIZE FIELD32(0x000000ff) +#define RX_RING_CSR_RXD_SIZE FIELD32(0x00003f00) +#define RX_RING_CSR_RXD_WRITEBACK_SIZE FIELD32(0x00070000) + +/* + * RX_CNTL_CSR + */ +#define RX_CNTL_CSR 0x3458 +#define RX_CNTL_CSR_ENABLE_RX_DMA FIELD32(0x00000001) +#define RX_CNTL_CSR_LOAD_RXD FIELD32(0x00000002) + +/* + * RXPTR_CSR: Read-only, for debugging. + */ +#define RXPTR_CSR 0x345c + +/* + * PCI_CFG_CSR + */ +#define PCI_CFG_CSR 0x3460 + +/* + * BUF_FORMAT_CSR + */ +#define BUF_FORMAT_CSR 0x3464 + +/* + * INT_SOURCE_CSR: Interrupt source register. + * Write one to clear corresponding bit. + */ +#define INT_SOURCE_CSR 0x3468 +#define INT_SOURCE_CSR_TXDONE FIELD32(0x00000001) +#define INT_SOURCE_CSR_RXDONE FIELD32(0x00000002) +#define INT_SOURCE_CSR_BEACON_DONE FIELD32(0x00000004) +#define INT_SOURCE_CSR_TX_ABORT_DONE FIELD32(0x00000010) +#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00010000) +#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00020000) +#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00040000) +#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00080000) +#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00100000) +#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00200000) + +/* + * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF. 
+ * MITIGATION_PERIOD: Interrupt mitigation in unit of 32 PCI clock. + */ +#define INT_MASK_CSR 0x346c +#define INT_MASK_CSR_TXDONE FIELD32(0x00000001) +#define INT_MASK_CSR_RXDONE FIELD32(0x00000002) +#define INT_MASK_CSR_BEACON_DONE FIELD32(0x00000004) +#define INT_MASK_CSR_TX_ABORT_DONE FIELD32(0x00000010) +#define INT_MASK_CSR_ENABLE_MITIGATION FIELD32(0x00000080) +#define INT_MASK_CSR_MITIGATION_PERIOD FIELD32(0x0000ff00) +#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00010000) +#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00020000) +#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00040000) +#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00080000) +#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00100000) +#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00200000) + +/* + * E2PROM_CSR: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + * LOAD_STATUS: 1:loading, 0:done. + */ +#define E2PROM_CSR 0x3470 +#define E2PROM_CSR_RELOAD FIELD32(0x00000001) +#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000002) +#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000004) +#define E2PROM_CSR_DATA_IN FIELD32(0x00000008) +#define E2PROM_CSR_DATA_OUT FIELD32(0x00000010) +#define E2PROM_CSR_TYPE_93C46 FIELD32(0x00000020) +#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) + +/* + * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. + * AC0_TX_OP: For AC_BK, in unit of 32us. + * AC1_TX_OP: For AC_BE, in unit of 32us. + */ +#define AC_TXOP_CSR0 0x3474 +#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) + +/* + * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. + * AC2_TX_OP: For AC_VI, in unit of 32us. + * AC3_TX_OP: For AC_VO, in unit of 32us. + */ +#define AC_TXOP_CSR1 0x3478 +#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR1_AC3_TX_OP FIELD32(0xffff0000) + +/* + * DMA_STATUS_CSR + */ +#define DMA_STATUS_CSR 0x3480 + +/* + * TEST_MODE_CSR + */ +#define TEST_MODE_CSR 0x3484 + +/* + * UART0_TX_CSR + */ +#define UART0_TX_CSR 0x3488 + +/* + * UART0_RX_CSR + */ +#define UART0_RX_CSR 0x348c + +/* + * UART0_FRAME_CSR + */ +#define UART0_FRAME_CSR 0x3490 + +/* + * UART0_BUFFER_CSR + */ +#define UART0_BUFFER_CSR 0x3494 + +/* + * IO_CNTL_CSR + */ +#define IO_CNTL_CSR 0x3498 + +/* + * UART_INT_SOURCE_CSR + */ +#define UART_INT_SOURCE_CSR 0x34a8 + +/* + * UART_INT_MASK_CSR + */ +#define UART_INT_MASK_CSR 0x34ac + +/* + * PBF_QUEUE_CSR + */ +#define PBF_QUEUE_CSR 0x34b0 + +/* + * Firmware DMA registers. + * Firmware DMA registers are dedicated for MCU usage + * and should not be touched by host driver. + * Therefore we skip the definition of these registers. + */ +#define FW_TX_BASE_CSR 0x34c0 +#define FW_TX_START_CSR 0x34c4 +#define FW_TX_LAST_CSR 0x34c8 +#define FW_MODE_CNTL_CSR 0x34cc +#define FW_TXPTR_CSR 0x34d0 + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2561 "rt2561.bin" +#define FIRMWARE_RT2561s "rt2561s.bin" +#define FIRMWARE_RT2661 "rt2661.bin" +#define FIRMWARE_IMAGE_BASE 0x4000 + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. 
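The BBP registers defined below are 8 bits wide and are not directly memory mapped: they are reached indirectly through the PHY_CSR3 mailbox described earlier (VALUE, REGNUM, READ_CONTROL and a BUSY kick bit). A minimal sketch of composing such a write with the field definitions from this header follows; example_bbp_write() is a hypothetical name, and the driver's real helper (outside this excerpt) additionally waits for any previous access to complete before writing.

static void example_bbp_write(struct rt2x00_dev *rt2x00dev,
			      const u8 regnum, const u8 value)
{
	u32 reg = 0;

	/* Compose the indirect access request. */
	rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value);
	rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, regnum);
	rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);	/* 0: write BBP */
	rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);		/* kick the ASIC */

	rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg);
}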
+ */ + +/* + * R2 + */ +#define BBP_R2_BG_MODE FIELD8(0x20) + +/* + * R3 + */ +#define BBP_R3_SMART_MODE FIELD8(0x01) + +/* + * R4: RX antenna control + * FRAME_END: 1 - DPDT, 0 - SPDT (Only valid for 802.11G, RF2527 & RF2529) + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x03) +#define BBP_R4_RX_FRAME_END FIELD8(0x20) + +/* + * R77 + */ +#define BBP_R77_PAIR FIELD8(0x03) + +/* + * RF registers + */ + +/* + * RF 3 + */ +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * RF 4 + */ +#define RF4_FREQ_OFFSET FIELD32(0x0003f000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0004 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0006 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * FRAME_TYPE: 0: DPDT , 1: SPDT , noted this bit is valid for g only. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x0010 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_FRAME_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * ENABLE_DIVERSITY: 1:enable, 0:disable. + * EXTERNAL_LNA_BG: External LNA enable for 2.4G. + * CARDBUS_ACCEL: 0:enable, 1:disable. + * EXTERNAL_LNA_A: External LNA enable for 5G. + */ +#define EEPROM_NIC 0x0011 +#define EEPROM_NIC_ENABLE_DIVERSITY FIELD16(0x0001) +#define EEPROM_NIC_TX_DIVERSITY FIELD16(0x0002) +#define EEPROM_NIC_TX_RX_FIXED FIELD16(0x000c) +#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0010) +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0020) +#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0040) + +/* + * EEPROM geography. + * GEO_A: Default geographical setting for 5GHz band + * GEO: Default geographical setting. + */ +#define EEPROM_GEOGRAPHY 0x0012 +#define EEPROM_GEOGRAPHY_GEO_A FIELD16(0x00ff) +#define EEPROM_GEOGRAPHY_GEO FIELD16(0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0013 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11G + */ +#define EEPROM_TXPOWER_G_START 0x0023 +#define EEPROM_TXPOWER_G_SIZE 7 +#define EEPROM_TXPOWER_G_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_G_2 FIELD16(0xff00) + +/* + * EEPROM Frequency + */ +#define EEPROM_FREQ 0x002f +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_SEQ_MASK FIELD16(0xff00) +#define EEPROM_FREQ_SEQ FIELD16(0x0300) + +/* + * EEPROM LED. + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode. 
+ */ +#define EEPROM_LED 0x0030 +#define EEPROM_LED_POLARITY_RDY_G FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A_START 0x0031 +#define EEPROM_TXPOWER_A_SIZE 12 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11BG + */ +#define EEPROM_RSSI_OFFSET_BG 0x004d +#define EEPROM_RSSI_OFFSET_BG_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_BG_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11A + */ +#define EEPROM_RSSI_OFFSET_A 0x004e +#define EEPROM_RSSI_OFFSET_A_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_A_2 FIELD16(0xff00) + +/* + * MCU mailbox commands. + */ +#define MCU_SLEEP 0x30 +#define MCU_WAKEUP 0x31 +#define MCU_LED 0x50 +#define MCU_LED_STRENGTH 0x52 + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 16 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 16 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + * TKIP_MIC: ASIC appends TKIP MIC if TKIP is used. + * KEY_TABLE: Use per-client pairwise KEY table. + * KEY_INDEX: + * Key index (0~31) to the pairwise KEY table. + * 0~3 to shared KEY table 0 (BSS0). + * 4~7 to shared KEY table 1 (BSS1). + * 8~11 to shared KEY table 2 (BSS2). + * 12~15 to shared KEY table 3 (BSS3). + * BURST: Next frame belongs to same "burst" event. + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_MORE_FRAG FIELD32(0x00000004) +#define TXD_W0_ACK FIELD32(0x00000008) +#define TXD_W0_TIMESTAMP FIELD32(0x00000010) +#define TXD_W0_OFDM FIELD32(0x00000020) +#define TXD_W0_IFS FIELD32(0x00000040) +#define TXD_W0_RETRY_MODE FIELD32(0x00000080) +#define TXD_W0_TKIP_MIC FIELD32(0x00000100) +#define TXD_W0_KEY_TABLE FIELD32(0x00000200) +#define TXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_BURST FIELD32(0x10000000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * HOST_Q_ID: EDCA/HCCA queue ID. + * HW_SEQUENCE: MAC overwrites the frame sequence number. + * BUFFER_COUNT: Number of buffers in this TXD. + */ +#define TXD_W1_HOST_Q_ID FIELD32(0x0000000f) +#define TXD_W1_AIFSN FIELD32(0x000000f0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) +#define TXD_W1_IV_OFFSET FIELD32(0x003f0000) +#define TXD_W1_PIGGY_BACK FIELD32(0x01000000) +#define TXD_W1_HW_SEQUENCE FIELD32(0x10000000) +#define TXD_W1_BUFFER_COUNT FIELD32(0xe0000000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * Word5 + * FRAME_OFFSET: Frame start offset inside ASIC TXFIFO (after TXINFO field). + * TXD_W5_PID_SUBTYPE: Driver assigned packet ID index for txdone handler. + * TXD_W5_PID_TYPE: Driver assigned packet ID type for txdone handler. 
+ * WAITING_DMA_DONE_INT: TXD been filled with data + * and waiting for TxDoneISR housekeeping. + */ +#define TXD_W5_FRAME_OFFSET FIELD32(0x000000ff) +#define TXD_W5_PID_SUBTYPE FIELD32(0x00001f00) +#define TXD_W5_PID_TYPE FIELD32(0x0000e000) +#define TXD_W5_TX_POWER FIELD32(0x00ff0000) +#define TXD_W5_WAITING_DMA_DONE_INT FIELD32(0x01000000) + +/* + * the above 24-byte is called TXINFO and will be DMAed to MAC block + * through TXFIFO. MAC block use this TXINFO to control the transmission + * behavior of this frame. + * The following fields are not used by MAC block. + * They are used by DMA block and HOST driver only. + * Once a frame has been DMA to ASIC, all the following fields are useless + * to ASIC. + */ + +/* + * Word6-10: Buffer physical address + */ +#define TXD_W6_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W7_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W8_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W9_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W10_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) + +/* + * Word11-13: Buffer length + */ +#define TXD_W11_BUFFER_LENGTH0 FIELD32(0x00000fff) +#define TXD_W11_BUFFER_LENGTH1 FIELD32(0x0fff0000) +#define TXD_W12_BUFFER_LENGTH2 FIELD32(0x00000fff) +#define TXD_W12_BUFFER_LENGTH3 FIELD32(0x0fff0000) +#define TXD_W13_BUFFER_LENGTH4 FIELD32(0x00000fff) + +/* + * Word14 + */ +#define TXD_W14_SK_BUFFER FIELD32(0xffffffff) + +/* + * Word15 + */ +#define TXD_W15_NEXT_SK_BUFFER FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * CIPHER_ERROR: 1:ICV error, 2:MIC error, 3:invalid key. + * KEY_INDEX: Decryption key actually used. + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_DROP FIELD32(0x00000002) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000004) +#define RXD_W0_MULTICAST FIELD32(0x00000008) +#define RXD_W0_BROADCAST FIELD32(0x00000010) +#define RXD_W0_MY_BSS FIELD32(0x00000020) +#define RXD_W0_CRC_ERROR FIELD32(0x00000040) +#define RXD_W0_OFDM FIELD32(0x00000080) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000300) +#define RXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * SIGNAL: RX raw data rate reported by BBP. + */ +#define RXD_W1_SIGNAL FIELD32(0x000000ff) +#define RXD_W1_RSSI_AGC FIELD32(0x00001f00) +#define RXD_W1_RSSI_LNA FIELD32(0x00006000) +#define RXD_W1_FRAME_OFFSET FIELD32(0x7f000000) + +/* + * Word2 + * IV: Received IV of originally encrypted. + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + * EIV: Received EIV of originally encrypted. + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_RESERVED FIELD32(0xffffffff) + +/* + * the above 20-byte is called RXINFO and will be DMAed to MAC RX block + * and passed to the HOST driver. + * The following fields are for DMA block and HOST usage only. + * Can't be touched by ASIC MAC block. 
+ */ + +/* + * Word5 + */ +#define RXD_W5_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) + +/* + * Word6-15: Reserved + */ +#define RXD_W6_RESERVED FIELD32(0xffffffff) +#define RXD_W7_RESERVED FIELD32(0xffffffff) +#define RXD_W8_RESERVED FIELD32(0xffffffff) +#define RXD_W9_RESERVED FIELD32(0xffffffff) +#define RXD_W10_RESERVED FIELD32(0xffffffff) +#define RXD_W11_RESERVED FIELD32(0xffffffff) +#define RXD_W12_RESERVED FIELD32(0xffffffff) +#define RXD_W13_RESERVED FIELD32(0xffffffff) +#define RXD_W14_RESERVED FIELD32(0xffffffff) +#define RXD_W15_RESERVED FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT61PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c new file mode 100644 index 000000000000..3e42759473c3 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -0,0 +1,2110 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt73usb + Abstract: rt73usb device specific routines. + Supported chipsets: rt2571W & rt2671. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt73usb" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "rt2x00.h" +#include "rt2x00usb.h" +#include "rt73usb.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt73usb_register_read and rt73usb_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. 
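In outline, that busy-bit handling is a simple bounded poll: read the CSR, bail out as soon as the busy flag clears, otherwise sleep briefly and retry up to the limit. A rough standalone sketch, with placeholder constants and helpers rather than the rt2x00 definitions:

#include <stdbool.h>

#define BUSY_COUNT      100     /* stand-in for REGISTER_BUSY_COUNT      */
#define BUSY_DELAY_US   30      /* stand-in for REGISTER_BUSY_DELAY (us) */

/* hypothetical helpers: read the CSR busy flag, delay a few microseconds */
extern bool csr_busy(void);
extern void delay_us(unsigned int us);

static bool wait_for_csr(void)
{
        unsigned int i;

        for (i = 0; i < BUSY_COUNT; i++) {
                if (!csr_busy())
                        return true;            /* indirect access may proceed */
                delay_us(BUSY_DELAY_US);
        }
        return false;                           /* still busy: caller reports the error */
}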
+ */ +static inline void rt73usb_register_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 *value) +{ + __le32 reg; + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + ®, sizeof(u32), REGISTER_TIMEOUT); + *value = le32_to_cpu(reg); +} + +static inline void rt73usb_register_multiread(const struct rt2x00_dev + *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + int timeout = REGISTER_TIMEOUT * (length / sizeof(u32)); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + value, length, timeout); +} + +static inline void rt73usb_register_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 value) +{ + __le32 reg = cpu_to_le32(value); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + ®, sizeof(u32), REGISTER_TIMEOUT); +} + +static inline void rt73usb_register_multiwrite(const struct rt2x00_dev + *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + int timeout = REGISTER_TIMEOUT * (length / sizeof(u32)); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + value, length, timeout); +} + +static u32 rt73usb_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_register_read(rt2x00dev, PHY_CSR3, ®); + if (!rt2x00_get_field32(reg, PHY_CSR3_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt73usb_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt73usb_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_VALUE, value); + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 0); + + rt73usb_register_write(rt2x00dev, PHY_CSR3, reg); +} + +static void rt73usb_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt73usb_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 1); + + rt73usb_register_write(rt2x00dev, PHY_CSR3, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt73usb_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); +} + +static void rt73usb_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_register_read(rt2x00dev, PHY_CSR4, ®); + if (!rt2x00_get_field32(reg, PHY_CSR4_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "PHY_CSR4 register busy. 
Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, PHY_CSR4_VALUE, value); + + /* + * RF5225 and RF2527 contain 21 bits per RF register value, + * all others contain 20 bits. + */ + rt2x00_set_field32(®, PHY_CSR4_NUMBER_OF_BITS, + 20 + !!(rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527))); + rt2x00_set_field32(®, PHY_CSR4_IF_SELECT, 0); + rt2x00_set_field32(®, PHY_CSR4_BUSY, 1); + + rt73usb_register_write(rt2x00dev, PHY_CSR4, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt73usb_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt73usb_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt73usb_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt73usb_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt73usb_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt73usb_read_csr, + .write = rt73usb_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt73usb_bbp_read, + .write = rt73usb_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt73usb_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +/* + * Configuration handlers. + */ +static void rt73usb_config_mac_addr(struct rt2x00_dev *rt2x00dev, __le32 *mac) +{ + u32 tmp; + + tmp = le32_to_cpu(mac[1]); + rt2x00_set_field32(&tmp, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); + mac[1] = cpu_to_le32(tmp); + + rt73usb_register_multiwrite(rt2x00dev, MAC_CSR2, mac, + (2 * sizeof(__le32))); +} + +static void rt73usb_config_bssid(struct rt2x00_dev *rt2x00dev, __le32 *bssid) +{ + u32 tmp; + + tmp = le32_to_cpu(bssid[1]); + rt2x00_set_field32(&tmp, MAC_CSR5_BSS_ID_MASK, 3); + bssid[1] = cpu_to_le32(tmp); + + rt73usb_register_multiwrite(rt2x00dev, MAC_CSR4, bssid, + (2 * sizeof(__le32))); +} + +static void rt73usb_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + /* + * Clear current synchronisation setup. + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + rt73usb_register_write(rt2x00dev, TXRX_CSR9, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0); + + /* + * Enable synchronisation. 
+ */ + rt73usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, tsf_sync); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt73usb_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + u32 reg; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->config_work); + return; + } + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_RX_ACK_TIMEOUT, ack_timeout); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_PREAMBLE, + !!short_preamble); + rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); +} + +static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt73usb_register_write(rt2x00dev, TXRX_CSR5, basic_rate_mask); +} + +static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r3; + u8 r94; + u8 smart; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527)); + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); + rt73usb_bbp_write(rt2x00dev, 3, r3); + + r94 = 6; + if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) + r94 += txpower - MAX_TXPOWER; + else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) + r94 += txpower; + rt73usb_bbp_write(rt2x00dev, 94, r94); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(10); +} + +static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + struct rf_channel rf; + + rt2x00_rf_read(rt2x00dev, 1, &rf.rf1); + rt2x00_rf_read(rt2x00dev, 2, &rf.rf2); + rt2x00_rf_read(rt2x00dev, 3, &rf.rf3); + rt2x00_rf_read(rt2x00dev, 4, &rf.rf4); + + rt73usb_config_channel(rt2x00dev, &rf, txpower); +} + +static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt73usb_bbp_read(rt2x00dev, 4, &r4); + rt73usb_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !!(rt2x00dev->curr_hwmode != HWMODE_A)); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if 
(rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if (rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt73usb_bbp_write(rt2x00dev, 77, r77); + rt73usb_bbp_write(rt2x00dev, 3, r3); + rt73usb_bbp_write(rt2x00dev, 4, r4); +} + +static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt73usb_bbp_read(rt2x00dev, 4, &r4); + rt73usb_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt73usb_bbp_write(rt2x00dev, 77, r77); + rt73usb_bbp_write(rt2x00dev, 3, r3); + rt73usb_bbp_write(rt2x00dev, 4, r4); +} + +struct antenna_sel { + u8 word; + /* + * value[0] -> non-LNA + * value[1] -> LNA + */ + u8 value[2]; +}; + +static const struct antenna_sel antenna_sel_a[] = { + { 96, { 0x58, 0x78 } }, + { 104, { 0x38, 0x48 } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x60, 0x60 } }, + { 97, { 0x58, 0x58 } }, + { 98, { 0x58, 0x58 } }, +}; + +static const struct antenna_sel antenna_sel_bg[] = { + { 96, { 0x48, 0x68 } }, + { 104, { 0x2c, 0x3c } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x50, 0x50 } }, + { 97, { 0x48, 0x48 } }, + { 98, { 0x48, 0x48 } }, +}; + +static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + const struct antenna_sel *sel; + unsigned int lna; + unsigned int i; + u32 reg; + + rt73usb_register_read(rt2x00dev, PHY_CSR0, ®); + + if (rt2x00dev->curr_hwmode == HWMODE_A) { + sel = antenna_sel_a; + lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 0); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 1); + } else { + sel = antenna_sel_bg; + lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 1); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 0); + } + + for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) + rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); + + rt73usb_register_write(rt2x00dev, PHY_CSR0, reg); + + if (rt2x00_rf(&rt2x00dev->chip, RF5226) || + rt2x00_rf(&rt2x00dev->chip, RF5225)) + rt73usb_config_antenna_5x(rt2x00dev, antenna_tx, antenna_rx); + else if (rt2x00_rf(&rt2x00dev->chip, RF2528) || + rt2x00_rf(&rt2x00dev->chip, RF2527)) + rt73usb_config_antenna_2x(rt2x00dev, antenna_tx, antenna_rx); +} + +static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_SLOT_TIME, libconf->slot_time); + rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); + + 
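All of these configuration updates follow the same read-modify-write idiom: fetch the CSR, rewrite individual fields through their FIELD32() masks, then write the whole word back. A simplified, self-contained sketch of such mask-based field helpers (illustrative only; the real rt2x00_set_field32()/rt2x00_get_field32() are defined elsewhere in the rt2x00 library headers):

#include <stdint.h>

struct field32 {
        uint32_t mask;          /* e.g. the MAC_CSR9_SLOT_TIME bit mask */
};

/* position of the lowest set bit; assumes a non-zero mask */
static unsigned int field_shift(uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

static void set_field32(uint32_t *reg, struct field32 f, uint32_t value)
{
        *reg &= ~f.mask;
        *reg |= (value << field_shift(f.mask)) & f.mask;
}

static uint32_t get_field32(uint32_t reg, struct field32 f)
{
        return (reg & f.mask) >> field_shift(f.mask);
}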
rt73usb_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field32(®, MAC_CSR8_SIFS, libconf->sifs); + rt2x00_set_field32(®, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); + rt2x00_set_field32(®, MAC_CSR8_EIFS, libconf->eifs); + rt73usb_register_write(rt2x00dev, MAC_CSR8, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_ENABLE, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt73usb_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt73usb_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt73usb_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt73usb_config_antenna(rt2x00dev, libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt73usb_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. + */ +static void rt73usb_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, MAC_CSR14, ®); + rt2x00_set_field32(®, MAC_CSR14_ON_PERIOD, 70); + rt2x00_set_field32(®, MAC_CSR14_OFF_PERIOD, 30); + rt73usb_register_write(rt2x00dev, MAC_CSR14, reg); + + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 1); + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) + rt2x00_set_field16(&rt2x00dev->led_reg, + MCU_LEDCS_LINK_A_STATUS, 1); + else + rt2x00_set_field16(&rt2x00dev->led_reg, + MCU_LEDCS_LINK_BG_STATUS, 1); + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, 0x0000, + rt2x00dev->led_reg, REGISTER_TIMEOUT); +} + +static void rt73usb_disable_led(struct rt2x00_dev *rt2x00dev) +{ + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 0); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_BG_STATUS, 0); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_A_STATUS, 0); + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, 0x0000, + rt2x00dev->led_reg, REGISTER_TIMEOUT); +} + +static void rt73usb_activity_led(struct rt2x00_dev *rt2x00dev, int rssi) +{ + u32 led; + + if (rt2x00dev->led_mode != LED_MODE_SIGNAL_STRENGTH) + return; + + /* + * Led handling requires a positive value for the rssi, + * to do that correctly we need to add the correction. + */ + rssi += rt2x00dev->rssi_offset; + + if (rssi <= 30) + led = 0; + else if (rssi <= 39) + led = 1; + else if (rssi <= 49) + led = 2; + else if (rssi <= 53) + led = 3; + else if (rssi <= 63) + led = 4; + else + led = 5; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, led, + rt2x00dev->led_reg, REGISTER_TIMEOUT); +} + +/* + * Link tuning + */ +static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Update FCS error count from register. 
+ */ + rt73usb_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt73usb_register_read(rt2x00dev, STA_CSR1, ®); + reg = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); + rt2x00dev->link.false_cca = + rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); +} + +static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt73usb_bbp_write(rt2x00dev, 17, 0x20); + rt2x00dev->link.vgc_level = 0x20; +} + +static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u8 r17; + u8 up_bound; + u8 low_bound; + + /* + * Update Led strength + */ + rt73usb_activity_led(rt2x00dev, rssi); + + rt73usb_bbp_read(rt2x00dev, 17, &r17); + + /* + * Determine r17 bounds. + */ + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + low_bound = 0x28; + up_bound = 0x48; + + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } else { + if (rssi > -82) { + low_bound = 0x1c; + up_bound = 0x40; + } else if (rssi > -84) { + low_bound = 0x1c; + up_bound = 0x20; + } else { + low_bound = 0x1c; + up_bound = 0x1c; + } + + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + low_bound += 0x14; + up_bound += 0x10; + } + } + + /* + * Special big-R17 for very short distance + */ + if (rssi > -35) { + if (r17 != 0x60) + rt73usb_bbp_write(rt2x00dev, 17, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (rssi >= -58) { + if (r17 != up_bound) + rt73usb_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * Special big-R17 for middle-short distance + */ + if (rssi >= -66) { + low_bound += 0x10; + if (r17 != low_bound) + rt73usb_bbp_write(rt2x00dev, 17, low_bound); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (rssi >= -74) { + if (r17 != (low_bound + 0x10)) + rt73usb_bbp_write(rt2x00dev, 17, low_bound + 0x08); + return; + } + + /* + * Special case: Change up_bound based on the rssi. + * Lower up_bound when rssi is weaker then -74 dBm. + */ + up_bound -= 2 * (-74 - rssi); + if (low_bound > up_bound) + up_bound = low_bound; + + if (r17 > up_bound) { + rt73usb_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * r17 does not yet exceed upper limit, continue and base + * the r17 tuning on the false CCA count. + */ + if (rt2x00dev->link.false_cca > 512 && r17 < up_bound) { + r17 += 4; + if (r17 > up_bound) + r17 = up_bound; + rt73usb_bbp_write(rt2x00dev, 17, r17); + } else if (rt2x00dev->link.false_cca < 100 && r17 > low_bound) { + r17 -= 4; + if (r17 < low_bound) + r17 = low_bound; + rt73usb_bbp_write(rt2x00dev, 17, r17); + } +} + +/* + * Firmware name function. + */ +static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + return FIRMWARE_RT2571; +} + +/* + * Initialization functions. + */ +static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data, + const size_t len) +{ + unsigned int i; + int status; + u32 reg; + char *ptr = data; + char *cache; + int buflen; + int timeout; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < 100; i++) { + rt73usb_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg) + break; + msleep(1); + } + + if (!reg) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + /* + * Write firmware to device. + * We setup a seperate cache for this action, + * since we are going to write larger chunks of data + * then normally used cache size. 
+ */ + cache = kmalloc(CSR_CACHE_SIZE_FIRMWARE, GFP_KERNEL); + if (!cache) { + ERROR(rt2x00dev, "Failed to allocate firmware cache.\n"); + return -ENOMEM; + } + + for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) { + buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE); + timeout = REGISTER_TIMEOUT * (buflen / sizeof(u32)); + + memcpy(cache, ptr, buflen); + + rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, + FIRMWARE_IMAGE_BASE + i, 0x0000, + cache, buflen, timeout); + + ptr += buflen; + } + + kfree(cache); + + /* + * Send firmware request to device to load firmware, + * we need to specify a long timeout time. + */ + status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, + 0x0000, USB_MODE_FIRMWARE, + REGISTER_TIMEOUT_FIRMWARE); + if (status < 0) { + ERROR(rt2x00dev, "Failed to write Firmware to device.\n"); + return status; + } + + rt73usb_disable_led(rt2x00dev); + + return 0; +} + +static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_AUTO_TX_SEQ, 1); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, TXRX_CSR0_TX_WITHOUT_WAITING, 0); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3_VALID, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + /* + * CCK TXD BBP registers + */ + rt73usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0, 13); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1, 12); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2, 11); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3, 10); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3_VALID, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR2, reg); + + /* + * OFDM TXD BBP registers + */ + rt73usb_register_read(rt2x00dev, TXRX_CSR3, ®); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0, 7); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1, 6); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2, 5); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2_VALID, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR3, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_6MBS, 59); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_9MBS, 53); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_12MBS, 49); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_18MBS, 46); + rt73usb_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_24MBS, 44); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_36MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_48MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_54MBS, 42); + rt73usb_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt73usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); + + 
rt73usb_register_read(rt2x00dev, MAC_CSR6, ®); + rt2x00_set_field32(®, MAC_CSR6_MAX_FRAME_UNIT, 0xfff); + rt73usb_register_write(rt2x00dev, MAC_CSR6, reg); + + rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt73usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00); + + /* + * Invalidate all Shared Keys (SEC_CSR0), + * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) + */ + rt73usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000); + rt73usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000); + rt73usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); + + reg = 0x000023b0; + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527)) + rt2x00_set_field32(®, PHY_CSR1_RF_RPI, 1); + rt73usb_register_write(rt2x00dev, PHY_CSR1, reg); + + rt73usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06); + rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); + rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); + + rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, ®); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC0_TX_OP, 0); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC1_TX_OP, 0); + rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg); + + rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, ®); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC2_TX_OP, 192); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC3_TX_OP, 48); + rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg); + + rt73usb_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_CW_SELECT, 0); + rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); + + /* + * We must clear the error counters. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt73usb_register_read(rt2x00dev, STA_CSR0, ®); + rt73usb_register_read(rt2x00dev, STA_CSR1, ®); + rt73usb_register_read(rt2x00dev, STA_CSR2, ®); + + /* + * Reset MAC and BBP registers. 
+ */ + rt73usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt73usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt73usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt73usb_bbp_write(rt2x00dev, 3, 0x80); + rt73usb_bbp_write(rt2x00dev, 15, 0x30); + rt73usb_bbp_write(rt2x00dev, 21, 0xc8); + rt73usb_bbp_write(rt2x00dev, 22, 0x38); + rt73usb_bbp_write(rt2x00dev, 23, 0x06); + rt73usb_bbp_write(rt2x00dev, 24, 0xfe); + rt73usb_bbp_write(rt2x00dev, 25, 0x0a); + rt73usb_bbp_write(rt2x00dev, 26, 0x0d); + rt73usb_bbp_write(rt2x00dev, 32, 0x0b); + rt73usb_bbp_write(rt2x00dev, 34, 0x12); + rt73usb_bbp_write(rt2x00dev, 37, 0x07); + rt73usb_bbp_write(rt2x00dev, 39, 0xf8); + rt73usb_bbp_write(rt2x00dev, 41, 0x60); + rt73usb_bbp_write(rt2x00dev, 53, 0x10); + rt73usb_bbp_write(rt2x00dev, 54, 0x18); + rt73usb_bbp_write(rt2x00dev, 60, 0x10); + rt73usb_bbp_write(rt2x00dev, 61, 0x04); + rt73usb_bbp_write(rt2x00dev, 62, 0x04); + rt73usb_bbp_write(rt2x00dev, 75, 0xfe); + rt73usb_bbp_write(rt2x00dev, 86, 0xfe); + rt73usb_bbp_write(rt2x00dev, 88, 0xfe); + rt73usb_bbp_write(rt2x00dev, 90, 0x0f); + rt73usb_bbp_write(rt2x00dev, 99, 0x00); + rt73usb_bbp_write(rt2x00dev, 102, 0x16); + rt73usb_bbp_write(rt2x00dev, 107, 0x04); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt73usb_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. 
+ */ + if (rt73usb_init_registers(rt2x00dev) || + rt73usb_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + rt2x00usb_enable_radio(rt2x00dev); + + /* + * Enable LED + */ + rt73usb_enable_led(rt2x00dev); + + return 0; +} + +static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable LED + */ + rt73usb_disable_led(rt2x00dev); + + rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); + + /* + * Disable synchronisation. + */ + rt73usb_register_write(rt2x00dev, TXRX_CSR9, 0); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char current_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt73usb_register_read(rt2x00dev, MAC_CSR12, ®); + rt2x00_set_field32(®, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); + rt2x00_set_field32(®, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); + rt73usb_register_write(rt2x00dev, MAC_CSR12, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_register_read(rt2x00dev, MAC_CSR12, ®); + current_state = + rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + if (current_state == !put_to_sleep) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state %d.\n", !put_to_sleep, current_state); + + return -EBUSY; +} + +static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt73usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt73usb_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt73usb_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt73usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. 
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue); + rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_TX_POWER, + TXPOWER_TO_DEV(control->power_level)); + rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_BURST, + test_bit(ENTRY_TXD_BURST, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_BURST2, + test_bit(ENTRY_TXD_BURST, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev, + int maxpacket, struct sk_buff *skb) +{ + int length; + + /* + * The length _must_ be a multiple of 4, + * but it must _not_ be a multiple of the USB packet size. + */ + length = roundup(skb->len, 4); + length += (4 * !(length % maxpacket)); + + return length; +} + +/* + * TX data initialization + */ +static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue != IEEE80211_TX_QUEUE_BEACON) + return; + + /* + * For Wi-Fi faily generated beacons between participating stations. 
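A quick worked example of the length rule enforced by rt73usb_get_tx_data_len() above, assuming a bulk endpoint with a 512-byte max packet size (the real value comes from the USB endpoint descriptor); keeping the total off a maxpacket multiple presumably ensures the transfer always ends in a short packet:

#include <stdio.h>

/* same arithmetic as rt73usb_get_tx_data_len(), lifted out for illustration */
static int usb_tx_len(int skb_len, int maxpacket)
{
        int length = (skb_len + 3) & ~3;        /* roundup(skb_len, 4) */

        if (!(length % maxpacket))              /* never a maxpacket multiple */
                length += 4;
        return length;
}

int main(void)
{
        printf("%d\n", usb_tx_len(508, 512));   /* 508: already acceptable        */
        printf("%d\n", usb_tx_len(510, 512));   /* 516: 512 alone would be padded */
        return 0;
}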
+ * Set TBTT phase adaptive adjustment step to 8us (default 16us) + */ + rt73usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); + + rt73usb_register_read(rt2x00dev, TXRX_CSR9, ®); + if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); + } +} + +/* + * RX control handlers + */ +static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) +{ + u16 eeprom; + u8 offset; + u8 lna; + + lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); + switch (lna) { + case 3: + offset = 90; + break; + case 2: + offset = 74; + break; + case 1: + offset = 64; + break; + default: + return 0; + } + + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + if (lna == 3 || lna == 2) + offset += 10; + } else { + if (lna == 3) + offset += 6; + else if (lna == 2) + offset += 8; + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); + } else { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + offset += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); + } + + return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; +} + +static void rt73usb_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = (struct data_desc *)entry->skb->data; + u32 word0; + u32 word1; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + desc->rssi = rt73usb_agc_to_rssi(entry->ring->rt2x00dev, word1); + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + /* + * Pull the skb to clear the descriptor area. + */ + skb_pull(entry->skb, entry->ring->desc_size); + + return; +} + +/* + * Device probe functions. + */ +static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + s8 value; + + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); + + /* + * Start validation of the data that has been read. 
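To make the arithmetic in rt73usb_agc_to_rssi() above concrete, consider the 2.4 GHz path with no external LNA and a zero EEPROM_RSSI_OFFSET_BG correction (numbers chosen purely for illustration):

static int agc_to_rssi_bg_example(void)
{
        int agc = 16;           /* RXD_W1_RSSI_AGC taken from the RX descriptor */
        int offset = 90;        /* LNA gain code 3 on the 2.4 GHz path; no      */
                                /* external LNA (+14) and a zero EEPROM         */
                                /* RSSI_OFFSET_BG correction in this example    */

        return agc * 2 - offset;        /* 32 - 90 = -58 */
}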
+ */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_ACT, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_0, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_1, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_2, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_3, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_4, 0); + rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); + EEPROM(rt2x00dev, "Led: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + } + + return 0; +} + +static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 
value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt73usb_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2571, value, reg); + + if (!rt2x00_rev(&rt2x00dev->chip, 0x25730)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); + return -ENODEV; + } + + if (!rt2x00_rf(&rt2x00dev->chip, RF5226) && + !rt2x00_rf(&rt2x00dev->chip, RF2528) && + !rt2x00_rf(&rt2x00dev->chip, RF5225) && + !rt2x00_rf(&rt2x00dev->chip, RF2527)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Read the Frame type. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) + __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); + + /* + * Read frequency offset. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); + + /* + * Read external LNA informations. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) { + __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + } + + /* + * Store led settings, for correct led behaviour. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); + + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LED_MODE, + rt2x00dev->led_mode); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_0, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_0)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_1, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_1)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_2, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_2)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_3, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_3)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_4, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_4)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_ACT, + rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_BG, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_G)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_A, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_A)); + + return 0; +} + +/* + * RF value list for RF2528 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2528[] = { + { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, + { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, + { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, + { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, + { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, + { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, + { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, + { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, + { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, + { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, + { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, + { 
12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, + { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, + { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, +}; + +/* + * RF value list for RF5226 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5226[] = { + { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, + { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, + { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, + { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, + { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, + { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, + { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, + { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, + { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, + { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, + { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, + { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, + { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, + { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002c0c, 0x0000099a, 0x00098255, 0x000fea23 }, + { 40, 0x00002c0c, 0x000009a2, 0x00098255, 0x000fea03 }, + { 44, 0x00002c0c, 0x000009a6, 0x00098255, 0x000fea0b }, + { 48, 0x00002c0c, 0x000009aa, 0x00098255, 0x000fea13 }, + { 52, 0x00002c0c, 0x000009ae, 0x00098255, 0x000fea1b }, + { 56, 0x00002c0c, 0x000009b2, 0x00098255, 0x000fea23 }, + { 60, 0x00002c0c, 0x000009ba, 0x00098255, 0x000fea03 }, + { 64, 0x00002c0c, 0x000009be, 0x00098255, 0x000fea0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002c0c, 0x00000a2a, 0x000b8255, 0x000fea03 }, + { 104, 0x00002c0c, 0x00000a2e, 0x000b8255, 0x000fea0b }, + { 108, 0x00002c0c, 0x00000a32, 0x000b8255, 0x000fea13 }, + { 112, 0x00002c0c, 0x00000a36, 0x000b8255, 0x000fea1b }, + { 116, 0x00002c0c, 0x00000a3a, 0x000b8255, 0x000fea23 }, + { 120, 0x00002c0c, 0x00000a82, 0x000b8255, 0x000fea03 }, + { 124, 0x00002c0c, 0x00000a86, 0x000b8255, 0x000fea0b }, + { 128, 0x00002c0c, 0x00000a8a, 0x000b8255, 0x000fea13 }, + { 132, 0x00002c0c, 0x00000a8e, 0x000b8255, 0x000fea1b }, + { 136, 0x00002c0c, 0x00000a92, 0x000b8255, 0x000fea23 }, + + /* 802.11 UNII */ + { 140, 0x00002c0c, 0x00000a9a, 0x000b8255, 0x000fea03 }, + { 149, 0x00002c0c, 0x00000aa2, 0x000b8255, 0x000fea1f }, + { 153, 0x00002c0c, 0x00000aa6, 0x000b8255, 0x000fea27 }, + { 157, 0x00002c0c, 0x00000aae, 0x000b8255, 0x000fea07 }, + { 161, 0x00002c0c, 0x00000ab2, 0x000b8255, 0x000fea0f }, + { 165, 0x00002c0c, 0x00000ab6, 0x000b8255, 0x000fea17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002c0c, 0x0008099a, 0x000da255, 0x000d3a0b }, + { 38, 0x00002c0c, 0x0008099e, 0x000da255, 0x000d3a13 }, + { 42, 0x00002c0c, 0x000809a2, 0x000da255, 0x000d3a1b }, + { 46, 0x00002c0c, 0x000809a6, 0x000da255, 0x000d3a23 }, +}; + +/* + * RF value list for RF5225 & RF2527 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5225_2527[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 
0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, + { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, + { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, + { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, + { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, + { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, + { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, + { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, + { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, + { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, + { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, + { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, + { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, + { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, + { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, + { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, + { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, + + /* 802.11 UNII */ + { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, + { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, + { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, + { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, + { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, + { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, +}; + + +static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 5; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. 
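The conversion loop above relies on the TXPOWER_FROM_DEV()/TXPOWER_TO_DEV() clamping macros (rt73usb.h presumably carries the same definitions as the rt61pci.h ones shown earlier). A few concrete cases, assuming MIN/MAX/DEFAULT txpower of 0/31/24:

static void txpower_examples(void)
{
        int from_ok  = TXPOWER_FROM_DEV(17);    /* 17: in range, passes through     */
        int from_bad = TXPOWER_FROM_DEV(40);    /* 24: implausible value -> default */
        int to_low   = TXPOWER_TO_DEV(-3);      /*  0: clamped to MIN_TXPOWER       */
        int to_high  = TXPOWER_TO_DEV(35);      /* 31: clamped to MAX_TXPOWER       */

        (void)from_ok; (void)from_bad; (void)to_low; (void)to_high;
}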
+ */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); + spec->channels = rf_vals_bg_2528; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5226); + spec->channels = rf_vals_5226; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) { + spec->num_channels = 14; + spec->channels = rf_vals_5225_2527; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); + spec->channels = rf_vals_5225_2527; + } + + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF5226)) { + spec->num_modes = 3; + + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + spec->tx_power_a = txpower; + } +} + +static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt73usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt73usb_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt73usb_probe_hw_mode(rt2x00dev); + + /* + * This device requires firmware + */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt73usb_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work); + return; + } + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. 
+ */ + rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt73usb_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg); + rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, long_retry); + rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, short_retry); + rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); + + return 0; +} + +#if 0 +/* + * Mac80211 demands get_tsf must be atomic. + * This is not possible for rt73usb since all register access + * functions require sleeping. Until mac80211 no longer needs + * get_tsf to be atomic, this function should be disabled. + */ +static u64 rt73usb_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR13, &reg); + tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; + rt73usb_register_read(rt2x00dev, TXRX_CSR12, &reg); + tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); + + return tsf; +} +#else +#define rt73usb_get_tsf NULL +#endif + +static void rt73usb_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt73usb_register_write(rt2x00dev, TXRX_CSR12, 0); + rt73usb_register_write(rt2x00dev, TXRX_CSR13, 0); +} + +static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + int timeout; + + /* + * Just in case the ieee80211 doesn't set this, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * First we create the beacon. + */ + skb_push(skb, TXD_DESC_SIZE); + memset(skb->data, 0, TXD_DESC_SIZE); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + TXD_DESC_SIZE), + skb->len - TXD_DESC_SIZE, control); + + /* + * Write entire beacon with descriptor to register, + * and kick the beacon generator.
+ */ + timeout = REGISTER_TIMEOUT * (skb->len / sizeof(u32)); + rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, + HW_BEACON_BASE0, 0x0000, + skb->data, skb->len, timeout); + rt73usb_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + return 0; +} + +static const struct ieee80211_ops rt73usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt73usb_configure_filter, + .get_stats = rt2x00mac_get_stats, + .set_retry_limit = rt73usb_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt73usb_get_tsf, + .reset_tsf = rt73usb_reset_tsf, + .beacon_update = rt73usb_beacon_update, +}; + +static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { + .probe_hw = rt73usb_probe_hw, + .get_firmware_name = rt73usb_get_firmware_name, + .load_firmware = rt73usb_load_firmware, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .set_device_state = rt73usb_set_device_state, + .link_stats = rt73usb_link_stats, + .reset_tuner = rt73usb_reset_tuner, + .link_tuner = rt73usb_link_tuner, + .write_tx_desc = rt73usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .get_tx_data_len = rt73usb_get_tx_data_len, + .kick_tx_queue = rt73usb_kick_tx_queue, + .fill_rxdone = rt73usb_fill_rxdone, + .config_mac_addr = rt73usb_config_mac_addr, + .config_bssid = rt73usb_config_bssid, + .config_type = rt73usb_config_type, + .config_preamble = rt73usb_config_preamble, + .config = rt73usb_config, +}; + +static const struct rt2x00_ops rt73usb_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt73usb_rt2x00_ops, + .hw = &rt73usb_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt73usb_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt73usb module information. 
+ */ +static struct usb_device_id rt73usb_device_table[] = { + /* AboCom */ + { USB_DEVICE(0x07b8, 0xb21d), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Askey */ + { USB_DEVICE(0x1690, 0x0722), USB_DEVICE_DATA(&rt73usb_ops) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1723), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0b05, 0x1724), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x905b), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Billionton */ + { USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Buffalo */ + { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, + /* CNet */ + { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c22), USB_DEVICE_DATA(&rt73usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Gemtek */ + { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x8008), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x1044, 0x800a), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Huawei-3Com */ + { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Linksys */ + { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Qcom */ + { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x18e8, 0x6238), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Senao */ + { USB_DEVICE(0x1740, 0x7100), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0df6, 0x90ac), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Surecom */ + { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt73usb_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2571); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt73usb_driver = { + .name = DRV_NAME, + .id_table = rt73usb_device_table, + .probe = rt2x00usb_probe, + .disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt73usb_init(void) +{ + return usb_register(&rt73usb_driver); +} + +static void __exit rt73usb_exit(void) +{ + usb_deregister(&rt73usb_driver); +} + +module_init(rt73usb_init); 
+module_exit(rt73usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h new file mode 100644 index 000000000000..f0951519f74b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -0,0 +1,1024 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + <http://rt2x00.serialmonkey.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt73usb + Abstract: Data structures and registers for the rt73usb module. + Supported chipsets: rt2571W & rt2671. + */ + +#ifndef RT73USB_H +#define RT73USB_H + +/* + * RF chip defines. + */ +#define RF5226 0x0001 +#define RF2528 0x0002 +#define RF5225 0x0003 +#define RF2527 0x0004 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x3000 +#define CSR_REG_SIZE 0x04b0 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_SIZE 0x0080 +#define RF_SIZE 0x0014 + +/* + * USB registers. + */ + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD16(0x001f) +#define MCU_LEDCS_RADIO_STATUS FIELD16(0x0020) +#define MCU_LEDCS_LINK_BG_STATUS FIELD16(0x0040) +#define MCU_LEDCS_LINK_A_STATUS FIELD16(0x0080) +#define MCU_LEDCS_POLARITY_GPIO_0 FIELD16(0x0100) +#define MCU_LEDCS_POLARITY_GPIO_1 FIELD16(0x0200) +#define MCU_LEDCS_POLARITY_GPIO_2 FIELD16(0x0400) +#define MCU_LEDCS_POLARITY_GPIO_3 FIELD16(0x0800) +#define MCU_LEDCS_POLARITY_GPIO_4 FIELD16(0x1000) +#define MCU_LEDCS_POLARITY_ACT FIELD16(0x2000) +#define MCU_LEDCS_POLARITY_READY_BG FIELD16(0x4000) +#define MCU_LEDCS_POLARITY_READY_A FIELD16(0x8000) + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2571 "rt73.bin" +#define FIRMWARE_IMAGE_BASE 0x0800 + +/* + * Security key table memory. + * 16 entries 32-byte for shared key table + * 64 entries 32-byte for pairwise key table + * 64 entries 8-byte for pairwise ta key table + */ +#define SHARED_KEY_TABLE_BASE 0x1000 +#define PAIRWISE_KEY_TABLE_BASE 0x1200 +#define PAIRWISE_TA_TABLE_BASE 0x1a00 + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct hw_pairwise_ta_entry { + u8 address[6]; + u8 reserved[2]; +} __attribute__ ((packed)); + +/* + * Since NULL frame won't be that long (256 byte), + * We steal 16 tail bytes to save debugging settings. + */ +#define HW_DEBUG_SETTING_BASE 0x2bf0 + +/* + * On-chip BEACON frame space. + */ +#define HW_BEACON_BASE0 0x2400 +#define HW_BEACON_BASE1 0x2500 +#define HW_BEACON_BASE2 0x2600 +#define HW_BEACON_BASE3 0x2700 + +/* + * MAC Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. 
+ */ +#define MAC_CSR0 0x3000 + +/* + * MAC_CSR1: System control register. + * SOFT_RESET: Software reset bit, 1: reset, 0: normal. + * BBP_RESET: Hardware reset BBP. + * HOST_READY: Host is ready after initialization, 1: ready. + */ +#define MAC_CSR1 0x3004 +#define MAC_CSR1_SOFT_RESET FIELD32(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD32(0x00000002) +#define MAC_CSR1_HOST_READY FIELD32(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x3008 +#define MAC_CSR2_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR2_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR2_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR2_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x300c +#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR3_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR3_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR4: BSSID register 0. + */ +#define MAC_CSR4 0x3010 +#define MAC_CSR4_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR4_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR4_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR4_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR5: BSSID register 1. + * BSS_ID_MASK: 3: one BSSID, 0: 4 BSSID, 2 or 1: 2 BSSID. + */ +#define MAC_CSR5 0x3014 +#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR5_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR5_BSS_ID_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR6: Maximum frame length register. + */ +#define MAC_CSR6 0x3018 +#define MAC_CSR6_MAX_FRAME_UNIT FIELD32(0x00000fff) + +/* + * MAC_CSR7: Reserved + */ +#define MAC_CSR7 0x301c + +/* + * MAC_CSR8: SIFS/EIFS register. + * All units are in US. + */ +#define MAC_CSR8 0x3020 +#define MAC_CSR8_SIFS FIELD32(0x000000ff) +#define MAC_CSR8_SIFS_AFTER_RX_OFDM FIELD32(0x0000ff00) +#define MAC_CSR8_EIFS FIELD32(0xffff0000) + +/* + * MAC_CSR9: Back-Off control register. + * SLOT_TIME: Slot time, default is 20us for 802.11BG. + * CWMIN: Bit for Cwmin. default Cwmin is 31 (2^5 - 1). + * CWMAX: Bit for Cwmax, default Cwmax is 1023 (2^10 - 1). + * CW_SELECT: 1: CWmin/Cwmax select from register, 0:select from TxD. + */ +#define MAC_CSR9 0x3024 +#define MAC_CSR9_SLOT_TIME FIELD32(0x000000ff) +#define MAC_CSR9_CWMIN FIELD32(0x00000f00) +#define MAC_CSR9_CWMAX FIELD32(0x0000f000) +#define MAC_CSR9_CW_SELECT FIELD32(0x00010000) + +/* + * MAC_CSR10: Power state configuration. + */ +#define MAC_CSR10 0x3028 + +/* + * MAC_CSR11: Power saving transition time register. + * DELAY_AFTER_TBCN: Delay after Tbcn expired in units of TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * WAKEUP_LATENCY: In unit of TU. + */ +#define MAC_CSR11 0x302c +#define MAC_CSR11_DELAY_AFTER_TBCN FIELD32(0x000000ff) +#define MAC_CSR11_TBCN_BEFORE_WAKEUP FIELD32(0x00007f00) +#define MAC_CSR11_AUTOWAKE FIELD32(0x00008000) +#define MAC_CSR11_WAKEUP_LATENCY FIELD32(0x000f0000) + +/* + * MAC_CSR12: Manual power control / status register (merge CSR20 & PWRCSR1). + * CURRENT_STATE: 0:sleep, 1:awake. + * FORCE_WAKEUP: This has higher priority than PUT_TO_SLEEP. + * BBP_CURRENT_STATE: 0: BBP sleep, 1: BBP awake. + */ +#define MAC_CSR12 0x3030 +#define MAC_CSR12_CURRENT_STATE FIELD32(0x00000001) +#define MAC_CSR12_PUT_TO_SLEEP FIELD32(0x00000002) +#define MAC_CSR12_FORCE_WAKEUP FIELD32(0x00000004) +#define MAC_CSR12_BBP_CURRENT_STATE FIELD32(0x00000008) + +/* + * MAC_CSR13: GPIO. + */ +#define MAC_CSR13 0x3034 + +/* + * MAC_CSR14: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. 
+ * HW_LED: HW TX activity, 1: normal OFF, 0: normal ON. + * SW_LED: s/w LED, 1: ON, 0: OFF. + * HW_LED_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR14 0x3038 +#define MAC_CSR14_ON_PERIOD FIELD32(0x000000ff) +#define MAC_CSR14_OFF_PERIOD FIELD32(0x0000ff00) +#define MAC_CSR14_HW_LED FIELD32(0x00010000) +#define MAC_CSR14_SW_LED FIELD32(0x00020000) +#define MAC_CSR14_HW_LED_POLARITY FIELD32(0x00040000) +#define MAC_CSR14_SW_LED2 FIELD32(0x00080000) + +/* + * MAC_CSR15: NAV control. + */ +#define MAC_CSR15 0x303c + +/* + * TXRX control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: TX/RX configuration register. + * TSF_OFFSET: Default is 24. + * AUTO_TX_SEQ: 1: ASIC auto replace sequence nr in outgoing frame. + * DISABLE_RX: Disable Rx engine. + * DROP_CRC: Drop CRC error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TO_DS: Drop fram ToDs bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MULTICAST: Drop multicast frames. + * DROP_BORADCAST: Drop broadcast frames. + * ROP_ACK_CTS: Drop received ACK and CTS. + */ +#define TXRX_CSR0 0x3040 +#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXRX_CSR0_TSF_OFFSET FIELD32(0x00007e00) +#define TXRX_CSR0_AUTO_TX_SEQ FIELD32(0x00008000) +#define TXRX_CSR0_DISABLE_RX FIELD32(0x00010000) +#define TXRX_CSR0_DROP_CRC FIELD32(0x00020000) +#define TXRX_CSR0_DROP_PHYSICAL FIELD32(0x00040000) +#define TXRX_CSR0_DROP_CONTROL FIELD32(0x00080000) +#define TXRX_CSR0_DROP_NOT_TO_ME FIELD32(0x00100000) +#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) +#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) +#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) +#define TXRX_CSR0_DROP_BROADCAST FIELD32(0x01000000) +#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) +#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) + +/* + * TXRX_CSR1 + */ +#define TXRX_CSR1 0x3044 +#define TXRX_CSR1_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR1_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR1_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR1_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR1_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR1_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR1_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR1_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR2 + */ +#define TXRX_CSR2 0x3048 +#define TXRX_CSR2_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR2_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR2_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR2_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR2_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR2_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR2_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR2_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR3 + */ +#define TXRX_CSR3 0x304c +#define TXRX_CSR3_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR3_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR3_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR3_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR4: Auto-Responder/Tx-retry register. + * AUTORESPOND_PREAMBLE: 0:long, 1:short preamble. + * OFDM_TX_RATE_DOWN: 1:enable. + * OFDM_TX_RATE_STEP: 0:1-step, 1: 2-step, 2:3-step, 3:4-step. 
+ * OFDM_TX_FALLBACK_CCK: 0: Fallback to OFDM 6M only, 1: Fallback to CCK 1M,2M. + */ +#define TXRX_CSR4 0x3050 +#define TXRX_CSR4_TX_ACK_TIMEOUT FIELD32(0x000000ff) +#define TXRX_CSR4_CNTL_ACK_POLICY FIELD32(0x00000700) +#define TXRX_CSR4_ACK_CTS_PSM FIELD32(0x00010000) +#define TXRX_CSR4_AUTORESPOND_ENABLE FIELD32(0x00020000) +#define TXRX_CSR4_AUTORESPOND_PREAMBLE FIELD32(0x00040000) +#define TXRX_CSR4_OFDM_TX_RATE_DOWN FIELD32(0x00080000) +#define TXRX_CSR4_OFDM_TX_RATE_STEP FIELD32(0x00300000) +#define TXRX_CSR4_OFDM_TX_FALLBACK_CCK FIELD32(0x00400000) +#define TXRX_CSR4_LONG_RETRY_LIMIT FIELD32(0x0f000000) +#define TXRX_CSR4_SHORT_RETRY_LIMIT FIELD32(0xf0000000) + +/* + * TXRX_CSR5 + */ +#define TXRX_CSR5 0x3054 + +/* + * TXRX_CSR6: ACK/CTS payload consumed time + */ +#define TXRX_CSR6 0x3058 + +/* + * TXRX_CSR7: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define TXRX_CSR7 0x305c +#define TXRX_CSR7_ACK_CTS_6MBS FIELD32(0x000000ff) +#define TXRX_CSR7_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define TXRX_CSR7_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define TXRX_CSR7_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * TXRX_CSR8: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. + */ +#define TXRX_CSR8 0x3060 +#define TXRX_CSR8_ACK_CTS_24MBS FIELD32(0x000000ff) +#define TXRX_CSR8_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define TXRX_CSR8_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define TXRX_CSR8_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * TXRX_CSR9: Synchronization control register. + * BEACON_INTERVAL: In unit of 1/16 TU. + * TSF_TICKING: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR9 0x3064 +#define TXRX_CSR9_BEACON_INTERVAL FIELD32(0x0000ffff) +#define TXRX_CSR9_TSF_TICKING FIELD32(0x00010000) +#define TXRX_CSR9_TSF_SYNC FIELD32(0x00060000) +#define TXRX_CSR9_TBTT_ENABLE FIELD32(0x00080000) +#define TXRX_CSR9_BEACON_GEN FIELD32(0x00100000) +#define TXRX_CSR9_TIMESTAMP_COMPENSATE FIELD32(0xff000000) + +/* + * TXRX_CSR10: BEACON alignment. + */ +#define TXRX_CSR10 0x3068 + +/* + * TXRX_CSR11: AES mask. + */ +#define TXRX_CSR11 0x306c + +/* + * TXRX_CSR12: TSF low 32. + */ +#define TXRX_CSR12 0x3070 +#define TXRX_CSR12_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR13: TSF high 32. + */ +#define TXRX_CSR13 0x3074 +#define TXRX_CSR13_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR14: TBTT timer. + */ +#define TXRX_CSR14 0x3078 + +/* + * TXRX_CSR15: TKIP MIC priority byte "AND" mask. + */ +#define TXRX_CSR15 0x307c + +/* + * PHY control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PHY_CSR0: RF/PS control. + */ +#define PHY_CSR0 0x3080 +#define PHY_CSR0_PA_PE_BG FIELD32(0x00010000) +#define PHY_CSR0_PA_PE_A FIELD32(0x00020000) + +/* + * PHY_CSR1 + */ +#define PHY_CSR1 0x3084 +#define PHY_CSR1_RF_RPI FIELD32(0x00010000) + +/* + * PHY_CSR2: Pre-TX BBP control. + */ +#define PHY_CSR2 0x3088 + +/* + * PHY_CSR3: BBP serial control register. + * VALUE: Register value to program into BBP. + * REG_NUM: Selected BBP register. + * READ_CONTROL: 0: Write BBP, 1: Read BBP. + * BUSY: 1: ASIC is busy execute BBP programming. + */ +#define PHY_CSR3 0x308c +#define PHY_CSR3_VALUE FIELD32(0x000000ff) +#define PHY_CSR3_REGNUM FIELD32(0x00007f00) +#define PHY_CSR3_READ_CONTROL FIELD32(0x00008000) +#define PHY_CSR3_BUSY FIELD32(0x00010000) + +/* + * PHY_CSR4: RF serial control register + * VALUE: Register value (include register id) serial out to RF/IF chip. 
+ * NUMBER_OF_BITS: Number of bits used in RFRegValue (I:20, RFMD:22). + * IF_SELECT: 1: select IF to program, 0: select RF to program. + * PLL_LD: RF PLL_LD status. + * BUSY: 1: ASIC is busy execute RF programming. + */ +#define PHY_CSR4 0x3090 +#define PHY_CSR4_VALUE FIELD32(0x00ffffff) +#define PHY_CSR4_NUMBER_OF_BITS FIELD32(0x1f000000) +#define PHY_CSR4_IF_SELECT FIELD32(0x20000000) +#define PHY_CSR4_PLL_LD FIELD32(0x40000000) +#define PHY_CSR4_BUSY FIELD32(0x80000000) + +/* + * PHY_CSR5: RX to TX signal switch timing control. + */ +#define PHY_CSR5 0x3094 +#define PHY_CSR5_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR6: TX to RX signal timing control. + */ +#define PHY_CSR6 0x3098 +#define PHY_CSR6_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR7: TX DAC switching timing control. + */ +#define PHY_CSR7 0x309c + +/* + * Security control register. + */ + +/* + * SEC_CSR0: Shared key table control. + */ +#define SEC_CSR0 0x30a0 +#define SEC_CSR0_BSS0_KEY0_VALID FIELD32(0x00000001) +#define SEC_CSR0_BSS0_KEY1_VALID FIELD32(0x00000002) +#define SEC_CSR0_BSS0_KEY2_VALID FIELD32(0x00000004) +#define SEC_CSR0_BSS0_KEY3_VALID FIELD32(0x00000008) +#define SEC_CSR0_BSS1_KEY0_VALID FIELD32(0x00000010) +#define SEC_CSR0_BSS1_KEY1_VALID FIELD32(0x00000020) +#define SEC_CSR0_BSS1_KEY2_VALID FIELD32(0x00000040) +#define SEC_CSR0_BSS1_KEY3_VALID FIELD32(0x00000080) +#define SEC_CSR0_BSS2_KEY0_VALID FIELD32(0x00000100) +#define SEC_CSR0_BSS2_KEY1_VALID FIELD32(0x00000200) +#define SEC_CSR0_BSS2_KEY2_VALID FIELD32(0x00000400) +#define SEC_CSR0_BSS2_KEY3_VALID FIELD32(0x00000800) +#define SEC_CSR0_BSS3_KEY0_VALID FIELD32(0x00001000) +#define SEC_CSR0_BSS3_KEY1_VALID FIELD32(0x00002000) +#define SEC_CSR0_BSS3_KEY2_VALID FIELD32(0x00004000) +#define SEC_CSR0_BSS3_KEY3_VALID FIELD32(0x00008000) + +/* + * SEC_CSR1: Shared key table security mode register. + */ +#define SEC_CSR1 0x30a4 +#define SEC_CSR1_BSS0_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR1_BSS0_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR1_BSS0_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR1_BSS0_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR1_BSS1_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR1_BSS1_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR1_BSS1_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR1_BSS1_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * Pairwise key table valid bitmap registers. + * SEC_CSR2: pairwise key table valid bitmap 0. + * SEC_CSR3: pairwise key table valid bitmap 1. + */ +#define SEC_CSR2 0x30a8 +#define SEC_CSR3 0x30ac + +/* + * SEC_CSR4: Pairwise key table lookup control. + */ +#define SEC_CSR4 0x30b0 + +/* + * SEC_CSR5: shared key table security mode register. + */ +#define SEC_CSR5 0x30b4 +#define SEC_CSR5_BSS2_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR5_BSS2_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR5_BSS2_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR5_BSS2_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR5_BSS3_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR5_BSS3_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR5_BSS3_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR5_BSS3_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * STA control registers. + */ + +/* + * STA_CSR0: RX PLCP error count & RX FCS error count. + */ +#define STA_CSR0 0x30c0 +#define STA_CSR0_FCS_ERROR FIELD32(0x0000ffff) +#define STA_CSR0_PLCP_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR1: RX False CCA count & RX LONG frame count. 
+ */ +#define STA_CSR1 0x30c4 +#define STA_CSR1_PHYSICAL_ERROR FIELD32(0x0000ffff) +#define STA_CSR1_FALSE_CCA_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR2: TX Beacon count and RX FIFO overflow count. + */ +#define STA_CSR2 0x30c8 +#define STA_CSR2_RX_FIFO_OVERFLOW_COUNT FIELD32(0x0000ffff) +#define STA_CSR2_RX_OVERFLOW_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR3: TX Beacon count. + */ +#define STA_CSR3 0x30cc +#define STA_CSR3_TX_BEACON_COUNT FIELD32(0x0000ffff) + +/* + * STA_CSR4: TX Retry count. + */ +#define STA_CSR4 0x30d0 +#define STA_CSR4_TX_NO_RETRY_COUNT FIELD32(0x0000ffff) +#define STA_CSR4_TX_ONE_RETRY_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR5: TX Retry count. + */ +#define STA_CSR5 0x30d4 +#define STA_CSR4_TX_MULTI_RETRY_COUNT FIELD32(0x0000ffff) +#define STA_CSR4_TX_RETRY_FAIL_COUNT FIELD32(0xffff0000) + +/* + * QOS control registers. + */ + +/* + * QOS_CSR1: TXOP holder MAC address register. + */ +#define QOS_CSR1 0x30e4 +#define QOS_CSR1_BYTE4 FIELD32(0x000000ff) +#define QOS_CSR1_BYTE5 FIELD32(0x0000ff00) + +/* + * QOS_CSR2: TXOP holder timeout register. + */ +#define QOS_CSR2 0x30e8 + +/* + * RX QOS-CFPOLL MAC address register. + * QOS_CSR3: RX QOS-CFPOLL MAC address 0. + * QOS_CSR4: RX QOS-CFPOLL MAC address 1. + */ +#define QOS_CSR3 0x30ec +#define QOS_CSR4 0x30f0 + +/* + * QOS_CSR5: "QosControl" field of the RX QOS-CFPOLL. + */ +#define QOS_CSR5 0x30f4 + +/* + * WMM Scheduler Register + */ + +/* + * AIFSN_CSR: AIFSN for each EDCA AC. + * AIFSN0: For AC_BK. + * AIFSN1: For AC_BE. + * AIFSN2: For AC_VI. + * AIFSN3: For AC_VO. + */ +#define AIFSN_CSR 0x0400 +#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) +#define AIFSN_CSR_AIFSN1 FIELD32(0x000000f0) +#define AIFSN_CSR_AIFSN2 FIELD32(0x00000f00) +#define AIFSN_CSR_AIFSN3 FIELD32(0x0000f000) + +/* + * CWMIN_CSR: CWmin for each EDCA AC. + * CWMIN0: For AC_BK. + * CWMIN1: For AC_BE. + * CWMIN2: For AC_VI. + * CWMIN3: For AC_VO. + */ +#define CWMIN_CSR 0x0404 +#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) +#define CWMIN_CSR_CWMIN1 FIELD32(0x000000f0) +#define CWMIN_CSR_CWMIN2 FIELD32(0x00000f00) +#define CWMIN_CSR_CWMIN3 FIELD32(0x0000f000) + +/* + * CWMAX_CSR: CWmax for each EDCA AC. + * CWMAX0: For AC_BK. + * CWMAX1: For AC_BE. + * CWMAX2: For AC_VI. + * CWMAX3: For AC_VO. + */ +#define CWMAX_CSR 0x0408 +#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) +#define CWMAX_CSR_CWMAX1 FIELD32(0x000000f0) +#define CWMAX_CSR_CWMAX2 FIELD32(0x00000f00) +#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) + +/* + * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. + * AC0_TX_OP: For AC_BK, in unit of 32us. + * AC1_TX_OP: For AC_BE, in unit of 32us. + */ +#define AC_TXOP_CSR0 0x040c +#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) + +/* + * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. + * AC2_TX_OP: For AC_VI, in unit of 32us. + * AC3_TX_OP: For AC_VO, in unit of 32us. + */ +#define AC_TXOP_CSR1 0x0410 +#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR1_AC3_TX_OP FIELD32(0xffff0000) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. 
+ */ + +/* + * R2 + */ +#define BBP_R2_BG_MODE FIELD8(0x20) + +/* + * R3 + */ +#define BBP_R3_SMART_MODE FIELD8(0x01) + +/* + * R4: RX antenna control + * FRAME_END: 1 - DPDT, 0 - SPDT (Only valid for 802.11G, RF2527 & RF2529) + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x03) +#define BBP_R4_RX_FRAME_END FIELD8(0x20) + +/* + * R77 + */ +#define BBP_R77_PAIR FIELD8(0x03) + +/* + * RF registers + */ + +/* + * RF 3 + */ +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * RF 4 + */ +#define RF4_FREQ_OFFSET FIELD32(0x0003f000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * FRAME_TYPE: 0: DPDT , 1: SPDT , noted this bit is valid for g only. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x0010 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_FRAME_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * EXTERNAL_LNA: External LNA. + */ +#define EEPROM_NIC 0x0011 +#define EEPROM_NIC_EXTERNAL_LNA FIELD16(0x0010) + +/* + * EEPROM geography. + * GEO_A: Default geographical setting for 5GHz band + * GEO: Default geographical setting. + */ +#define EEPROM_GEOGRAPHY 0x0012 +#define EEPROM_GEOGRAPHY_GEO_A FIELD16(0x00ff) +#define EEPROM_GEOGRAPHY_GEO FIELD16(0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0013 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11G + */ +#define EEPROM_TXPOWER_G_START 0x0023 +#define EEPROM_TXPOWER_G_SIZE 7 +#define EEPROM_TXPOWER_G_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_G_2 FIELD16(0xff00) + +/* + * EEPROM Frequency + */ +#define EEPROM_FREQ 0x002f +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_SEQ_MASK FIELD16(0xff00) +#define EEPROM_FREQ_SEQ FIELD16(0x0300) + +/* + * EEPROM LED. + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode. 
+ */ +#define EEPROM_LED 0x0030 +#define EEPROM_LED_POLARITY_RDY_G FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A_START 0x0031 +#define EEPROM_TXPOWER_A_SIZE 12 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11BG + */ +#define EEPROM_RSSI_OFFSET_BG 0x004d +#define EEPROM_RSSI_OFFSET_BG_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_BG_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11A + */ +#define EEPROM_RSSI_OFFSET_A 0x004e +#define EEPROM_RSSI_OFFSET_A_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_A_2 FIELD16(0xff00) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 6 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 6 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + * BURST: Next frame belongs to same "burst" event. + * TKIP_MIC: ASIC appends TKIP MIC if TKIP is used. + * KEY_TABLE: Use per-client pairwise KEY table. + * KEY_INDEX: + * Key index (0~31) to the pairwise KEY table. + * 0~3 to shared KEY table 0 (BSS0). + * 4~7 to shared KEY table 1 (BSS1). + * 8~11 to shared KEY table 2 (BSS2). + * 12~15 to shared KEY table 3 (BSS3). + * BURST2: For backward compatibility, set to same value as BURST. + */ +#define TXD_W0_BURST FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_MORE_FRAG FIELD32(0x00000004) +#define TXD_W0_ACK FIELD32(0x00000008) +#define TXD_W0_TIMESTAMP FIELD32(0x00000010) +#define TXD_W0_OFDM FIELD32(0x00000020) +#define TXD_W0_IFS FIELD32(0x00000040) +#define TXD_W0_RETRY_MODE FIELD32(0x00000080) +#define TXD_W0_TKIP_MIC FIELD32(0x00000100) +#define TXD_W0_KEY_TABLE FIELD32(0x00000200) +#define TXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_BURST2 FIELD32(0x10000000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * HOST_Q_ID: EDCA/HCCA queue ID. + * HW_SEQUENCE: MAC overwrites the frame sequence number. + * BUFFER_COUNT: Number of buffers in this TXD. + */ +#define TXD_W1_HOST_Q_ID FIELD32(0x0000000f) +#define TXD_W1_AIFSN FIELD32(0x000000f0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) +#define TXD_W1_IV_OFFSET FIELD32(0x003f0000) +#define TXD_W1_HW_SEQUENCE FIELD32(0x10000000) +#define TXD_W1_BUFFER_COUNT FIELD32(0xe0000000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * Word5 + * FRAME_OFFSET: Frame start offset inside ASIC TXFIFO (after TXINFO field). + * PACKET_ID: Driver assigned packet ID to categorize TXResult in interrupt. + * WAITING_DMA_DONE_INT: TXD been filled with data + * and waiting for TxDoneISR housekeeping. 
+ */ +#define TXD_W5_FRAME_OFFSET FIELD32(0x000000ff) +#define TXD_W5_PACKET_ID FIELD32(0x0000ff00) +#define TXD_W5_TX_POWER FIELD32(0x00ff0000) +#define TXD_W5_WAITING_DMA_DONE_INT FIELD32(0x01000000) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * CIPHER_ERROR: 1:ICV error, 2:MIC error, 3:invalid key. + * KEY_INDEX: Decryption key actually used. + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_DROP FIELD32(0x00000002) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000004) +#define RXD_W0_MULTICAST FIELD32(0x00000008) +#define RXD_W0_BROADCAST FIELD32(0x00000010) +#define RXD_W0_MY_BSS FIELD32(0x00000020) +#define RXD_W0_CRC_ERROR FIELD32(0x00000040) +#define RXD_W0_OFDM FIELD32(0x00000080) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000300) +#define RXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * WORD1 + * SIGNAL: RX raw data rate reported by BBP. + * RSSI: RSSI reported by BBP. + */ +#define RXD_W1_SIGNAL FIELD32(0x000000ff) +#define RXD_W1_RSSI_AGC FIELD32(0x00001f00) +#define RXD_W1_RSSI_LNA FIELD32(0x00006000) +#define RXD_W1_FRAME_OFFSET FIELD32(0x7f000000) + +/* + * Word2 + * IV: Received IV of originally encrypted. + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + * EIV: Received EIV of originally encrypted. + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_RESERVED FIELD32(0xffffffff) + +/* + * the above 20-byte is called RXINFO and will be DMAed to MAC RX block + * and passed to the HOST driver. + * The following fields are for DMA block and HOST usage only. + * Can't be touched by ASIC MAC block. + */ + +/* + * Word5 + */ +#define RXD_W5_RESERVED FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? 
MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT73USB_H */ diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h index 6124e467b156..6ad322ef0da1 100644 --- a/drivers/net/wireless/rtl8187.h +++ b/drivers/net/wireless/rtl8187.h @@ -36,8 +36,7 @@ struct rtl8187_rx_info { }; struct rtl8187_rx_hdr { - __le16 len; - __le16 rate; + __le32 flags; u8 noise; u8 signal; u8 agc; @@ -67,13 +66,14 @@ struct rtl8187_priv { struct rtl818x_csr *map; void (*rf_init)(struct ieee80211_hw *); int mode; + int if_id; /* rtl8187 specific */ struct ieee80211_channel channels[14]; struct ieee80211_rate rates[12]; struct ieee80211_hw_mode modes[2]; struct usb_device *udev; - u8 *hwaddr; + u32 rx_conf; u16 txpwr_base; u8 asic_rev; struct sk_buff_head rx_queue; diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index cea85894b7f2..0ef887dd2867 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c @@ -36,11 +36,64 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { /* Netgear */ {USB_DEVICE(0x0846, 0x6100)}, {USB_DEVICE(0x0846, 0x6a00)}, + /* HP */ + {USB_DEVICE(0x03f0, 0xca02)}, {} }; MODULE_DEVICE_TABLE(usb, rtl8187_table); +static void rtl8187_iowrite_async_cb(struct urb *urb) +{ + kfree(urb->context); + usb_free_urb(urb); +} + +static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, + void *data, u16 len) +{ + struct usb_ctrlrequest *dr; + struct urb *urb; + struct rtl8187_async_write_data { + u8 data[4]; + struct usb_ctrlrequest dr; + } *buf; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + return; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + kfree(buf); + return; + } + + dr = &buf->dr; + + dr->bRequestType = RTL8187_REQT_WRITE; + dr->bRequest = RTL8187_REQ_SET_REG; + dr->wValue = addr; + dr->wIndex = 0; + dr->wLength = cpu_to_le16(len); + + memcpy(buf, data, len); + + usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), + (unsigned char *)dr, buf, len, + rtl8187_iowrite_async_cb, buf); + usb_submit_urb(urb, GFP_ATOMIC); +} + +static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, + __le32 *addr, u32 val) +{ + __le32 buf = cpu_to_le32(val); + + rtl8187_iowrite_async(priv, cpu_to_le16((unsigned long)addr), + &buf, sizeof(buf)); +} + void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) { struct rtl8187_priv *priv = dev->priv; @@ -96,7 +149,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) { tmp |= RTL8187_TX_FLAG_RTS; hdr->rts_duration = - ieee80211_rts_duration(dev, skb->len, control); + ieee80211_rts_duration(dev, priv->if_id, skb->len, control); } if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) tmp |= RTL8187_TX_FLAG_CTS; @@ -125,6 +178,7 @@ static void rtl8187_rx_cb(struct urb *urb) struct rtl8187_rx_hdr *hdr; struct ieee80211_rx_status rx_status = { 0 }; int rate, signal; + u32 flags; spin_lock(&priv->rx_queue.lock); if (skb->next) @@ -143,10 +197,11 @@ static void rtl8187_rx_cb(struct urb *urb) skb_put(skb, urb->actual_length); hdr = (struct rtl8187_rx_hdr *)(skb_tail_pointer(skb) - sizeof(*hdr)); - skb_trim(skb, le16_to_cpu(hdr->len) & 0x0FFF); + flags = le32_to_cpu(hdr->flags); + skb_trim(skb, flags & 0x0FFF); signal = hdr->agc >> 1; - rate = (le16_to_cpu(hdr->rate) >> 4) & 0xF; + rate = (flags >> 20) & 0xF; if (rate > 3) { /* OFDM rate */ if (signal > 90) signal = 90; @@ -169,6 +224,8 @@ static void rtl8187_rx_cb(struct urb *urb) 
rx_status.channel = dev->conf.channel; rx_status.phymode = dev->conf.phymode; rx_status.mactime = le64_to_cpu(hdr->mac_time); + if (flags & (1 << 13)) + rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; ieee80211_rx_irqsafe(dev, skb, &rx_status); skb = dev_alloc_skb(RTL8187_MAX_RX); @@ -293,8 +350,6 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev) rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); - for (i = 0; i < ETH_ALEN; i++) - rtl818x_iowrite8(priv, &priv->map->MAC[i], priv->hwaddr[i]); rtl818x_iowrite16(priv, (__le16 *)0xFFF4, 0xFFFF); reg = rtl818x_ioread8(priv, &priv->map->CONFIG1); @@ -365,7 +420,7 @@ static void rtl8187_set_channel(struct ieee80211_hw *dev, int channel) rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); } -static int rtl8187_open(struct ieee80211_hw *dev) +static int rtl8187_start(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; u32 reg; @@ -383,16 +438,13 @@ static int rtl8187_open(struct ieee80211_hw *dev) RTL818X_RX_CONF_RX_AUTORESETPHY | RTL818X_RX_CONF_BSSID | RTL818X_RX_CONF_MGMT | - RTL818X_RX_CONF_CTRL | RTL818X_RX_CONF_DATA | (7 << 13 /* RX FIFO threshold NONE */) | (7 << 10 /* MAX RX DMA */) | RTL818X_RX_CONF_BROADCAST | - RTL818X_RX_CONF_MULTICAST | RTL818X_RX_CONF_NICMAC; - if (priv->mode == IEEE80211_IF_TYPE_MNTR) - reg |= RTL818X_RX_CONF_MONITOR; + priv->rx_conf = reg; rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); @@ -419,7 +471,7 @@ static int rtl8187_open(struct ieee80211_hw *dev) return 0; } -static int rtl8187_stop(struct ieee80211_hw *dev) +static void rtl8187_stop(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; struct rtl8187_rx_info *info; @@ -445,28 +497,31 @@ static int rtl8187_stop(struct ieee80211_hw *dev) usb_kill_urb(info->urb); kfree_skb(skb); } - return 0; + return; } static int rtl8187_add_interface(struct ieee80211_hw *dev, struct ieee80211_if_init_conf *conf) { struct rtl8187_priv *priv = dev->priv; + int i; - /* NOTE: using IEEE80211_IF_TYPE_MGMT to indicate no mode selected */ - if (priv->mode != IEEE80211_IF_TYPE_MGMT) - return -1; + if (priv->mode != IEEE80211_IF_TYPE_MNTR) + return -EOPNOTSUPP; switch (conf->type) { case IEEE80211_IF_TYPE_STA: - case IEEE80211_IF_TYPE_MNTR: priv->mode = conf->type; break; default: return -EOPNOTSUPP; } - priv->hwaddr = conf->mac_addr; + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + for (i = 0; i < ETH_ALEN; i++) + rtl818x_iowrite8(priv, &priv->map->MAC[i], + ((u8 *)conf->mac_addr)[i]); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); return 0; } @@ -475,7 +530,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev, struct ieee80211_if_init_conf *conf) { struct rtl8187_priv *priv = dev->priv; - priv->mode = IEEE80211_IF_TYPE_MGMT; + priv->mode = IEEE80211_IF_TYPE_MNTR; } static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) @@ -510,6 +565,8 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id, struct rtl8187_priv *priv = dev->priv; int i; + priv->if_id = if_id; + for (i = 0; i < ETH_ALEN; i++) rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]); @@ -521,14 +578,52 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id, return 0; } +static void rtl8187_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct 
dev_addr_list *mc_list) +{ + struct rtl8187_priv *priv = dev->priv; + + *total_flags = 0; + + if (changed_flags & FIF_PROMISC_IN_BSS) + priv->rx_conf ^= RTL818X_RX_CONF_NICMAC; + if (changed_flags & FIF_ALLMULTI) + priv->rx_conf ^= RTL818X_RX_CONF_MULTICAST; + if (changed_flags & FIF_FCSFAIL) + priv->rx_conf ^= RTL818X_RX_CONF_FCS; + if (changed_flags & FIF_CONTROL) + priv->rx_conf ^= RTL818X_RX_CONF_CTRL; + if (changed_flags & FIF_OTHER_BSS) + priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; + + if (mc_count > 0) + priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; + + if (priv->rx_conf & RTL818X_RX_CONF_NICMAC) + *total_flags |= FIF_PROMISC_IN_BSS; + if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) + *total_flags |= FIF_ALLMULTI; + if (priv->rx_conf & RTL818X_RX_CONF_FCS) + *total_flags |= FIF_FCSFAIL; + if (priv->rx_conf & RTL818X_RX_CONF_CTRL) + *total_flags |= FIF_CONTROL; + if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) + *total_flags |= FIF_OTHER_BSS; + + rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); +} + static const struct ieee80211_ops rtl8187_ops = { .tx = rtl8187_tx, - .open = rtl8187_open, + .start = rtl8187_start, .stop = rtl8187_stop, .add_interface = rtl8187_add_interface, .remove_interface = rtl8187_remove_interface, .config = rtl8187_config, .config_interface = rtl8187_config_interface, + .configure_filter = rtl8187_configure_filter, }; static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom) @@ -572,6 +667,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, struct ieee80211_channel *channel; u16 txpwr, reg; int err, i; + DECLARE_MAC_BUF(mac); dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); if (!dev) { @@ -601,11 +697,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, priv->modes[1].rates = priv->rates; priv->modes[1].num_channels = ARRAY_SIZE(rtl818x_channels); priv->modes[1].channels = priv->channels; - priv->mode = IEEE80211_IF_TYPE_MGMT; + priv->mode = IEEE80211_IF_TYPE_MNTR; dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | - IEEE80211_HW_RX_INCLUDES_FCS | - IEEE80211_HW_WEP_INCLUDE_IV | - IEEE80211_HW_DATA_NULLFUNC_ACK; + IEEE80211_HW_RX_INCLUDES_FCS; dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr); dev->queues = 1; dev->max_rssi = 65; @@ -681,8 +775,8 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, goto err_free_dev; } - printk(KERN_INFO "%s: hwaddr " MAC_FMT ", rtl8187 V%d + %s\n", - wiphy_name(dev->wiphy), MAC_ARG(dev->wiphy->perm_addr), + printk(KERN_INFO "%s: hwaddr %s, rtl8187 V%d + %s\n", + wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), priv->asic_rev, priv->rf_init == rtl8225_rf_init ? 
"rtl8225" : "rtl8225z2"); diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h index 283de30628e1..880d4becae31 100644 --- a/drivers/net/wireless/rtl818x.h +++ b/drivers/net/wireless/rtl818x.h @@ -71,6 +71,7 @@ struct rtl818x_csr { #define RTL818X_RX_CONF_NICMAC (1 << 1) #define RTL818X_RX_CONF_MULTICAST (1 << 2) #define RTL818X_RX_CONF_BROADCAST (1 << 3) +#define RTL818X_RX_CONF_FCS (1 << 5) #define RTL818X_RX_CONF_DATA (1 << 18) #define RTL818X_RX_CONF_CTRL (1 << 19) #define RTL818X_RX_CONF_MGMT (1 << 20) diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index af70460f008a..98df9bc7836a 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c @@ -782,7 +782,6 @@ spectrum_cs_config(struct pcmcia_device *link) /* Ok, we have the configuration, prepare to register the netdev */ dev->base_addr = link->io.BasePort1; dev->irq = link->irq.AssignedIRQ; - SET_MODULE_OWNER(dev); card->node.major = card->node.minor = 0; /* Reset card and download firmware */ diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index ef32a5c1e818..4bd14b331862 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -107,6 +107,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE"; #include <linux/serialP.h> #include <linux/rcupdate.h> #include <net/arp.h> +#include <net/net_namespace.h> #include <linux/ip.h> #include <linux/tcp.h> @@ -1630,8 +1631,8 @@ static void strip_IdleTask(unsigned long parameter) */ static int strip_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct strip *strip_info = netdev_priv(dev); STRIP_Header *header = (STRIP_Header *) skb_push(skb, sizeof(STRIP_Header)); @@ -1971,7 +1972,7 @@ static struct net_device *get_strip_dev(struct strip *strip_info) sizeof(zero_address))) { struct net_device *dev; read_lock_bh(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (dev->type == strip_info->dev->type && !memcmp(dev->dev_addr, &strip_info->true_dev_addr, @@ -2496,6 +2497,11 @@ static int strip_close_low(struct net_device *dev) return 0; } +static const struct header_ops strip_header_ops = { + .create = strip_header, + .rebuild = strip_rebuild_header, +}; + /* * This routine is called by DDI when the * (dynamically assigned) device is registered @@ -2507,8 +2513,6 @@ static void strip_dev_setup(struct net_device *dev) * Finish setting up the DEVICE info. 
*/ - SET_MODULE_OWNER(dev); - dev->trans_start = 0; dev->last_rx = 0; dev->tx_queue_len = 30; /* Drop after 30 frames queued */ @@ -2532,8 +2536,8 @@ static void strip_dev_setup(struct net_device *dev) dev->open = strip_open_low; dev->stop = strip_close_low; dev->hard_start_xmit = strip_xmit; - dev->hard_header = strip_header; - dev->rebuild_header = strip_rebuild_header; + dev->header_ops = &strip_header_ops; + dev->set_mac_address = strip_set_mac_address; dev->get_stats = strip_get_stats; dev->change_mtu = strip_change_mtu; @@ -2571,7 +2575,7 @@ static struct strip *strip_alloc(void) return NULL; /* If no more memory, return */ - strip_info = dev->priv; + strip_info = netdev_priv(dev); strip_info->dev = dev; strip_info->magic = STRIP_MAGIC; @@ -2787,7 +2791,7 @@ static int __init strip_init_driver(void) /* * Register the status file with /proc */ - proc_net_fops_create("strip", S_IFREG | S_IRUGO, &strip_seq_fops); + proc_net_fops_create(&init_net, "strip", S_IFREG | S_IRUGO, &strip_seq_fops); return status; } @@ -2809,7 +2813,7 @@ static void __exit strip_exit_driver(void) } /* Unregister with the /proc/net file here. */ - proc_net_remove("strip"); + proc_net_remove(&init_net, "strip"); if ((i = tty_unregister_ldisc(N_STRIP))) printk(KERN_ERR "STRIP: can't unregister line discipline (err = %d)\n", i); diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 1cf090d60edc..a1f8a1687842 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -880,6 +880,8 @@ static void wv_82586_reconfig(struct net_device * dev) */ static void wv_psa_show(psa_t * p) { + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "##### WaveLAN PSA contents: #####\n"); printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n", p->psa_io_base_addr_1, @@ -891,22 +893,13 @@ static void wv_psa_show(psa_t * p) printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params); printk("psa_int_req_no: %d\n", p->psa_int_req_no); #ifdef DEBUG_SHOW_UNUSED - printk(KERN_DEBUG - "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - p->psa_unused0[0], p->psa_unused0[1], p->psa_unused0[2], - p->psa_unused0[3], p->psa_unused0[4], p->psa_unused0[5], - p->psa_unused0[6]); + printk(KERN_DEBUG "psa_unused0[]: %s\n", + print_mac(mac, p->psa_unused0)); #endif /* DEBUG_SHOW_UNUSED */ - printk(KERN_DEBUG - "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_univ_mac_addr[0], p->psa_univ_mac_addr[1], - p->psa_univ_mac_addr[2], p->psa_univ_mac_addr[3], - p->psa_univ_mac_addr[4], p->psa_univ_mac_addr[5]); - printk(KERN_DEBUG - "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_local_mac_addr[0], p->psa_local_mac_addr[1], - p->psa_local_mac_addr[2], p->psa_local_mac_addr[3], - p->psa_local_mac_addr[4], p->psa_local_mac_addr[5]); + printk(KERN_DEBUG "psa_univ_mac_addr[]: %s\n", + print_mac(mac, p->psa_univ_mac_addr)); + printk(KERN_DEBUG "psa_local_mac_addr[]: %s\n", + print_mac(mac, p->psa_local_mac_addr)); printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel); printk("psa_comp_number: %d, ", p->psa_comp_number); @@ -1248,14 +1241,14 @@ static inline void wv_packet_info(u8 * p, /* Packet to dump */ { /* Name of the function */ int i; int maxi; + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG - "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n", - msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length); + "%s: %s(): dest %s, length %d\n", + msg1, msg2, print_mac(mac, p), length); printk(KERN_DEBUG - "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, 
type 0x%02X%02X\n", - msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12], - p[13]); + "%s: %s(): src %s, type 0x%02X%02X\n", + msg1, msg2, print_mac(mac, &p[6]), p[12], p[13]); #ifdef DEBUG_PACKET_DUMP @@ -1286,7 +1279,9 @@ static void wv_init_info(struct net_device * dev) short ioaddr = dev->base_addr; net_local *lp = (net_local *) dev->priv; psa_t psa; - int i; +#ifdef DEBUG_BASIC_SHOW + DECLARE_MAC_BUF(mac); +#endif /* Read the parameter storage area */ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa)); @@ -1303,10 +1298,8 @@ static void wv_init_info(struct net_device * dev) #ifdef DEBUG_BASIC_SHOW /* Now, let's go for the basic stuff. */ - printk(KERN_NOTICE "%s: WaveLAN at %#x,", dev->name, ioaddr); - for (i = 0; i < WAVELAN_ADDR_SIZE; i++) - printk("%s%02X", (i == 0) ? " " : ":", dev->dev_addr[i]); - printk(", IRQ %d", dev->irq); + printk(KERN_NOTICE "%s: WaveLAN at %#x, %s, IRQ %d", + dev->name, ioaddr, print_mac(mac, dev->dev_addr), dev->irq); /* Print current network ID. */ if (psa.psa_nwid_select) @@ -2400,9 +2393,9 @@ static const struct iw_priv_args wavelan_private_args[] = { static const struct iw_handler_def wavelan_handler_def = { - .num_standard = sizeof(wavelan_handler)/sizeof(iw_handler), - .num_private = sizeof(wavelan_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(wavelan_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(wavelan_handler), + .num_private = ARRAY_SIZE(wavelan_private_handler), + .num_private_args = ARRAY_SIZE(wavelan_private_args), .standard = wavelan_handler, .private = wavelan_private_handler, .private_args = wavelan_private_args, @@ -3596,15 +3589,15 @@ static void wv_82586_config(struct net_device * dev) WAVELAN_ADDR_SIZE >> 1); #ifdef DEBUG_CONFIG_INFO + { + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "%s: wv_82586_config(): set %d multicast addresses:\n", dev->name, lp->mc_count); for (dmi = dev->mc_list; dmi; dmi = dmi->next) - printk(KERN_DEBUG - " %02x:%02x:%02x:%02x:%02x:%02x\n", - dmi->dmi_addr[0], dmi->dmi_addr[1], - dmi->dmi_addr[2], dmi->dmi_addr[3], - dmi->dmi_addr[4], dmi->dmi_addr[5]); + printk(KERN_DEBUG " %s\n", + print_mac(mac, dmi->dmi_addr)); + } #endif } @@ -4177,7 +4170,6 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr) /* Init spinlock */ spin_lock_init(&lp->spinlock); - SET_MODULE_OWNER(dev); dev->open = wavelan_open; dev->stop = wavelan_close; dev->hard_start_xmit = wavelan_packet_xmit; diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 5740d4d4267c..577c647824fe 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -1042,6 +1042,7 @@ wv_82593_reconfig(struct net_device * dev) static void wv_psa_show(psa_t * p) { + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "##### wavelan psa contents: #####\n"); printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n", p->psa_io_base_addr_1, @@ -1055,29 +1056,13 @@ wv_psa_show(psa_t * p) printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params); printk("psa_int_req_no: %d\n", p->psa_int_req_no); #ifdef DEBUG_SHOW_UNUSED - printk(KERN_DEBUG "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - p->psa_unused0[0], - p->psa_unused0[1], - p->psa_unused0[2], - p->psa_unused0[3], - p->psa_unused0[4], - p->psa_unused0[5], - p->psa_unused0[6]); + printk(KERN_DEBUG "psa_unused0[]: %s\n", + print_mac(mac, p->psa_unused0)); #endif /* DEBUG_SHOW_UNUSED */ - printk(KERN_DEBUG "psa_univ_mac_addr[]: 
%02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_univ_mac_addr[0], - p->psa_univ_mac_addr[1], - p->psa_univ_mac_addr[2], - p->psa_univ_mac_addr[3], - p->psa_univ_mac_addr[4], - p->psa_univ_mac_addr[5]); - printk(KERN_DEBUG "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_local_mac_addr[0], - p->psa_local_mac_addr[1], - p->psa_local_mac_addr[2], - p->psa_local_mac_addr[3], - p->psa_local_mac_addr[4], - p->psa_local_mac_addr[5]); + printk(KERN_DEBUG "psa_univ_mac_addr[]: %s\n", + print_mac(mac, p->psa_univ_mac_addr)); + printk(KERN_DEBUG "psa_local_mac_addr[]: %s\n", + print_mac(mac, p->psa_local_mac_addr)); printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel); printk("psa_comp_number: %d, ", p->psa_comp_number); printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set); @@ -1277,11 +1262,12 @@ wv_packet_info(u_char * p, /* Packet to dump */ { int i; int maxi; + DECLARE_MAC_BUF(mac); - printk(KERN_DEBUG "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n", - msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length); - printk(KERN_DEBUG "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, type 0x%02X%02X\n", - msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]); + printk(KERN_DEBUG "%s: %s(): dest %s, length %d\n", + msg1, msg2, print_mac(mac, p), length); + printk(KERN_DEBUG "%s: %s(): src %s, type 0x%02X%02X\n", + msg1, msg2, print_mac(mac, &p[6]), p[12], p[13]); #ifdef DEBUG_PACKET_DUMP @@ -1312,7 +1298,7 @@ wv_init_info(struct net_device * dev) { kio_addr_t base = dev->base_addr; psa_t psa; - int i; + DECLARE_MAC_BUF(mac); /* Read the parameter storage area */ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa)); @@ -1329,10 +1315,10 @@ wv_init_info(struct net_device * dev) #ifdef DEBUG_BASIC_SHOW /* Now, let's go for the basic stuff */ - printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, hw_addr", - dev->name, base, dev->irq); - for(i = 0; i < WAVELAN_ADDR_SIZE; i++) - printk("%s%02X", (i == 0) ? 
" " : ":", dev->dev_addr[i]); + printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, " + "hw_addr %s", + dev->name, base, dev->irq, + print_mac(mac, dev->dev_addr)); /* Print current network id */ if(psa.psa_nwid_select) @@ -2719,9 +2705,9 @@ static const iw_handler wavelan_private_handler[] = static const struct iw_handler_def wavelan_handler_def = { - .num_standard = sizeof(wavelan_handler)/sizeof(iw_handler), - .num_private = sizeof(wavelan_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(wavelan_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(wavelan_handler), + .num_private = ARRAY_SIZE(wavelan_private_handler), + .num_private_args = ARRAY_SIZE(wavelan_private_args), .standard = wavelan_handler, .private = wavelan_private_handler, .private_args = wavelan_private_args, @@ -3691,12 +3677,12 @@ wv_82593_config(struct net_device * dev) int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count; #ifdef DEBUG_CONFIG_INFO + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n", dev->name, lp->mc_count); for(dmi=dev->mc_list; dmi; dmi=dmi->next) - printk(KERN_DEBUG " %02x:%02x:%02x:%02x:%02x:%02x\n", - dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], - dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5] ); + printk(KERN_DEBUG " %s\n", + print_mac(mac, dmi->dmi_addr)); #endif /* Initialize adapter's ethernet multicast addresses */ @@ -4577,7 +4563,6 @@ wavelan_probe(struct pcmcia_device *p_dev) lp->dev = dev; /* wavelan NET3 callbacks */ - SET_MODULE_OWNER(dev); dev->open = &wavelan_open; dev->stop = &wavelan_close; dev->hard_start_xmit = &wavelan_packet_xmit; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index c8b5c2271938..42a36b3f3ff7 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -859,12 +859,11 @@ static int wl3501_esbq_confirm(struct wl3501_card *this) static void wl3501_online(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); + DECLARE_MAC_BUF(mac); - printk(KERN_INFO "%s: Wireless LAN online. BSSID: " - "%02X %02X %02X %02X %02X %02X\n", dev->name, - this->bssid[0], this->bssid[1], this->bssid[2], - this->bssid[3], this->bssid[4], this->bssid[5]); + printk(KERN_INFO "%s: Wireless LAN online. 
BSSID: %s\n", + dev->name, print_mac(mac, this->bssid)); netif_wake_queue(dev); } @@ -907,7 +906,7 @@ static int wl3501_mgmt_association(struct wl3501_card *this) static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct wl3501_join_confirm sig; dprintk(3, "entry"); @@ -1046,7 +1045,7 @@ static inline void wl3501_start_confirm_interrupt(struct net_device *dev, static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev, u16 addr) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct wl3501_assoc_confirm sig; dprintk(3, "entry"); @@ -1075,7 +1074,7 @@ static inline void wl3501_rx_interrupt(struct net_device *dev) int morepkts; u16 addr; u8 sig_id; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); dprintk(3, "entry"); loop: @@ -1257,7 +1256,7 @@ fail: static int wl3501_close(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = -ENODEV; unsigned long flags; struct pcmcia_device *link; @@ -1289,7 +1288,7 @@ static int wl3501_close(struct net_device *dev) */ static int wl3501_reset(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = -ENODEV; wl3501_block_interrupt(this); @@ -1318,7 +1317,7 @@ out: static void wl3501_tx_timeout(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct net_device_stats *stats = &this->stats; unsigned long flags; int rc; @@ -1344,7 +1343,7 @@ static void wl3501_tx_timeout(struct net_device *dev) static int wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int enabled, rc; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&this->lock, flags); @@ -1371,7 +1370,7 @@ static int wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) static int wl3501_open(struct net_device *dev) { int rc = -ENODEV; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); unsigned long flags; struct pcmcia_device *link; link = this->p_dev; @@ -1410,14 +1409,14 @@ fail: static struct net_device_stats *wl3501_get_stats(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); return &this->stats; } static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct iw_statistics *wstats = &this->wstats; u32 value; /* size checked: it is u32 */ @@ -1497,7 +1496,7 @@ static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info, static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int channel = wrqu->freq.m; int rc = -EINVAL; @@ -1511,7 +1510,7 @@ static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->freq.m = wl3501_chan2freq[this->chan - 1] * 100000; wrqu->freq.e = 1; @@ -1526,7 +1525,7 @@ static 
int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info, if (wrqu->mode == IW_MODE_INFRA || wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_AUTO) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); this->net_type = wrqu->mode; rc = wl3501_reset(dev); @@ -1537,7 +1536,7 @@ static int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_mode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->mode = this->net_type; return 0; @@ -1546,7 +1545,7 @@ static int wl3501_get_mode(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->sens.value = this->rssi; wrqu->sens.disabled = !wrqu->sens.value; @@ -1577,7 +1576,7 @@ static int wl3501_get_range(struct net_device *dev, static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 }; int rc = -EINVAL; @@ -1597,7 +1596,7 @@ out: static int wl3501_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, this->bssid, ETH_ALEN); @@ -1616,7 +1615,7 @@ static int wl3501_set_scan(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int i; char *current_ev = extra; struct iw_event iwe; @@ -1666,7 +1665,7 @@ static int wl3501_set_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); if (wrqu->data.flags) { iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, @@ -1683,7 +1682,7 @@ static int wl3501_get_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&this->lock, flags); @@ -1697,7 +1696,7 @@ static int wl3501_get_essid(struct net_device *dev, static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); if (wrqu->data.length > sizeof(this->nick)) return -E2BIG; @@ -1708,7 +1707,7 @@ static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); strlcpy(extra, this->nick, 32); wrqu->data.length = strlen(extra); @@ -1733,7 +1732,7 @@ static int wl3501_get_rts_threshold(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u16 threshold; /* size 
checked: it is u16 */ - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_THRESHOLD, &threshold, sizeof(threshold)); if (!rc) { @@ -1749,7 +1748,7 @@ static int wl3501_get_frag_threshold(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u16 threshold; /* size checked: it is u16 */ - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAG_THRESHOLD, &threshold, sizeof(threshold)); if (!rc) { @@ -1765,7 +1764,7 @@ static int wl3501_get_txpow(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u16 txpow; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL, &txpow, sizeof(txpow)); @@ -1787,7 +1786,7 @@ static int wl3501_get_retry(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u8 retry; /* size checked: it is u8 */ - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_LONG_RETRY_LIMIT, &retry, sizeof(retry)); @@ -1814,7 +1813,7 @@ static int wl3501_get_encode(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u8 implemented, restricted, keys[100], len_keys, tocopy; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED, &implemented, sizeof(implemented)); @@ -1841,7 +1840,6 @@ static int wl3501_get_encode(struct net_device *dev, tocopy = min_t(u8, len_keys, wrqu->encoding.length); tocopy = min_t(u8, tocopy, 100); wrqu->encoding.length = tocopy; - memset(extra, 0, tocopy); memcpy(extra, keys, tocopy); out: return rc; @@ -1852,7 +1850,7 @@ static int wl3501_get_power(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u8 pwr_state; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_CURRENT_PWR_STATE, &pwr_state, sizeof(pwr_state)); @@ -1894,7 +1892,7 @@ static const iw_handler wl3501_handler[] = { }; static const struct iw_handler_def wl3501_handler_def = { - .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler), + .num_standard = ARRAY_SIZE(wl3501_handler), .standard = (iw_handler *)wl3501_handler, .get_wireless_stats = wl3501_get_wireless_stats, }; @@ -1937,7 +1935,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev) dev->tx_timeout = wl3501_tx_timeout; dev->watchdog_timeo = 5 * HZ; dev->get_stats = wl3501_get_stats; - this = dev->priv; + this = netdev_priv(dev); this->wireless_data.spy_data = &this->spy_data; this->p_dev = p_dev; dev->wireless_data = &this->wireless_data; @@ -1967,6 +1965,7 @@ static int wl3501_config(struct pcmcia_device *link) struct net_device *dev = link->priv; int i = 0, j, last_fn, last_ret; struct wl3501_card *this; + DECLARE_MAC_BUF(mac); /* Try allocating IO ports. This tries a few fixed addresses. If you * want, you can also read the card's config table to pick addresses -- @@ -2004,9 +2003,7 @@ static int wl3501_config(struct pcmcia_device *link) goto failed; } - SET_MODULE_OWNER(dev); - - this = dev->priv; + this = netdev_priv(dev); /* * At this point, the dev_node_t structure(s) should be initialized and * arranged in a linked list at link->dev_node. 
@@ -2022,14 +2019,14 @@ static int wl3501_config(struct pcmcia_device *link) } strcpy(this->node.dev_name, dev->name); - /* print probe information */ - printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, MAC addr in flash ROM:", - dev->name, this->base_addr, (int)dev->irq); - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; - printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); - } - printk("\n"); + + /* print probe information */ + printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, " + "MAC addr in flash ROM:%s\n", + dev->name, this->base_addr, (int)dev->irq, + print_mac(mac, dev->dev_addr)); /* * Initialize card parameters - added by jss */ @@ -2079,7 +2076,7 @@ static int wl3501_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; - wl3501_pwr_mgmt(dev->priv, WL3501_SUSPEND); + wl3501_pwr_mgmt(netdev_priv(dev), WL3501_SUSPEND); if (link->open) netif_device_detach(dev); @@ -2090,7 +2087,7 @@ static int wl3501_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; - wl3501_pwr_mgmt(dev->priv, WL3501_RESUME); + wl3501_pwr_mgmt(netdev_priv(dev), WL3501_RESUME); if (link->open) { wl3501_reset(dev); netif_device_attach(dev); diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile index 4d505903352c..7a2f2a98edab 100644 --- a/drivers/net/wireless/zd1211rw/Makefile +++ b/drivers/net/wireless/zd1211rw/Makefile @@ -4,7 +4,7 @@ zd1211rw-objs := zd_chip.o zd_ieee80211.o \ zd_mac.o zd_netdev.o \ zd_rf_al2230.o zd_rf_rf2959.o \ zd_rf_al7230b.o zd_rf_uw2453.o \ - zd_rf.o zd_usb.o zd_util.o + zd_rf.o zd_usb.o ifeq ($(CONFIG_ZD1211RW_DEBUG),y) EXTRA_CFLAGS += -DDEBUG diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index c39f1984b84d..f831b68f1b9c 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -28,7 +28,6 @@ #include "zd_ieee80211.h" #include "zd_mac.h" #include "zd_rf.h" -#include "zd_util.h" void zd_chip_init(struct zd_chip *chip, struct net_device *netdev, @@ -106,7 +105,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr { int r; int i; - zd_addr_t *a16 = (zd_addr_t *)NULL; + zd_addr_t *a16; u16 *v16; unsigned int count16; @@ -377,6 +376,7 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) [0] = { .addr = CR_MAC_ADDR_P1 }, [1] = { .addr = CR_MAC_ADDR_P2 }, }; + DECLARE_MAC_BUF(mac); reqs[0].value = (mac_addr[3] << 24) | (mac_addr[2] << 16) @@ -386,7 +386,7 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) | mac_addr[4]; dev_dbg_f(zd_chip_dev(chip), - "mac addr " MAC_FMT "\n", MAC_ARG(mac_addr)); + "mac addr %s\n", print_mac(mac, mac_addr)); mutex_lock(&chip->mutex); r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); @@ -500,8 +500,6 @@ int zd_chip_lock_phy_regs(struct zd_chip *chip) return r; } - dev_dbg_f(zd_chip_dev(chip), - "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp & ~UNLOCK_PHY_REGS); tmp &= ~UNLOCK_PHY_REGS; r = zd_iowrite32_locked(chip, tmp, CR_REG1); @@ -523,8 +521,6 @@ int zd_chip_unlock_phy_regs(struct zd_chip *chip) return r; } - dev_dbg_f(zd_chip_dev(chip), - "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp | UNLOCK_PHY_REGS); tmp |= UNLOCK_PHY_REGS; r = zd_iowrite32_locked(chip, tmp, CR_REG1); @@ -841,8 +837,6 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) s->atim_wnd_period = values[0]; s->pre_tbtt = values[1]; s->beacon_interval = values[2]; - dev_dbg_f(zd_chip_dev(chip), "aw %u pt %u 
bi %u\n", - s->atim_wnd_period, s->pre_tbtt, s->beacon_interval); return 0; } @@ -864,9 +858,6 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) reqs[2].addr = CR_BCN_INTERVAL; reqs[2].value = s->beacon_interval; - dev_dbg_f(zd_chip_dev(chip), - "aw %u pt %u bi %u\n", s->atim_wnd_period, s->pre_tbtt, - s->beacon_interval); return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); } @@ -1018,19 +1009,19 @@ int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, u32 value = 0; /* Modulation bit */ - if (ZD_CS_TYPE(rts_rate) == ZD_CS_OFDM) + if (ZD_MODULATION_TYPE(rts_rate) == ZD_OFDM) rts_mod = ZD_RX_OFDM; dev_dbg_f(zd_chip_dev(chip), "rts_rate=%x preamble=%x\n", rts_rate, preamble); - value |= rts_rate << RTSCTS_SH_RTS_RATE; + value |= ZD_PURE_RATE(rts_rate) << RTSCTS_SH_RTS_RATE; value |= rts_mod << RTSCTS_SH_RTS_MOD_TYPE; value |= preamble << RTSCTS_SH_RTS_PMB_TYPE; value |= preamble << RTSCTS_SH_CTS_PMB_TYPE; /* We always send 11M self-CTS messages, like the vendor driver. */ - value |= ZD_CCK_RATE_11M << RTSCTS_SH_CTS_RATE; + value |= ZD_PURE_RATE(ZD_CCK_RATE_11M) << RTSCTS_SH_CTS_RATE; value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE; return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE); @@ -1160,16 +1151,12 @@ out: static int update_pwr_int(struct zd_chip *chip, u8 channel) { u8 value = chip->pwr_int_values[channel - 1]; - dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_int %#04x\n", - channel, value); return zd_iowrite16_locked(chip, value, CR31); } static int update_pwr_cal(struct zd_chip *chip, u8 channel) { u8 value = chip->pwr_cal_values[channel-1]; - dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_cal %#04x\n", - channel, value); return zd_iowrite16_locked(chip, value, CR68); } @@ -1184,9 +1171,6 @@ static int update_ofdm_cal(struct zd_chip *chip, u8 channel) ioreqs[2].addr = CR65; ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1]; - dev_dbg_f(zd_chip_dev(chip), - "channel %d ofdm_cal 36M %#04x 48M %#04x 54M %#04x\n", - channel, ioreqs[0].value, ioreqs[1].value, ioreqs[2].value); return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } @@ -1344,7 +1328,7 @@ int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates) return zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL); } -static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) +static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size) { static const u16 constants[] = { 715, 655, 585, 540, 470, 410, 360, 315, @@ -1358,7 +1342,7 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) /* It seems that their quality parameter is somehow per signal * and is now transferred per bit. */ - switch (rate) { + switch (zd_rate) { case ZD_OFDM_RATE_6M: case ZD_OFDM_RATE_12M: case ZD_OFDM_RATE_24M: @@ -1385,7 +1369,7 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) break; } - switch (rate) { + switch (zd_rate) { case ZD_OFDM_RATE_6M: case ZD_OFDM_RATE_9M: i += 3; @@ -1409,11 +1393,11 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) return i; } -static int ofdm_qual_percent(u8 status_quality, u8 rate, unsigned int size) +static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size) { int r; - r = ofdm_qual_db(status_quality, rate, size); + r = ofdm_qual_db(status_quality, zd_rate, size); ZD_ASSERT(r >= 0); if (r < 0) r = 0; @@ -1474,12 +1458,17 @@ static int cck_qual_percent(u8 status_quality) return r <= 100 ? 
r : 100; } +static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) +{ + return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); +} + u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, const struct rx_status *status) { return (status->frame_status&ZD_RX_OFDM) ? ofdm_qual_percent(status->signal_quality_ofdm, - zd_ofdm_plcp_header_rate(rx_frame), + zd_rate_from_ofdm_plcp_header(rx_frame), size) : cck_qual_percent(status->signal_quality_cck); } @@ -1495,32 +1484,32 @@ u8 zd_rx_strength_percent(u8 rssi) u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status) { static const u16 ofdm_rates[] = { - [ZD_OFDM_RATE_6M] = 60, - [ZD_OFDM_RATE_9M] = 90, - [ZD_OFDM_RATE_12M] = 120, - [ZD_OFDM_RATE_18M] = 180, - [ZD_OFDM_RATE_24M] = 240, - [ZD_OFDM_RATE_36M] = 360, - [ZD_OFDM_RATE_48M] = 480, - [ZD_OFDM_RATE_54M] = 540, + [ZD_OFDM_PLCP_RATE_6M] = 60, + [ZD_OFDM_PLCP_RATE_9M] = 90, + [ZD_OFDM_PLCP_RATE_12M] = 120, + [ZD_OFDM_PLCP_RATE_18M] = 180, + [ZD_OFDM_PLCP_RATE_24M] = 240, + [ZD_OFDM_PLCP_RATE_36M] = 360, + [ZD_OFDM_PLCP_RATE_48M] = 480, + [ZD_OFDM_PLCP_RATE_54M] = 540, }; u16 rate; if (status->frame_status & ZD_RX_OFDM) { + /* Deals with PLCP OFDM rate (not zd_rates) */ u8 ofdm_rate = zd_ofdm_plcp_header_rate(rx_frame); rate = ofdm_rates[ofdm_rate & 0xf]; } else { - u8 cck_rate = zd_cck_plcp_header_rate(rx_frame); - switch (cck_rate) { - case ZD_CCK_SIGNAL_1M: + switch (zd_cck_plcp_header_signal(rx_frame)) { + case ZD_CCK_PLCP_SIGNAL_1M: rate = 10; break; - case ZD_CCK_SIGNAL_2M: + case ZD_CCK_PLCP_SIGNAL_2M: rate = 20; break; - case ZD_CCK_SIGNAL_5M5: + case ZD_CCK_PLCP_SIGNAL_5M5: rate = 55; break; - case ZD_CCK_SIGNAL_11M: + case ZD_CCK_PLCP_SIGNAL_11M: rate = 110; break; default: @@ -1638,7 +1627,5 @@ int zd_chip_set_multicast_hash(struct zd_chip *chip, { CR_GROUP_HASH_P2, hash->high }, }; - dev_dbg_f(zd_chip_dev(chip), "hash l 0x%08x h 0x%08x\n", - ioreqs[0].value, ioreqs[1].value); return zd_iowrite32a(chip, ioreqs, ARRAY_SIZE(ioreqs)); } diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index f4698576ab71..8009b70213e2 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -871,11 +871,6 @@ static inline int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) return r; } -static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter) -{ - return zd_iowrite32(chip, CR_RX_FILTER, filter); -} - int zd_chip_lock_phy_regs(struct zd_chip *chip); int zd_chip_unlock_phy_regs(struct zd_chip *chip); diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h index deb99d1eaa77..505b4d7dd0e2 100644 --- a/drivers/net/wireless/zd1211rw/zd_def.h +++ b/drivers/net/wireless/zd1211rw/zd_def.h @@ -21,7 +21,6 @@ #include <linux/kernel.h> #include <linux/stringify.h> #include <linux/device.h> -#include <linux/kernel.h> typedef u16 __nocast zd_addr_t; diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h index c4f36d39642b..fbf6491dce7e 100644 --- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h +++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h @@ -43,21 +43,25 @@ struct ofdm_plcp_header { __le16 service; } __attribute__((packed)); -static inline u8 zd_ofdm_plcp_header_rate( - const struct ofdm_plcp_header *header) +static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) { return header->prefix[0] & 0xf; } -/* These are referred to as zd_rates */ -#define 
ZD_OFDM_RATE_6M 0xb -#define ZD_OFDM_RATE_9M 0xf -#define ZD_OFDM_RATE_12M 0xa -#define ZD_OFDM_RATE_18M 0xe -#define ZD_OFDM_RATE_24M 0x9 -#define ZD_OFDM_RATE_36M 0xd -#define ZD_OFDM_RATE_48M 0x8 -#define ZD_OFDM_RATE_54M 0xc +/* The following defines give the encoding of the 4-bit rate field in the + * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to + * define the zd-rate values for OFDM. + * + * See the struct zd_ctrlset definition in zd_mac.h. + */ +#define ZD_OFDM_PLCP_RATE_6M 0xb +#define ZD_OFDM_PLCP_RATE_9M 0xf +#define ZD_OFDM_PLCP_RATE_12M 0xa +#define ZD_OFDM_PLCP_RATE_18M 0xe +#define ZD_OFDM_PLCP_RATE_24M 0x9 +#define ZD_OFDM_PLCP_RATE_36M 0xd +#define ZD_OFDM_PLCP_RATE_48M 0x8 +#define ZD_OFDM_PLCP_RATE_54M 0xc struct cck_plcp_header { u8 signal; @@ -66,15 +70,22 @@ struct cck_plcp_header { __le16 crc16; } __attribute__((packed)); -static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header) +static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) { return header->signal; } -#define ZD_CCK_SIGNAL_1M 0x0a -#define ZD_CCK_SIGNAL_2M 0x14 -#define ZD_CCK_SIGNAL_5M5 0x37 -#define ZD_CCK_SIGNAL_11M 0x6e +/* These defines give the encodings of the signal field in the 802.11b PLCP + * header. The signal field gives the bit rate of the following packet. Even + * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s + * rate to stay consistent with Zydas and our use of the term. + * + * Notify that these values are *not* used in the zd-rates. + */ +#define ZD_CCK_PLCP_SIGNAL_1M 0x0a +#define ZD_CCK_PLCP_SIGNAL_2M 0x14 +#define ZD_CCK_PLCP_SIGNAL_5M5 0x37 +#define ZD_CCK_PLCP_SIGNAL_11M 0x6e enum ieee80211_std { IEEE80211B = 0x01, diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index f6c487aa8246..a903645e157a 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -28,7 +28,6 @@ #include "zd_ieee80211.h" #include "zd_netdev.h" #include "zd_rf.h" -#include "zd_util.h" static void ieee_init(struct ieee80211_device *ieee); static void softmac_init(struct ieee80211softmac_device *sm); @@ -161,13 +160,33 @@ void zd_mac_clear(struct zd_mac *mac) ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); } -static int reset_mode(struct zd_mac *mac) +static int set_rx_filter(struct zd_mac *mac) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); u32 filter = (ieee->iw_mode == IW_MODE_MONITOR) ? ~0 : STA_RX_FILTER; return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); } +static int set_sniffer(struct zd_mac *mac) +{ + struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); + return zd_iowrite32(&mac->chip, CR_SNIFFER_ON, + ieee->iw_mode == IW_MODE_MONITOR ? 
1 : 0); + return 0; +} + +static int set_mc_hash(struct zd_mac *mac) +{ + struct zd_mc_hash hash; + struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); + + zd_mc_clear(&hash); + if (ieee->iw_mode == IW_MODE_MONITOR) + zd_mc_add_all(&hash); + + return zd_chip_set_multicast_hash(&mac->chip, &hash); +} + int zd_mac_open(struct net_device *netdev) { struct zd_mac *mac = zd_netdev_mac(netdev); @@ -194,7 +213,13 @@ int zd_mac_open(struct net_device *netdev) r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G); if (r < 0) goto disable_int; - r = reset_mode(mac); + r = set_rx_filter(mac); + if (r) + goto disable_int; + r = set_sniffer(mac); + if (r) + goto disable_int; + r = set_mc_hash(mac); if (r) goto disable_int; r = zd_chip_switch_radio_on(chip); @@ -263,12 +288,13 @@ int zd_mac_set_mac_address(struct net_device *netdev, void *p) struct sockaddr *addr = p; struct zd_mac *mac = zd_netdev_mac(netdev); struct zd_chip *chip = &mac->chip; + DECLARE_MAC_BUF(mac2); if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; dev_dbg_f(zd_mac_dev(mac), - "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data)); + "Setting MAC to %s\n", print_mac(mac2, addr->sa_data)); if (netdev->flags & IFF_UP) { r = zd_write_mac_addr(chip, addr->sa_data); @@ -298,18 +324,21 @@ static void set_multicast_hash_handler(struct work_struct *work) void zd_mac_set_multicast_list(struct net_device *dev) { - struct zd_mc_hash hash; struct zd_mac *mac = zd_netdev_mac(dev); + struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); + struct zd_mc_hash hash; struct dev_mc_list *mc; unsigned long flags; + DECLARE_MAC_BUF(mac2); - if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) { + if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI) || + ieee->iw_mode == IW_MODE_MONITOR) { zd_mc_add_all(&hash); } else { zd_mc_clear(&hash); for (mc = dev->mc_list; mc; mc = mc->next) { - dev_dbg_f(zd_mac_dev(mac), "mc addr " MAC_FMT "\n", - MAC_ARG(mc->dmi_addr)); + dev_dbg_f(zd_mac_dev(mac), "mc addr %s\n", + print_mac(mac2, mc->dmi_addr)); zd_mc_add_addr(&hash, mc->dmi_addr); } } @@ -582,28 +611,6 @@ u8 zd_mac_get_channel(struct zd_mac *mac) return channel; } -/* If wrong rate is given, we are falling back to the slowest rate: 1MBit/s */ -static u8 zd_rate_typed(u8 zd_rate) -{ - static const u8 typed_rates[16] = { - [ZD_CCK_RATE_1M] = ZD_CS_CCK|ZD_CCK_RATE_1M, - [ZD_CCK_RATE_2M] = ZD_CS_CCK|ZD_CCK_RATE_2M, - [ZD_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CCK_RATE_5_5M, - [ZD_CCK_RATE_11M] = ZD_CS_CCK|ZD_CCK_RATE_11M, - [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M, - [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M, - [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M, - [ZD_OFDM_RATE_18M] = ZD_CS_OFDM|ZD_OFDM_RATE_18M, - [ZD_OFDM_RATE_24M] = ZD_CS_OFDM|ZD_OFDM_RATE_24M, - [ZD_OFDM_RATE_36M] = ZD_CS_OFDM|ZD_OFDM_RATE_36M, - [ZD_OFDM_RATE_48M] = ZD_CS_OFDM|ZD_OFDM_RATE_48M, - [ZD_OFDM_RATE_54M] = ZD_CS_OFDM|ZD_OFDM_RATE_54M, - }; - - ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f); - return typed_rates[zd_rate & ZD_CS_RATE_MASK]; -} - int zd_mac_set_mode(struct zd_mac *mac, u32 mode) { struct ieee80211_device *ieee; @@ -628,8 +635,12 @@ int zd_mac_set_mode(struct zd_mac *mac, u32 mode) ieee->iw_mode = mode; spin_unlock_irq(&ieee->lock); - if (netif_running(mac->netdev)) - return reset_mode(mac); + if (netif_running(mac->netdev)) { + int r = set_rx_filter(mac); + if (r) + return r; + return set_sniffer(mac); + } return 0; } @@ -707,25 +718,30 @@ int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range) static int zd_calc_tx_length_us(u8 *service, u8 
zd_rate, u16 tx_length) { + /* ZD_PURE_RATE() must be used to remove the modulation type flag of + * the zd-rate values. */ static const u8 rate_divisor[] = { - [ZD_CCK_RATE_1M] = 1, - [ZD_CCK_RATE_2M] = 2, - [ZD_CCK_RATE_5_5M] = 11, /* bits must be doubled */ - [ZD_CCK_RATE_11M] = 11, - [ZD_OFDM_RATE_6M] = 6, - [ZD_OFDM_RATE_9M] = 9, - [ZD_OFDM_RATE_12M] = 12, - [ZD_OFDM_RATE_18M] = 18, - [ZD_OFDM_RATE_24M] = 24, - [ZD_OFDM_RATE_36M] = 36, - [ZD_OFDM_RATE_48M] = 48, - [ZD_OFDM_RATE_54M] = 54, + [ZD_PURE_RATE(ZD_CCK_RATE_1M)] = 1, + [ZD_PURE_RATE(ZD_CCK_RATE_2M)] = 2, + + /* bits must be doubled */ + [ZD_PURE_RATE(ZD_CCK_RATE_5_5M)] = 11, + + [ZD_PURE_RATE(ZD_CCK_RATE_11M)] = 11, + [ZD_PURE_RATE(ZD_OFDM_RATE_6M)] = 6, + [ZD_PURE_RATE(ZD_OFDM_RATE_9M)] = 9, + [ZD_PURE_RATE(ZD_OFDM_RATE_12M)] = 12, + [ZD_PURE_RATE(ZD_OFDM_RATE_18M)] = 18, + [ZD_PURE_RATE(ZD_OFDM_RATE_24M)] = 24, + [ZD_PURE_RATE(ZD_OFDM_RATE_36M)] = 36, + [ZD_PURE_RATE(ZD_OFDM_RATE_48M)] = 48, + [ZD_PURE_RATE(ZD_OFDM_RATE_54M)] = 54, }; u32 bits = (u32)tx_length * 8; u32 divisor; - divisor = rate_divisor[zd_rate]; + divisor = rate_divisor[ZD_PURE_RATE(zd_rate)]; if (divisor == 0) return -EINVAL; @@ -748,52 +764,24 @@ static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length) return bits/divisor; } -enum { - R2M_SHORT_PREAMBLE = 0x01, - R2M_11A = 0x02, -}; - -static u8 zd_rate_to_modulation(u8 zd_rate, int flags) -{ - u8 modulation; - - modulation = zd_rate_typed(zd_rate); - if (flags & R2M_SHORT_PREAMBLE) { - switch (ZD_CS_RATE(modulation)) { - case ZD_CCK_RATE_2M: - case ZD_CCK_RATE_5_5M: - case ZD_CCK_RATE_11M: - modulation |= ZD_CS_CCK_PREA_SHORT; - return modulation; - } - } - if (flags & R2M_11A) { - if (ZD_CS_TYPE(modulation) == ZD_CS_OFDM) - modulation |= ZD_CS_OFDM_MODE_11A; - } - return modulation; -} - static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs, struct ieee80211_hdr_4addr *hdr) { struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev); u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl)); - u8 rate, zd_rate; + u8 rate; int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0; int is_multicast = is_multicast_ether_addr(hdr->addr1); int short_preamble = ieee80211softmac_short_preamble_ok(softmac, is_multicast, is_mgt); - int flags = 0; - /* FIXME: 802.11a? 
*/ rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt); + cs->modulation = rate_to_zd_rate(rate); - if (short_preamble) - flags |= R2M_SHORT_PREAMBLE; - - zd_rate = rate_to_zd_rate(rate); - cs->modulation = zd_rate_to_modulation(zd_rate, flags); + /* Set short preamble bit when appropriate */ + if (short_preamble && ZD_MODULATION_TYPE(cs->modulation) == ZD_CCK + && cs->modulation != ZD_CCK_RATE_1M) + cs->modulation |= ZD_CCK_PREA_SHORT; } static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, @@ -822,7 +810,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, cs->control |= ZD_CS_MULTICAST; /* PS-POLL */ - if (stype == IEEE80211_STYPE_PSPOLL) + if (ftype == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL) cs->control |= ZD_CS_PS_POLL_FRAME; /* Unicast data frames over the threshold should have RTS */ @@ -832,7 +820,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, cs->control |= ZD_CS_RTS; /* Use CTS-to-self protection if required */ - if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM && + if (ZD_MODULATION_TYPE(cs->modulation) == ZD_OFDM && ieee80211softmac_protection_needed(softmac)) { /* FIXME: avoid sending RTS *and* self-CTS, is that correct? */ cs->control &= ~ZD_CS_RTS; @@ -893,7 +881,7 @@ static int fill_ctrlset(struct zd_mac *mac, * - see line 53 of zdinlinef.h */ cs->service = 0; - r = zd_calc_tx_length_us(&cs->service, ZD_CS_RATE(cs->modulation), + r = zd_calc_tx_length_us(&cs->service, ZD_RATE(cs->modulation), le16_to_cpu(cs->tx_length)); if (r < 0) return r; @@ -902,7 +890,7 @@ static int fill_ctrlset(struct zd_mac *mac, if (next_frag_len == 0) { cs->next_frame_length = 0; } else { - r = zd_calc_tx_length_us(NULL, ZD_CS_RATE(cs->modulation), + r = zd_calc_tx_length_us(NULL, ZD_RATE(cs->modulation), next_frag_len); if (r < 0) return r; @@ -1077,7 +1065,8 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats, { const struct rx_status *status; - *pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status)); + *pstatus = status = (struct rx_status *) + (buffer + (length - sizeof(struct rx_status))); if (status->frame_status & ZD_RX_ERROR) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); ieee->stats.rx_errors++; diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index 9f9344eb50f9..1b15bde3ff60 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h @@ -40,28 +40,51 @@ struct zd_ctrlset { #define ZD_CS_RESERVED_SIZE 25 -/* zd_crtlset field modulation */ -#define ZD_CS_RATE_MASK 0x0f -#define ZD_CS_TYPE_MASK 0x10 -#define ZD_CS_RATE(modulation) ((modulation) & ZD_CS_RATE_MASK) -#define ZD_CS_TYPE(modulation) ((modulation) & ZD_CS_TYPE_MASK) - -#define ZD_CS_CCK 0x00 -#define ZD_CS_OFDM 0x10 - -/* These are referred to as zd_rates */ -#define ZD_CCK_RATE_1M 0x00 -#define ZD_CCK_RATE_2M 0x01 -#define ZD_CCK_RATE_5_5M 0x02 -#define ZD_CCK_RATE_11M 0x03 -/* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*. +/* The field modulation of struct zd_ctrlset controls the bit rate, the use + * of short or long preambles in 802.11b (CCK mode) or the use of 802.11a or + * 802.11g in OFDM mode. + * + * The term zd-rate is used for the combination of the modulation type flag + * and the "pure" rate value. 
*/ - -/* bit 5 is preamble (when in CCK mode), or a/g selection (when in OFDM mode) */ -#define ZD_CS_CCK_PREA_LONG 0x00 -#define ZD_CS_CCK_PREA_SHORT 0x20 -#define ZD_CS_OFDM_MODE_11G 0x00 -#define ZD_CS_OFDM_MODE_11A 0x20 +#define ZD_PURE_RATE_MASK 0x0f +#define ZD_MODULATION_TYPE_MASK 0x10 +#define ZD_RATE_MASK (ZD_PURE_RATE_MASK|ZD_MODULATION_TYPE_MASK) +#define ZD_PURE_RATE(modulation) ((modulation) & ZD_PURE_RATE_MASK) +#define ZD_MODULATION_TYPE(modulation) ((modulation) & ZD_MODULATION_TYPE_MASK) +#define ZD_RATE(modulation) ((modulation) & ZD_RATE_MASK) + +/* The two possible modulation types. Notify that 802.11b doesn't use the CCK + * codeing for the 1 and 2 MBit/s rate. We stay with the term here to remain + * consistent with uses the term at other places. + */ +#define ZD_CCK 0x00 +#define ZD_OFDM 0x10 + +/* The ZD1211 firmware uses proprietary encodings of the 802.11b (CCK) rates. + * For OFDM the PLCP rate encodings are used. We combine these "pure" rates + * with the modulation type flag and call the resulting values zd-rates. + */ +#define ZD_CCK_RATE_1M (ZD_CCK|0x00) +#define ZD_CCK_RATE_2M (ZD_CCK|0x01) +#define ZD_CCK_RATE_5_5M (ZD_CCK|0x02) +#define ZD_CCK_RATE_11M (ZD_CCK|0x03) +#define ZD_OFDM_RATE_6M (ZD_OFDM|ZD_OFDM_PLCP_RATE_6M) +#define ZD_OFDM_RATE_9M (ZD_OFDM|ZD_OFDM_PLCP_RATE_9M) +#define ZD_OFDM_RATE_12M (ZD_OFDM|ZD_OFDM_PLCP_RATE_12M) +#define ZD_OFDM_RATE_18M (ZD_OFDM|ZD_OFDM_PLCP_RATE_18M) +#define ZD_OFDM_RATE_24M (ZD_OFDM|ZD_OFDM_PLCP_RATE_24M) +#define ZD_OFDM_RATE_36M (ZD_OFDM|ZD_OFDM_PLCP_RATE_36M) +#define ZD_OFDM_RATE_48M (ZD_OFDM|ZD_OFDM_PLCP_RATE_48M) +#define ZD_OFDM_RATE_54M (ZD_OFDM|ZD_OFDM_PLCP_RATE_54M) + +/* The bit 5 of the zd_ctrlset modulation field controls the preamble in CCK + * mode or the 802.11a/802.11g selection in OFDM mode. 
+ */ +#define ZD_CCK_PREA_LONG 0x00 +#define ZD_CCK_PREA_SHORT 0x20 +#define ZD_OFDM_MODE_11G 0x00 +#define ZD_OFDM_MODE_11A 0x20 /* zd_ctrlset control field */ #define ZD_CS_NEED_RANDOM_BACKOFF 0x01 diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c index 8bda48de31ef..047cab3d87df 100644 --- a/drivers/net/wireless/zd1211rw/zd_netdev.c +++ b/drivers/net/wireless/zd1211rw/zd_netdev.c @@ -233,7 +233,6 @@ struct net_device *zd_netdev_alloc(struct usb_interface *intf) return NULL; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &intf->dev); dev_dbg_f(&intf->dev, "netdev->flags %#06hx\n", netdev->flags); diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 28d41a29d7b1..b0684f965761 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -31,7 +31,6 @@ #include "zd_netdev.h" #include "zd_mac.h" #include "zd_usb.h" -#include "zd_util.h" static struct usb_device_id usb_ids[] = { /* ZD1211 */ @@ -55,6 +54,7 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, /* ZD1211B */ { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, @@ -72,6 +72,11 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, /* "Driverless" devices that need ejecting */ { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, diff --git a/drivers/net/wireless/zd1211rw/zd_util.c b/drivers/net/wireless/zd1211rw/zd_util.c deleted file mode 100644 index d20036c15d11..000000000000 --- a/drivers/net/wireless/zd1211rw/zd_util.c +++ /dev/null @@ -1,82 +0,0 @@ -/* zd_util.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Utility program - */ - -#include "zd_def.h" -#include "zd_util.h" - -#ifdef DEBUG -static char hex(u8 v) -{ - v &= 0xf; - return (v < 10 ? '0' : 'a' - 10) + v; -} - -static char hex_print(u8 c) -{ - return (0x20 <= c && c < 0x7f) ? 
c : '.'; -} - -static void dump_line(const u8 *bytes, size_t size) -{ - char c; - size_t i; - - size = size <= 8 ? size : 8; - printk(KERN_DEBUG "zd1211 %p ", bytes); - for (i = 0; i < 8; i++) { - switch (i) { - case 1: - case 5: - c = '.'; - break; - case 3: - c = ':'; - break; - default: - c = ' '; - } - if (i < size) { - printk("%c%c%c", hex(bytes[i] >> 4), hex(bytes[i]), c); - } else { - printk(" %c", c); - } - } - - for (i = 0; i < size; i++) - printk("%c", hex_print(bytes[i])); - printk("\n"); -} - -void zd_hexdump(const void *bytes, size_t size) -{ - size_t i = 0; - - do { - dump_line((u8 *)bytes + i, size-i); - i += 8; - } while (i < size); -} -#endif /* DEBUG */ - -void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size) -{ - if (buffer_size < tail_size) - return NULL; - return (u8 *)buffer + (buffer_size - tail_size); -} diff --git a/drivers/net/wireless/zd1211rw/zd_util.h b/drivers/net/wireless/zd1211rw/zd_util.h deleted file mode 100644 index ce26f7adea92..000000000000 --- a/drivers/net/wireless/zd1211rw/zd_util.h +++ /dev/null @@ -1,29 +0,0 @@ -/* zd_util.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ZD_UTIL_H -#define _ZD_UTIL_H - -void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size); - -#ifdef DEBUG -void zd_hexdump(const void *bytes, size_t size); -#else -#define zd_hexdump(bytes, size) -#endif /* DEBUG */ - -#endif /* _ZD_UTIL_H */ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c new file mode 100644 index 000000000000..f464b82c7d5f --- /dev/null +++ b/drivers/net/xen-netfront.c @@ -0,0 +1,1817 @@ +/* + * Virtual network driver for conversing with remote driver backends. + * + * Copyright (c) 2002-2005, K A Fraser + * Copyright (c) 2005, XenSource Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/moduleparam.h> +#include <linux/mm.h> +#include <net/ip.h> + +#include <xen/xenbus.h> +#include <xen/events.h> +#include <xen/page.h> +#include <xen/grant_table.h> + +#include <xen/interface/io/netif.h> +#include <xen/interface/memory.h> +#include <xen/interface/grant_table.h> + +static struct ethtool_ops xennet_ethtool_ops; + +struct netfront_cb { + struct page *page; + unsigned offset; +}; + +#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) + +#define RX_COPY_THRESHOLD 256 + +#define GRANT_INVALID_REF 0 + +#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) +#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) +#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + +struct netfront_info { + struct list_head list; + struct net_device *netdev; + + struct napi_struct napi; + + struct xen_netif_tx_front_ring tx; + struct xen_netif_rx_front_ring rx; + + spinlock_t tx_lock; + spinlock_t rx_lock; + + unsigned int evtchn; + + /* Receive-ring batched refills. */ +#define RX_MIN_TARGET 8 +#define RX_DFL_MIN_TARGET 64 +#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + unsigned rx_min_target, rx_max_target, rx_target; + struct sk_buff_head rx_batch; + + struct timer_list rx_refill_timer; + + /* + * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries + * are linked from tx_skb_freelist through skb_entry.link. + * + * NB. Freelist index entries are always going to be less than + * PAGE_OFFSET, whereas pointers to skbs will always be equal or + * greater than PAGE_OFFSET: we use this property to distinguish + * them. + */ + union skb_entry { + struct sk_buff *skb; + unsigned link; + } tx_skbs[NET_TX_RING_SIZE]; + grant_ref_t gref_tx_head; + grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; + unsigned tx_skb_freelist; + + struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; + grant_ref_t gref_rx_head; + grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; + + struct xenbus_device *xbdev; + int tx_ring_ref; + int rx_ring_ref; + + unsigned long rx_pfn_array[NET_RX_RING_SIZE]; + struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; + struct mmu_update rx_mmu[NET_RX_RING_SIZE]; +}; + +struct netfront_rx_info { + struct xen_netif_rx_response rx; + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; +}; + +/* + * Access macros for acquiring freeing slots in tx_skbs[]. 
+ */ + +static void add_id_to_freelist(unsigned *head, union skb_entry *list, + unsigned short id) +{ + list[id].link = *head; + *head = id; +} + +static unsigned short get_id_from_freelist(unsigned *head, + union skb_entry *list) +{ + unsigned int id = *head; + *head = list[id].link; + return id; +} + +static int xennet_rxidx(RING_IDX idx) +{ + return idx & (NET_RX_RING_SIZE - 1); +} + +static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, + RING_IDX ri) +{ + int i = xennet_rxidx(ri); + struct sk_buff *skb = np->rx_skbs[i]; + np->rx_skbs[i] = NULL; + return skb; +} + +static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, + RING_IDX ri) +{ + int i = xennet_rxidx(ri); + grant_ref_t ref = np->grant_rx_ref[i]; + np->grant_rx_ref[i] = GRANT_INVALID_REF; + return ref; +} + +#ifdef CONFIG_SYSFS +static int xennet_sysfs_addif(struct net_device *netdev); +static void xennet_sysfs_delif(struct net_device *netdev); +#else /* !CONFIG_SYSFS */ +#define xennet_sysfs_addif(dev) (0) +#define xennet_sysfs_delif(dev) do { } while (0) +#endif + +static int xennet_can_sg(struct net_device *dev) +{ + return dev->features & NETIF_F_SG; +} + + +static void rx_refill_timeout(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netfront_info *np = netdev_priv(dev); + netif_rx_schedule(dev, &np->napi); +} + +static int netfront_tx_slot_available(struct netfront_info *np) +{ + return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < + (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); +} + +static void xennet_maybe_wake_tx(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + + if (unlikely(netif_queue_stopped(dev)) && + netfront_tx_slot_available(np) && + likely(netif_running(dev))) + netif_wake_queue(dev); +} + +static void xennet_alloc_rx_buffers(struct net_device *dev) +{ + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + struct page *page; + int i, batch_target, notify; + RING_IDX req_prod = np->rx.req_prod_pvt; + grant_ref_t ref; + unsigned long pfn; + void *vaddr; + struct xen_netif_rx_request *req; + + if (unlikely(!netif_carrier_ok(dev))) + return; + + /* + * Allocate skbuffs greedily, even though we batch updates to the + * receive ring. This creates a less bursty demand on the memory + * allocator, so should reduce the chance of failed allocation requests + * both for ourself and for other kernel subsystems. + */ + batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); + for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { + skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + goto no_skb; + + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) { + kfree_skb(skb); +no_skb: + /* Any skbuffs queued for refill? Force them out. */ + if (i != 0) + goto refill; + /* Could not allocate any skbuffs. Try again later. */ + mod_timer(&np->rx_refill_timer, + jiffies + (HZ/10)); + break; + } + + skb_shinfo(skb)->frags[0].page = page; + skb_shinfo(skb)->nr_frags = 1; + __skb_queue_tail(&np->rx_batch, skb); + } + + /* Is the batch large enough to be worthwhile? */ + if (i < (np->rx_target/2)) { + if (req_prod > np->rx.sring->req_prod) + goto push; + return; + } + + /* Adjust our fill target if we risked running out of buffers. 
*/ + if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && + ((np->rx_target *= 2) > np->rx_max_target)) + np->rx_target = np->rx_max_target; + + refill: + for (i = 0; ; i++) { + skb = __skb_dequeue(&np->rx_batch); + if (skb == NULL) + break; + + skb->dev = dev; + + id = xennet_rxidx(req_prod + i); + + BUG_ON(np->rx_skbs[id]); + np->rx_skbs[id] = skb; + + ref = gnttab_claim_grant_reference(&np->gref_rx_head); + BUG_ON((signed short)ref < 0); + np->grant_rx_ref[id] = ref; + + pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); + vaddr = page_address(skb_shinfo(skb)->frags[0].page); + + req = RING_GET_REQUEST(&np->rx, req_prod + i); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + pfn_to_mfn(pfn), + 0); + + req->id = id; + req->gref = ref; + } + + wmb(); /* barrier so backend seens requests */ + + /* Above is a suitable barrier to ensure backend will see requests. */ + np->rx.req_prod_pvt = req_prod + i; + push: + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); + if (notify) + notify_remote_via_irq(np->netdev->irq); +} + +static int xennet_open(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + + napi_enable(&np->napi); + + spin_lock_bh(&np->rx_lock); + if (netif_carrier_ok(dev)) { + xennet_alloc_rx_buffers(dev); + np->rx.sring->rsp_event = np->rx.rsp_cons + 1; + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) + netif_rx_schedule(dev, &np->napi); + } + spin_unlock_bh(&np->rx_lock); + + xennet_maybe_wake_tx(dev); + + return 0; +} + +static void xennet_tx_buf_gc(struct net_device *dev) +{ + RING_IDX cons, prod; + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + + BUG_ON(!netif_carrier_ok(dev)); + + do { + prod = np->tx.sring->rsp_prod; + rmb(); /* Ensure we see responses up to 'rp'. */ + + for (cons = np->tx.rsp_cons; cons != prod; cons++) { + struct xen_netif_tx_response *txrsp; + + txrsp = RING_GET_RESPONSE(&np->tx, cons); + if (txrsp->status == NETIF_RSP_NULL) + continue; + + id = txrsp->id; + skb = np->tx_skbs[id].skb; + if (unlikely(gnttab_query_foreign_access( + np->grant_tx_ref[id]) != 0)) { + printk(KERN_ALERT "xennet_tx_buf_gc: warning " + "-- grant still in use by backend " + "domain.\n"); + BUG(); + } + gnttab_end_foreign_access_ref( + np->grant_tx_ref[id], GNTMAP_readonly); + gnttab_release_grant_reference( + &np->gref_tx_head, np->grant_tx_ref[id]); + np->grant_tx_ref[id] = GRANT_INVALID_REF; + add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); + dev_kfree_skb_irq(skb); + } + + np->tx.rsp_cons = prod; + + /* + * Set a new event, then check for race with update of tx_cons. + * Note that it is essential to schedule a callback, no matter + * how few buffers are pending. Even if there is space in the + * transmit ring, higher layers may be blocked because too much + * data is outstanding: in such cases notification from Xen is + * likely to be the only kick that we'll get. 
+ */ + np->tx.sring->rsp_event = + prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; + mb(); /* update shared area */ + } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); + + xennet_maybe_wake_tx(dev); +} + +static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, + struct xen_netif_tx_request *tx) +{ + struct netfront_info *np = netdev_priv(dev); + char *data = skb->data; + unsigned long mfn; + RING_IDX prod = np->tx.req_prod_pvt; + int frags = skb_shinfo(skb)->nr_frags; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + unsigned int id; + grant_ref_t ref; + int i; + + /* While the header overlaps a page boundary (including being + larger than a page), split it it into page-sized chunks. */ + while (len > PAGE_SIZE - offset) { + tx->size = PAGE_SIZE - offset; + tx->flags |= NETTXF_more_data; + len -= tx->size; + data += tx->size; + offset = 0; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = virt_to_mfn(data); + gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = len; + tx->flags = 0; + } + + /* Grant backend access to each skb fragment page. */ + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + + tx->flags |= NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(frag->page)); + gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = frag->page_offset; + tx->size = frag->size; + tx->flags = 0; + } + + np->tx.req_prod_pvt = prod; +} + +static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct xen_netif_tx_request *tx; + struct xen_netif_extra_info *extra; + char *data = skb->data; + RING_IDX i; + grant_ref_t ref; + unsigned long mfn; + int notify; + int frags = skb_shinfo(skb)->nr_frags; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + + frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; + if (unlikely(frags > MAX_SKB_FRAGS + 1)) { + printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", + frags); + dump_stack(); + goto drop; + } + + spin_lock_irq(&np->tx_lock); + + if (unlikely(!netif_carrier_ok(dev) || + (frags > 1 && !xennet_can_sg(dev)) || + netif_needs_gso(dev, skb))) { + spin_unlock_irq(&np->tx_lock); + goto drop; + } + + i = np->tx.req_prod_pvt; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb; + + tx = RING_GET_REQUEST(&np->tx, i); + + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + mfn = virt_to_mfn(data); + gnttab_grant_foreign_access_ref( + ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = len; + extra = NULL; + + tx->flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + /* local packet? 
*/ + tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; + else if (skb->ip_summed == CHECKSUM_UNNECESSARY) + /* remote but checksummed. */ + tx->flags |= NETTXF_data_validated; + + if (skb_shinfo(skb)->gso_size) { + struct xen_netif_extra_info *gso; + + gso = (struct xen_netif_extra_info *) + RING_GET_REQUEST(&np->tx, ++i); + + if (extra) + extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; + else + tx->flags |= NETTXF_extra_info; + + gso->u.gso.size = skb_shinfo(skb)->gso_size; + gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; + gso->u.gso.pad = 0; + gso->u.gso.features = 0; + + gso->type = XEN_NETIF_EXTRA_TYPE_GSO; + gso->flags = 0; + extra = gso; + } + + np->tx.req_prod_pvt = i + 1; + + xennet_make_frags(skb, dev, tx); + tx->size = skb->len; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); + if (notify) + notify_remote_via_irq(np->netdev->irq); + + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + + /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ + xennet_tx_buf_gc(dev); + + if (!netfront_tx_slot_available(np)) + netif_stop_queue(dev); + + spin_unlock_irq(&np->tx_lock); + + return 0; + + drop: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; +} + +static int xennet_close(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + netif_stop_queue(np->netdev); + napi_disable(&np->napi); + return 0; +} + +static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, + grant_ref_t ref) +{ + int new = xennet_rxidx(np->rx.req_prod_pvt); + + BUG_ON(np->rx_skbs[new]); + np->rx_skbs[new] = skb; + np->grant_rx_ref[new] = ref; + RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; + RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; + np->rx.req_prod_pvt++; +} + +static int xennet_get_extras(struct netfront_info *np, + struct xen_netif_extra_info *extras, + RING_IDX rp) + +{ + struct xen_netif_extra_info *extra; + struct device *dev = &np->netdev->dev; + RING_IDX cons = np->rx.rsp_cons; + int err = 0; + + do { + struct sk_buff *skb; + grant_ref_t ref; + + if (unlikely(cons + 1 == rp)) { + if (net_ratelimit()) + dev_warn(dev, "Missing extra info\n"); + err = -EBADR; + break; + } + + extra = (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&np->rx, ++cons); + + if (unlikely(!extra->type || + extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + if (net_ratelimit()) + dev_warn(dev, "Invalid extra type: %d\n", + extra->type); + err = -EINVAL; + } else { + memcpy(&extras[extra->type - 1], extra, + sizeof(*extra)); + } + + skb = xennet_get_rx_skb(np, cons); + ref = xennet_get_rx_ref(np, cons); + xennet_move_rx_slot(np, skb, ref); + } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); + + np->rx.rsp_cons = cons; + return err; +} + +static int xennet_get_responses(struct netfront_info *np, + struct netfront_rx_info *rinfo, RING_IDX rp, + struct sk_buff_head *list) +{ + struct xen_netif_rx_response *rx = &rinfo->rx; + struct xen_netif_extra_info *extras = rinfo->extras; + struct device *dev = &np->netdev->dev; + RING_IDX cons = np->rx.rsp_cons; + struct sk_buff *skb = xennet_get_rx_skb(np, cons); + grant_ref_t ref = xennet_get_rx_ref(np, cons); + int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); + int frags = 1; + int err = 0; + unsigned long ret; + + if (rx->flags & NETRXF_extra_info) { + err = xennet_get_extras(np, extras, rp); + cons = np->rx.rsp_cons; + } + + for (;;) { + if (unlikely(rx->status < 0 || + rx->offset + rx->status > PAGE_SIZE)) { + if (net_ratelimit()) + dev_warn(dev, "rx->offset: %x, size: 
%u\n", + rx->offset, rx->status); + xennet_move_rx_slot(np, skb, ref); + err = -EINVAL; + goto next; + } + + /* + * This definitely indicates a bug, either in this driver or in + * the backend driver. In future this should flag the bad + * situation to the system controller to reboot the backed. + */ + if (ref == GRANT_INVALID_REF) { + if (net_ratelimit()) + dev_warn(dev, "Bad rx response id %d.\n", + rx->id); + err = -EINVAL; + goto next; + } + + ret = gnttab_end_foreign_access_ref(ref, 0); + BUG_ON(!ret); + + gnttab_release_grant_reference(&np->gref_rx_head, ref); + + __skb_queue_tail(list, skb); + +next: + if (!(rx->flags & NETRXF_more_data)) + break; + + if (cons + frags == rp) { + if (net_ratelimit()) + dev_warn(dev, "Need more frags\n"); + err = -ENOENT; + break; + } + + rx = RING_GET_RESPONSE(&np->rx, cons + frags); + skb = xennet_get_rx_skb(np, cons + frags); + ref = xennet_get_rx_ref(np, cons + frags); + frags++; + } + + if (unlikely(frags > max)) { + if (net_ratelimit()) + dev_warn(dev, "Too many frags\n"); + err = -E2BIG; + } + + if (unlikely(err)) + np->rx.rsp_cons = cons + frags; + + return err; +} + +static int xennet_set_skb_gso(struct sk_buff *skb, + struct xen_netif_extra_info *gso) +{ + if (!gso->u.gso.size) { + if (net_ratelimit()) + printk(KERN_WARNING "GSO size must not be zero.\n"); + return -EINVAL; + } + + /* Currently only TCPv4 S.O. is supported. */ + if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { + if (net_ratelimit()) + printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); + return -EINVAL; + } + + skb_shinfo(skb)->gso_size = gso->u.gso.size; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + /* Header must be checked, and gso_segs computed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + + return 0; +} + +static RING_IDX xennet_fill_frags(struct netfront_info *np, + struct sk_buff *skb, + struct sk_buff_head *list) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; + RING_IDX cons = np->rx.rsp_cons; + skb_frag_t *frag = shinfo->frags + nr_frags; + struct sk_buff *nskb; + + while ((nskb = __skb_dequeue(list))) { + struct xen_netif_rx_response *rx = + RING_GET_RESPONSE(&np->rx, ++cons); + + frag->page = skb_shinfo(nskb)->frags[0].page; + frag->page_offset = rx->offset; + frag->size = rx->status; + + skb->data_len += rx->status; + + skb_shinfo(nskb)->nr_frags = 0; + kfree_skb(nskb); + + frag++; + nr_frags++; + } + + shinfo->nr_frags = nr_frags; + return cons; +} + +static int skb_checksum_setup(struct sk_buff *skb) +{ + struct iphdr *iph; + unsigned char *th; + int err = -EPROTO; + + if (skb->protocol != htons(ETH_P_IP)) + goto out; + + iph = (void *)skb->data; + th = skb->data + 4 * iph->ihl; + if (th >= skb_tail_pointer(skb)) + goto out; + + skb->csum_start = th - skb->head; + switch (iph->protocol) { + case IPPROTO_TCP: + skb->csum_offset = offsetof(struct tcphdr, check); + break; + case IPPROTO_UDP: + skb->csum_offset = offsetof(struct udphdr, check); + break; + default: + if (net_ratelimit()) + printk(KERN_ERR "Attempting to checksum a non-" + "TCP/UDP packet, dropping a protocol" + " %d packet", iph->protocol); + goto out; + } + + if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) + goto out; + + err = 0; + +out: + return err; +} + +static int handle_incoming_queue(struct net_device *dev, + struct sk_buff_head *rxq) +{ + int packets_dropped = 0; + struct sk_buff *skb; + + while ((skb = __skb_dequeue(rxq)) != NULL) { + struct page *page = NETFRONT_SKB_CB(skb)->page; + 
void *vaddr = page_address(page); + unsigned offset = NETFRONT_SKB_CB(skb)->offset; + + memcpy(skb->data, vaddr + offset, + skb_headlen(skb)); + + if (page != skb_shinfo(skb)->frags[0].page) + __free_page(page); + + /* Ethernet work: Delayed to here as it peeks the header. */ + skb->protocol = eth_type_trans(skb, dev); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_setup(skb)) { + kfree_skb(skb); + packets_dropped++; + dev->stats.rx_errors++; + continue; + } + } + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + /* Pass it up. */ + netif_receive_skb(skb); + dev->last_rx = jiffies; + } + + return packets_dropped; +} + +static int xennet_poll(struct napi_struct *napi, int budget) +{ + struct netfront_info *np = container_of(napi, struct netfront_info, napi); + struct net_device *dev = np->netdev; + struct sk_buff *skb; + struct netfront_rx_info rinfo; + struct xen_netif_rx_response *rx = &rinfo.rx; + struct xen_netif_extra_info *extras = rinfo.extras; + RING_IDX i, rp; + int work_done; + struct sk_buff_head rxq; + struct sk_buff_head errq; + struct sk_buff_head tmpq; + unsigned long flags; + unsigned int len; + int err; + + spin_lock(&np->rx_lock); + + if (unlikely(!netif_carrier_ok(dev))) { + spin_unlock(&np->rx_lock); + return 0; + } + + skb_queue_head_init(&rxq); + skb_queue_head_init(&errq); + skb_queue_head_init(&tmpq); + + rp = np->rx.sring->rsp_prod; + rmb(); /* Ensure we see queued responses up to 'rp'. */ + + i = np->rx.rsp_cons; + work_done = 0; + while ((i != rp) && (work_done < budget)) { + memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); + memset(extras, 0, sizeof(rinfo.extras)); + + err = xennet_get_responses(np, &rinfo, rp, &tmpq); + + if (unlikely(err)) { +err: + while ((skb = __skb_dequeue(&tmpq))) + __skb_queue_tail(&errq, skb); + dev->stats.rx_errors++; + i = np->rx.rsp_cons; + continue; + } + + skb = __skb_dequeue(&tmpq); + + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { + struct xen_netif_extra_info *gso; + gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; + + if (unlikely(xennet_set_skb_gso(skb, gso))) { + __skb_queue_head(&tmpq, skb); + np->rx.rsp_cons += skb_queue_len(&tmpq); + goto err; + } + } + + NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; + NETFRONT_SKB_CB(skb)->offset = rx->offset; + + len = rx->status; + if (len > RX_COPY_THRESHOLD) + len = RX_COPY_THRESHOLD; + skb_put(skb, len); + + if (rx->status > len) { + skb_shinfo(skb)->frags[0].page_offset = + rx->offset + len; + skb_shinfo(skb)->frags[0].size = rx->status - len; + skb->data_len = rx->status - len; + } else { + skb_shinfo(skb)->frags[0].page = NULL; + skb_shinfo(skb)->nr_frags = 0; + } + + i = xennet_fill_frags(np, skb, &tmpq); + + /* + * Truesize approximates the size of true data plus + * any supervisor overheads. Adding hypervisor + * overheads has been shown to significantly reduce + * achievable bandwidth with the default receive + * buffer size. It is therefore not wise to account + * for it here. + * + * After alloc_skb(RX_COPY_THRESHOLD), truesize is set + * to RX_COPY_THRESHOLD + the supervisor + * overheads. Here, we add the size of the data pulled + * in xennet_fill_frags(). + * + * We also adjust for any unused space in the main + * data area by subtracting (RX_COPY_THRESHOLD - + * len). This is especially important with drivers + * which split incoming packets into header and data, + * using only 66 bytes of the main data area (see the + * e1000 driver for example.) 
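handle_incoming_queue() above calls skb_checksum_setup() for CHECKSUM_PARTIAL packets so that csum_start/csum_offset point at the transport checksum that still has to be filled in. A standalone sketch of the offsets it effectively computes for IPv4; the structure here is a simplified stand-in for the kernel headers, and the byte positions are the standard TCP/UDP checksum offsets:

#include <stddef.h>
#include <stdio.h>

struct iphdr_sketch {
	unsigned char ihl;	/* header length in 32-bit words */
	unsigned char protocol;	/* 6 = TCP, 17 = UDP */
};

int main(void)
{
	struct iphdr_sketch ip = { .ihl = 5, .protocol = 6 };

	size_t csum_start = 4 * ip.ihl;		/* transport header starts here */
	size_t csum_offset = (ip.protocol == 6)
		? 16	/* checksum field offset within a TCP header */
		: 6;	/* checksum field offset within a UDP header */

	printf("checksum field at byte %zu from the start of the IP header\n",
	       csum_start + csum_offset);
	return 0;
}

Packets whose headers cannot be parsed this way are dropped and counted as rx_errors, as the loop above shows.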
On such systems, + * without this last adjustement, our achievable + * receive throughout using the standard receive + * buffer size was cut by 25%(!!!). + */ + skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->len += skb->data_len; + + if (rx->flags & NETRXF_csum_blank) + skb->ip_summed = CHECKSUM_PARTIAL; + else if (rx->flags & NETRXF_data_validated) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + __skb_queue_tail(&rxq, skb); + + np->rx.rsp_cons = ++i; + work_done++; + } + + while ((skb = __skb_dequeue(&errq))) + kfree_skb(skb); + + work_done -= handle_incoming_queue(dev, &rxq); + + /* If we get a callback with very few responses, reduce fill target. */ + /* NB. Note exponential increase, linear decrease. */ + if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > + ((3*np->rx_target) / 4)) && + (--np->rx_target < np->rx_min_target)) + np->rx_target = np->rx_min_target; + + xennet_alloc_rx_buffers(dev); + + if (work_done < budget) { + int more_to_do = 0; + + local_irq_save(flags); + + RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); + if (!more_to_do) + __netif_rx_complete(dev, napi); + + local_irq_restore(flags); + } + + spin_unlock(&np->rx_lock); + + return work_done; +} + +static int xennet_change_mtu(struct net_device *dev, int mtu) +{ + int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; + + if (mtu > max) + return -EINVAL; + dev->mtu = mtu; + return 0; +} + +static void xennet_release_tx_bufs(struct netfront_info *np) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < NET_TX_RING_SIZE; i++) { + /* Skip over entries which are actually freelist references */ + if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET) + continue; + + skb = np->tx_skbs[i].skb; + gnttab_end_foreign_access_ref(np->grant_tx_ref[i], + GNTMAP_readonly); + gnttab_release_grant_reference(&np->gref_tx_head, + np->grant_tx_ref[i]); + np->grant_tx_ref[i] = GRANT_INVALID_REF; + add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); + dev_kfree_skb_irq(skb); + } +} + +static void xennet_release_rx_bufs(struct netfront_info *np) +{ + struct mmu_update *mmu = np->rx_mmu; + struct multicall_entry *mcl = np->rx_mcl; + struct sk_buff_head free_list; + struct sk_buff *skb; + unsigned long mfn; + int xfer = 0, noxfer = 0, unused = 0; + int id, ref; + + dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", + __func__); + return; + + skb_queue_head_init(&free_list); + + spin_lock_bh(&np->rx_lock); + + for (id = 0; id < NET_RX_RING_SIZE; id++) { + ref = np->grant_rx_ref[id]; + if (ref == GRANT_INVALID_REF) { + unused++; + continue; + } + + skb = np->rx_skbs[id]; + mfn = gnttab_end_foreign_transfer_ref(ref); + gnttab_release_grant_reference(&np->gref_rx_head, ref); + np->grant_rx_ref[id] = GRANT_INVALID_REF; + + if (0 == mfn) { + skb_shinfo(skb)->nr_frags = 0; + dev_kfree_skb(skb); + noxfer++; + continue; + } + + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* Remap the page. 
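The receive path sizes its ring opportunistically: xennet_alloc_rx_buffers() doubles np->rx_target when the outstanding requests drop below a quarter of it, while the poll loop above steps the target back down by one when the ring stays well stocked. A minimal standalone sketch of that exponential-increase/linear-decrease policy, with made-up bounds standing in for the driver's RX_DFL_MIN_TARGET and RX_MAX_TARGET:

#include <stdio.h>

#define RX_MIN_SKETCH 8
#define RX_MAX_SKETCH 256

static int rx_target = RX_MIN_SKETCH;

static void ring_running_low(void)
{
	rx_target *= 2;				/* exponential increase */
	if (rx_target > RX_MAX_SKETCH)
		rx_target = RX_MAX_SKETCH;
}

static void ring_well_stocked(void)
{
	if (--rx_target < RX_MIN_SKETCH)	/* linear decrease */
		rx_target = RX_MIN_SKETCH;
}

int main(void)
{
	ring_running_low();
	ring_running_low();
	ring_well_stocked();
	printf("rx_target = %d\n", rx_target);	/* 8 -> 16 -> 32 -> 31 */
	return 0;
}

The asymmetry lets the target grow quickly under bursts of traffic but only drift down slowly, so one quiet poll does not shrink the pool.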
*/ + struct page *page = skb_shinfo(skb)->frags[0].page; + unsigned long pfn = page_to_pfn(page); + void *vaddr = page_address(page); + + MULTI_update_va_mapping(mcl, (unsigned long)vaddr, + mfn_pte(mfn, PAGE_KERNEL), + 0); + mcl++; + mmu->ptr = ((u64)mfn << PAGE_SHIFT) + | MMU_MACHPHYS_UPDATE; + mmu->val = pfn; + mmu++; + + set_phys_to_machine(pfn, mfn); + } + __skb_queue_tail(&free_list, skb); + xfer++; + } + + dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", + __func__, xfer, noxfer, unused); + + if (xfer) { + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* Do all the remapping work and M2P updates. */ + MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, + 0, DOMID_SELF); + mcl++; + HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); + } + } + + while ((skb = __skb_dequeue(&free_list)) != NULL) + dev_kfree_skb(skb); + + spin_unlock_bh(&np->rx_lock); +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_release_tx_bufs(np); + xennet_release_rx_bufs(np); + gnttab_free_grant_references(np->gref_tx_head); + gnttab_free_grant_references(np->gref_rx_head); +} + +static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) +{ + int i, err; + struct net_device *netdev; + struct netfront_info *np; + + netdev = alloc_etherdev(sizeof(struct netfront_info)); + if (!netdev) { + printk(KERN_WARNING "%s> alloc_etherdev failed.\n", + __func__); + return ERR_PTR(-ENOMEM); + } + + np = netdev_priv(netdev); + np->xbdev = dev; + + spin_lock_init(&np->tx_lock); + spin_lock_init(&np->rx_lock); + + skb_queue_head_init(&np->rx_batch); + np->rx_target = RX_DFL_MIN_TARGET; + np->rx_min_target = RX_DFL_MIN_TARGET; + np->rx_max_target = RX_MAX_TARGET; + + init_timer(&np->rx_refill_timer); + np->rx_refill_timer.data = (unsigned long)netdev; + np->rx_refill_timer.function = rx_refill_timeout; + + /* Initialise tx_skbs as a free chain containing every entry. */ + np->tx_skb_freelist = 0; + for (i = 0; i < NET_TX_RING_SIZE; i++) { + np->tx_skbs[i].link = i+1; + np->grant_tx_ref[i] = GRANT_INVALID_REF; + } + + /* Clear out rx_skbs */ + for (i = 0; i < NET_RX_RING_SIZE; i++) { + np->rx_skbs[i] = NULL; + np->grant_rx_ref[i] = GRANT_INVALID_REF; + } + + /* A grant for every tx ring slot */ + if (gnttab_alloc_grant_references(TX_MAX_TARGET, + &np->gref_tx_head) < 0) { + printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); + err = -ENOMEM; + goto exit; + } + /* A grant for every rx ring slot */ + if (gnttab_alloc_grant_references(RX_MAX_TARGET, + &np->gref_rx_head) < 0) { + printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); + err = -ENOMEM; + goto exit_free_tx; + } + + netdev->open = xennet_open; + netdev->hard_start_xmit = xennet_start_xmit; + netdev->stop = xennet_close; + netif_napi_add(netdev, &np->napi, xennet_poll, 64); + netdev->uninit = xennet_uninit; + netdev->change_mtu = xennet_change_mtu; + netdev->features = NETIF_F_IP_CSUM; + + SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); + SET_NETDEV_DEV(netdev, &dev->dev); + + np->netdev = netdev; + + netif_carrier_off(netdev); + + return netdev; + + exit_free_tx: + gnttab_free_grant_references(np->gref_tx_head); + exit: + free_netdev(netdev); + return ERR_PTR(err); +} + +/** + * Entry point to this code when a new device is created. Allocate the basic + * structures and the ring buffers for communication with the backend, and + * inform the backend of the appropriate details for those. 
+ */ +static int __devinit netfront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + int err; + struct net_device *netdev; + struct netfront_info *info; + + netdev = xennet_create_dev(dev); + if (IS_ERR(netdev)) { + err = PTR_ERR(netdev); + xenbus_dev_fatal(dev, err, "creating netdev"); + return err; + } + + info = netdev_priv(netdev); + dev->dev.driver_data = info; + + err = register_netdev(info->netdev); + if (err) { + printk(KERN_WARNING "%s: register_netdev err=%d\n", + __func__, err); + goto fail; + } + + err = xennet_sysfs_addif(info->netdev); + if (err) { + unregister_netdev(info->netdev); + printk(KERN_WARNING "%s: add sysfs failed err=%d\n", + __func__, err); + goto fail; + } + + return 0; + + fail: + free_netdev(netdev); + dev->dev.driver_data = NULL; + return err; +} + +static void xennet_end_access(int ref, void *page) +{ + /* This frees the page as a side-effect */ + if (ref != GRANT_INVALID_REF) + gnttab_end_foreign_access(ref, 0, (unsigned long)page); +} + +static void xennet_disconnect_backend(struct netfront_info *info) +{ + /* Stop old i/f to prevent errors whilst we rebuild the state. */ + spin_lock_bh(&info->rx_lock); + spin_lock_irq(&info->tx_lock); + netif_carrier_off(info->netdev); + spin_unlock_irq(&info->tx_lock); + spin_unlock_bh(&info->rx_lock); + + if (info->netdev->irq) + unbind_from_irqhandler(info->netdev->irq, info->netdev); + info->evtchn = info->netdev->irq = 0; + + /* End access and free the pages */ + xennet_end_access(info->tx_ring_ref, info->tx.sring); + xennet_end_access(info->rx_ring_ref, info->rx.sring); + + info->tx_ring_ref = GRANT_INVALID_REF; + info->rx_ring_ref = GRANT_INVALID_REF; + info->tx.sring = NULL; + info->rx.sring = NULL; +} + +/** + * We are reconnecting to the backend, due to a suspend/resume, or a backend + * driver restart. We tear down our netif structure and recreate it, but + * leave the device-layer structures intact so that this is transparent to the + * rest of the kernel. + */ +static int netfront_resume(struct xenbus_device *dev) +{ + struct netfront_info *info = dev->dev.driver_data; + + dev_dbg(&dev->dev, "%s\n", dev->nodename); + + xennet_disconnect_backend(info); + return 0; +} + +static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) +{ + char *s, *e, *macstr; + int i; + + macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); + if (IS_ERR(macstr)) + return PTR_ERR(macstr); + + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = simple_strtoul(s, &e, 16); + if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { + kfree(macstr); + return -ENOENT; + } + s = e+1; + } + + kfree(macstr); + return 0; +} + +static irqreturn_t xennet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct netfront_info *np = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&np->tx_lock, flags); + + if (likely(netif_carrier_ok(dev))) { + xennet_tx_buf_gc(dev); + /* Under tx_lock: protects access to rx shared-ring indexes. 
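xen_net_read_mac() above reads the MAC address from xenstore as text and converts it a byte at a time with simple_strtoul(), insisting on ':' separators and a trailing NUL; any malformed byte fails the whole read rather than producing a partial address. The same parsing as a standalone userspace sketch, with strtoul standing in for the kernel helper and a made-up sample address:

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static int parse_mac(const char *s, unsigned char mac[ETH_ALEN])
{
	char *e;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = (unsigned char)strtoul(s, &e, 16);
		if (s == e || *e != ((i == ETH_ALEN - 1) ? '\0' : ':'))
			return -1;	/* malformed byte or separator */
		s = e + 1;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	if (parse_mac("00:16:3e:5a:12:34", mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}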
*/ + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) + netif_rx_schedule(dev, &np->napi); + } + + spin_unlock_irqrestore(&np->tx_lock, flags); + + return IRQ_HANDLED; +} + +static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) +{ + struct xen_netif_tx_sring *txs; + struct xen_netif_rx_sring *rxs; + int err; + struct net_device *netdev = info->netdev; + + info->tx_ring_ref = GRANT_INVALID_REF; + info->rx_ring_ref = GRANT_INVALID_REF; + info->rx.sring = NULL; + info->tx.sring = NULL; + netdev->irq = 0; + + err = xen_net_read_mac(dev, netdev->dev_addr); + if (err) { + xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); + goto fail; + } + + txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL); + if (!txs) { + err = -ENOMEM; + xenbus_dev_fatal(dev, err, "allocating tx ring page"); + goto fail; + } + SHARED_RING_INIT(txs); + FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); + + err = xenbus_grant_ring(dev, virt_to_mfn(txs)); + if (err < 0) { + free_page((unsigned long)txs); + goto fail; + } + + info->tx_ring_ref = err; + rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL); + if (!rxs) { + err = -ENOMEM; + xenbus_dev_fatal(dev, err, "allocating rx ring page"); + goto fail; + } + SHARED_RING_INIT(rxs); + FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); + + err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); + if (err < 0) { + free_page((unsigned long)rxs); + goto fail; + } + info->rx_ring_ref = err; + + err = xenbus_alloc_evtchn(dev, &info->evtchn); + if (err) + goto fail; + + err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, + IRQF_SAMPLE_RANDOM, netdev->name, + netdev); + if (err < 0) + goto fail; + netdev->irq = err; + return 0; + + fail: + return err; +} + +/* Common code used when first setting up, and when resuming. */ +static int talk_to_backend(struct xenbus_device *dev, + struct netfront_info *info) +{ + const char *message; + struct xenbus_transaction xbt; + int err; + + /* Create shared ring, alloc event channel. 
*/ + err = setup_netfront(dev, info); + if (err) + goto out; + +again: + err = xenbus_transaction_start(&xbt); + if (err) { + xenbus_dev_fatal(dev, err, "starting transaction"); + goto destroy_ring; + } + + err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", + info->tx_ring_ref); + if (err) { + message = "writing tx ring-ref"; + goto abort_transaction; + } + err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u", + info->rx_ring_ref); + if (err) { + message = "writing rx ring-ref"; + goto abort_transaction; + } + err = xenbus_printf(xbt, dev->nodename, + "event-channel", "%u", info->evtchn); + if (err) { + message = "writing event-channel"; + goto abort_transaction; + } + + err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", + 1); + if (err) { + message = "writing request-rx-copy"; + goto abort_transaction; + } + + err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); + if (err) { + message = "writing feature-rx-notify"; + goto abort_transaction; + } + + err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); + if (err) { + message = "writing feature-sg"; + goto abort_transaction; + } + + err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); + if (err) { + message = "writing feature-gso-tcpv4"; + goto abort_transaction; + } + + err = xenbus_transaction_end(xbt, 0); + if (err) { + if (err == -EAGAIN) + goto again; + xenbus_dev_fatal(dev, err, "completing transaction"); + goto destroy_ring; + } + + return 0; + + abort_transaction: + xenbus_transaction_end(xbt, 1); + xenbus_dev_fatal(dev, err, "%s", message); + destroy_ring: + xennet_disconnect_backend(info); + out: + return err; +} + +static int xennet_set_sg(struct net_device *dev, u32 data) +{ + if (data) { + struct netfront_info *np = netdev_priv(dev); + int val; + + if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", + "%d", &val) < 0) + val = 0; + if (!val) + return -ENOSYS; + } else if (dev->mtu > ETH_DATA_LEN) + dev->mtu = ETH_DATA_LEN; + + return ethtool_op_set_sg(dev, data); +} + +static int xennet_set_tso(struct net_device *dev, u32 data) +{ + if (data) { + struct netfront_info *np = netdev_priv(dev); + int val; + + if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, + "feature-gso-tcpv4", "%d", &val) < 0) + val = 0; + if (!val) + return -ENOSYS; + } + + return ethtool_op_set_tso(dev, data); +} + +static void xennet_set_features(struct net_device *dev) +{ + /* Turn off all GSO bits except ROBUST. */ + dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; + dev->features |= NETIF_F_GSO_ROBUST; + xennet_set_sg(dev, 0); + + /* We need checksum offload to enable scatter/gather and TSO. */ + if (!(dev->features & NETIF_F_IP_CSUM)) + return; + + if (!xennet_set_sg(dev, 1)) + xennet_set_tso(dev, 1); +} + +static int xennet_connect(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + int i, requeue_idx, err; + struct sk_buff *skb; + grant_ref_t ref; + struct xen_netif_rx_request *req; + unsigned int feature_rx_copy; + + err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, + "feature-rx-copy", "%u", &feature_rx_copy); + if (err != 1) + feature_rx_copy = 0; + + if (!feature_rx_copy) { + dev_info(&dev->dev, + "backend does not support copying recieve path"); + return -ENODEV; + } + + err = talk_to_backend(np->xbdev, np); + if (err) + return err; + + xennet_set_features(dev); + + spin_lock_bh(&np->rx_lock); + spin_lock_irq(&np->tx_lock); + + /* Step 1: Discard all pending TX packet fragments. 
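talk_to_backend() above writes the ring references, the event channel and the feature flags inside a single xenbus transaction, and simply restarts the whole transaction when the commit returns -EAGAIN because another writer got in first; only a real error aborts it. A minimal sketch of that retry shape, with a stub standing in for the xenbus calls:

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stub for the transaction commit; pretend the first attempt races. */
static int transaction_commit(void)
{
	return ++attempts < 2 ? -EAGAIN : 0;
}

int main(void)
{
	int err;

	do {
		/* ...start the transaction and write every key here... */
		err = transaction_commit();
	} while (err == -EAGAIN);

	printf("committed after %d attempt(s), err = %d\n", attempts, err);
	return 0;
}

Retrying the whole transaction is safe because every key is rewritten from scratch on each pass.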
*/ + xennet_release_tx_bufs(np); + + /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ + for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { + if (!np->rx_skbs[i]) + continue; + + skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); + ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); + req = RING_GET_REQUEST(&np->rx, requeue_idx); + + gnttab_grant_foreign_access_ref( + ref, np->xbdev->otherend_id, + pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> + frags->page)), + 0); + req->gref = ref; + req->id = requeue_idx; + + requeue_idx++; + } + + np->rx.req_prod_pvt = requeue_idx; + + /* + * Step 3: All public and private state should now be sane. Get + * ready to start sending and receiving packets and give the driver + * domain a kick because we've probably just requeued some + * packets. + */ + netif_carrier_on(np->netdev); + notify_remote_via_irq(np->netdev->irq); + xennet_tx_buf_gc(dev); + xennet_alloc_rx_buffers(dev); + + spin_unlock_irq(&np->tx_lock); + spin_unlock_bh(&np->rx_lock); + + return 0; +} + +/** + * Callback received when the backend's state changes. + */ +static void backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) +{ + struct netfront_info *np = dev->dev.driver_data; + struct net_device *netdev = np->netdev; + + dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); + + switch (backend_state) { + case XenbusStateInitialising: + case XenbusStateInitialised: + case XenbusStateConnected: + case XenbusStateUnknown: + case XenbusStateClosed: + break; + + case XenbusStateInitWait: + if (dev->state != XenbusStateInitialising) + break; + if (xennet_connect(netdev) != 0) + break; + xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateClosing: + xenbus_frontend_closed(dev); + break; + } +} + +static struct ethtool_ops xennet_ethtool_ops = +{ + .set_tx_csum = ethtool_op_set_tx_csum, + .set_sg = xennet_set_sg, + .set_tso = xennet_set_tso, + .get_link = ethtool_op_get_link, +}; + +#ifdef CONFIG_SYSFS +static ssize_t show_rxbuf_min(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_min_target); +} + +static ssize_t store_rxbuf_min(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *np = netdev_priv(netdev); + char *endp; + unsigned long target; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + target = simple_strtoul(buf, &endp, 0); + if (endp == buf) + return -EBADMSG; + + if (target < RX_MIN_TARGET) + target = RX_MIN_TARGET; + if (target > RX_MAX_TARGET) + target = RX_MAX_TARGET; + + spin_lock_bh(&np->rx_lock); + if (target > np->rx_max_target) + np->rx_max_target = target; + np->rx_min_target = target; + if (target > np->rx_target) + np->rx_target = target; + + xennet_alloc_rx_buffers(netdev); + + spin_unlock_bh(&np->rx_lock); + return len; +} + +static ssize_t show_rxbuf_max(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_max_target); +} + +static ssize_t store_rxbuf_max(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *np = netdev_priv(netdev); + char *endp; + unsigned 
long target; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + target = simple_strtoul(buf, &endp, 0); + if (endp == buf) + return -EBADMSG; + + if (target < RX_MIN_TARGET) + target = RX_MIN_TARGET; + if (target > RX_MAX_TARGET) + target = RX_MAX_TARGET; + + spin_lock_bh(&np->rx_lock); + if (target < np->rx_min_target) + np->rx_min_target = target; + np->rx_max_target = target; + if (target < np->rx_target) + np->rx_target = target; + + xennet_alloc_rx_buffers(netdev); + + spin_unlock_bh(&np->rx_lock); + return len; +} + +static ssize_t show_rxbuf_cur(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_target); +} + +static struct device_attribute xennet_attrs[] = { + __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), + __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), + __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), +}; + +static int xennet_sysfs_addif(struct net_device *netdev) +{ + int i; + int err; + + for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { + err = device_create_file(&netdev->dev, + &xennet_attrs[i]); + if (err) + goto fail; + } + return 0; + + fail: + while (--i >= 0) + device_remove_file(&netdev->dev, &xennet_attrs[i]); + return err; +} + +static void xennet_sysfs_delif(struct net_device *netdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) + device_remove_file(&netdev->dev, &xennet_attrs[i]); +} + +#endif /* CONFIG_SYSFS */ + +static struct xenbus_device_id netfront_ids[] = { + { "vif" }, + { "" } +}; + + +static int __devexit xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev->dev.driver_data; + + dev_dbg(&dev->dev, "%s\n", dev->nodename); + + unregister_netdev(info->netdev); + + xennet_disconnect_backend(info); + + del_timer_sync(&info->rx_refill_timer); + + xennet_sysfs_delif(info->netdev); + + free_netdev(info->netdev); + + return 0; +} + +static struct xenbus_driver netfront = { + .name = "vif", + .owner = THIS_MODULE, + .ids = netfront_ids, + .probe = netfront_probe, + .remove = __devexit_p(xennet_remove), + .resume = netfront_resume, + .otherend_changed = backend_changed, +}; + +static int __init netif_init(void) +{ + if (!is_running_on_xen()) + return -ENODEV; + + if (is_initial_xendomain()) + return 0; + + printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); + + return xenbus_register_frontend(&netfront); +} +module_init(netif_init); + + +static void __exit netif_exit(void) +{ + if (is_initial_xendomain()) + return; + + return xenbus_unregister_driver(&netfront); +} +module_exit(netif_exit); + +MODULE_DESCRIPTION("Xen virtual network device frontend"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 870c5393c21a..87f002ade531 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c @@ -318,7 +318,6 @@ struct yellowfin_private { dma_addr_t tx_status_dma; struct timer_list timer; /* Media selection timer. */ - struct net_device_stats stats; /* Frequently used and paired value: keep adjacent for cache effect. 
*/ int chip_id, drv_flags; struct pci_dev *pci_dev; @@ -353,7 +352,6 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); static int yellowfin_rx(struct net_device *dev); static void yellowfin_error(struct net_device *dev, int intr_status); static int yellowfin_close(struct net_device *dev); -static struct net_device_stats *yellowfin_get_stats(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static const struct ethtool_ops ethtool_ops; @@ -376,6 +374,7 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, #else int bar = 1; #endif + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -392,7 +391,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, printk (KERN_ERR PFX "cannot allocate ethernet device\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); np = netdev_priv(dev); @@ -470,7 +468,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, dev->open = &yellowfin_open; dev->hard_start_xmit = &yellowfin_start_xmit; dev->stop = &yellowfin_close; - dev->get_stats = &yellowfin_get_stats; dev->set_multicast_list = &set_rx_mode; dev->do_ioctl = &netdev_ioctl; SET_ETHTOOL_OPS(dev, ðtool_ops); @@ -484,12 +481,10 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, if (i) goto err_out_unmap_status; - printk(KERN_INFO "%s: %s type %8x at %p, ", + printk(KERN_INFO "%s: %s type %8x at %p, %s, IRQ %d.\n", dev->name, pci_id_tbl[chip_idx].name, - ioread32(ioaddr + ChipRev), ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + ioread32(ioaddr + ChipRev), ioaddr, + print_mac(mac, dev->dev_addr), irq); if (np->drv_flags & HasMII) { int phy, phy_idx = 0; @@ -718,7 +713,7 @@ static void yellowfin_tx_timeout(struct net_device *dev) netif_wake_queue (dev); /* Typical path */ dev->trans_start = jiffies; - yp->stats.tx_errors++; + dev->stats.tx_errors++; } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ @@ -924,8 +919,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) if (yp->tx_ring[entry].result_status == 0) break; skb = yp->tx_skbuff[entry]; - yp->stats.tx_packets++; - yp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; /* Free the original skb. */ pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr, skb->len, PCI_DMA_TODEVICE); @@ -969,20 +964,20 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n", dev->name, tx_errs); #endif - yp->stats.tx_errors++; - if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++; - if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++; - if (tx_errs & 0x2000) yp->stats.tx_window_errors++; - if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; + if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++; + if (tx_errs & 0x2000) dev->stats.tx_window_errors++; + if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++; } else { #ifndef final_version if (yellowfin_debug > 4) printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n", dev->name, tx_errs); #endif - yp->stats.tx_bytes += skb->len; - yp->stats.collisions += tx_errs & 15; - yp->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev->stats.collisions += tx_errs & 15; + dev->stats.tx_packets++; } /* Free the original skb. 
*/ pci_unmap_single(yp->pci_dev, @@ -1077,26 +1072,26 @@ static int yellowfin_rx(struct net_device *dev) if (data_size != 0) printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers," " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size); - yp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { /* There was a error. */ if (yellowfin_debug > 3) printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n", frame_status); - yp->stats.rx_errors++; - if (frame_status & 0x0060) yp->stats.rx_length_errors++; - if (frame_status & 0x0008) yp->stats.rx_frame_errors++; - if (frame_status & 0x0010) yp->stats.rx_crc_errors++; - if (frame_status < 0) yp->stats.rx_dropped++; + dev->stats.rx_errors++; + if (frame_status & 0x0060) dev->stats.rx_length_errors++; + if (frame_status & 0x0008) dev->stats.rx_frame_errors++; + if (frame_status & 0x0010) dev->stats.rx_crc_errors++; + if (frame_status < 0) dev->stats.rx_dropped++; } else if ( !(yp->drv_flags & IsGigabit) && ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { u8 status1 = buf_addr[data_size-2]; u8 status2 = buf_addr[data_size-1]; - yp->stats.rx_errors++; - if (status1 & 0xC0) yp->stats.rx_length_errors++; - if (status2 & 0x03) yp->stats.rx_frame_errors++; - if (status2 & 0x04) yp->stats.rx_crc_errors++; - if (status2 & 0x80) yp->stats.rx_dropped++; + dev->stats.rx_errors++; + if (status1 & 0xC0) dev->stats.rx_length_errors++; + if (status2 & 0x03) dev->stats.rx_frame_errors++; + if (status2 & 0x04) dev->stats.rx_crc_errors++; + if (status2 & 0x80) dev->stats.rx_dropped++; #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ } else if ((yp->flags & HasMACAddrBug) && memcmp(le32_to_cpu(yp->rx_ring_dma + @@ -1105,11 +1100,11 @@ static int yellowfin_rx(struct net_device *dev) memcmp(le32_to_cpu(yp->rx_ring_dma + entry*sizeof(struct yellowfin_desc)), "\377\377\377\377\377\377", 6) != 0) { - if (bogus_rx++ == 0) - printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x:%2.2x.\n", - dev->name, buf_addr[0], buf_addr[1], buf_addr[2], - buf_addr[3], buf_addr[4], buf_addr[5]); + if (bogus_rx++ == 0) { + DECLARE_MAC_BUF(mac); + printk(KERN_WARNING "%s: Bad frame to %s\n", + dev->name, print_mac(mac, buf_addr)); + } #endif } else { struct sk_buff *skb; @@ -1146,8 +1141,8 @@ static int yellowfin_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - yp->stats.rx_packets++; - yp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } entry = (++yp->cur_rx) % RX_RING_SIZE; } @@ -1181,15 +1176,13 @@ static int yellowfin_rx(struct net_device *dev) static void yellowfin_error(struct net_device *dev, int intr_status) { - struct yellowfin_private *yp = netdev_priv(dev); - printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", dev->name, intr_status); /* Hmmmmm, it's not clear what to do here. 
*/ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) - yp->stats.tx_errors++; + dev->stats.tx_errors++; if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) - yp->stats.rx_errors++; + dev->stats.rx_errors++; } static int yellowfin_close(struct net_device *dev) @@ -1281,12 +1274,6 @@ static int yellowfin_close(struct net_device *dev) return 0; } -static struct net_device_stats *yellowfin_get_stats(struct net_device *dev) -{ - struct yellowfin_private *yp = netdev_priv(dev); - return &yp->stats; -} - /* Set or clear the multicast filter for this adaptor. */ static void set_rx_mode(struct net_device *dev) diff --git a/drivers/net/znet.c b/drivers/net/znet.c index 4032e9f6f9b0..a86c022d6a94 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c @@ -128,7 +128,6 @@ MODULE_LICENSE("GPL"); struct znet_private { int rx_dma, tx_dma; - struct net_device_stats stats; spinlock_t lock; short sia_base, sia_size, io_size; struct i82593_conf_block i593_init; @@ -161,7 +160,6 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t znet_interrupt(int irq, void *dev_id); static void znet_rx(struct net_device *dev); static int znet_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); static void hardware_init(struct net_device *dev); static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); static void znet_tx_timeout (struct net_device *dev); @@ -372,6 +370,7 @@ static int __init znet_probe (void) struct net_device *dev; char *p; int err = -ENOMEM; + DECLARE_MAC_BUF(mac); /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */ for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++) @@ -388,22 +387,20 @@ static int __init znet_probe (void) if (!dev) return -ENOMEM; - SET_MODULE_OWNER (dev); - znet = dev->priv; netinfo = (struct netidblk *)p; dev->base_addr = netinfo->iobase1; dev->irq = netinfo->irq1; - printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr); - /* The station address is in the "netidblk" at 0x0f0000. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]); + dev->dev_addr[i] = netinfo->netid[i]; - printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1, - netinfo->dma2); + printk(KERN_INFO "%s: ZNET at %#3lx, %s" + ", using IRQ %d DMA %d and %d.\n", + dev->name, dev->base_addr, print_mac(mac, dev->dev_addr), + dev->irq, netinfo->dma1, netinfo->dma2); if (znet_debug > 1) { printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n", @@ -447,7 +444,6 @@ static int __init znet_probe (void) dev->open = &znet_open; dev->hard_start_xmit = &znet_send_packet; dev->stop = &znet_close; - dev->get_stats = net_get_stats; dev->set_multicast_list = &znet_set_multicast_list; dev->tx_timeout = znet_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -566,7 +562,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev) ushort *tx_link = znet->tx_cur - 1; ushort rnd_len = (length + 1)>>1; - znet->stats.tx_bytes+=length; + dev->stats.tx_bytes+=length; if (znet->tx_cur >= znet->tx_end) znet->tx_cur = znet->tx_start; @@ -641,20 +637,20 @@ static irqreturn_t znet_interrupt(int irq, void *dev_id) tx_status = inw(ioaddr); /* It's undocumented, but tx_status seems to match the i82586. 
*/ if (tx_status & TX_OK) { - znet->stats.tx_packets++; - znet->stats.collisions += tx_status & TX_NCOL_MASK; + dev->stats.tx_packets++; + dev->stats.collisions += tx_status & TX_NCOL_MASK; } else { if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) - znet->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & TX_UND_RUN) - znet->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (!(tx_status & TX_HRT_BEAT)) - znet->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (tx_status & TX_MAX_COL) - znet->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; /* ...and the catch-all. */ if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) - znet->stats.tx_errors++; + dev->stats.tx_errors++; /* Transceiver may be stuck if cable * was removed while emiting a @@ -750,19 +746,19 @@ static void znet_rx(struct net_device *dev) this_rfp_ptr[-3]<<1); /* Once again we must assume that the i82586 docs apply. */ if ( ! (status & RX_RCV_OK)) { /* There was an error. */ - znet->stats.rx_errors++; - if (status & RX_CRC_ERR) znet->stats.rx_crc_errors++; - if (status & RX_ALG_ERR) znet->stats.rx_frame_errors++; + dev->stats.rx_errors++; + if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++; + if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++; #if 0 - if (status & 0x0200) znet->stats.rx_over_errors++; /* Wrong. */ - if (status & 0x0100) znet->stats.rx_fifo_errors++; + if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */ + if (status & 0x0100) dev->stats.rx_fifo_errors++; #else /* maz : Wild guess... */ - if (status & RX_OVRRUN) znet->stats.rx_over_errors++; + if (status & RX_OVRRUN) dev->stats.rx_over_errors++; #endif - if (status & RX_SRT_FRM) znet->stats.rx_length_errors++; + if (status & RX_SRT_FRM) dev->stats.rx_length_errors++; } else if (pkt_len > 1536) { - znet->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else { /* Malloc up new buffer. */ struct sk_buff *skb; @@ -771,7 +767,7 @@ static void znet_rx(struct net_device *dev) if (skb == NULL) { if (znet_debug) printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - znet->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -791,8 +787,8 @@ static void znet_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - znet->stats.rx_packets++; - znet->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } znet->rx_cur = this_rfp_ptr; if (znet->rx_cur >= znet->rx_end) @@ -829,15 +825,6 @@ static int znet_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. 
*/ -static struct net_device_stats *net_get_stats(struct net_device *dev) -{ - struct znet_private *znet = dev->priv; - - return &znet->stats; -} - static void show_dma(struct net_device *dev) { short ioaddr = dev->base_addr; diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index d85e2ea0b6af..3926b2aa9cca 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -125,7 +125,6 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { free_netdev(dev); return -EBUSY; @@ -152,6 +151,7 @@ static int __devinit zorro8390_init(struct net_device *dev, 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; + DECLARE_MAC_BUF(mac); /* Reset card. Who knows what dain-bramaged state it was left in. */ { @@ -191,7 +191,7 @@ static int __devinit zorro8390_init(struct net_device *dev, {0x00, NE_EN0_RSARHI}, {E8390_RREAD+E8390_START, NE_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) { + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { z_writeb(program_seq[i].value, ioaddr + program_seq[i].offset); } } @@ -212,12 +212,12 @@ static int __devinit zorro8390_init(struct net_device *dev, i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for(i = 0; i < ETHER_ADDR_LEN; i++) { + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = SA_prom[i]; + #ifdef DEBUG - printk(" %2.2x", SA_prom[i]); + printk("%s", print_mac(mac, dev->dev_addr)); #endif - dev->dev_addr[i] = SA_prom[i]; - } ei_status.name = name; ei_status.tx_start_page = start_page; @@ -244,10 +244,8 @@ static int __devinit zorro8390_init(struct net_device *dev, return err; } - printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, name, board, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address %s\n", + dev->name, name, board, print_mac(mac, dev->dev_addr)); return 0; } |
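Several drivers touched in this section (yellowfin, znet, zorro8390) drop their hand-rolled %2.2x loops in favour of DECLARE_MAC_BUF() and print_mac(), which render the six address bytes in the canonical colon-separated form using a buffer on the caller's stack. A standalone sketch of equivalent formatting, using an illustrative helper name rather than the kernel API:

#include <stdio.h>

#define ETH_ALEN 6

/* Illustrative stand-in for print_mac(): format six bytes as "xx:xx:...". */
static const char *format_mac(char buf[18], const unsigned char addr[ETH_ALEN])
{
	snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
		 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return buf;
}

int main(void)
{
	unsigned char addr[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x5a, 0x12, 0x34 };
	char buf[18];

	printf("Ethernet Address %s\n", format_mac(buf, addr));
	return 0;
}

Centralising the formatting removes a recurring source of slightly different MAC renderings in the probe messages.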